├── .gitattributes ├── .gitignore ├── .paket ├── paket.bootstrapper.exe ├── paket.exe └── paket.targets ├── AleaTK.sln ├── LICENSE ├── README.md ├── SolutionInfo.cs ├── SolutionInfo.fs ├── build.bat ├── build.fsx ├── gallery.html ├── index.html ├── paket.dependencies ├── paket.lock ├── src ├── AleaTK │ ├── AleaTK.csproj │ ├── Buffer.cs │ ├── Common.cs │ ├── Context.cs │ ├── Expr.cs │ ├── ExprImpl │ │ ├── DotExpr.cs │ │ ├── MapExpr.cs │ │ ├── RandomExpr.cs │ │ ├── ReduceExpr.cs │ │ ├── ScalarExpr.cs │ │ ├── ShapeExpr.cs │ │ └── TakeExpr.cs │ ├── Layout.cs │ ├── Library.cs │ ├── ML │ │ ├── Batcher.cs │ │ ├── Differentiable.cs │ │ ├── Executor.cs │ │ ├── Library.cs │ │ ├── Operator │ │ │ ├── Activation.cs │ │ │ ├── Add.cs │ │ │ ├── Convolution.cs │ │ │ ├── Dot.cs │ │ │ ├── Dropout.cs │ │ │ ├── Embedding.cs │ │ │ ├── FullyConnected.cs │ │ │ ├── IteratedRnnCell.cs │ │ │ ├── L2Loss.cs │ │ │ ├── Pooling.cs │ │ │ ├── RNN.cs │ │ │ ├── Reshape.cs │ │ │ ├── RnnCell.cs │ │ │ ├── RnnDynamic.cs │ │ │ ├── SeqToSeq.cs │ │ │ └── Softmax.cs │ │ ├── Optimizer.cs │ │ ├── Symbol.cs │ │ └── Variable.cs │ ├── Properties │ │ └── AssemblyInfo.cs │ ├── Tensor.cs │ └── paket.references └── AleaTKUtil │ ├── AleaTKUtil.csproj │ ├── CSMatIOExtensions.cs │ ├── Common.cs │ ├── GradientChecker.cs │ ├── Properties │ └── AssemblyInfo.cs │ └── paket.references ├── tests └── AleaTKTest │ ├── AleaTKTest.csproj │ ├── App.config │ ├── BinPackingLab.cs │ ├── Common.cs │ ├── Experiment.cs │ ├── GradientChecker.cs │ ├── MLFramework.cs │ ├── MLOperators.cs │ ├── MachineLearning.cs │ ├── Program.cs │ ├── Properties │ └── AssemblyInfo.cs │ ├── Rnn.cs │ ├── TensorComputing.cs │ └── paket.references ├── tutorial ├── design │ ├── content │ │ ├── gallery.css │ │ ├── src_highlight_tango.css │ │ ├── style.css │ │ └── style_old.css │ ├── fonts │ │ ├── icomoon.eot │ │ ├── icomoon.svg │ │ ├── icomoon.ttf │ │ └── icomoon.woff │ ├── images │ │ ├── Alea-TK-images.eps │ │ ├── Alea-TK-images.png │ │ ├── Alea-TK-images.svg │ │ ├── Alea-TK.eps │ │ ├── Alea-TK.png │ │ ├── Alea-TK.svg │ │ ├── QuantAlea_cube_blau-grau_square.svg │ │ ├── QuantAlea_cube_blau-grau_square_100.png │ │ ├── QuantAlea_cube_blau-grau_square_200.png │ │ ├── QuantAlea_logo_blau-grau.svg │ │ ├── aleagpu_logo_v3.eps │ │ ├── aleagpu_logo_v3.png │ │ ├── aleagpu_logo_v3.svg │ │ ├── class_hierarchy.png │ │ ├── cuBLAS.png │ │ ├── cuBLAS.svg │ │ ├── cuDNN.png │ │ ├── cuDNN.svg │ │ ├── cuFFT.png │ │ ├── cuFFT.svg │ │ ├── cuRAND.png │ │ ├── cuRAND.svg │ │ ├── expr_interface.png │ │ ├── favicon.ico │ │ ├── gpu_device.svg │ │ ├── logo.png │ │ ├── logo2x.png │ │ ├── mnist.png │ │ ├── mnist_large.jpg │ │ ├── mnist_rect.png │ │ ├── montecarlo_pi.gif │ │ ├── mxnet.png │ │ ├── resources │ │ │ ├── colors.xml │ │ │ ├── cuBLAS.ai │ │ │ ├── cuDNN.ai │ │ │ ├── cuFFT.ai │ │ │ ├── cuRAND.ai │ │ │ └── gpu_device.xml │ │ ├── rnn.png │ │ ├── rnn.svg │ │ └── rnn.xml │ ├── scripts │ │ ├── bootbox.min.js │ │ ├── jquery.powertip.css │ │ ├── jquery.powertip.js │ │ ├── jquery.quicksand.js │ │ ├── jquery.vibrate.js │ │ ├── jquery.waitforimages.min.js │ │ ├── scale.fix.js │ │ ├── scripts.js │ │ ├── tips.js │ │ ├── version_list.js │ │ └── version_list2.js │ └── templates │ │ ├── doc_page.html │ │ ├── extended_sample_doc_page.html │ │ ├── ieee-with-url.csl │ │ ├── ieee.csl │ │ └── references.bib ├── doc │ ├── design_details.md │ ├── get_started.md │ ├── how_to.md │ ├── index.md │ ├── ml_tools.md │ ├── resources.md │ └── tutorials.md ├── generate │ ├── AssemblyInfo.fs │ ├── Build.fsx │ ├── ExtendedSampleDoc.tpl │ ├── 
Generate.fsproj │ ├── HtmlBuilders.fsx │ ├── Pandoc.fs │ ├── SampleGallery.tpl │ ├── SampleProjects.fs │ ├── Types.fs │ ├── Util.fs │ ├── paket.references │ └── readme.md ├── readme.md └── samples │ ├── MNIST │ ├── Extended.md │ ├── MNIST.cs │ ├── MNIST.csproj │ ├── MnistTest.cs │ ├── Properties │ │ └── AssemblyInfo.cs │ ├── Readme.md │ └── paket.references │ ├── MonteCarloPi │ ├── Extended.md │ ├── MonteCarloPi.cs │ ├── MonteCarloPi.csproj │ ├── Properties │ │ └── AssemblyInfo.cs │ ├── Readme.md │ └── paket.references │ ├── PTB │ ├── App.config │ ├── Extended.md │ ├── Lstm.cs │ ├── PTB.cs │ ├── PTB.csproj │ ├── Properties │ │ └── AssemblyInfo.cs │ ├── Readme.md │ ├── lstm_batched.py │ ├── lstm_small.mat │ └── paket.references │ └── WMT │ ├── App.config │ ├── BucketedData.cs │ ├── BucketedDataBatcher.cs │ ├── Data.cs │ ├── Model.cs │ ├── Program.cs │ ├── Properties │ └── AssemblyInfo.cs │ ├── Vocabulary.cs │ ├── WMT.csproj │ ├── WMTTest.cs │ └── paket.references └── version_list.html /.gitattributes: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/.gitattributes -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | tutorial/output 2 | 3 | ## Ignore Visual Studio temporary files, build results, and 4 | ## files generated by popular Visual Studio add-ons. 5 | 6 | # User-specific files 7 | *.suo 8 | *.user 9 | *.userosscache 10 | *.sln.docstates 11 | 12 | # User-specific files (MonoDevelop/Xamarin Studio) 13 | *.userprefs 14 | 15 | # Build results 16 | [Dd]ebug/ 17 | [Dd]ebugPublic/ 18 | [Rr]elease/ 19 | [Rr]eleases/ 20 | x64/ 21 | x86/ 22 | build/ 23 | bld/ 24 | [Bb]in/ 25 | [Oo]bj/ 26 | 27 | # Visual Studio 2015 cache/options directory 28 | .vs/ 29 | # Uncomment if you have tasks that create the project's static files in wwwroot 30 | #wwwroot/ 31 | 32 | # MSTest test Results 33 | [Tt]est[Rr]esult*/ 34 | [Bb]uild[Ll]og.* 35 | 36 | # NUNIT 37 | *.VisualState.xml 38 | TestResult.xml 39 | 40 | # Build Results of an ATL Project 41 | [Dd]ebugPS/ 42 | [Rr]eleasePS/ 43 | dlldata.c 44 | 45 | # DNX 46 | project.lock.json 47 | artifacts/ 48 | 49 | *_i.c 50 | *_p.c 51 | *_i.h 52 | *.ilk 53 | *.meta 54 | *.obj 55 | *.pch 56 | *.pdb 57 | *.pgc 58 | *.pgd 59 | *.rsp 60 | *.sbr 61 | *.tlb 62 | *.tli 63 | *.tlh 64 | *.tmp 65 | *.tmp_proj 66 | *.log 67 | *.vspscc 68 | *.vssscc 69 | .builds 70 | *.pidb 71 | *.svclog 72 | *.scc 73 | 74 | # Chutzpah Test files 75 | _Chutzpah* 76 | 77 | # Visual C++ cache files 78 | ipch/ 79 | *.aps 80 | *.ncb 81 | *.opensdf 82 | *.sdf 83 | *.cachefile 84 | 85 | # Visual Studio profiler 86 | *.psess 87 | *.vsp 88 | *.vspx 89 | *.sap 90 | 91 | # TFS 2012 Local Workspace 92 | $tf/ 93 | 94 | # Guidance Automation Toolkit 95 | *.gpState 96 | 97 | # ReSharper is a .NET coding add-in 98 | _ReSharper*/ 99 | *.[Rr]e[Ss]harper 100 | *.DotSettings.user 101 | 102 | # JustCode is a .NET coding add-in 103 | .JustCode 104 | 105 | # TeamCity is a build add-in 106 | _TeamCity* 107 | 108 | # DotCover is a Code Coverage Tool 109 | *.dotCover 110 | 111 | # NCrunch 112 | _NCrunch_* 113 | .*crunch*.local.xml 114 | nCrunchTemp_* 115 | 116 | # MightyMoose 117 | *.mm.* 118 | AutoTest.Net/ 119 | 120 | # Web workbench (sass) 121 | .sass-cache/ 122 | 123 | # Installshield output folder 124 | [Ee]xpress/ 125 | 126 | # DocProject is a documentation generator 
add-in 127 | DocProject/buildhelp/ 128 | DocProject/Help/*.HxT 129 | DocProject/Help/*.HxC 130 | DocProject/Help/*.hhc 131 | DocProject/Help/*.hhk 132 | DocProject/Help/*.hhp 133 | DocProject/Help/Html2 134 | DocProject/Help/html 135 | 136 | # Click-Once directory 137 | publish/ 138 | 139 | # Publish Web Output 140 | *.[Pp]ublish.xml 141 | *.azurePubxml 142 | # TODO: Comment the next line if you want to checkin your web deploy settings 143 | # but database connection strings (with potential passwords) will be unencrypted 144 | *.pubxml 145 | *.publishproj 146 | 147 | # NuGet Packages 148 | *.nupkg 149 | # The packages folder can be ignored because of Package Restore 150 | **/packages/* 151 | # except build/, which is used as an MSBuild target. 152 | !**/packages/build/ 153 | # Uncomment if necessary however generally it will be regenerated when needed 154 | #!**/packages/repositories.config 155 | 156 | # Windows Azure Build Output 157 | csx/ 158 | *.build.csdef 159 | 160 | # Windows Store app package directory 161 | AppPackages/ 162 | 163 | # Visual Studio cache files 164 | # files ending in .cache can be ignored 165 | *.[Cc]ache 166 | # but keep track of directories ending in .cache 167 | !*.[Cc]ache/ 168 | 169 | # Others 170 | ClientBin/ 171 | [Ss]tyle[Cc]op.* 172 | ~$* 173 | *~ 174 | *.dbmdl 175 | *.dbproj.schemaview 176 | *.pfx 177 | *.publishsettings 178 | node_modules/ 179 | orleans.codegen.cs 180 | 181 | # RIA/Silverlight projects 182 | Generated_Code/ 183 | 184 | # Backup & report files from converting an old project file 185 | # to a newer Visual Studio version. Backup files are not needed, 186 | # because we have git ;-) 187 | _UpgradeReport_Files/ 188 | Backup*/ 189 | UpgradeLog*.XML 190 | UpgradeLog*.htm 191 | 192 | # SQL Server files 193 | *.mdf 194 | *.ldf 195 | 196 | # Business Intelligence projects 197 | *.rdl.data 198 | *.bim.layout 199 | *.bim_*.settings 200 | 201 | # Microsoft Fakes 202 | FakesAssemblies/ 203 | 204 | # Node.js Tools for Visual Studio 205 | .ntvs_analysis.dat 206 | 207 | # Visual Studio 6 build log 208 | *.plg 209 | 210 | # Visual Studio 6 workspace options file 211 | *.opt 212 | 213 | # Visual Studio LightSwitch build output 214 | **/*.HTMLClient/GeneratedArtifacts 215 | **/*.DesktopClient/GeneratedArtifacts 216 | **/*.DesktopClient/ModelManifest.xml 217 | **/*.Server/GeneratedArtifacts 218 | **/*.Server/ModelManifest.xml 219 | _Pvt_Extensions 220 | 221 | # Ignores for Alea GPU project 222 | !src/Alea.IL/Microsoft.Cci.Pdb/**/* 223 | !tools/CUDA/**/* 224 | !tools/AleaGpuWebSites/NugetServer/Packages/**/* 225 | .fake 226 | output 227 | temp 228 | src/Alea/CryptoObfuscator_Output/Alea.* 229 | #*.obproj.map 230 | 231 | .idea/ -------------------------------------------------------------------------------- /.paket/paket.bootstrapper.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/.paket/paket.bootstrapper.exe -------------------------------------------------------------------------------- /.paket/paket.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/.paket/paket.exe -------------------------------------------------------------------------------- /.paket/paket.targets: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | true 6 | 7 | true 8 | 
$(MSBuildThisFileDirectory) 9 | $(MSBuildThisFileDirectory)..\ 10 | /Library/Frameworks/Mono.framework/Commands/mono 11 | mono 12 | 13 | 14 | 15 | $(PaketToolsPath)paket.exe 16 | $(PaketToolsPath)paket.bootstrapper.exe 17 | "$(PaketExePath)" 18 | $(MonoPath) --runtime=v4.0.30319 "$(PaketExePath)" 19 | "$(PaketBootStrapperExePath)" $(PaketBootStrapperCommandArgs) 20 | $(MonoPath) --runtime=v4.0.30319 $(PaketBootStrapperExePath) $(PaketBootStrapperCommandArgs) 21 | 22 | $(MSBuildProjectDirectory)\paket.references 23 | $(MSBuildStartupDirectory)\paket.references 24 | $(MSBuildProjectFullPath).paket.references 25 | $(PaketCommand) restore --references-files "$(PaketReferences)" 26 | $(PaketBootStrapperCommand) 27 | 28 | RestorePackages; $(BuildDependsOn); 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Alea TK 2 | 3 | *** 4 | 5 | Alea TK is an **open source** library for general purpose **numerical computing** and **Machine Learning** based on tensors and tensor expressions. 6 | 7 | - GPU accelerated 8 | - Designed for rapid prototyping 9 | - Developed in C# and usable from any .NET language 10 | 11 | http://www.aleatk.com 12 | 13 | ## Package and building system 14 | 15 | We use [Paket](http://fsprojects.github.io/Paket/index.html) to manage the packages used in this project, and a [FAKE](http://fsharp.github.io/FAKE/) script to build and publish. Here are some notes: 16 | 17 | - Please always use `paket` to manage the packages; do not use the default NuGet package manager. This has many advantages, such as making it easier to script the build and publish steps. 18 | - Some packages are pinned to a specific version; for example, NUnit is pinned to version 2 because of the ReSharper test runner. For more details, please check the `paket.dependencies` file in the solution root folder. 19 | - When you make a fresh local copy, you can always restore all packages by executing `.paket\paket.exe restore`. We have also enabled the [package auto-restore](http://fsprojects.github.io/Paket/paket-auto-restore.html) feature, but if you create a new project, you should enable it again for that project. 20 | - To add a package to your project, use `.paket\paket.exe add nuget YOUPACK version VERSION project YOUPROJ`, then commit the changed package management files. -------------------------------------------------------------------------------- /SolutionInfo.cs: -------------------------------------------------------------------------------- 1 | // 2 | using System.Reflection; 3 | 4 | [assembly: AssemblyProductAttribute("Alea TK")] 5 | [assembly: AssemblyCompanyAttribute("QuantAlea AG.")] 6 | [assembly: AssemblyCopyrightAttribute("QuantAlea AG. 
2016-2016")] 7 | [assembly: AssemblyVersionAttribute("0.9.0.5")] 8 | [assembly: AssemblyFileVersionAttribute("0.9.0.5")] 9 | [assembly: AssemblyInformationalVersionAttribute("0.9.0.5")] 10 | namespace System { 11 | internal static class AssemblyVersionInformation { 12 | internal const string Version = "0.9.0.5"; 13 | internal const string InformationalVersion = "0.9.0.5"; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /SolutionInfo.fs: -------------------------------------------------------------------------------- 1 | namespace System 2 | open System.Reflection 3 | 4 | [] 5 | [] 6 | [] 7 | [] 8 | [] 9 | [] 10 | do () 11 | 12 | module internal AssemblyVersionInformation = 13 | let [] Version = "0.9.0.5" 14 | let [] InformationalVersion = "0.9.0.5" 15 | -------------------------------------------------------------------------------- /build.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | cls 3 | .paket\paket.exe restore 4 | packages\FAKE\tools\FAKE.exe build.fsx %* -------------------------------------------------------------------------------- /gallery.html: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | Alea TK 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |

Redirecting to latest version 0.9.0-beta3 ...

13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | Alea TK 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |

Redirecting to latest version 0.9.0-beta3 ...

13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /paket.dependencies: -------------------------------------------------------------------------------- 1 | content: none 2 | source https://www.nuget.org/api/v2 3 | source http://nuget.aleagpu.com/nuget 4 | source http://beta.aleagpu.com/nuget 5 | nuget Alea 6 | nuget CSMatIO 7 | nuget Fake 8 | nuget Nuget.CommandLine 9 | nuget NUnit 2.6.4 10 | nuget NUnit.Runners.Net4 11 | nuget SharpZipLib 12 | nuget Templatus -------------------------------------------------------------------------------- /paket.lock: -------------------------------------------------------------------------------- 1 | CONTENT: NONE 2 | NUGET 3 | remote: https://www.nuget.org/api/v2 4 | Alea (3.0.2) 5 | CSMatIO (1.0.2) 6 | zlib.net (>= 1.0.4) 7 | FAKE (4.28) 8 | NuGet.CommandLine (3.3) 9 | NUnit (2.6.4) 10 | NUnit.Runners.Net4 (2.6.4) 11 | SharpZipLib (0.86) 12 | Templatus (0.3) 13 | zlib.net (1.0.4) 14 | -------------------------------------------------------------------------------- /src/AleaTK/AleaTK.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | Debug 6 | AnyCPU 7 | {EC47C435-0D7D-4819-BE05-11B9E57FB8F3} 8 | Library 9 | Properties 10 | AleaTK 11 | AleaTK 12 | v4.5.2 13 | 512 14 | 15 | 16 | true 17 | full 18 | false 19 | ..\..\debug\ 20 | DEBUG;TRACE 21 | prompt 22 | 4 23 | true 24 | ..\..\debug\AleaTK.XML 25 | 1591 26 | 27 | 28 | pdbonly 29 | true 30 | ..\..\release\ 31 | TRACE 32 | prompt 33 | 4 34 | true 35 | ..\..\release\AleaTK.XML 36 | 1591 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | Properties\SolutionInfo.cs 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 99 | 100 | 101 | 102 | 103 | 104 | ..\..\packages\Alea\lib\net45\Alea.IL.dll 105 | True 106 | True 107 | 108 | 109 | ..\..\packages\Alea\lib\net45\Alea.Parallel.dll 110 | True 111 | True 112 | 113 | 114 | ..\..\packages\Alea\lib\net45\Alea.dll 115 | True 116 | True 117 | 118 | 119 | 120 | 121 | 122 | -------------------------------------------------------------------------------- /src/AleaTK/ExprImpl/MapExpr.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | 3 | namespace AleaTK.ExprImpl 4 | { 5 | public class Map1Expr : RExpr 6 | { 7 | public Map1Expr(Expr input, Func transform, string opCode = OpCodes.Map1) 8 | { 9 | Shape = input.Shape; 10 | OpCode = opCode; 11 | Input = input; 12 | Transform = transform; 13 | AddOperand(Input); 14 | } 15 | 16 | public Expr Input { get; } 17 | 18 | public Func Transform { get; } 19 | 20 | public override Shape Shape { get; } 21 | 22 | protected override IRValue GenerateRValue(Assignment assignment) 23 | { 24 | var device = assignment.Context.Device; 25 | var input = assignment.GetInput(Input).ToRValue(); 26 | var transform = Transform; 27 | var layout = input.Layout; 28 | var inputRawReader = input.BufferReader.RawReader; 29 | Func rawReader = i => transform(inputRawReader(i)); 30 | return new TensorReader(device, layout, rawReader); 31 | } 32 | } 33 | 34 | public class Map2Expr : RExpr 35 | { 36 | public Map2Expr(Expr input1, Expr input2, Func transform, string opCode = OpCodes.Map2) 37 | { 38 | Input1 = input1; 39 | Input2 = input2; 40 | Transform = transform; 41 | OpCode = opCode; 42 | Shape = 
Shape.Broadcast(input1.Shape, input2.Shape); 43 | AddOperand(Input1); 44 | AddOperand(Input2); 45 | } 46 | 47 | public Expr Input1 { get; } 48 | 49 | public Expr Input2 { get; } 50 | 51 | public Func Transform { get; } 52 | 53 | public override Shape Shape { get; } 54 | 55 | public override void Prepare(Assignment assignment) 56 | { 57 | assignment.RequireLayoutInnerChangeMost(Input1); 58 | assignment.RequireLayoutInnerChangeMost(Input2); 59 | assignment.RequireLayoutFullyUnitStride(Input1); 60 | assignment.RequireLayoutFullyUnitStride(Input2); 61 | } 62 | 63 | protected override IRValue GenerateRValue(Assignment assignment) 64 | { 65 | var device = assignment.Context.Device; 66 | var input1 = assignment.GetInput(Input1).ToRValue(); 67 | var input2 = assignment.GetInput(Input2).ToRValue(); 68 | var transform = Transform; 69 | var shape = Shape; 70 | 71 | if (Layout.CanFullyUnitStrideMapping(input1.Layout, input2.Layout)) 72 | { 73 | var read1 = input1.BufferReader.GetFlatReader1(shape); 74 | var read2 = input2.BufferReader.GetFlatReader1(shape); 75 | var layout = new Layout(shape); 76 | Func rawReader = i => transform(read1(i), read2(i)); 77 | return new TensorReader(device, layout, rawReader); 78 | } 79 | 80 | throw new NotImplementedException(); 81 | } 82 | } 83 | } -------------------------------------------------------------------------------- /src/AleaTK/ExprImpl/RandomExpr.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using Alea; 3 | using Alea.cuRAND; 4 | 5 | namespace AleaTK.ExprImpl 6 | { 7 | public class PseudoRandomExpr : LExpr 8 | { 9 | public PseudoRandomExpr(Shape shape, PseudoRandomType type, Distribution distribution, ulong seed, ulong offset, string opCode = OpCodes.Random) 10 | { 11 | OpCode = opCode; 12 | Shape = shape; 13 | Seed = seed; 14 | Distribution = distribution; 15 | Offset = offset; 16 | Type = type; 17 | } 18 | 19 | public PseudoRandomType Type { get; } 20 | 21 | public Distribution Distribution { get; } 22 | 23 | public ulong Seed { get; } 24 | 25 | public ulong Offset { get; } 26 | 27 | public override Shape Shape { get; } 28 | 29 | protected override bool Execute(Assignment assignment, ILValue output) 30 | { 31 | var ctx = assignment.Context; 32 | var rng = ctx.GetRandomGenerator(Type); 33 | 34 | lock (rng) 35 | { 36 | rng.SetPseudoRandomGeneratorSeed(Seed); 37 | rng.SetGeneratorOrdering(Ordering.PSEUDO_DEFAULT); 38 | rng.SetGeneratorOffset(Offset); 39 | 40 | if (typeof(T) == typeof(double)) 41 | { 42 | var ptr = output.Buffer.Ptr.Reinterpret(); 43 | 44 | if (Distribution is UniformDistribution) 45 | { 46 | rng.GenerateUniform(ptr, (ulong)output.Layout.Shape.Length); 47 | return true; 48 | } 49 | 50 | if (Distribution is NormalDistribution) 51 | { 52 | var dist = Distribution as NormalDistribution; 53 | rng.GenerateNormal(ptr, (ulong)output.Layout.Shape.Length, dist.Mean, dist.Stddev); 54 | return true; 55 | } 56 | 57 | throw new InvalidOperationException(); 58 | } 59 | 60 | if (typeof(T) == typeof(float)) 61 | { 62 | var ptr = output.Buffer.Ptr.Reinterpret(); 63 | 64 | if (Distribution is UniformDistribution) 65 | { 66 | rng.GenerateUniform(ptr, (ulong)output.Layout.Shape.Length); 67 | return true; 68 | } 69 | 70 | if (Distribution is NormalDistribution) 71 | { 72 | var dist = Distribution as NormalDistribution; 73 | rng.GenerateNormal(ptr, (ulong)output.Layout.Shape.Length, (float)dist.Mean, (float)dist.Stddev); 74 | return true; 75 | } 76 | 77 | throw new InvalidOperationException(); 78 | } 79 
| 80 | if (typeof(T) == typeof(double2)) 81 | { 82 | var ptr = output.Buffer.Ptr.Reinterpret(); 83 | 84 | if (Distribution is UniformDistribution) 85 | { 86 | rng.GenerateUniform(ptr, (ulong)output.Layout.Shape.Length * 2UL); 87 | return true; 88 | } 89 | 90 | if (Distribution is NormalDistribution) 91 | { 92 | var dist = Distribution as NormalDistribution; 93 | rng.GenerateNormal(ptr, (ulong)output.Layout.Shape.Length * 2UL, dist.Mean, dist.Stddev); 94 | return true; 95 | } 96 | 97 | throw new InvalidOperationException(); 98 | } 99 | 100 | if (typeof(T) == typeof(uint) || typeof(T) == typeof(int)) 101 | { 102 | var ptr = output.Buffer.Ptr.Reinterpret(); 103 | 104 | if (Distribution is UniformDistribution) 105 | { 106 | rng.Generate(ptr, (ulong)output.Layout.Shape.Length); 107 | return true; 108 | } 109 | 110 | throw new InvalidOperationException(); 111 | } 112 | } 113 | 114 | return false; 115 | } 116 | } 117 | } -------------------------------------------------------------------------------- /src/AleaTK/ExprImpl/ScalarExpr.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | 3 | namespace AleaTK.ExprImpl 4 | { 5 | public class ScalarExpr : RExpr 6 | { 7 | public ScalarExpr(Shape shape, T value) 8 | { 9 | OpCode = OpCodes.Scalar; 10 | Shape = shape; 11 | Value = value; 12 | } 13 | 14 | public ScalarExpr(T value) 15 | { 16 | OpCode = OpCodes.Scalar; 17 | Shape = Shape.Scalar; 18 | Value = value; 19 | } 20 | 21 | public T Value { get; } 22 | 23 | public override Shape Shape { get; } 24 | 25 | protected override IRValue GenerateRValue(Assignment assignment) 26 | { 27 | var device = assignment.Context.Device; 28 | var layout = new Layout(Shape); 29 | var value = Value; 30 | Func rawReader = _ => value; 31 | return new TensorReader(device, layout, rawReader); 32 | } 33 | } 34 | } -------------------------------------------------------------------------------- /src/AleaTK/ExprImpl/ShapeExpr.cs: -------------------------------------------------------------------------------- 1 | using System.Linq; 2 | 3 | namespace AleaTK.ExprImpl 4 | { 5 | public class ReShapeExpr : RExpr 6 | { 7 | public ReShapeExpr(Expr input, long[] dims, string opCode = OpCodes.ReShape) 8 | { 9 | // -1 means calc the shape, but only one -1 allowed. 10 | var numNegOne = dims.Select(x => x < 0 ? 1 : 0).Sum(); 11 | Util.EnsureTrue(numNegOne == 0 || numNegOne == 1); 12 | 13 | if (numNegOne == 0) 14 | { 15 | var shape = new Shape(dims); 16 | // length must match old one 17 | Util.EnsureEqual(input.Shape.Length, shape.Length); 18 | Shape = shape; 19 | } 20 | else 21 | { 22 | var remainLength = dims.Select(x => x >= 0 ? 
x : 1L).Aggregate(ScalarOps.Mul); 23 | for (var i = 0; i < dims.Length; ++i) 24 | { 25 | if (dims[i] < 0) 26 | { 27 | dims[i] = input.Shape.Length/remainLength; 28 | break; 29 | } 30 | } 31 | // check if it is multiply correct 32 | var shape = new Shape(dims); 33 | Util.EnsureEqual(input.Shape.Length, shape.Length); 34 | Shape = shape; 35 | } 36 | 37 | Input = input; 38 | AddOperand(Input); 39 | } 40 | 41 | public Expr Input { get; } 42 | 43 | public override Shape Shape { get; } 44 | 45 | protected override IRValue GenerateRValue(Assignment assignment) 46 | { 47 | var device = assignment.Context.Device; 48 | var input = assignment.GetInput(Input).ToRValue(); 49 | var shape = Shape; 50 | 51 | if (input.Layout.IsInnerChangeMost && input.Layout.IsFullyUnitStride) 52 | { 53 | var read = input.BufferReader.GetFlatReader1(); 54 | var layout = new Layout(shape); 55 | return new TensorReader(device, layout, read); 56 | } 57 | 58 | return null; 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/AleaTK/ExprImpl/TakeExpr.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using System.Text; 5 | using System.Threading.Tasks; 6 | 7 | namespace AleaTK.ExprImpl 8 | { 9 | public class TakeExpr : RExpr 10 | { 11 | public TakeExpr(Expr indices, Expr source) 12 | { 13 | Indices = indices; 14 | Source = source; 15 | Util.EnsureTrue(Source.Shape.Rank == 2); 16 | Shape = Shape.Create(Indices.Shape.Concat(new[] {Source.Shape[1]}).ToArray()); 17 | AddOperand(Indices); 18 | AddOperand(Source); 19 | } 20 | 21 | public Expr Indices { get; } 22 | 23 | public Expr Source { get; } 24 | 25 | public override Shape Shape { get; } 26 | 27 | protected override IRValue GenerateRValue(Assignment assignment) 28 | { 29 | 30 | var device = assignment.Context.Device; 31 | var indices = assignment.GetInput(Indices).ToRValue(); 32 | var source = assignment.GetInput(Source).ToRValue(); 33 | Util.EnsureTrue(indices.Layout.IsInnerChangeMostFullyPacked); 34 | 35 | var indicesReader = indices.BufferReader.RawReader; 36 | var sourceReader = source.BufferReader.GetReader2(); 37 | var layout = new Layout(Shape); 38 | var dim = source.Layout.Shape[1]; 39 | Func rawReader = i => sourceReader(indicesReader(i/dim), i%dim); 40 | return new TensorReader(device, layout, rawReader); 41 | } 42 | } 43 | 44 | public class TakeGradExpr : RExpr 45 | { 46 | public TakeGradExpr(Expr indices, Expr outputGradient, int sourceRows, T zero, Func add) 47 | { 48 | Indices = indices; 49 | OutputGradient = outputGradient; 50 | SourceRows = sourceRows; 51 | Zero = zero; 52 | Add = add; 53 | Shape = Shape.Create(SourceRows, OutputGradient.Shape[OutputGradient.Shape.Rank - 1]); 54 | AddOperand(Indices); 55 | AddOperand(OutputGradient); 56 | } 57 | 58 | public Expr Indices { get; } 59 | 60 | public Expr OutputGradient { get; } 61 | 62 | public int SourceRows { get; } 63 | 64 | public override Shape Shape { get; } 65 | 66 | public T Zero { get; } 67 | 68 | public Func Add { get; } 69 | 70 | protected override IRValue GenerateRValue(Assignment assignment) 71 | { 72 | var device = assignment.Context.Device; 73 | var indices = assignment.GetInput(Indices).ToRValue(); 74 | var dOutput = assignment.GetInput(OutputGradient).ToRValue(); 75 | Util.EnsureTrue(indices.Layout.IsInnerChangeMostFullyPacked); 76 | Util.EnsureTrue(dOutput.Layout.IsInnerChangeMostFullyPacked); 77 | 78 | var indicesReader = 
indices.BufferReader.RawReader; 79 | var dOutputReader = dOutput.BufferReader.RawReader; 80 | var layout = new Layout(Shape); 81 | var sourceRows = SourceRows; 82 | var batchSize = Indices.Shape.Length; 83 | var zero = Zero; 84 | var add = Add; 85 | var dims = Shape[1]; 86 | 87 | Func rawReader = i => 88 | { 89 | var row = i/ dims; 90 | var col = i% dims; 91 | var ret = zero; 92 | for (var j = 0; j < batchSize; ++j) 93 | { 94 | var idx = indicesReader(j); 95 | if (idx == row) 96 | { 97 | var value = dOutputReader(j*dims + col); 98 | ret = add(ret, value); 99 | } 100 | } 101 | return ret; 102 | }; 103 | 104 | return new TensorReader(device, layout, rawReader); 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/AleaTK/ML/Batcher.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Linq; 3 | using Alea; 4 | using Alea.Parallel.Device; 5 | 6 | namespace AleaTK.ML { 7 | public class Batcher: IDisposable { 8 | [GpuParam] 9 | private readonly int cols_, rows_, outputs_; 10 | private readonly bool doReset_; 11 | 12 | public long Index { get; private set; } 13 | public int Rows => rows_; 14 | public int Cols => cols_; 15 | public int Outputs => outputs_; 16 | 17 | public Batcher(Context context, float[,] data, float[,] labels, bool doReset=true) { 18 | doReset_ = doReset; 19 | Context = context; 20 | Random = new Random(0); 21 | rows_ = data.GetLength(0); 22 | cols_ = data.GetLength(1); 23 | outputs_ = labels.GetLength(1); 24 | 25 | Indices = Enumerable.Range(0, Rows).ToArray(); 26 | Data = data; 27 | Labels = labels; 28 | 29 | IndicesTensor = context.Allocate(Indices); 30 | DataTensor1 = context.Allocate(data); 31 | LabelsTensor1 = context.Allocate(labels); 32 | DataTensor2 = context.Device.Allocate(Shape.Create(Rows, Cols)); 33 | LabelsTensor2 = context.Device.Allocate(Shape.Create(Rows, Outputs)); 34 | DataTensor = DataTensor1; 35 | LabelsTensor = LabelsTensor1; 36 | 37 | if (!doReset_) return; 38 | Index = -1; 39 | Reset(); 40 | } 41 | 42 | #region props 43 | 44 | public Context Context { get; } 45 | public Random Random { get; } 46 | public int[] Indices { get; } 47 | public float[,] Data { get; } 48 | public float[,] Labels { get; } 49 | public Tensor IndicesTensor { get; } 50 | public Tensor DataTensor { get; private set; } 51 | public Tensor DataTensor1 { get; } 52 | public Tensor DataTensor2 { get; } 53 | public Tensor LabelsTensor { get; private set; } 54 | public Tensor LabelsTensor1 { get; } 55 | public Tensor LabelsTensor2 { get; } 56 | 57 | #endregion 58 | 59 | private void ShuffleIndices() { 60 | var rng = Random; 61 | var array = Indices; 62 | var n = array.Length; 63 | while (n > 1) { 64 | var k = rng.Next(n--); 65 | var temp = array[n]; 66 | array[n] = array[k]; 67 | array[k] = temp; 68 | } 69 | } 70 | 71 | public void Reset() { 72 | if (!doReset_ || Index == 0L || Context.Type != ContextType.Gpu) return; 73 | 74 | Index = 0L; 75 | ShuffleIndices(); 76 | Context.Copy(IndicesTensor, Indices.AsTensor()); 77 | var stream = Context.ToGpuContext().Stream; 78 | var srcData = DataTensor == DataTensor1 ? DataTensor1.Buffer.Ptr : DataTensor2.Buffer.Ptr; 79 | var dstData = DataTensor == DataTensor1 ? DataTensor2.Buffer.Ptr : DataTensor1.Buffer.Ptr; 80 | var srcLabels = LabelsTensor == LabelsTensor1 ? LabelsTensor1.Buffer.Ptr : LabelsTensor2.Buffer.Ptr; 81 | var dstLabels = LabelsTensor == LabelsTensor1 ? 
LabelsTensor2.Buffer.Ptr : LabelsTensor1.Buffer.Ptr; 82 | var idx = IndicesTensor.Buffer.Ptr; 83 | DeviceFor.For(stream, 0, Rows, i => { 84 | var j = idx[i]; 85 | var srcDataOffset = srcData + i*cols_; 86 | var dstDataOffset = dstData + j* cols_; 87 | for (var k = 0; k < cols_; ++k) dstDataOffset[k] = srcDataOffset[k]; 88 | var srcLabelsOffset = srcLabels + i*outputs_; 89 | var dstLabelsOffset = dstLabels + j* outputs_; 90 | for (var k = 0; k < outputs_; ++k) dstLabelsOffset[k] = srcLabelsOffset[k]; 91 | }); 92 | DataTensor = DataTensor == DataTensor1 ? DataTensor2 : DataTensor1; 93 | LabelsTensor = LabelsTensor == LabelsTensor1 ? LabelsTensor2 : LabelsTensor1; 94 | } 95 | 96 | public static Buffer CreateBuffer(Tensor t, long rows, int cols, long idx) { 97 | return new Buffer(t.Device, t.Memory, new Layout(Shape.Create(rows, cols)), t.Buffer.Ptr.LongPtr(idx * cols)); 98 | } 99 | 100 | public bool Next(long batchSize, Executor executor, Variable dataVar, Variable labelsVar) { 101 | if (Index >= Rows) { 102 | Reset(); 103 | return false; 104 | } 105 | var size = Index + batchSize >= Rows ? Rows - Index : batchSize; 106 | var dataBuffer = CreateBuffer(DataTensor, size, Cols, Index); 107 | var labelsBuffer = CreateBuffer(LabelsTensor, size, Outputs, Index); 108 | Index += batchSize; 109 | 110 | executor.SetTensor(dataVar, new Tensor(dataBuffer)); 111 | executor.SetTensor(labelsVar, new Tensor(labelsBuffer)); 112 | return true; 113 | } 114 | 115 | public void Dispose() { 116 | foreach (var t in new[] {DataTensor, DataTensor1, DataTensor2, LabelsTensor1, LabelsTensor2}) t.Buffer.Dispose(); 117 | IndicesTensor.Buffer.Dispose(); 118 | } 119 | } 120 | } -------------------------------------------------------------------------------- /src/AleaTK/ML/Differentiable.cs: -------------------------------------------------------------------------------- 1 | using System.Collections.Generic; 2 | 3 | namespace AleaTK.ML 4 | { 5 | public abstract class Differentiable : Symbol 6 | { 7 | private readonly List _inputs = new List(); 8 | private readonly List _outputs = new List(); 9 | private readonly List _auxvars = new List(); 10 | 11 | public IEnumerable Inputs => _inputs; 12 | 13 | public IEnumerable Outputs => _outputs; 14 | 15 | public IEnumerable AuxVars => _auxvars; 16 | 17 | protected void AddInput(Variable input) 18 | { 19 | _inputs.Add(input); 20 | } 21 | 22 | protected void AddOutput(Variable output) 23 | { 24 | output.Owner = this; 25 | _outputs.Add(output); 26 | } 27 | 28 | protected void AddAuxVar(Variable auxvar) 29 | { 30 | _auxvars.Add(auxvar); 31 | } 32 | 33 | public abstract void Forward(Executor executor); 34 | 35 | public abstract void Backward(Executor executor); 36 | 37 | public virtual void Initialize(Executor executor) 38 | { 39 | } 40 | } 41 | 42 | public interface ILayer { 43 | Variable Output { get; } 44 | } 45 | } -------------------------------------------------------------------------------- /src/AleaTK/ML/Library.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using System.Text; 5 | using System.Threading.Tasks; 6 | using AleaTK.ML.Operator; 7 | 8 | namespace AleaTK.ML 9 | { 10 | public static class Library 11 | { 12 | public static Variable Variable() 13 | { 14 | return new Variable(VariableType.Common); 15 | } 16 | 17 | public static Variable Variable(PartialShape shape) 18 | { 19 | return new Variable(VariableType.Common, shape); 20 | } 21 | 22 | public static 
Variable Parameter(Expr initializer) 23 | { 24 | return new Variable(VariableType.Parameter, initializer); 25 | } 26 | 27 | public static Variable Parameter() 28 | { 29 | return new Variable(VariableType.Parameter); 30 | } 31 | 32 | public static Variable AuxVariable() 33 | { 34 | return new Variable(VariableType.Auxilliary); 35 | } 36 | 37 | public static Variable Dot(Variable a, Variable b) 38 | { 39 | var op = new Dot(a, b); 40 | return op.C; 41 | } 42 | 43 | public static Variable L2Loss(Variable pred, Variable label) 44 | { 45 | var op = new L2Loss(pred, label); 46 | return op.Loss; 47 | } 48 | 49 | public static Variable Reshape(this Variable input, params long[] shape) 50 | { 51 | return new Reshape(input, PartialShape.Create(shape)).Output; 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/AleaTK/ML/Operator/Activation.cs: -------------------------------------------------------------------------------- 1 | using static AleaTK.Library; 2 | using static AleaTK.ML.Library; 3 | 4 | namespace AleaTK.ML.Operator 5 | { 6 | public abstract class Activation : Differentiable, ILayer { 7 | protected Activation(Variable input) 8 | { 9 | Input = input; 10 | Output = Variable(input.Shape); 11 | AddInput(Input); 12 | AddOutput(Output); 13 | } 14 | 15 | public Variable Input { get; } 16 | 17 | public Variable Output { get; } 18 | 19 | public override void Forward(Executor executor) 20 | { 21 | var input = executor.GetTensor(Input); 22 | executor.AssignTensor(Output, ForwardExpr(input)); 23 | } 24 | 25 | protected abstract Expr ForwardExpr(Tensor input); 26 | 27 | public override void Backward(Executor executor) 28 | { 29 | var output = executor.GetTensor(Output); 30 | var dOutput = executor.GetGradient(Output); 31 | executor.AssignGradient(Input, BackwardExpr(output)*dOutput); 32 | } 33 | 34 | protected abstract Expr BackwardExpr(Tensor output); 35 | } 36 | 37 | public class ActivationReLU : Activation { 38 | public ActivationReLU(Variable input) : base(input) { } 39 | 40 | protected override Expr ForwardExpr(Tensor input) { return Max(input, 0.0.AsScalar()); } 41 | 42 | protected override Expr BackwardExpr(Tensor output) { return ReLUGrad(output); } 43 | } 44 | 45 | public class ActivationSigmoid : Activation 46 | { 47 | public ActivationSigmoid(Variable input) : base(input) { } 48 | 49 | protected override Expr ForwardExpr(Tensor input) 50 | { 51 | return 1.0.AsScalar()/(1.0.AsScalar() + Exp(-input)); 52 | } 53 | 54 | protected override Expr BackwardExpr(Tensor output) { return output*(1.0.AsScalar() - output); } 55 | } 56 | 57 | public class ActivationTanh : Activation 58 | { 59 | public ActivationTanh(Variable input) : base(input) { } 60 | 61 | protected override Expr ForwardExpr(Tensor input) { return Tanh(input); } 62 | 63 | protected override Expr BackwardExpr(Tensor output) { return 1.0.AsScalar() - output*output; } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/AleaTK/ML/Operator/Add.cs: -------------------------------------------------------------------------------- 1 | using System.Linq; 2 | using static AleaTK.Library; 3 | using static AleaTK.ML.Library; 4 | 5 | namespace AleaTK.ML.Operator 6 | { 7 | public class Add : Differentiable 8 | { 9 | public Add(Variable a, Variable b) 10 | { 11 | A = a; 12 | B = b; 13 | C = Variable(); 14 | AddInput(a); 15 | AddInput(b); 16 | AddOutput(C); 17 | } 18 | 19 | public Variable A { get; } 20 | 21 | public Variable B { get; } 22 | 
23 | public Variable C { get; } 24 | 25 | public override void Forward(Executor executor) 26 | { 27 | var a = executor.GetTensor(A); 28 | var b = executor.GetTensor(B); 29 | executor.AssignTensor(C, a + b); 30 | } 31 | 32 | public override void Backward(Executor executor) 33 | { 34 | var a = executor.GetTensor(A); 35 | var b = executor.GetTensor(B); 36 | var dC = executor.GetGradient(C); 37 | 38 | var dA = a.Shape.Rank < dC.Shape.Rank 39 | ? ReduceSum(dC, Enumerable.Range(0, dC.Shape.Rank - a.Shape.Rank).ToArray()) 40 | : dC; 41 | 42 | var dB = b.Shape.Rank < dC.Shape.Rank 43 | ? ReduceSum(dC, Enumerable.Range(0, dC.Shape.Rank - b.Shape.Rank).ToArray()) 44 | : dC; 45 | 46 | executor.AssignGradient(A, dA); 47 | executor.AssignGradient(B, dB); 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/AleaTK/ML/Operator/Dot.cs: -------------------------------------------------------------------------------- 1 | using static AleaTK.Library; 2 | using static AleaTK.ML.Library; 3 | 4 | namespace AleaTK.ML.Operator 5 | { 6 | public class Dot : Differentiable 7 | { 8 | public Dot(Variable a, Variable b) 9 | { 10 | A = a; 11 | B = b; 12 | C = Variable(); 13 | AddInput(a); 14 | AddInput(b); 15 | AddOutput(C); 16 | } 17 | 18 | public Variable A { get; } 19 | 20 | public Variable B { get; } 21 | 22 | public Variable C { get; } 23 | 24 | public override void Forward(Executor executor) 25 | { 26 | var a = executor.GetTensor(A); 27 | var b = executor.GetTensor(B); 28 | executor.AssignTensor(C, Dot(a, b)); 29 | } 30 | 31 | public override void Backward(Executor executor) 32 | { 33 | var a = executor.GetTensor(A); 34 | var b = executor.GetTensor(B); 35 | var dC = executor.GetGradient(C); 36 | executor.AssignGradient(A, Dot(dC, b.T)); 37 | executor.AssignGradient(B, Dot(a.T, dC)); 38 | } 39 | } 40 | } -------------------------------------------------------------------------------- /src/AleaTK/ML/Operator/Dropout.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Linq; 3 | using Alea; 4 | using static AleaTK.Library; 5 | using static AleaTK.ML.Library; 6 | 7 | namespace AleaTK.ML.Operator 8 | { 9 | /// 10 | /// With probability (1 - dropoutProb) outputs the input element scaled up by 1 / (1 - dropoutProb), 11 | /// otherwise outputs 0. The scaling is so that the expected sum is unchanged. 12 | /// 13 | public class Dropout : Differentiable 14 | { 15 | public Dropout(Variable input, double dropoutProb = 0.5) 16 | { 17 | Util.EnsureTrue(dropoutProb > 0.0); 18 | Util.EnsureTrue(dropoutProb < 1.0); 19 | 20 | Input = input; 21 | Output = Variable(input.Shape); 22 | 23 | Scale = 1.0 / (1.0 - dropoutProb); 24 | 25 | Threshold = (uint) ((double) UInt32.MaxValue*dropoutProb); 26 | 27 | Mask = AuxVariable(); 28 | 29 | AddInput(input); 30 | AddAuxVar(Mask); 31 | AddOutput(Output); 32 | } 33 | 34 | public Variable Input { get; } 35 | 36 | public Variable Output { get; } 37 | 38 | public Variable Mask { get; } 39 | 40 | public uint Threshold { get; } 41 | 42 | public double Scale { get; } 43 | 44 | public override void Forward(Executor executor) 45 | { 46 | var ctx = executor.Context; 47 | var input = executor.GetTensor(Input); 48 | // TODO: make sure the offset is correct in one training. 
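// The mask is a fresh uniform random tensor; Dropout(input, mask, Threshold, Scale)
// drops an element when its mask value falls in the Threshold fraction of the range
// (Threshold = uint.MaxValue * dropoutProb, see the constructor) and scales the
// survivors by Scale = 1 / (1 - dropoutProb), keeping the expected sum unchanged
// ("inverted dropout"), as described in the class summary above.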
49 | executor.AssignTensor(Mask, RandomUniform(input.Shape)); 50 | var mask = executor.GetTensor(Mask); 51 | executor.AssignTensor(Output, Dropout(input, mask, Threshold, Scale)); 52 | } 53 | 54 | public override void Backward(Executor executor) 55 | { 56 | var dOutput = executor.GetGradient(Output); 57 | var mask = executor.GetTensor(Mask); 58 | executor.AssignGradient(Input, Dropout(dOutput, mask, Threshold, Scale)); 59 | } 60 | } 61 | } -------------------------------------------------------------------------------- /src/AleaTK/ML/Operator/Embedding.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Linq; 3 | using Alea; 4 | using Alea.CSharp; 5 | using static AleaTK.Library; 6 | 7 | namespace AleaTK.ML.Operator 8 | { 9 | public class Embedding : Differentiable, ILayer { 10 | public Embedding(Variable indices, int embedSize, int embedDim, double initScale = 0.5) 11 | { 12 | Indices = indices; 13 | Weights = Library.Parameter((initScale * 2.0).AsScalar()*RandomUniform(Shape.Create(embedSize, embedDim)) - initScale.AsScalar()); 14 | Output = Library.Variable(PartialShape.Create(Indices.Shape.Concat(new long[] { embedDim }).ToArray())); 15 | EmbedSize = embedSize; 16 | EmbedDim = embedDim; 17 | 18 | AddInput(Indices); 19 | AddInput(Weights); 20 | AddOutput(Output); 21 | } 22 | 23 | public Variable Indices { get; } 24 | 25 | public Variable Weights { get; } 26 | 27 | public Variable Output { get; } 28 | 29 | public int EmbedSize { get; } 30 | 31 | public int EmbedDim { get; } 32 | 33 | public override void Forward(Executor executor) 34 | { 35 | var indices = executor.GetTensor(Indices); 36 | var weights = executor.GetTensor(Weights); 37 | executor.AssignTensor(Output, Take(indices, weights)); 38 | } 39 | 40 | public override void Backward(Executor executor) 41 | { 42 | var ctx = executor.Context; 43 | var indices = executor.GetTensor(Indices); 44 | var gradout = executor.GetGradient(Output); 45 | 46 | // fast path for performance. 47 | if (ctx.Type == ContextType.Gpu && gradout.Layout.IsInnerChangeMostFullyPacked && indices.Layout.IsInnerChangeMostFullyPacked) 48 | { 49 | var embedDim = EmbedDim; 50 | var batchSize = (int)indices.Shape.Length; 51 | var threadSize = 256; 52 | 53 | // first set all to 0 54 | executor.AssignGradient(Weights, Fill(executor.GetTensor(Weights).Shape, ScalarOps.Conv(0.0))); 55 | var dW = executor.GetGradient(Weights); 56 | 57 | // then use a one-block kernel to update it, because usually the batch size is not huge, but the embedding size is. 58 | var stream = ctx.ToGpuContext().Stream; 59 | var iPtr = indices.Buffer.Ptr; 60 | 61 | // the following kernel runs in a single block, so there is no need for synchronization; 62 | // it could be optimized further. 
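// Each thread strides over the embedding columns (k += blockDim.x) while the
// batch loop over i runs sequentially in every thread, so no two threads ever
// write the same dW element and no atomics are needed; launching a single
// block keeps this scatter-add of row gradients correct and deterministic.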
63 | 64 | if (typeof(T) == typeof(float)) 65 | { 66 | var dOPtr = gradout.Buffer.Ptr.Reinterpret(); 67 | var dWPtr = dW.Buffer.Ptr.Reinterpret(); 68 | var lp = new LaunchParam(1, threadSize); 69 | //Console.WriteLine($"{indices.Shape} {gradout.Shape} {dW.Shape}"); 70 | stream.Launch(() => 71 | { 72 | for (var i = 0; i < batchSize; ++i) 73 | { 74 | var row = iPtr[i]; 75 | 76 | for (var k = threadIdx.x; k < embedDim; k += blockDim.x) 77 | { 78 | dWPtr[row * embedDim + k] += dOPtr[i * embedDim + k]; 79 | } 80 | } 81 | }, lp); 82 | 83 | return; 84 | } 85 | 86 | throw new NotImplementedException(); 87 | } 88 | else 89 | { 90 | executor.AssignGradient(Weights, TakeGrad(indices, gradout, EmbedSize)); 91 | } 92 | } 93 | } 94 | } -------------------------------------------------------------------------------- /src/AleaTK/ML/Operator/FullyConnected.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Linq; 3 | using static AleaTK.Library; 4 | using static AleaTK.ML.Library; 5 | 6 | namespace AleaTK.ML.Operator 7 | { 8 | public class FullyConnected : Differentiable, ILayer { 9 | public FullyConnected(Variable data, long numHidden) 10 | { 11 | Util.EnsureTrue(data.HasShape); 12 | Util.EnsureEqual(2, data.Shape.Rank, "Input must be matrix."); 13 | Util.EnsureTrue(data.Shape[1] > 0L); 14 | 15 | Data = data; 16 | 17 | var numInput = data.Shape[1]; 18 | var scale = Sqrt(12.0.AsScalar() / ((double)(numInput + numHidden)).AsScalar()); 19 | Weights = Parameter(scale * (RandomUniform(Shape.Create(numInput, numHidden), 0UL, 0UL) - 0.5.AsScalar())); 20 | 21 | Bias = Parameter(Fill(Shape.Create(numHidden), ScalarOps.Conv(0.0))); 22 | Output = Variable(PartialShape.Create(data.Shape[0], numHidden)); 23 | 24 | AddInput(Data); 25 | AddInput(Weights); 26 | AddInput(Bias); 27 | AddOutput(Output); 28 | } 29 | 30 | public Variable Data { get; } 31 | 32 | public Variable Weights { get; } 33 | 34 | public Variable Bias { get; } 35 | 36 | public Variable Output { get; } 37 | 38 | public override void Forward(Executor executor) 39 | { 40 | var data = executor.GetTensor(Data); 41 | var weights = executor.GetTensor(Weights); 42 | var bias = executor.GetTensor(Bias); 43 | executor.AssignTensor(Output, Dot(data.Reshape(data.Shape[0], -1), weights) + bias); 44 | } 45 | 46 | public override void Backward(Executor executor) 47 | { 48 | var data = executor.GetTensor(Data); 49 | var weights = executor.GetTensor(Weights); 50 | var dOutput = executor.GetGradient(Output); 51 | executor.AssignGradient(Data, Dot(dOutput, weights.T).Reshape(data.Shape.AsArray)); 52 | executor.AssignGradient(Weights, Dot(data.Reshape(data.Shape[0], -1).T, dOutput)); 53 | executor.AssignGradient(Bias, ReduceSum(dOutput, 0)); 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /src/AleaTK/ML/Operator/L2Loss.cs: -------------------------------------------------------------------------------- 1 | using static AleaTK.Library; 2 | using static AleaTK.ML.Library; 3 | 4 | namespace AleaTK.ML.Operator 5 | { 6 | public class L2Loss : Differentiable 7 | { 8 | public L2Loss(Variable pred, Variable label) 9 | { 10 | Pred = pred; 11 | Label = label; 12 | Loss = Variable(); 13 | AddInput(pred); 14 | AddInput(label); 15 | AddOutput(Loss); 16 | } 17 | 18 | public Variable Pred { get; } 19 | 20 | public Variable Label { get; } 21 | 22 | public Variable Loss { get; } 23 | 24 | public override void Forward(Executor executor) 25 | { 26 | var pred = 
executor.GetTensor(Pred); 27 | var label = executor.GetTensor(Label); 28 | executor.AssignTensor(Loss, ReduceSum((pred - label) * (pred - label))); 29 | } 30 | 31 | public override void Backward(Executor executor) 32 | { 33 | var pred = executor.GetTensor(Pred); 34 | var label = executor.GetTensor(Label); 35 | executor.AssignGradient(Pred, 2.0.AsScalar() * (pred - label)); 36 | } 37 | } 38 | } -------------------------------------------------------------------------------- /src/AleaTK/ML/Operator/Pooling.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using Alea.cuDNN; 3 | using static AleaTK.ML.Library; 4 | 5 | namespace AleaTK.ML.Operator 6 | { 7 | public class Pooling2D : Differentiable, ILayer { 8 | public Pooling2D(Variable data, PoolingMode mode, int kernelH, int kernelW, int strideH, int strideW) 9 | { 10 | Descriptor = new PoolingDescriptor(); 11 | Descriptor.Set2D(mode, NanPropagation.NOT_PROPAGATE_NAN, kernelH, kernelW, 0, 0, strideH, strideW); 12 | 13 | var dataType = Dnn.DataTypeOf(); 14 | var dataDesc = new TensorDescriptor(); 15 | dataDesc.Set4D(dataType, TensorFormat.CUDNN_TENSOR_NCHW, 10, (int)data.Shape[1], (int)data.Shape[2], (int)data.Shape[3]); 16 | 17 | int n, c, h, w; 18 | Descriptor.Get2dForwardOutputDim(dataDesc, out n, out c, out h, out w); 19 | 20 | Data = data; 21 | Output = Variable(PartialShape.Create(-1, c, h, w)); 22 | 23 | AddInput(Data); 24 | AddOutput(Output); 25 | 26 | dataDesc.Dispose(); 27 | } 28 | 29 | public PoolingDescriptor Descriptor { get; } 30 | 31 | public Variable Data { get; } 32 | 33 | public Variable Output { get; } 34 | 35 | public override void Forward(Executor executor) 36 | { 37 | var data = executor.GetTensor(Data); 38 | var output = executor.GetTensor(Output, Shape.Create(data.Shape[0], Output.Shape[1], Output.Shape[2], Output.Shape[3])); 39 | 40 | if (executor.Context.Type == ContextType.Gpu) 41 | { 42 | var dnn = executor.Context.ToGpuContext().Dnn; 43 | 44 | using (var dataDescRcpt = executor.TensorDescRepo.Acquire()) 45 | using (var outputDescRcpt = executor.TensorDescRepo.Acquire()) 46 | { 47 | var dataDesc = dataDescRcpt.Value; 48 | var outputDesc = outputDescRcpt.Value; 49 | var dataType = Dnn.DataTypeOf(); 50 | 51 | dataDesc.Set4D(dataType, TensorFormat.CUDNN_TENSOR_NCHW, (int)data.Shape[0], (int)data.Shape[1], (int)data.Shape[2], (int)data.Shape[3]); 52 | outputDesc.Set4D(dataType, TensorFormat.CUDNN_TENSOR_NCHW, (int)data.Shape[0], (int)Output.Shape[1], (int)Output.Shape[2], (int)Output.Shape[3]); 53 | 54 | dnn.PoolingForward(Descriptor, ScalarOps.Conv(1.0), dataDesc, data.Buffer.Ptr, ScalarOps.Conv(0.0), outputDesc, output.Buffer.Ptr); 55 | 56 | return; 57 | } 58 | } 59 | 60 | throw new NotImplementedException(); 61 | } 62 | 63 | public override void Backward(Executor executor) 64 | { 65 | var data = executor.GetTensor(Data); 66 | var output = executor.GetTensor(Output); 67 | var dOutput = executor.GetGradient(Output); 68 | var dData = executor.GetGradient(Data, Shape.Create(data.Shape.AsArray)); 69 | 70 | if (executor.Context.Type == ContextType.Gpu) 71 | { 72 | var dnn = executor.Context.ToGpuContext().Dnn; 73 | 74 | using (var dataDescRcpt = executor.TensorDescRepo.Acquire()) 75 | using (var outputDescRcpt = executor.TensorDescRepo.Acquire()) 76 | using (var dDataDescRcpt = executor.TensorDescRepo.Acquire()) 77 | using (var dOutputDescRcpt = executor.TensorDescRepo.Acquire()) 78 | { 79 | var dataDesc = dataDescRcpt.Value; 80 | var outputDesc = 
outputDescRcpt.Value; 81 | var dDataDesc = dDataDescRcpt.Value; 82 | var dOutputDesc = dOutputDescRcpt.Value; 83 | var dataType = Dnn.DataTypeOf(); 84 | 85 | dataDesc.Set4D(dataType, TensorFormat.CUDNN_TENSOR_NCHW, (int)data.Shape[0], (int)data.Shape[1], (int)data.Shape[2], (int)data.Shape[3]); 86 | outputDesc.Set4D(dataType, TensorFormat.CUDNN_TENSOR_NCHW, (int)data.Shape[0], (int)Output.Shape[1], (int)Output.Shape[2], (int)Output.Shape[3]); 87 | dDataDesc.Set4D(dataType, TensorFormat.CUDNN_TENSOR_NCHW, (int)data.Shape[0], (int)data.Shape[1], (int)data.Shape[2], (int)data.Shape[3]); 88 | dOutputDesc.Set4D(dataType, TensorFormat.CUDNN_TENSOR_NCHW, (int)data.Shape[0], (int)Output.Shape[1], (int)Output.Shape[2], (int)Output.Shape[3]); 89 | 90 | dnn.PoolingBackward(Descriptor, ScalarOps.Conv(1.0), outputDesc, output.Buffer.Ptr, dOutputDesc, 91 | dOutput.Buffer.Ptr, dataDesc, data.Buffer.Ptr, ScalarOps.Conv(0.0), dDataDesc, dData.Buffer.Ptr); 92 | 93 | return; 94 | } 95 | } 96 | 97 | throw new NotImplementedException(); 98 | } 99 | } 100 | } -------------------------------------------------------------------------------- /src/AleaTK/ML/Operator/Reshape.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using System.Text; 5 | using System.Threading.Tasks; 6 | using static AleaTK.Library; 7 | using static AleaTK.ML.Library; 8 | 9 | namespace AleaTK.ML.Operator 10 | { 11 | public class Reshape : Differentiable, ILayer { 12 | public Reshape(Variable input, PartialShape shape) 13 | { 14 | Util.EnsureTrue(input.Type != VariableType.Parameter); 15 | Shape = input.HasShape ? PartialShape.Reshape(input.Shape, shape) : shape; 16 | Input = input; 17 | Output = Variable(Shape); 18 | AddInput(Input); 19 | AddOutput(Output); 20 | } 21 | 22 | public Variable Input { get; } 23 | 24 | public Variable Output { get; } 25 | 26 | public PartialShape Shape { get; } 27 | 28 | public override void Forward(Executor executor) 29 | { 30 | if (executor.GetData(Input).Tensor.Layout.IsInnerChangeMostFullyPacked) 31 | { 32 | var tensor = executor.GetData(Input).Tensor; 33 | var shape = tensor.Layout.Shape.Reshape(Shape.AsArray); 34 | var layout = new Layout(shape); 35 | var newTensor = new Tensor(tensor.Device, tensor.Memory, layout, tensor.Ptr); 36 | executor.GetData(Output).SetTensor(newTensor); 37 | } 38 | else 39 | { 40 | throw new NotImplementedException("TODO: Assign a new tensor for different layout."); 41 | } 42 | } 43 | 44 | public override void Backward(Executor executor) 45 | { 46 | if (executor.GetData(Output).Gradient == null) return; 47 | 48 | if (executor.GetData(Output).Gradient.Layout.IsInnerChangeMostFullyPacked) 49 | { 50 | var inputData = executor.GetData(Input); 51 | var outputData = executor.GetData(Output); 52 | var inputCounter = inputData.GradientAggregationCounter; 53 | var outputCounter = outputData.GradientAggregationCounter; 54 | var outputGradient = outputData.Gradient; 55 | var inputShape = inputData.Tensor.Layout.Shape; 56 | 57 | if (inputCounter == 0) 58 | { 59 | var inputLayout = new Layout(inputShape); 60 | var inputGradient = new Tensor(outputGradient.Device, outputGradient.Memory, inputLayout, outputGradient.Ptr); 61 | inputData.SetGradient(inputGradient); 62 | inputData.GradientAggregationCounter = outputCounter; 63 | } 64 | else 65 | { 66 | var inputGradient = inputData.Gradient.Cast(); 67 | var layout = new Layout(inputShape); 68 | var gradient = (new 
Tensor(outputGradient.Device, outputGradient.Memory, layout, outputGradient.Ptr)).Cast(); 69 | var ctx = executor.Context; 70 | ctx.Assign(inputGradient, inputGradient + gradient); 71 | inputData.GradientAggregationCounter = inputCounter + outputCounter; 72 | } 73 | } 74 | else 75 | { 76 | throw new NotImplementedException("TODO: Assign a new tensor for different layout."); 77 | } 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/AleaTK/ML/Symbol.cs: -------------------------------------------------------------------------------- 1 | using AleaTK.ML.Operator; 2 | 3 | namespace AleaTK.ML 4 | { 5 | public class Symbol 6 | { 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /src/AleaTK/ML/Variable.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Threading.Tasks; 3 | using AleaTK.ML.Operator; 4 | 5 | namespace AleaTK.ML 6 | { 7 | public enum VariableType 8 | { 9 | Common = 0, 10 | Parameter, 11 | Auxilliary 12 | } 13 | 14 | public abstract class Variable : Symbol 15 | { 16 | private readonly PartialShape _shape; 17 | private readonly Expr _initializer; 18 | private Differentiable _owner; 19 | 20 | protected Variable(Type dataType, VariableType type) 21 | { 22 | DataType = dataType; 23 | Type = type; 24 | _shape = null; 25 | _initializer = null; 26 | _owner = null; 27 | } 28 | 29 | protected Variable(Type dataType, VariableType type, PartialShape shape) 30 | { 31 | DataType = dataType; 32 | Type = type; 33 | _shape = shape; 34 | _initializer = null; 35 | _owner = null; 36 | } 37 | 38 | protected Variable(Type dataType, VariableType type, Expr initializer) 39 | { 40 | DataType = dataType; 41 | Type = type; 42 | _shape = initializer != null ? 
new PartialShape(initializer.Shape.AsArray) : null; 43 | _initializer = initializer; 44 | _owner = null; 45 | } 46 | 47 | public Type DataType { get; } 48 | 49 | public VariableType Type { get; } 50 | 51 | public PartialShape Shape 52 | { 53 | get 54 | { 55 | Util.EnsureTrue(_shape != null, "This variable doesn't have shape."); 56 | return _shape; 57 | } 58 | } 59 | 60 | public Expr UntypedInitializer 61 | { 62 | get 63 | { 64 | Util.EnsureTrue(_initializer != null, "This variable doesn't have initializer."); 65 | return _initializer; 66 | } 67 | } 68 | 69 | public Differentiable Owner 70 | { 71 | get { return _owner; } 72 | set 73 | { 74 | Util.EnsureTrue(_owner == null, "Owner is already set."); 75 | _owner = value; 76 | } 77 | } 78 | 79 | public bool HasShape => _shape != null; 80 | 81 | public bool HasInitializer => _initializer != null; 82 | 83 | public bool HasOwner => _owner != null; 84 | 85 | public abstract IValue TensorToValue(Tensor blob); 86 | 87 | public abstract Expr TensorToExpr(Tensor blob); 88 | 89 | public abstract void GetOrAllocate(Device device, Layout layout, long length, ref Tensor blob); 90 | 91 | public abstract Task Initialize(Context ctx, ref Tensor blob); 92 | } 93 | 94 | public class Variable : Variable 95 | { 96 | public Variable(VariableType type) : base(typeof (T), type) 97 | { 98 | } 99 | 100 | public Variable(VariableType type, PartialShape shape) : base(typeof (T), type, shape) 101 | { 102 | } 103 | 104 | public Variable(VariableType type, Expr initalizer) : base(typeof (T), type, initalizer) 105 | { 106 | } 107 | 108 | public Expr Initalizer => UntypedInitializer.CastExpr(); 109 | 110 | public override IValue TensorToValue(Tensor blob) 111 | { 112 | return blob.Cast(); 113 | } 114 | 115 | public override Expr TensorToExpr(Tensor blob) 116 | { 117 | return blob.Cast(); 118 | } 119 | 120 | private void VerifyShape(Shape targetShape) 121 | { 122 | if (!HasShape) return; 123 | 124 | var myShape = Shape; 125 | Util.EnsureEqual(myShape.Rank, targetShape.Rank, "Tensor shape doesn't match variable shape."); 126 | for (var i = 0; i < myShape.Rank; ++i) 127 | { 128 | if (myShape[i] >= 0) 129 | { 130 | Util.EnsureEqual(myShape[i], targetShape[i], "Shape must match."); 131 | } 132 | } 133 | } 134 | 135 | public override void GetOrAllocate(Device device, Layout layout, long length, ref Tensor blob) 136 | { 137 | VerifyShape(layout.Shape); 138 | 139 | Tensor tensor; 140 | 141 | if (blob == null) 142 | { 143 | tensor = device.Allocate(layout, length); 144 | blob = tensor.ToTensor(); 145 | return; 146 | } 147 | 148 | if (Layout.Match(blob.Layout, layout)) 149 | { 150 | return; 151 | } 152 | 153 | if (blob.Memory.Memory.Length >= length) 154 | { 155 | var memory = new BufferMemory(blob.Memory.Memory, 0L, length); 156 | blob = new Tensor(device, memory, layout, memory.Memory.Handle); 157 | return; 158 | } 159 | 160 | tensor = device.Allocate(layout, length); 161 | blob = tensor.ToTensor(); 162 | } 163 | 164 | public override Task Initialize(Context ctx, ref Tensor blob) 165 | { 166 | if (!HasInitializer) return Task.Run(() => { }); 167 | var shape = Initalizer.Shape; 168 | var layout = new Layout(shape); 169 | var length = layout.Shape.Length; 170 | GetOrAllocate(ctx.Device, layout, length, ref blob); 171 | var tensor = blob.Cast(); 172 | return ctx.Assign(tensor, Initalizer); 173 | } 174 | 175 | public static Variable operator +(Variable lhs, Variable rhs) 176 | { 177 | var op = new Add(lhs, rhs); 178 | return op.C; 179 | } 180 | } 181 | } 
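// Note on Variable<T> above: GetOrAllocate reuses the existing tensor blob when its layout already
// matches the requested one, re-wraps the underlying buffer when it is still large enough for the
// requested length, and only allocates fresh device memory as a last resort. The overloaded + operator
// creates an Add operator node and returns its output variable, so arithmetic on variables extends the
// symbolic computation graph rather than computing values immediately.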
-------------------------------------------------------------------------------- /src/AleaTK/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | [assembly: AssemblyTitle("AleaTK")] 6 | [assembly: AssemblyDescription("")] 7 | [assembly: ComVisible(false)] 8 | -------------------------------------------------------------------------------- /src/AleaTK/paket.references: -------------------------------------------------------------------------------- 1 | Alea -------------------------------------------------------------------------------- /src/AleaTKUtil/AleaTKUtil.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | Debug 6 | AnyCPU 7 | {19810D0A-BD63-4360-ACA3-1DB47C91E0A3} 8 | Library 9 | Properties 10 | AleaTKUtil 11 | AleaTKUtil 12 | v4.5.2 13 | 512 14 | 15 | 16 | true 17 | full 18 | false 19 | ..\..\debug\ 20 | DEBUG;TRACE 21 | prompt 22 | 4 23 | 24 | 25 | pdbonly 26 | true 27 | ..\..\release\ 28 | TRACE 29 | prompt 30 | 4 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 56 | 57 | 58 | 59 | 60 | 61 | ..\..\packages\CSMatIO\lib\net40\CSMatIO.dll 62 | True 63 | True 64 | 65 | 66 | 67 | 68 | 69 | 70 | ..\..\packages\CSMatIO\lib\net45\CSMatIO.dll 71 | True 72 | True 73 | 74 | 75 | 76 | 77 | 78 | 79 | ..\..\packages\NUnit\lib\nunit.framework.dll 80 | True 81 | True 82 | 83 | 84 | 85 | 86 | ..\..\packages\zlib.net\lib\zlib.net.dll 87 | True 88 | True 89 | 90 | 91 | -------------------------------------------------------------------------------- /src/AleaTKUtil/GradientChecker.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Runtime.InteropServices; 3 | 4 | namespace AleaTKUtil 5 | { 6 | public static class GradientChecker 7 | { 8 | /// 9 | /// Jacobian of a vector valued function around value defined by variable calculated with finite differences. 
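/// EvaluateJacobi below uses a forward-difference approximation: each input component is bumped by a small step and the induced change of the output is divided by the step, i.e. jacobi[j, i] ≈ (f_j(x + bump·e_i) − f_j(x)) / bump, with bump defaulting to 1e-5.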
10 | /// 11 | /// 12 | /// 13 | /// 14 | /// 15 | public static void EvaluateJacobi(double[] variable, Func function, out double[,] jacobi, double bump = 1e-5) 16 | { 17 | var f0 = function(variable); 18 | 19 | var inputDim = variable.Length; 20 | var outputDim = f0.Length; 21 | jacobi = new double[outputDim, inputDim]; 22 | 23 | for (var i = 0; i < inputDim; ++i) 24 | { 25 | var temp = variable[i]; 26 | variable[i] += bump; 27 | 28 | var f = function(variable); 29 | 30 | for (var j = 0; j < outputDim; ++j) 31 | { 32 | jacobi[j, i] = (f[j] - f0[j])/bump; 33 | } 34 | 35 | variable[i] = temp; 36 | } 37 | } 38 | 39 | public static double[] FiniteDifferenceGradient(double[] variable, double[] outputGrad, Func function, double bump = 1e-5) 40 | { 41 | double[,] jacobi = null; 42 | EvaluateJacobi(variable, function, out jacobi, bump); 43 | 44 | var inputDim = variable.Length; 45 | var outputDim = jacobi.GetLength(0); 46 | 47 | if (outputDim != outputGrad.GetLength(0)) throw new Exception("Wrong size"); 48 | 49 | var grad = Common.Dot(outputGrad, jacobi); 50 | return grad; 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/AleaTKUtil/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 8 | [assembly: AssemblyTitle("AleaTKUtil")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("")] 12 | [assembly: AssemblyProduct("AleaTKUtil")] 13 | [assembly: AssemblyCopyright("Copyright © 2016")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 
20 | [assembly: ComVisible(false)] 21 | 22 | // The following GUID is for the ID of the typelib if this project is exposed to COM 23 | [assembly: Guid("19810d0a-bd63-4360-aca3-1db47c91e0a3")] 24 | 25 | // Version information for an assembly consists of the following four values: 26 | // 27 | // Major Version 28 | // Minor Version 29 | // Build Number 30 | // Revision 31 | // 32 | // You can specify all the values or you can default the Build and Revision Numbers 33 | // by using the '*' as shown below: 34 | // [assembly: AssemblyVersion("1.0.*")] 35 | [assembly: AssemblyVersion("1.0.0.0")] 36 | [assembly: AssemblyFileVersion("1.0.0.0")] 37 | -------------------------------------------------------------------------------- /src/AleaTKUtil/paket.references: -------------------------------------------------------------------------------- 1 | NUnit 2 | CSMatIO -------------------------------------------------------------------------------- /tests/AleaTKTest/App.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /tests/AleaTKTest/Common.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using System.Runtime.InteropServices; 5 | using AleaTK; 6 | using NUnit.Framework; 7 | 8 | namespace AleaTKTest 9 | { 10 | public static class Common 11 | { 12 | // Specify the gpu id which will be used in tests, 13 | // so that we can change this value to test on different gpu. 14 | public const int GpuId = 0; 15 | 16 | // Specify the stream id which will be used in tests, 17 | // so that we can change this value to test on different gpu. 
18 | public const int StreamId = 1; 19 | 20 | public static void AreClose(Tensor expected, Tensor actual, double error) 21 | { 22 | var equalShape = expected.Shape.SequenceEqual(actual.Shape); 23 | if (!equalShape) Assert.Fail($"Shapes don't match"); 24 | var expectedArray = expected.Reshape(-1).ToArray(); 25 | var actualArray = actual.Reshape(-1).ToArray(); 26 | AleaTKUtil.Common.AreClose(expectedArray, actualArray, error); 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /tests/AleaTKTest/GradientChecker.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using System.Text; 5 | using System.Threading.Tasks; 6 | using AleaTK; 7 | using AleaTK.ML; 8 | 9 | namespace AleaTKTest 10 | { 11 | public static class GradientChecker 12 | { 13 | //public static Func Evaluator(Executor executor, Variable input, Variable output) 14 | //{ 15 | // return w => 16 | // { 17 | // var inputTensor = executor.GetTensor(input); 18 | // executor.AssignTensor(input, w.AsTensor(inputTensor.Shape)); 19 | // executor.Forward(); 20 | // var o = executor.GetTensor(output).Reshape(-1); 21 | // return o.ToArray(); 22 | // }; 23 | //} 24 | 25 | //public static Tensor FiniteDifferenceGradient(Executor executor, Variable input, Variable output, float bump = 1e-5f) 26 | //{ 27 | // var evaluator = Evaluator(executor, input, output); 28 | // var inputTensor = executor.GetTensor(input); 29 | // var outputGradientTensor = executor.GetGradient(output); 30 | // var inputArray = inputTensor.Reshape(-1).ToArray(); 31 | // var outputGradientArray = outputGradientTensor.Reshape(-1).ToArray(); 32 | // var grad = AleaTKUtil.GradientChecker.FiniteDifferenceGradient(inputArray, outputGradientArray, evaluator, bump); 33 | // var shape = inputTensor.Shape.AsArray; 34 | // return grad.AsTensor().Reshape(shape); 35 | //} 36 | 37 | public static Tensor FiniteDifferenceGradient(Executor executor, Variable input, double bump = 1e-5f, Variable output = null) 38 | { 39 | if (output == null) 40 | { 41 | output = (Variable) executor.Output; 42 | } 43 | 44 | // first, backup the x 45 | var ctx = executor.Context; 46 | var inputTensor = executor.GetTensor(input); 47 | var inputShape = inputTensor.Shape; 48 | var inputTensorBackup = ctx.Device.Allocate(inputShape); 49 | ctx.Assign(inputTensorBackup, inputTensor); 50 | 51 | // evaluator 52 | Func evaluator = inputBlob => 53 | { 54 | var inputBlobSingle = inputBlob.Select(x => (float) x).ToArray(); 55 | executor.AssignTensor(input, inputBlobSingle.AsTensor(inputShape)); 56 | executor.Forward(); 57 | var outputTensor = executor.GetTensor(output); 58 | return outputTensor.ToArray().Select(x => (double) x).ToArray(); 59 | }; 60 | 61 | var inputArray = inputTensor.ToArray().Select(x => (double)x).ToArray(); 62 | var outputGradientArray = executor.GetGradient(output).ToArray().Select(x => (double)x).ToArray(); 63 | var inputGradientArray = AleaTKUtil.GradientChecker.FiniteDifferenceGradient(inputArray, outputGradientArray, evaluator, bump).Select(x => (float)x).ToArray(); 64 | var inputGradientTensor = inputGradientArray.AsTensor(inputShape); 65 | 66 | // now we need recover the data 67 | executor.AssignTensor(input, inputTensorBackup); 68 | executor.Forward(); 69 | 70 | return inputGradientTensor; 71 | } 72 | 73 | public static Tensor FiniteDifferenceGradient(Executor executor, Variable input, double bump = 1e-5f, Variable output = 
null) 74 | { 75 | if (output == null) 76 | { 77 | output = (Variable)executor.Output; 78 | } 79 | 80 | // first, backup the x 81 | var ctx = executor.Context; 82 | var inputTensor = executor.GetTensor(input); 83 | var inputShape = inputTensor.Shape; 84 | var inputTensorBackup = ctx.Device.Allocate(inputShape); 85 | ctx.Assign(inputTensorBackup, inputTensor); 86 | 87 | // evaluator 88 | Func evaluator = inputBlob => 89 | { 90 | executor.AssignTensor(input, inputBlob.AsTensor(inputShape)); 91 | executor.Forward(); 92 | var outputTensor = executor.GetTensor(output); 93 | return outputTensor.ToArray(); 94 | }; 95 | 96 | var inputArray = inputTensor.ToArray(); 97 | var outputGradientArray = executor.GetGradient(output).ToArray(); 98 | var inputGradientArray = AleaTKUtil.GradientChecker.FiniteDifferenceGradient(inputArray, outputGradientArray, evaluator, bump); 99 | var inputGradientTensor = inputGradientArray.AsTensor(inputShape); 100 | 101 | // now we need recover the data 102 | executor.AssignTensor(input, inputTensorBackup); 103 | executor.Forward(); 104 | 105 | return inputGradientTensor; 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /tests/AleaTKTest/MachineLearning.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Linq; 3 | using AleaTK; 4 | using AleaTK.ML; 5 | using AleaTK.ML.Operator; 6 | using csmatio.io; 7 | using csmatio.types; 8 | using NUnit.Framework; 9 | using static AleaTK.Library; 10 | using static AleaTK.ML.Library; 11 | using static AleaTKUtil.Common; 12 | 13 | namespace AleaTKTest 14 | { 15 | public static class MachineLearning 16 | { 17 | [Test] 18 | public static void SimpleLogisticRegression() 19 | { 20 | //const int N = 8; 21 | //const int D = 5; 22 | //const int P = 3; 23 | //const double learn = 0.001; 24 | 25 | const int N = 100; 26 | const int D = 784; 27 | const int P = 10; 28 | const double learn = 0.00005; 29 | 30 | var input = Variable(); 31 | var label = Variable(); 32 | var weights = Parameter(0.01 * RandomUniform(Shape.Create(D, P))); 33 | var pred = Dot(input, weights); 34 | var loss = L2Loss(pred, label); 35 | 36 | var ctx = Context.GpuContext(0); 37 | var opt = new GradientDescentOptimizer(ctx, loss, learn); 38 | 39 | // set some data 40 | var inputData = new double[N, D]; 41 | var matA = new double[D, P]; 42 | var matB = new double[N, P]; 43 | NormalRandomArray(inputData); 44 | NormalRandomArray(matA); 45 | NormalRandomArray(matB); 46 | var labelData = Dot(inputData, matA).Add(matB.Mul(0.1)); 47 | opt.AssignTensor(input, inputData.AsTensor()); 48 | opt.AssignTensor(label, labelData.AsTensor()); 49 | 50 | opt.Initalize(); 51 | for (var i = 0; i < 800; ++i) 52 | { 53 | opt.Forward(); 54 | opt.Backward(); 55 | opt.Optimize(); 56 | if (i % 20 == 0) 57 | { 58 | Console.WriteLine($"loss = {opt.GetTensor(loss).ToScalar()}"); 59 | } 60 | } 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /tests/AleaTKTest/Program.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using System.Text; 5 | using System.Threading.Tasks; 6 | 7 | namespace AleaTKTest 8 | { 9 | class Program 10 | { 11 | static void Main(string[] args) 12 | { 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- 
/tests/AleaTKTest/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 8 | [assembly: AssemblyTitle("AleaTKTest")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("")] 12 | [assembly: AssemblyProduct("AleaTKTest")] 13 | [assembly: AssemblyCopyright("Copyright © 2016")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 20 | [assembly: ComVisible(false)] 21 | 22 | // The following GUID is for the ID of the typelib if this project is exposed to COM 23 | [assembly: Guid("c45c2dcd-0ce2-495b-b805-a53a2d7ff0ce")] 24 | 25 | // Version information for an assembly consists of the following four values: 26 | // 27 | // Major Version 28 | // Minor Version 29 | // Build Number 30 | // Revision 31 | // 32 | // You can specify all the values or you can default the Build and Revision Numbers 33 | // by using the '*' as shown below: 34 | // [assembly: AssemblyVersion("1.0.*")] 35 | [assembly: AssemblyVersion("1.0.0.0")] 36 | [assembly: AssemblyFileVersion("1.0.0.0")] 37 | -------------------------------------------------------------------------------- /tests/AleaTKTest/paket.references: -------------------------------------------------------------------------------- 1 | NUnit 2 | Alea 3 | NUnit.Runners.Net4 4 | CSMatIO 5 | SharpZipLib -------------------------------------------------------------------------------- /tutorial/design/content/src_highlight_tango.css: -------------------------------------------------------------------------------- 1 | div.sourceCode { overflow-x: auto; } 2 | table.sourceCode, tr.sourceCode, td.lineNumbers, td.sourceCode { 3 | margin: 0; padding: 0; vertical-align: baseline; border: none; } 4 | table.sourceCode { width: 100%; line-height: 100%; background-color: #f8f8f8; } 5 | td.lineNumbers { text-align: right; padding-right: 4px; padding-left: 4px; color: #aaaaaa; border-right: 1px solid #aaaaaa; } 6 | td.sourceCode { padding-left: 5px; } 7 | pre, code { background-color: #f8f8f8; color: #006666;} 8 | code > span.kw { color: #204a87; font-weight: bold; } /* Keyword */ 9 | code > span.dt { color: #204a87; } /* DataType */ 10 | code > span.dv { color: #0000cf; } /* DecVal */ 11 | code > span.bn { color: #0000cf; } /* BaseN */ 12 | code > span.fl { color: #0000cf; } /* Float */ 13 | code > span.ch { color: #4e9a06; } /* Char */ 14 | code > span.st { color: #4e9a06; } /* String */ 15 | code > span.co { color: #8f5902; font-style: italic; } /* Comment */ 16 | code > span.ot { color: #8f5902; } /* Other */ 17 | code > span.al { color: #ef2929; } /* Alert */ 18 | code > span.fu { color: #000000; } /* Function */ 19 | code > span.er { color: #a40000; font-weight: bold; } /* Error */ 20 | code > span.wa { color: #8f5902; font-weight: bold; font-style: italic; } /* Warning */ 21 | code > span.cn { color: #000000; } /* Constant */ 22 | code > span.sc { color: #000000; } /* SpecialChar */ 23 | code > span.vs 
{ color: #4e9a06; } /* VerbatimString */ 24 | code > span.ss { color: #4e9a06; } /* SpecialString */ 25 | code > span.im { } /* Import */ 26 | code > span.va { color: #000000; } /* Variable */ 27 | code > span.cf { color: #204a87; font-weight: bold; } /* ControlFlow */ 28 | code > span.op { color: #ce5c00; font-weight: bold; } /* Operator */ 29 | code > span.pp { color: #8f5902; font-style: italic; } /* Preprocessor */ 30 | code > span.ex { } /* Extension */ 31 | code > span.at { color: #c4a000; } /* Attribute */ 32 | code > span.do { color: #8f5902; font-weight: bold; font-style: italic; } /* Documentation */ 33 | code > span.an { color: #8f5902; font-weight: bold; font-style: italic; } /* Annotation */ 34 | code > span.cv { color: #8f5902; font-weight: bold; font-style: italic; } /* CommentVar */ 35 | code > span.in { color: #8f5902; font-weight: bold; font-style: italic; } /* Information */ 36 | -------------------------------------------------------------------------------- /tutorial/design/fonts/icomoon.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/fonts/icomoon.eot -------------------------------------------------------------------------------- /tutorial/design/fonts/icomoon.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/fonts/icomoon.ttf -------------------------------------------------------------------------------- /tutorial/design/fonts/icomoon.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/fonts/icomoon.woff -------------------------------------------------------------------------------- /tutorial/design/images/Alea-TK-images.eps: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/Alea-TK-images.eps -------------------------------------------------------------------------------- /tutorial/design/images/Alea-TK-images.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/Alea-TK-images.png -------------------------------------------------------------------------------- /tutorial/design/images/Alea-TK.eps: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/Alea-TK.eps -------------------------------------------------------------------------------- /tutorial/design/images/Alea-TK.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/Alea-TK.png -------------------------------------------------------------------------------- /tutorial/design/images/Alea-TK.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 7 | 9 | 10 | 14 | 17 | 18 | 19 | 20 | 21 | 
-------------------------------------------------------------------------------- /tutorial/design/images/QuantAlea_cube_blau-grau_square.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 7 | 8 | 9 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 22 | 23 | 24 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 37 | 38 | 39 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /tutorial/design/images/QuantAlea_cube_blau-grau_square_100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/QuantAlea_cube_blau-grau_square_100.png -------------------------------------------------------------------------------- /tutorial/design/images/QuantAlea_cube_blau-grau_square_200.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/QuantAlea_cube_blau-grau_square_200.png -------------------------------------------------------------------------------- /tutorial/design/images/aleagpu_logo_v3.eps: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/aleagpu_logo_v3.eps -------------------------------------------------------------------------------- /tutorial/design/images/aleagpu_logo_v3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/aleagpu_logo_v3.png -------------------------------------------------------------------------------- /tutorial/design/images/class_hierarchy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/class_hierarchy.png -------------------------------------------------------------------------------- /tutorial/design/images/cuBLAS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/cuBLAS.png -------------------------------------------------------------------------------- /tutorial/design/images/cuBLAS.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 16 | 17 | 22 | 25 | 30 | 31 | 33 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /tutorial/design/images/cuDNN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/cuDNN.png -------------------------------------------------------------------------------- /tutorial/design/images/cuDNN.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 16 | 17 | 22 | 25 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 
-------------------------------------------------------------------------------- /tutorial/design/images/cuFFT.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/cuFFT.png -------------------------------------------------------------------------------- /tutorial/design/images/cuFFT.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 16 | 17 | 22 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /tutorial/design/images/cuRAND.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/cuRAND.png -------------------------------------------------------------------------------- /tutorial/design/images/cuRAND.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 16 | 17 | 22 | 25 | 28 | 30 | 31 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /tutorial/design/images/expr_interface.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/expr_interface.png -------------------------------------------------------------------------------- /tutorial/design/images/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/favicon.ico -------------------------------------------------------------------------------- /tutorial/design/images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/logo.png -------------------------------------------------------------------------------- /tutorial/design/images/logo2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/logo2x.png -------------------------------------------------------------------------------- /tutorial/design/images/mnist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/mnist.png -------------------------------------------------------------------------------- /tutorial/design/images/mnist_large.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/mnist_large.jpg -------------------------------------------------------------------------------- /tutorial/design/images/mnist_rect.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/mnist_rect.png -------------------------------------------------------------------------------- /tutorial/design/images/montecarlo_pi.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/montecarlo_pi.gif -------------------------------------------------------------------------------- /tutorial/design/images/mxnet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/mxnet.png -------------------------------------------------------------------------------- /tutorial/design/images/resources/colors.xml: -------------------------------------------------------------------------------- 1 | 3Zpfc+I2EMA/DY/tYMt/8CM2JH1oZzrNTK991IECboTFKOII/fS3tlf4XyBJgxauPDBoLa+l365WuzIjlm1e7jXfrn9TSyFH/nj5MmKzke8n/gS+S8GhFkRJUgtWOl/WIq8RPOT/ChSOUbrLl+K509EoJU2+7QoXqijEwnRkXGu173Z7VLL71C1f2Sc2gocFl0Ppl3xp1rV04keN/BeRr9b2yV6E8/vKF08rrXYFPm/ks8fqU1/ecKurmiibA0OtFKgpf21eMiFLjpZRTePuxNXjILUocCBv3IDD/8blDieaSr6rhc/mYGe/X+dGPGz5omzvwcAjlq7NRkLLg59byfPipwPoVntorjRf5jCCTEmloUuhCrgvfcyltCKgwFg8TWOQPxutnkSvM45LaCPQhV6ZXCXCmd0LtRFGH6AL3uDH6Dvoct4E2/vGgB4CGq9bxmMo4+gzq6PqBiX8QJonyMYDsiRUZ0EawOjcUWVJl2pACRWDSAvqH7BUKLguYj8MSrkrrl54RW+1elsQxRLiHzaVNmu1UgWX80aaVkFNlBrGXcDiJTd/leKfQ2z9jVf+EcYcMLjznVEganT/qhRYqupXj6YcwnmaMGK105WdW2vOcL0StlslG1LXQnKTf+vq/wxCa4aWb/6ZKwkTJvFP/hiHi/Juqmh63JlJ/BP3qR/NP0/SDC0pS/OcC6KW31UOyt+vol4aeFc7Cfioono9DRRVRjtO7X12xAB+aTv6fUOCGfShfa1sf9TK3aUEi+zuLmVR9Cn7vzvuvG6roLf7/mejv6nogkbHTPhWFu8NmvUYONEarB8032vWNxVdzqyMXSfDPV83KHhYbsrZR6XjXGDji3pISRMzFlwH8vl09/KQw3EPcoxB403Il0jdhlUwCeQkymJvdj3IpCkcG9ZuN5AYuw8XpJADWxXd/Fb7KvjBztuu7uwqbVd3VvaBMjvBItGax5Y1n8KOW6HTtLbsdTKtLS/S5T89w+DsO4apZARld3DqBHP8RXytVpiUQtMkJtPJNINhuyvBB8ldSBhahgeabdD/hwNjz3KyscHWSCR8h/tjx5GXu+KJypP9DF7PwCidke6dddI6MmaWLdD3epSxUZoVN5D2OThKJk1B7LP7Z/RkYcL9SX0vTASQwdDhxeG+jpdyu5unfhq6fNPk98qVkNSNsTo5wZkyGidzb1YO0hlo679X4ewodY7PnAjX165zItxNne1qbqfOtYwgdQ6HZ0utjZAylEzj2RwczKWHnzgWJXFxTG5OcKYMJdE88bzS+12BDgaZHSXoYS3YBr0nSkCc53fhNeuUcFgHHt9zU8aMGZvM5hOXMaOf5pFSHlaDDWXKiDGJwmkSuIwY9nXKVTAPa8EGM1W8cP7XjdD+R8siDtyV29Bs/r5Yv1Vs/g/K5t8B -------------------------------------------------------------------------------- /tutorial/design/images/resources/cuBLAS.ai: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/resources/cuBLAS.ai -------------------------------------------------------------------------------- /tutorial/design/images/resources/cuDNN.ai: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/resources/cuDNN.ai -------------------------------------------------------------------------------- /tutorial/design/images/resources/cuFFT.ai: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/resources/cuFFT.ai -------------------------------------------------------------------------------- 
/tutorial/design/images/resources/cuRAND.ai: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/resources/cuRAND.ai -------------------------------------------------------------------------------- /tutorial/design/images/resources/gpu_device.xml: -------------------------------------------------------------------------------- 1 | 7Ztdb9owFIZ/DZdIsU0DuVy7rruZNKnadp0SE6KGmJlQ6H597eS4OCQsbC2HoR2Q+Hj9mfMcg98QBuJmsb3T8XL+RSUyH/Ag2Q7ExwHn4eTKPFrhuRZYyKJaSXWWgLYT7rNfsimus0SuGlKpVF5my6Y4VUUhp2VDi7VWm2a1mcqbgy7jtDmgFe6ncd5Wf2RJOa/VCQ93+meZpXM3Mgvh8B7i6WOq1bqA8QpVyLpkEbtuAiuIWxM+rZTpwb5abG9kbkPoolNV4p8OlL7OT8sC5vD7Bnwi6iZPcb6GgxzwMDeNr2fK9GFjmStdlYQ/13Ze1wMuEmbvvhSsymcIkxNtB8NVxfCDqcDFcuu3CFP7fPf1mxvQTLMesy6pJ7brllfxk3bmzBRv5lkp75fx1JZuTLoZbV4ucihelVo9yhuYfBVvJzp0ttosy3NXyRwXZzwRgdFTHSeZCeJeB3Z+kJVsBO+95kF1M3qsp1Atej2SJ6lLCSuhA1QlAaU7qRay1M+mCjTgsHJg4QwZG01qZbNLxHENO5h7ORgZoUp/SP30tetdWpgXkBmHsmTUkSV7dP5pHu9AwAXSIwCKR4B1EABybwMAnRAAH4CAtMQAAB/xBMAH4EKLAWBMANoAGEQFAwB83xAAH0AAGzwMADAUAWgAgGgjAIigXwLgAQicggEApksAfADOwmEAgMEJgA9gjLcLirrs+n8PIMTbBUXkhDmDneBZnHBETrgDAKITjsgJdwBAdMIROeEOAIhOOCIn3AEA0QlH5IS7AKA5YXMgBKAFANEJi4CccAcAPCcsAnLCHQDwnLAIyAl3AMBzwiK4dCc8y7PldxhCqzIuM1WYt0Nzmvdd6Jg57tG5QqRz6Tb55HRaaweTzqV76FPTGQGL89C5dIN9ajpX+xcjodK5dPd9ajohfDGfh86lW/NT0xm3fr1ApOP6JToH6EzOuWdjR5h6exGxJXL8wcO10PGD6wJm/wdfxq2LLtyVED1BcdF8W1SOcNouKn+XHsdECOC0Y4QSgkv3um9ftpAGB3N06NIEktRt7zGWLRnhXjrNLQkmHPLBfXCam3lMNuSC+9i4T/4zwCET3AfHReQMcMgD98HZO/uKCYcscC+c5snXk8Exb3f/pazKvP+litsX -------------------------------------------------------------------------------- /tutorial/design/images/rnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/design/images/rnn.png -------------------------------------------------------------------------------- /tutorial/design/images/rnn.xml: -------------------------------------------------------------------------------- 1 | 
7VxNk6M2EP01PiY1Bvx13PF4N4ekaqvmkOSoMRqbDEYuBq/H+fUroGVAMpYQEhM78sUgxJPU/dRqtdoe+cvdx7cU7bd/kBDHI+8h/Bj5TyPPG4/9Cf3KS05lyXT+UBZs0iiESlXBc/QvhkJW7RCF+L1RMSMkzqJ9s3BNkgSvs0YZSlNybFZ7JXGz1T3asBarguc1isXSP6Mw25alc29alf+Go82WtTyeLsonL2j9tknJIYH2Rp7/WnzKxzvEsGCg71sUkmOtyF9RuaaEUOT8avexxHEuWya28r2vLU/P/U5xAn2TvBCUb/xA8QHGDh3LTkwY+Xj2UA2nGWZabqgAvbDqMLJ6H4qq0IFvmOxwlp5oFQYUQCeALD7I8lhJfgKg25rQz2RBoOzNGbkaML2AMbeMH4h6bfzHbZTh5z1a5/dHSviR/7jNdhTyaUwv9zGKkl9OFDdX5OMmRWFER74kMUlplYQk9L3H1yiOWRElxXS1GNO2/cf3LCVvmKvcKuyGUAupiVIFKY6nIB6JGFlZHykyjJrQcEgnFNySNNuSDUlQvKpKH4tZgnOEh6ZA8UeU/ZUX/5oLKL/7GyrRsaen2qP8Nn9WvJWEX/KZT29fYrJ+K4u+UrFDhX9wlp3A1KBDRmhR1bHfCaFqLeo1NUJ1tZx5k4COG54wgxCcFZWPVaImKhtySAsGFboBm4bSDWb1ZpfVmeIYZdGPZgO9KA+ND035xXQ5Gz/ZpDxbZAZgfKmt4YX4Zfa0ohbWhhAHkJoHlt2snWDXhZWoDENhJ+pWonzWzQzUJ7vUwrRbDhU7IZoJsByfYiV8Kyb9jlTV7s4smu7MFG4vaQNAvpOIYisjlByBl+rOHYdD3YyrOCWxBJyCGudxqbGFsdexpSNbvDmsJNpskSGossVn1tk+W8S10rFFiS3+jNMRbF/U2SJDUGVLwDrfgmOQLb5ji+ZK9AATjW0J2Ua7w1Ikg1DlC7/JF4AMEobtuBxherouOoSRQeg6LzYJA7bLEaav96JBGCmErv9ikzBATUeYvg7MOOjtwQgQui6MAGSQMMBxR5i+PgwNRPX1YQQIXR9GADJImLkjjBkfRocwMghdH8YmYVxo1ZAPo0EYKYSuD2ORMIEL8BryYWjwo68PI0Do+jACkEHCuBiv7pLEhcq6b6qvA6iSZQKRNOsB3sAFeM1QRWN7JEPQJIvFzVHgArzamyPO+GvEd68jKLOF32LZsy1WYi+Xco3+o/lEF9KJiqnZIymghRssqfGcUQb35pU6seJb3LRSa+mgJpXqTZvrw9yaTgMrMa/bnqgg7HpGT289K6esQeO3mOp6fQPPnxQt1BKIjWS0AcYtJlN2EyqjzxBChd7eYnJlJ6EqZrqbkClz312KttrqKxrqYpcygJ2Gpu/OTHNbhCGtdGAlOH635If9eJ38LTnj5tOO73Q55ck/4Gqq8Auvm1xMOZEOuZZaOZy937X0gj0ZzKB8lkWxvpwK6U4Drqds6rkJoDYBLvyQpzzYHWICfJL9t76kChNgyB3qJ/1s2Pqiygt1wFWVHTj9r+OF7Uc9XDD+7ITox/MFiJbYrxxoxgG1nBrpHAxYyeW/F1LwwXwNUggQirqUAymyS4cULpalSZgZl6fWnS88gi5dZD0xyJbBDozvjS0+s776dBEgdPki7YtBwriDyw6OyLV/4VHMK+AQdN0QltFqwQsZLOBzD15Id0bwCIqalOIoMkuHEe5EwYwL0p0sHIAuVyT9MEcV9j9cjip9/Y/uXOERdMki64k2W+ht9Wd/ZfXqHxX91U8= -------------------------------------------------------------------------------- /tutorial/design/scripts/jquery.powertip.css: -------------------------------------------------------------------------------- 1 | /* PowerTip Plugin */ 2 | #powerTip 3 | { 4 | cursor: default; 5 | background-color: #333; 6 | background-color: rgba(0, 0, 0, 0.8); 7 | border-radius: 6px; 8 | color: #fff; 9 | display: none; 10 | padding: 10px; 11 | position: absolute; 12 | white-space: nowrap; 13 | z-index: 2147483647; 14 | text-align: center; 15 | } 16 | #powerTip:before 17 | { 18 | content: ""; 19 | position: absolute; 20 | } 21 | #powerTip.n:before, #powerTip.s:before 22 | { 23 | border-right: 5px solid transparent; 24 | border-left: 5px solid transparent; 25 | left: 50%; 26 | margin-left: -5px; 27 | } 28 | #powerTip.e:before, #powerTip.w:before 29 | { 30 | border-bottom: 5px solid transparent; 31 | border-top: 5px solid transparent; 32 | margin-top: -5px; 33 | top: 50%; 34 | } 35 | #powerTip.n:before 36 | { 37 | border-top: 10px solid #333; 38 | border-top: 10px solid rgba(0, 0, 0, 0.8); 39 | bottom: -10px; 40 | } 41 | #powerTip.e:before 42 | { 43 | border-right: 10px solid #333; 44 | border-right: 10px solid rgba(0, 0, 0, 0.8); 45 | left: -10px; 46 | } 47 | #powerTip.s:before 48 | { 49 | border-bottom: 10px solid #333; 50 | border-bottom: 10px solid rgba(0, 0, 0, 0.8); 51 | top: -10px; 52 | } 53 | #powerTip.w:before 54 | { 55 | border-left: 10px solid #333; 56 | border-left: 10px solid rgba(0, 0, 0, 0.8); 57 | right: -10px; 58 | } 59 | #powerTip.ne:before, #powerTip.se:before 60 | { 61 | border-right: 10px solid transparent; 62 | border-left: 0; 63 | left: 10px; 64 | } 65 | #powerTip.nw:before, #powerTip.sw:before 66 | { 67 | border-left: 10px 
solid transparent; 68 | border-right: 0; 69 | right: 10px; 70 | } 71 | #powerTip.ne:before, #powerTip.nw:before 72 | { 73 | border-top: 10px solid #333; 74 | border-top: 10px solid rgba(0, 0, 0, 0.8); 75 | bottom: -10px; 76 | } 77 | #powerTip.se:before, #powerTip.sw:before 78 | { 79 | border-bottom: 10px solid #333; 80 | border-bottom: 10px solid rgba(0, 0, 0, 0.8); 81 | top: -10px; 82 | } 83 | #powerTip.nw-alt:before, #powerTip.ne-alt:before, #powerTip.sw-alt:before, #powerTip.se-alt:before 84 | { 85 | border-top: 10px solid #333; 86 | border-top: 10px solid rgba(0, 0, 0, 0.8); 87 | bottom: -10px; 88 | border-left: 5px solid transparent; 89 | border-right: 5px solid transparent; 90 | left: 10px; 91 | } 92 | #powerTip.ne-alt:before 93 | { 94 | left: auto; 95 | right: 10px; 96 | } 97 | #powerTip.sw-alt:before, #powerTip.se-alt:before 98 | { 99 | border-top: none; 100 | border-bottom: 10px solid #333; 101 | border-bottom: 10px solid rgba(0, 0, 0, 0.8); 102 | bottom: auto; 103 | top: -10px; 104 | } 105 | #powerTip.se-alt:before 106 | { 107 | left: auto; 108 | right: 10px; 109 | } 110 | -------------------------------------------------------------------------------- /tutorial/design/scripts/jquery.vibrate.js: -------------------------------------------------------------------------------- 1 | /*** 2 | @title: 3 | Vibrate 4 | 5 | @version: 6 | 2.0 7 | 8 | @author: 9 | Andreas Lagerkvist 10 | 11 | @date: 12 | 2008-08-31 13 | 14 | @url: 15 | http://andreaslagerkvist.com/jquery/vibrate/ 16 | 17 | @license: 18 | http://creativecommons.org/licenses/by/3.0/ 19 | 20 | @copyright: 21 | 2008 Andreas Lagerkvist (andreaslagerkvist.com) 22 | 23 | @requires: 24 | jquery 25 | 26 | @does: 27 | This plug-in makes any element you want "vibrate" every now and then. Can be used in conjunction with blink for maximum annoyance! 28 | 29 | @howto: 30 | jQuery('#ad-area').vibrate(); would make #ad-area vibrate every now and then, options are available, please check the source. 31 | 32 | Vibrate currently only works with elements positioned 'static'. 
33 | 34 | @exampleHTML: 35 | I should vibrate every now and then 36 | 37 | @exampleJS: 38 | jQuery('#jquery-vibrate-example').vibrate(); 39 | ***/ 40 | jQuery.fn.vibrate = function (conf) { 41 | var config = jQuery.extend({ 42 | start: 500, 43 | speed: 30, 44 | duration: 2000, 45 | frequency: 5000, 46 | spread: 3, 47 | angle: 40, 48 | }, conf); 49 | 50 | return this.each(function () { 51 | var t = jQuery(this); 52 | 53 | t.data('stop', false); 54 | 55 | var vibrate = function () { 56 | var topPos = Math.floor(Math.random() * config.spread) - ((config.spread - 1) / 2); 57 | var leftPos = Math.floor(Math.random() * config.spread) - ((config.spread - 1) / 2); 58 | var rotate = Math.floor(Math.random() * config.angle) - ((config.angle - 1) / 2); 59 | 60 | t.css({ 61 | position: 'relative', 62 | left: leftPos + 'px', 63 | top: topPos + 'px', 64 | WebkitTransform: 'rotate(' + rotate + 'deg)' // cheers to erik@birdy.nu for the rotation-idea 65 | }); 66 | }; 67 | 68 | var doVibration = function () { 69 | var vibrationInterval = setInterval(vibrate, config.speed); 70 | 71 | var stopVibration = function () { 72 | clearInterval(vibrationInterval); 73 | t.css({ 74 | position: 'static', 75 | WebkitTransform: 'rotate(0deg)' 76 | }); 77 | }; 78 | 79 | clearInterval(frequencyInterval); 80 | 81 | if (t.data('stop') == true) { 82 | clearInterval(vibrationInterval); 83 | } 84 | else { 85 | frequencyInterval = setInterval(doVibration, config.frequency); 86 | setTimeout(stopVibration, config.duration); 87 | } 88 | }; 89 | 90 | frequencyInterval = setInterval(doVibration, config.start); 91 | }); 92 | }; 93 | 94 | jQuery.fn.stopVibrate = function() { 95 | return this.each(function () { 96 | var t = jQuery(this); 97 | 98 | t.data('stop', true); 99 | }); 100 | }; -------------------------------------------------------------------------------- /tutorial/design/scripts/jquery.waitforimages.min.js: -------------------------------------------------------------------------------- 1 | /*! waitForImages jQuery Plugin 2013-07-20 */ 2 | !function (a) { var b = "waitForImages"; a.waitForImages = { hasImageProperties: ["backgroundImage", "listStyleImage", "borderImage", "borderCornerImage", "cursor"] }, a.expr[":"].uncached = function (b) { if (!a(b).is('img[src!=""]')) return !1; var c = new Image; return c.src = b.src, !c.complete }, a.fn.waitForImages = function (c, d, e) { var f = 0, g = 0; if (a.isPlainObject(arguments[0]) && (e = arguments[0].waitForAll, d = arguments[0].each, c = arguments[0].finished), c = c || a.noop, d = d || a.noop, e = !!e, !a.isFunction(c) || !a.isFunction(d)) throw new TypeError("An invalid callback was supplied."); return this.each(function () { var h = a(this), i = [], j = a.waitForImages.hasImageProperties || [], k = /url\(\s*(['"]?)(.*?)\1\s*\)/g; e ? h.find("*").addBack().each(function () { var b = a(this); b.is("img:uncached") && i.push({ src: b.attr("src"), element: b[0] }), a.each(j, function (a, c) { var d, e = b.css(c); if (!e) return !0; for (; d = k.exec(e); ) i.push({ src: d[2], element: b[0] }) }) }) : h.find("img:uncached").each(function () { i.push({ src: this.src, element: this }) }), f = i.length, g = 0, 0 === f && c.call(h[0]), a.each(i, function (e, i) { var j = new Image; a(j).on("load." + b + " error." + b, function (a) { return g++, d.call(i.element, g, f, "load" == a.type), g == f ? 
(c.call(h[0]), !1) : void 0 }), j.src = i.src }) }) } } (jQuery); -------------------------------------------------------------------------------- /tutorial/design/scripts/scale.fix.js: -------------------------------------------------------------------------------- 1 | var metas = document.getElementsByTagName('meta'); 2 | var i; 3 | if (navigator.userAgent.match(/iPhone/i)) { 4 | for (i=0; i(Shape.Create((long)batchSize)); 40 | var pi = ctx.Device.Allocate(Shape.Scalar); 41 | 42 | var pis = Map(points, point => (point.x * point.x + point.y * point.y) < 1.0 ? 4.0 : 0.0); 43 | 44 | for (var i = 0; i < batchs; ++i) 45 | { 46 | Console.WriteLine($"Batch {i}"); 47 | var offset = batchSize * (ulong)i; 48 | ctx.Assign(points, RandomUniform(seed, offset)); 49 | ctx.Assign(pi, i == 0 ? ReduceMean(pis) : (pi + ReduceMean(pis)) / 2.0); 50 | } 51 | ``` 52 | 53 | The full code for [Monte Carlo Pi](/samples/montecarlopi.html) is in the sample gallery. 54 | 55 | 56 | #### Symbolic Computing for Machine Learning 57 | 58 | The primary objects of symbolic calculations are **variables** and **operators**. A variable assigns an identifier to a future calculation. Alea TK has three variable types: common, parameter, auxiliary (primarily used for temporary results). A variable usually holds two tensors two tensors, one to keep the actual values of the variable and a second tensor that holds the gradients in a backward propagation process. An **operator** defines a future computation, which, given input variables, generates output variables. A so called **executor** binds a variable and its computation graph to a computation context, which can be a CPU or GPU device. With an executor it is possible to run forward and backward gradient calculations and to allocate and manage the memory. 59 | 60 | With symbolic computing we can implement various optimization algorithms to train machine learning models represented in terms of a computational graph defined with operators and variables. 61 | 62 | Here is the specification of a convolutional neural network for image classification: 63 | 64 | ```{.cs} 65 | var images = Variable(PartialShape.Create(-1, 1, 28, 28)); 66 | 67 | var conv1 = new Convolution2D(images, 5, 5, 20); 68 | var act1 = new ActivationTanh(conv1.Output); 69 | var pool1 = new Pooling2D(act1.Output, PoolingMode.MAX, 2, 2, 2, 2); 70 | 71 | var conv2 = new Convolution2D(pool1.Output, 5, 5, 50); 72 | var act2 = new ActivationTanh(conv2.Output); 73 | var pool2 = new Pooling2D(act2.Output, PoolingMode.MAX, 2, 2, 2, 2); 74 | 75 | var fc1 = new FullyConnected(pool2.Output, 500); 76 | var act3 = new ActivationTanh(fc1.Output); 77 | var fc2 = new FullyConnected(act3.Output, 10); 78 | 79 | var labels = Variable(); 80 | 81 | var loss = new SoftmaxCrossEntropy(fc2.Output, labels); 82 | 83 | ``` 84 | 85 | It uses variables and operators to express the layers of the network. The [MNIST](/samples/mnist.html) sample shows how to use such a convolutional neural network for image classification. 
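To make the executor part concrete, the following is a minimal sketch (not a complete program) of how the graph above could be bound to a GPU context and trained for a single step. It follows the same API used in the MNIST tutorial: `loss.Loss` is the loss variable exposed by the `SoftmaxCrossEntropy` operator, `imageBatch` and `labelBatch` are placeholder tensors you would load yourself, and the learning rate is only illustrative.

```{.cs}
// Minimal sketch: bind the symbolic graph to a GPU context and run one training step.
// imageBatch and labelBatch are placeholders for data loaded by the caller.
var ctx = Context.GpuContext(0);
var opt = new GradientDescentOptimizer(ctx, loss.Loss, 0.001);  // drives the executor's forward/backward passes
opt.Initalize();

opt.AssignTensor(images, imageBatch);   // shape (batchSize, 1, 28, 28)
opt.AssignTensor(labels, labelBatch);   // one-hot labels of shape (batchSize, 10)

opt.Forward();    // forward pass: evaluates the softmax cross entropy loss
opt.Backward();   // backward pass: computes the gradients of all parameter variables
opt.Optimize();   // stochastic gradient descent update of the parameters
```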
86 | 87 | 88 | ### Highlights 89 | *** 90 | 91 | It is worth mentioning that Alea TK has some unique features such as 92 | 93 | - Integration of high performance CUDA libraries such as cuDNN, cuBlas and cuRand 94 | - Tensor views and GPU based shuffling for mini batch epochs 95 | - GPU kernel fusing to reduce the number of kernel launching 96 | 97 | 98 | ### Next Steps 99 | *** 100 | 101 | - Read the [tutorials](/tutorials.html) to get a deeper understanding of Alea TK and how it can be used to develop and train machine learning models 102 | 103 | - If you are looking for self contained examples as starting point for developing new models check out the [sample gallery](/gallery.html) 104 | 105 | - The [how to](/how_to.html) section addresses topics such as how to extend Alea TK or how to contribute to the project 106 | 107 | -------------------------------------------------------------------------------- /tutorial/doc/how_to.md: -------------------------------------------------------------------------------- 1 | ## How To 2 | 3 | ### Build from Source 4 | *** 5 | 6 | Check out the source from the repository. Alea TK uses [paket](https://fsprojects.github.io/Paket/index.html) to manage its project dependencies. After checkout run 7 | 8 | ``` 9 | .paket\paket.exe restore 10 | ``` 11 | 12 | to download the required NuGet packages. Then open the Visual Studio solution and build the projects. 13 | 14 | ### Build Documentation {#build_doc} 15 | *** 16 | 17 | The Alea TK online documentation relies on Pandoc. 18 | 19 | 1. Install Python version 3, which is used for the local web server 20 | 1. Install [Pandoc](http://pandoc.org/) version 1.16 or later 21 | 1. Install the Pandoc filter pandoc-eqnos to handle equation numbers 22 | ``` 23 | pip install pandoc-eqnos 24 | ``` 25 | 1. Check where **pandoc-eqnos.exe** is installed and add it to the path, usually it is in **Python Installation\\Python\\Scripts** 26 | 27 | To build the documentation proceed as follows: 28 | 29 | 1. Build the project **Generate** in the project folder **tutorial\\generate**. 30 | 1. Open **Build.fsx**, select all, right-click and choose **Execute in Interactive** to execute the script in the F# interactive console. If you cannot find that window open it via **View → Other Windows → F# Interactive**. 31 | 1. Go to **tutorial\\output** and start **run_server.bat** to launch a local web server. Open http://localhost:8080 in a browser to display the generated pages. 32 | 33 | ### Adding New Tutorial Sample {#add_sample} 34 | *** 35 | 36 | To add a new sample create a new project in **tutorial\\samples**. 
Add the required NuGet packages with **paket** 37 | 38 | ``` 39 | .paket\paket.exe add nuget Alea project 40 | .paket\paket.exe add nuget Alea.Parallel project 41 | ``` 42 | 43 | Provide the meta information for the sample in **SampleProject.fs**: 44 | 45 | ```{.fsharp} 46 | let metaData = 47 | [ 48 | { 49 | Id = "0001" 50 | Title = "MNIST Digits Classification" 51 | Language = CSharp 52 | Tags = [ComputerVision; Regression; Classification; ConvolutionalNeuralNets] 53 | SourceCodeLink = sourceCodeLinkCsharp "MNIST" 54 | GitLink = gitLinkRoot + "MNIST" 55 | ImageLink = "images/gpu_device.svg" 56 | Src = "samples" @@ "MNIST" 57 | } 58 | 59 | // add here new meta data 60 | 61 | ] 62 | ``` 63 | 64 | We expect the following markdown files in the project folder: 65 | 66 | - Readme.md providing a short abstract of the sample 67 | - Extended.md containing detailed explanations 68 | 69 | You can use LaTeX formulas, including formula numbering, and reference the formulas. Check out an existing sample for more details. If the existing classification tags are not enough, you can add a new one; a button to select it will be generated automatically. 70 | 71 | First rebuild the project **Generate** and then the documentation as explained above. 72 | 73 | 74 | -------------------------------------------------------------------------------- /tutorial/doc/index.md: -------------------------------------------------------------------------------- 1 | Alea TK 2 | 3 | ## About Alea TK 4 | *** 5 | 6 | Alea TK is an **open source** library for general purpose **numerical computing** and **Machine Learning** based on tensors and tensor expressions. 7 | 8 | Alea TK is 9 | 10 | - Easy to install 11 | - GPU accelerated 12 | - Designed for rapid prototyping 13 | - Developed in C# and usable from any .NET language 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /tutorial/doc/resources.md: -------------------------------------------------------------------------------- 1 | ## Resources 2 | 3 | ### API Documentation 4 | *** 5 | 6 | ### Further Reading 7 | *** 8 | 9 | Deep learning is a vast field that is evolving fast. The following references and blogs are merely the tip of the iceberg but might give a good starting point to delve deeper into the subject.
10 | 11 | #### Blogs 12 | 13 | - [The Unreasonable Effectiveness of Recurrent Neural Networks](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) 14 | - [Deep Learning, NLP, and Representations](http://colah.github.io/posts/2014-07-NLP-RNNs-Representations/) 15 | - [Understanding LSTM Networks](http://colah.github.io/posts/2015-08-Understanding-LSTMs/) 16 | - [Conv Nets: A Modular Perspective](http://colah.github.io/posts/2014-07-Conv-Nets-Modular/) 17 | - [Recurrent Neural Networks Tutorial, Part 1 - Introduction to RNNs](http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/) 18 | - [Recurrent Neural Networks Tutorial, Part 2 – Implementing a RNN with Python, Numpy and Theano](http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-2-implementing-a-language-model-rnn-with-python-numpy-and-theano/) 19 | - [Recurrent Neural Networks Tutorial, Part 3 – Backpropagation Through Time and Vanishing Gradients](http://www.wildml.com/2015/10/recurrent-neural-networks-tutorial-part-3-backpropagation-through-time-and-vanishing-gradients/) 20 | - [Recurrent Neural Network Tutorial, Part 4 – Implementing a GRU/LSTM RNN with Python and Theano](http://www.wildml.com/2015/10/recurrent-neural-network-tutorial-part-4-implementing-a-grulstm-rnn-with-python-and-theano/) 21 | - [Understanding Convolutional Neural Networks for NLP](http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/) 22 | - [Implementing a CNN for Text Classification in TensorFlow](http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/) 23 | - [Deep Learning for Chatbots, Part 1 – Introduction](http://www.wildml.com/2016/04/deep-learning-for-chatbots-part-1-introduction/) 24 | - [Deep Learning for Chatbots, Part 2 – Implementing a Retrieval-Based Model in Tensorflow](http://www.wildml.com/2016/07/deep-learning-for-chatbots-2-retrieval-based-model-tensorflow/) 25 | - [Attention and Memory in Deep Learning and NLP](http://www.wildml.com/2016/01/attention-and-memory-in-deep-learning-and-nlp/) 26 | - [An Explanation of Xavier Initialization](http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization) 27 | - [An overview of gradient descent optimization algorithms](http://sebastianruder.com/optimizing-gradient-descent/) 28 | - [A curated list of resources dedicated to recurrent neural networks](https://github.com/kjw0612/awesome-rnn) 29 | 30 | #### Papers 31 | 32 | ##### Convolutional Neural Networks 33 | 34 | - @Ciresan:2011:FHP:2283516.2283603 35 | 36 | ##### Recurrent Neural Networks 37 | 38 | - @DBLP:journals/corr/GreffSKSS15 39 | - @DBLP:journals/corr/ZarembaSV14 40 | - @DBLP:journals/corr/AppleyardKB16 41 | 42 | ##### Image Recognition and Object Detection 43 | 44 | - @conf/nips/KrizhevskySH12 45 | - @DBLP:journals/corr/SzegedyLJSRAEVR14 46 | 47 | ##### Neural Machine Translation 48 | 49 | - @DBLP:journals/corr/BahdanauCB14 50 | - @DBLP:journals/corr/VinyalsKKPSH14 51 | - @DBLP:journals/corr/JeanCMB14 52 | - @DBLP:journals/corr/BengioVJS15 53 | - @DBLP:journals/corr/ChoMGBSB14 54 | - @DBLP:journals/corr/ChiuN15 55 | 56 | ##### Embeddings 57 | 58 | - @linguistic-regularities-in-continuous-space-word-representations 59 | 60 | #### References -------------------------------------------------------------------------------- /tutorial/doc/tutorials.md: -------------------------------------------------------------------------------- 1 | ## Tutorials 2 | 3 | ### MNIST 4 | *** 5 | 6 | #### Multinomial 
Logistic Regression 7 | 8 | The [multinomial logistic regression](https://en.wikipedia.org/wiki/Multinomial_logistic_regression) model is the simplest classifier for categorical dependent variables. Using the Alea TK symbolic layer, the model is represented as follows: 9 | 10 | ```{.cs} 11 | public static Model MultinomialRegressionModel() 12 | { 13 | var images = Variable(); 14 | var labels = Variable(); 15 | var w = Parameter(Fill(Shape.Create(28 * 28, 10), 0.0f)); 16 | var b = Parameter(Fill(Shape.Create(10), 1.0f)); 17 | var y = Dot(images, w) + b; 18 | return new Model() { Loss = new SoftmaxCrossEntropy(y, labels), Images = images, Labels = labels }; 19 | } 20 | ``` 21 | 22 | The differentiable operator `SoftmaxCrossEntropy` applies softmax and cross entropy loss in one step so that the calculations can be better optimized. To actually use the model, we build an execution context and an optimizer. 23 | 24 | ```{.cs} 25 | var model = MultinomialRegressionModel(); 26 | var ctx = Context.GpuContext(0); 27 | var opt = new GradientDescentOptimizer(ctx, model.Loss.Loss, eta); 28 | opt.Initalize(); 29 | ``` 30 | 31 | To access the training data we use a `Batcher` instance. 32 | 33 | ```{.cs} 34 | var mnist = new MNIST(); 35 | var batcher = new Batcher(ctx, mnist.TrainImages, mnist.TrainLabels); 36 | ``` 37 | 38 | We train the model with a basic [stochastic gradient descent](https://en.wikipedia.org/wiki/Stochastic_gradient_descent) optimizer using a fixed learning rate `eta` and multiple epochs. For each epoch the data is first shuffled with `Reset()` and then split into mini-batches, which are passed to the optimizer. The forward propagation calculates the cross entropy loss. The gradients of the parameters are obtained by backward propagation. 39 | 40 | ```{.cs} 41 | for (var e = 1; e <= epochs; ++e) 42 | { 43 | batcher.Reset(); 44 | 45 | for (var i = 1; i <= MNIST.NumTrain / batchSize; ++i) 46 | { 47 | batcher.Next(batchSize, opt, model.Images, model.Labels); 48 | opt.Forward(); 49 | opt.Backward(); 50 | opt.Optimize(); 51 | } 52 | } 53 | ``` 54 | 55 | The `Optimize` method updates all parameters with a stochastic gradient descent step: 56 | 57 | ```{.cs} 58 | public override void Optimize() 59 | { 60 | foreach (var data in Data.Values) 61 | { 62 | if (data.Variable.Type == VariableType.Parameter) 63 | { 64 | var w = data.TensorAsExpr; 65 | var g = data.GradientAsExpr; 66 | Context.Assign(data.TensorAsValue, w - LearningRate.AsScalar(w.DataType)*g); 67 | } 68 | } 69 | } 70 | ``` 71 | 72 | The full project can be found in the [sample gallery](/samples/mnist.html). 73 | 74 | #### Multilayer Perceptron 75 | 76 | We can improve the model by adding additional fully connected layers: 77 | 78 | ```{.cs} 79 | public static Model MultiLayerPerceptronModel() 80 | { 81 | var images = Variable(PartialShape.Create(-1, 28 * 28)); 82 | var labels = Variable(PartialShape.Create(-1, 10)); 83 | var fc1 = new FullyConnected(images, 128); 84 | var act1 = new ActivationReLU(fc1.Output); 85 | var fc2 = new FullyConnected(act1.Output, 64); 86 | var act2 = new ActivationReLU(fc2.Output); 87 | var fc3 = new FullyConnected(act2.Output, 10); 88 | 89 | return new Model() { Loss = new SoftmaxCrossEntropy(fc3.Output, labels), Images = images, Labels = labels }; 90 | } 91 | ``` 92 | 93 | The above model training code can be reused for this more general model.
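For concreteness, here is a minimal sketch of that reuse: only the model factory changes, while the context, optimizer and batcher setup stay exactly as shown above (`eta`, `batchSize` and `epochs` are assumed to be defined as before):

```{.cs}
// Minimal sketch: swap the model factory, keep the training loop from above.
// Assumes eta, batchSize and epochs are defined as in the multinomial regression example.
var model = MultiLayerPerceptronModel();
var ctx = Context.GpuContext(0);
var opt = new GradientDescentOptimizer(ctx, model.Loss.Loss, eta);
opt.Initalize();

var mnist = new MNIST();
var batcher = new Batcher(ctx, mnist.TrainImages, mnist.TrainLabels);

for (var e = 1; e <= epochs; ++e)
{
    batcher.Reset();
    for (var i = 1; i <= MNIST.NumTrain / batchSize; ++i)
    {
        batcher.Next(batchSize, opt, model.Images, model.Labels);
        opt.Forward();
        opt.Backward();
        opt.Optimize();
    }
}
```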
94 | 95 | 96 | #### Convolutional Neural Nets 97 | 98 | Convolutional neural nets exploit translation invariance and lead to a more effective representations since they are only sensitive to local information. 99 | They are particularly suited to image classification. We define convolutional neural net with two-dimensional convolution and pooling: 100 | 101 | ```{.cs} 102 | public static Model ConvolutionalNeuralNetworkModel() 103 | { 104 | var images = Variable(PartialShape.Create(-1, 1, 28, 28)); 105 | var labels = Variable(); 106 | 107 | var conv1 = new Convolution2D(images, 5, 5, 20); 108 | var act1 = new ActivationTanh(conv1.Output); 109 | var pool1 = new Pooling2D(act1.Output, PoolingMode.MAX, 2, 2, 2, 2); 110 | 111 | var conv2 = new Convolution2D(pool1.Output, 5, 5, 50); 112 | var act2 = new ActivationTanh(conv2.Output); 113 | var pool2 = new Pooling2D(act2.Output, PoolingMode.MAX, 2, 2, 2, 2); 114 | 115 | var fc1 = new FullyConnected(pool2.Output, 500); 116 | var act3 = new ActivationTanh(fc1.Output); 117 | var fc2 = new FullyConnected(act3.Output, 10); 118 | 119 | return new Model() { Loss = new SoftmaxCrossEntropy(fc2.Output, labels), Images = images, Labels = labels }; 120 | } 121 | ``` -------------------------------------------------------------------------------- /tutorial/generate/AssemblyInfo.fs: -------------------------------------------------------------------------------- 1 | namespace GenerateDoc.AssemblyInfo 2 | 3 | open System.Reflection 4 | open System.Runtime.CompilerServices 5 | open System.Runtime.InteropServices 6 | 7 | // General Information about an assembly is controlled through the following 8 | // set of attributes. Change these attribute values to modify the information 9 | // associated with an assembly. 10 | [] 11 | [] 12 | [] 13 | [] 14 | [] 15 | [] 16 | [] 17 | [] 18 | 19 | // Setting ComVisible to false makes the types in this assembly not visible 20 | // to COM components. If you need to access a type in this assembly from 21 | // COM, set the ComVisible attribute to true on that type. 
22 | [] 23 | 24 | // The following GUID is for the ID of the typelib if this project is exposed to COM 25 | [] 26 | 27 | // Version information for an assembly consists of the following four values: 28 | // 29 | // Major Version 30 | // Minor Version 31 | // Build Number 32 | // Revision 33 | // 34 | // You can specify all the values or you can default the Build and Revision Numbers 35 | // by using the '*' as shown below: 36 | // [] 37 | [] 38 | [] 39 | 40 | do 41 | () -------------------------------------------------------------------------------- /tutorial/generate/Build.fsx: -------------------------------------------------------------------------------- 1 | #I @"..\..\packages\FAKE\tools" 2 | #load "HtmlBuilders.fsx" 3 | 4 | open HtmlBuilders 5 | 6 | // clean output 7 | // DeleteDir output 8 | 9 | buildMainDoc false 10 | buildSampleExtendedDocWithAbstract () 11 | buildSampleGallery () 12 | 13 | -------------------------------------------------------------------------------- /tutorial/generate/Generate.fsproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | Debug 6 | AnyCPU 7 | 2.0 8 | cf152e30-8b00-440c-a1df-b6665212f8fa 9 | Library 10 | Generate 11 | Generate 12 | v4.5.2 13 | 4.4.0.0 14 | true 15 | Generate 16 | 17 | 18 | true 19 | full 20 | false 21 | false 22 | .\bin\ 23 | DEBUG;TRACE 24 | 3 25 | 26 | 27 | 28 | 29 | pdbonly 30 | true 31 | true 32 | .\bin\ 33 | TRACE 34 | 3 35 | 36 | 37 | 38 | 39 | 11 40 | 41 | 42 | 43 | 44 | $(MSBuildExtensionsPath32)\..\Microsoft SDKs\F#\3.0\Framework\v4.0\Microsoft.FSharp.Targets 45 | 46 | 47 | 48 | 49 | $(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\FSharp\Microsoft.FSharp.Targets 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | ..\..\packages\FAKE\tools\FakeLib.dll 69 | 70 | 71 | 72 | 73 | True 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 88 | 89 | -------------------------------------------------------------------------------- /tutorial/generate/HtmlBuilders.fsx: -------------------------------------------------------------------------------- 1 | #I @"..\..\packages\FAKE\tools" 2 | 3 | #r "FakeLib.dll" 4 | 5 | #load "Util.fs" 6 | #load "Pandoc.fs" 7 | #load "Types.fs" 8 | #load "SampleProjects.fs" 9 | 10 | open System 11 | open System.Text 12 | open System.IO 13 | open System.Collections.Generic 14 | open System.Diagnostics 15 | open Fake 16 | open Util 17 | open Types 18 | open Pandoc 19 | 20 | Directory.SetCurrentDirectory __SOURCE_DIRECTORY__ 21 | 22 | let current = Directory.GetCurrentDirectory() 23 | let tutorial = current @@ ".." 24 | let solution = tutorial @@ ".." 
25 | let design = tutorial @@ "design" 26 | let output = tutorial @@ "output" 27 | let doc = tutorial @@ "doc" 28 | let templatus = solution @@ "packages" @@ "Templatus" @@ "tools" @@ "Templatus.exe" 29 | 30 | Directory.SetCurrentDirectory tutorial 31 | 32 | let copyToOutput folder = 33 | CopyDir (output @@ folder) (design @@ folder) allFiles 34 | 35 | let toHtmlString filename = 36 | let template = design @@ "templates" @@ "doc_page.html" 37 | pandocString filename (Some template) 38 | 39 | let languages = ["csharp"; "fsharp"; "vb"] 40 | 41 | let designFolders = ["content"; "fonts"; "images"; "scripts"] 42 | 43 | let documents = 44 | [ 45 | "index.md" 46 | "get_started.md" 47 | "tutorials.md" 48 | "how_to.md" 49 | "ml_tools.md" 50 | "design_details.md" 51 | "resources.md" 52 | ] 53 | 54 | // the directory structure for the sample documentation with separate tree for the extended documentation 55 | let buildDirectoryStructure () = 56 | languages 57 | |> List.iter (fun lang -> 58 | ensureDirectory (doc @@ "samples" @@ lang) 59 | ensureCommonSubDirectories (tutorial @@ "samples" @@ lang) (doc @@ "samples" @@ lang)) 60 | 61 | let ensureReadmeFiles () = 62 | languages 63 | |> List.iter (fun lang -> 64 | let ensure dir = 65 | subDirectories (dir @@ "samples" @@ lang) 66 | |> Array.iter (fun subDir -> CreateFile (subDir @@ "Readme.md")) 67 | ensure tutorial 68 | ensure doc) 69 | 70 | let bib = design @@ "templates" @@ "references.bib" 71 | let csl = design @@ "templates" @@ "ieee-with-url.csl" 72 | 73 | let buildMainDoc clean = 74 | if clean then DeleteDirs [output] 75 | ensureDirectory output 76 | let runserver = output @@ "run_server.bat" 77 | if not (TestFile runserver) then 78 | createTextFile runserver "python -m http.server 8080" 79 | designFolders |> List.iter copyToOutput 80 | let template = design @@ "templates" @@ "doc_page.html" 81 | let opts = sprintf "--filter pandoc-eqnos --ascii --bibliography %s --filter pandoc-citeproc" bib 82 | let toHtml filename = 83 | let infile = tutorial @@ "doc" @@ filename 84 | let outfile = tutorial @@ "output" @@ filename 85 | pandoc infile outfile (Some template) opts 86 | documents |> List.iter toHtml 87 | 88 | let buildSampleExtendedDocWithAbstract () = 89 | let template = tutorial @@ "generate" @@ "ExtendedSampleDoc.tpl" 90 | let opts = sprintf "--filter pandoc-eqnos --ascii --bibliography %s --filter pandoc-citeproc" bib 91 | ensureDirectory output 92 | SampleProjects.metaData |> List.iter (fun meta -> 93 | let outfile = tutorial @@ "output" @@ meta.ExtendedDocFile 94 | ensureDirectory (directory outfile) 95 | CopyFile outfile template 96 | let abstractDoc = tutorial @@ meta.Src @@ "Readme.md" 97 | let extendedDoc = tutorial @@ meta.Src @@ "Extended.md" 98 | let abstractHtml = Pandoc.pandocString abstractDoc None opts 99 | let extendedHtml = Pandoc.pandocString extendedDoc None opts 100 | ReplaceInFiles 101 | [ 102 | "$title$", meta.Title 103 | "$sourceCodeLink$", ("../" + meta.SourceCodeLink) 104 | "$gitLink$", (meta.GitLink) 105 | "$imageLink$", ("../" + meta.ImageLink) 106 | "$abstractHtml$", abstractHtml 107 | "$extendedHtml$", extendedHtml 108 | ] [outfile] 109 | ) 110 | 111 | let buildSampleGallery () = 112 | ensureDirectory output 113 | let tutorial = tutorial.Replace(@"\", @"\\") // this is a fix to properly pass '\' in parameters to Templatus 114 | 115 | let cmd = sprintf @" -t generate\SampleGallery.tpl -p tutorial=%s" tutorial 116 | runCommand (templatus + cmd) 117 | 118 | 119 | 
-------------------------------------------------------------------------------- /tutorial/generate/Pandoc.fs: -------------------------------------------------------------------------------- 1 | module Pandoc 2 | 3 | open System.IO 4 | open Fake 5 | open Util 6 | 7 | // write to out file 8 | let pandoc infile outfile template opt = 9 | let outfile = changeExt "html" outfile 10 | let cmd = 11 | match template with 12 | | Some template -> sprintf "pandoc %s --template %s --mathjax --wrap=none --highlight-style tango -t html %s -o %s" infile template opt outfile 13 | | None -> sprintf "pandoc %s --mathjax --wrap=none --highlight-style tango -t html %s -o %s" infile opt outfile 14 | printfn "%s" cmd 15 | runCommand cmd 16 | 17 | // returns html as string 18 | let pandocString filename template opt = 19 | let cmd = 20 | match template with 21 | | Some template -> sprintf "pandoc %s --template %s --mathjax --wrap=none --highlight-style tango -t html %s" filename template opt 22 | | None -> sprintf "pandoc %s --mathjax --wrap=none --highlight-style tango -t html %s" filename opt 23 | printfn "%s" cmd 24 | runCommandCaptureStdOut cmd 25 | -------------------------------------------------------------------------------- /tutorial/generate/SampleProjects.fs: -------------------------------------------------------------------------------- 1 | module SampleProjects 2 | 3 | open Fake 4 | open Types 5 | 6 | let languages = 7 | [ 8 | CSharp, "C#" 9 | FSharp, "F#" 10 | VB, "VB" 11 | ] 12 | 13 | let tags = 14 | [ 15 | Computations, "General Purpose Computations" 16 | Regression, "Regression" 17 | Classification, "Classification" 18 | ConvolutionalNeuralNets, "Convolutional Neural Nets" 19 | RecurrentNeuralNets, "Recurrent Neural Nets" 20 | ComputerVision, "Computer Vision" 21 | LanguageModelling, "Natural Language Modelling" 22 | Image, "Image" 23 | Video, "Video" 24 | Speech, "Speech" 25 | Text, "Text" 26 | OptimizationTechniques, "Optimization Techniques" 27 | ] 28 | 29 | let gitLinkRoot = "http://github.com/quantalea/AleaTK/tree/master/tutorial/samples/" 30 | let sourceCodeLinkCsharp name = "../samples/" + name + ".zip" 31 | 32 | // Add all projects here so that html sample gallery builder can generate html code 33 | let metaData = 34 | [ 35 | { 36 | Id = "0001" 37 | Title = "Monte Carlo Pi Estimation" 38 | Language = CSharp 39 | Tags = [Computations] 40 | SourceCodeLink = sourceCodeLinkCsharp "MonteCarloPi" 41 | GitLink = gitLinkRoot + "MonteCarloPi" 42 | ImageLink = "images/montecarlo_pi.gif" 43 | Src = "samples" @@ "MonteCarloPi" 44 | } 45 | 46 | { 47 | Id = "0002" 48 | Title = "MNIST Digits Classificiation" 49 | Language = CSharp 50 | Tags = [ComputerVision; Regression; Classification; ConvolutionalNeuralNets] 51 | SourceCodeLink = sourceCodeLinkCsharp "MNIST" 52 | GitLink = gitLinkRoot + "MNIST" 53 | ImageLink = "images/mnist.png" 54 | Src = "samples" @@ "MNIST" 55 | } 56 | 57 | { 58 | Id = "0003" 59 | Title = "PTB Natural Language Modelling" 60 | Language = CSharp 61 | Tags = [ComputerVision; Regression; Classification; RecurrentNeuralNets] 62 | SourceCodeLink = sourceCodeLinkCsharp "PTB" 63 | GitLink = gitLinkRoot + "PTB" 64 | ImageLink = "images/rnn.svg" 65 | Src = "samples" @@ "PTB" 66 | } 67 | ] -------------------------------------------------------------------------------- /tutorial/generate/Types.fs: -------------------------------------------------------------------------------- 1 | module Types 2 | 3 | open Fake 4 | open Util 5 | 6 | type LangType = 7 | | CSharp 8 | | FSharp 9 | | VB 10 | 
11 | type Tag = 12 | | Computations 13 | | Regression 14 | | Classification 15 | | ConvolutionalNeuralNets 16 | | RecurrentNeuralNets 17 | | ComputerVision 18 | | LanguageModelling 19 | | Image 20 | | Video 21 | | Speech 22 | | Text 23 | | OptimizationTechniques 24 | 25 | type MetaData = 26 | { 27 | Id : string 28 | Title : string 29 | Language : LangType 30 | Tags : list 31 | SourceCodeLink : string 32 | GitLink : string 33 | ImageLink : string 34 | Src : string 35 | } 36 | 37 | // the folder name can be used as page name 38 | member this.Folder = filename this.Src 39 | 40 | // the extended doc link for a Src location `samples/csharp/CopyGeneric` 41 | // is the path `samples/csharp/copygeneric.html` 42 | member this.ExtendedDocFile = 43 | let pagename = normalizeName this.Folder + ".html" 44 | (directory this.Src) @@ pagename 45 | 46 | member this.ExtendedDocLink = 47 | this.ExtendedDocFile.Replace(@"\", @"/") 48 | 49 | member this.Abstract = this.Src @@ "Readme.md" 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /tutorial/generate/Util.fs: -------------------------------------------------------------------------------- 1 | module Util 2 | 3 | open System 4 | open System.IO 5 | open System.Text 6 | open System.Diagnostics 7 | open Fake 8 | 9 | let joinStrings (sep:string) (s:seq) = 10 | String.Join(sep, s) 11 | 12 | let lowerStr (a:'a) = (sprintf "%A" a).ToLower() 13 | 14 | let normalizeName (name:string) = 15 | name.Replace(' ', '_') 16 | .Replace('.', '_') 17 | .Replace(",", "") 18 | .Replace("#", "sharp") 19 | .ToLower() 20 | 21 | let subDirectories path = 22 | let dInfo = new DirectoryInfo(path) 23 | dInfo.GetDirectories() |> Array.map (fun info -> info.FullName) 24 | 25 | let ensureCommonSubDirectories pathOrigin pathTarget = 26 | subDirectories pathOrigin 27 | |> Array.map filename 28 | |> Array.map (fun subDir -> pathTarget @@ subDir) 29 | |> Array.iter ensureDirectory 30 | 31 | let splitPath (path:string) = 32 | path.Split(Path.DirectorySeparatorChar) 33 | 34 | let createTempDirectory() = 35 | let tempDirectory = Path.Combine(Path.GetTempPath(), Path.GetRandomFileName()) 36 | let dirInfo = Directory.CreateDirectory tempDirectory 37 | tempDirectory 38 | 39 | let createTextFile (filename:string) (text:string) = 40 | use sw = File.CreateText filename 41 | sw.Write text 42 | sw.Close() 43 | 44 | let runCommand cmd = 45 | let startInfo = new ProcessStartInfo() 46 | startInfo.FileName <- "cmd.exe" 47 | startInfo.WindowStyle <- ProcessWindowStyle.Normal 48 | startInfo.UseShellExecute <- false 49 | startInfo.Arguments <- @"/c " + cmd 50 | use proc = Process.Start(startInfo) 51 | proc.WaitForExit() 52 | 53 | let runCommandCaptureStdOut cmd = 54 | let startInfo = new ProcessStartInfo() 55 | startInfo.FileName <- "cmd.exe" 56 | startInfo.WindowStyle <- ProcessWindowStyle.Normal 57 | startInfo.UseShellExecute <- false 58 | startInfo.RedirectStandardOutput <- true 59 | startInfo.Arguments <- @"/c " + cmd 60 | 61 | use proc = Process.Start(startInfo) 62 | 63 | let builder = new StringBuilder() 64 | while not proc.HasExited do 65 | builder.Append(proc.StandardOutput.ReadToEnd()) |> ignore 66 | builder.Append(proc.StandardOutput.ReadToEnd()) |> ignore 67 | builder.ToString() 68 | 69 | -------------------------------------------------------------------------------- /tutorial/generate/paket.references: -------------------------------------------------------------------------------- 1 | Templatus 2 | Fake 
-------------------------------------------------------------------------------- /tutorial/generate/readme.md: -------------------------------------------------------------------------------- 1 | # Documentation Tools 2 | 3 | ## Prerequisites 4 | 5 | The Alea TK online documentation relies on Pandoc. 6 | 7 | 1. Install Python version 3, which is used for the local web server 8 | 1. Install [Pandoc](http://pandoc.org/) version 1.16 or later 9 | 1. Install the Pandoc filter pandoc-eqnos to handle equation numbers 10 | ``` 11 | pip install pandoc-eqnos 12 | ``` 13 | 1. Check where **pandoc-eqnos.exe** is installed and add it to the path; it is usually in **Python Installation\\Python\\Scripts** 14 | 15 | To build the documentation, proceed as follows: 16 | 17 | 1. Build the project **Generate** in the project folder **tutorial\\generate**. 18 | 1. Open **Build.fsx**, select all, right-click and choose **Execute in Interactive** to execute the script in the F# interactive console. If you cannot find that window, open it via **View → Other Windows → F# Interactive**. 19 | 1. Go to **tutorial\\output** and start **run_server.bat** to launch a local web server. Open http://localhost:8080 in a browser to display the generated pages. 20 | 21 | ## Template Engine 22 | 23 | We use [Templatus](https://github.com/kerams/Templatus), an F# based template engine similar to Microsoft's T4 engine 24 | to generate HTML code from *.tpl files. 25 | -------------------------------------------------------------------------------- /tutorial/readme.md: -------------------------------------------------------------------------------- 1 | # Documentation Tools 2 | 3 | The [how to](www.aleatk.com/how_to.html#build_doc) explains how to build the online documentation. 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /tutorial/samples/MNIST/Extended.md: -------------------------------------------------------------------------------- 1 | *** 2 | 3 | ### Multinomial Logistic Regression 4 | 5 | The [multinomial logistic regression](https://en.wikipedia.org/wiki/Multinomial_logistic_regression) model is the simplest classifier for categorical dependent variables. It models the probability that the dependent variable $Y$ is of class $k$ 6 | 7 | $$ 8 | P(Y=k \mid x, W, b) = \frac{\exp(W_k x + b_k)}{\sum_{j=1}^K \exp(W_j x + b_j)} = \mathrm{softmax}(W x + b)_k 9 | $$ {#eq:mnl-regression} 10 | 11 | where $\mathrm{softmax}(W x + b)$ is the softmax function with $W \in \mathbb{R}^{K \times D}$ the weight matrix with rows $W_k$, $b \in \mathbb{R}^K$ the bias vector and $x \in \mathbb{R}^D$ the data or explanatory variable. We use the cross entropy 12 | 13 | $$ 14 | \mathrm{D}(p \parallel q) = - \sum_k p_k \log(q_k) 15 | $$ {#eq:cross-entropy} 16 | 17 | to measure the discrepancy between the predicted probabilities ({@eq:mnl-regression}) and the actual class of a sample. 18 | 19 | 20 | ### Multilayer Perceptron 21 | 22 | We extend ({@eq:mnl-regression}) by adding 3 layers with a ReLU activation function so that 23 | 24 | $$ 25 | P(Y=k \mid x, W_1, W_2, W_3) = \mathrm{softmax}(\mathrm{ReLu}(W_3 \; \mathrm{ReLu}(W_2 \; \mathrm{ReLu}(W_1 x))))_k 26 | $$ {#eq:mlp-regression} 27 | 28 | 29 | ### Convolutional Neural Net 30 | 31 | Convolutional neural nets consist of multiple layers of small neuron collections, called receptive fields. They process portions of the input image. The outputs of these collections are tiled so that their input regions overlap.
Tiling adds translation invariance and leads to a better representation of the input image. Convolutional neural nets usually contain pooling layers, which perform a nonlinear down-sampling to reduce overfitting. 32 | 33 | More details on convolutional neural nets can be found [here](https://en.wikipedia.org/wiki/Convolutional_neural_network), [here](http://cs231n.github.io/convolutional-networks/), [here](http://deeplearning.net/tutorial/lenet.html) and [here](http://cs.stanford.edu/people/karpathy/convnetjs/). 34 | 35 | We improve model ({@eq:mnl-regression}) with the following convolutional neural network architecture: 36 | 37 | - Convolutional layer of window size 5 with 20 output features followed by $tanh$ nonlinearity 38 | - A max pooling with window size 2 and stride 2 39 | - Convolutional layer of window size 5 with 50 output features followed by $tanh$ nonlinearity 40 | - A max pooling with window size 2 and stride 2 41 | - A fully connected layer with 500 neurons followed by $tanh$ nonlinearity 42 | - A fully connected layer to map to 10 dimensions for the 10 categories 43 | - A softmax layer to map the 10 dimensions to categorical probabilities 44 | 45 | -------------------------------------------------------------------------------- /tutorial/samples/MNIST/MNIST.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.IO; 3 | using System.IO.Compression; 4 | using System.Linq; 5 | using System.Net; 6 | 7 | namespace Tutorial.Samples { 8 | public class MNIST { 9 | #region consts 10 | 11 | public const string Url = @"http://yann.lecun.com/exdb/mnist/"; 12 | public const string FileTrainImages = @"Data\MNIST\train-images-idx3-ubyte"; 13 | public const string FileTrainLabels = @"Data\MNIST\train-labels-idx1-ubyte"; 14 | public const string FileTestImages = @"Data\MNIST\t10k-images-idx3-ubyte"; 15 | public const string FileTestLabels = @"Data\MNIST\t10k-labels-idx1-ubyte"; 16 | public const long NumTrain = 55000L; 17 | public const long NumTest = 10000L; 18 | public const long NumValidation = 60000L - NumTrain; 19 | 20 | #endregion 21 | 22 | private static void SkipImages(BinaryReader brImages) { 23 | brImages.ReadInt32(); // skip magic 24 | brImages.ReadInt32(); // skip num images 25 | brImages.ReadInt32(); // skip rows 26 | brImages.ReadInt32(); // skip cols 27 | } 28 | 29 | private static void SkipLabels(BinaryReader brLabels) { 30 | brLabels.ReadInt32(); // skip magic 31 | brLabels.ReadInt32(); // skip num labels 32 | } 33 | 34 | private static void Decompress(string fileName) { 35 | var fileToDecompress = new FileInfo(fileName + ".gz"); 36 | using (var originalFileStream = fileToDecompress.OpenRead()) 37 | using (var decompressedFileStream = File.Create(fileName)) 38 | using (var decompressionStream = new GZipStream(originalFileStream, CompressionMode.Decompress)) { 39 | decompressionStream.CopyTo(decompressedFileStream); 40 | } 41 | } 42 | 43 | public static void Download() { 44 | var files = new[] 45 | {"train-images-idx3-ubyte", "train-labels-idx1-ubyte", "t10k-images-idx3-ubyte", "t10k-labels-idx1-ubyte"}; 46 | 47 | Directory.CreateDirectory(@"Data\MNIST\"); 48 | files.ToList().ForEach(file => { 49 | if (!File.Exists(@"Data\MNIST\" + file)) { 50 | using (var client = new WebClient()) { 51 | var url = Url + file + ".gz"; 52 | Console.WriteLine($"Downloading {url} ..."); 53 | client.DownloadFile(url, @"Data\MNIST\" + file + ".gz"); 54 | Decompress(@"Data\MNIST\" + file); 55 | } 56 | } 57 | }); 58 | } 59 | 60
| private static void ReadData(BinaryReader brImages, BinaryReader brLabels, float[,] images, float[,] labels) { 61 | var numSamples = images.GetLength(0); 62 | if (numSamples != labels.GetLength(0)) throw new InvalidOperationException(); 63 | 64 | for (var i = 0; i < numSamples; ++i) { 65 | for (var x = 0; x < 28; ++x) { 66 | for (var y = 0; y < 28; ++y) { 67 | images[i, x*28 + y] = brImages.ReadByte()/255.0f; 68 | } 69 | } 70 | labels[i, brLabels.ReadByte()] = 1.0f; 71 | } 72 | } 73 | 74 | public float[,] TrainImages { get; } 75 | 76 | public float[,] TrainLabels { get; } 77 | 78 | public float[,] ValidationImages { get; } 79 | 80 | public float[,] ValidationLabels { get; } 81 | 82 | public float[,] TestImages { get; } 83 | 84 | public float[,] TestLabels { get; } 85 | 86 | public MNIST() { 87 | Download(); 88 | 89 | using (var ifsTestLabels = new FileStream(FileTestLabels, FileMode.Open)) 90 | using (var ifsTestImages = new FileStream(FileTestImages, FileMode.Open)) 91 | using (var ifsTrainLabels = new FileStream(FileTrainLabels, FileMode.Open)) 92 | using (var ifsTrainImages = new FileStream(FileTrainImages, FileMode.Open)) 93 | using (var brTestLabels = new BinaryReader(ifsTestLabels)) 94 | using (var brTestImages = new BinaryReader(ifsTestImages)) 95 | using (var brTrainLabels = new BinaryReader(ifsTrainLabels)) 96 | using (var brTrainImages = new BinaryReader(ifsTrainImages)) { 97 | SkipImages(brTestImages); 98 | SkipLabels(brTestLabels); 99 | SkipImages(brTrainImages); 100 | SkipLabels(brTrainLabels); 101 | 102 | TestImages = new float[NumTest, 28*28]; 103 | TestLabels = new float[NumTest, 10]; 104 | ReadData(brTestImages, brTestLabels, TestImages, TestLabels); 105 | 106 | TrainImages = new float[NumTrain, 28*28]; 107 | TrainLabels = new float[NumTrain, 10]; 108 | ReadData(brTrainImages, brTrainLabels, TrainImages, TrainLabels); 109 | 110 | ValidationImages = new float[NumValidation, 28*28]; 111 | ValidationLabels = new float[NumValidation, 10]; 112 | ReadData(brTrainImages, brTrainLabels, ValidationImages, ValidationLabels); 113 | } 114 | } 115 | } 116 | } -------------------------------------------------------------------------------- /tutorial/samples/MNIST/MNIST.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | Debug 6 | AnyCPU 7 | {87A9BBD7-C303-4A22-9175-74BCF148A81D} 8 | Library 9 | Properties 10 | Tutorial.Samples 11 | MNIST 12 | v4.5.2 13 | 512 14 | 15 | 16 | true 17 | full 18 | false 19 | ..\..\..\debug\ 20 | DEBUG;TRACE 21 | prompt 22 | 4 23 | false 24 | 25 | 26 | pdbonly 27 | true 28 | ..\..\..\release\ 29 | TRACE 30 | prompt 31 | 4 32 | AnyCPU 33 | false 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | {ec47c435-0d7d-4819-be05-11b9e57fb8f3} 56 | AleaTK 57 | 58 | 59 | 60 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | ..\..\..\packages\Alea\lib\net45\Alea.IL.dll 77 | True 78 | True 79 | 80 | 81 | ..\..\..\packages\Alea\lib\net45\Alea.Parallel.dll 82 | True 83 | True 84 | 85 | 86 | ..\..\..\packages\Alea\lib\net45\Alea.dll 87 | True 88 | True 89 | 90 | 91 | 92 | 93 | 94 | 95 | ..\..\..\packages\NUnit\lib\nunit.framework.dll 96 | True 97 | True 98 | 99 | 100 | 101 | -------------------------------------------------------------------------------- /tutorial/samples/MNIST/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 
| using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 8 | [assembly: AssemblyTitle("MNIST")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("")] 12 | [assembly: AssemblyProduct("MNIST")] 13 | [assembly: AssemblyCopyright("Copyright © 2016")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 20 | [assembly: ComVisible(false)] 21 | 22 | // The following GUID is for the ID of the typelib if this project is exposed to COM 23 | [assembly: Guid("87a9bbd7-c303-4a22-9175-74bcf148a81d")] 24 | 25 | // Version information for an assembly consists of the following four values: 26 | // 27 | // Major Version 28 | // Minor Version 29 | // Build Number 30 | // Revision 31 | // 32 | // You can specify all the values or you can default the Build and Revision Numbers 33 | // by using the '*' as shown below: 34 | // [assembly: AssemblyVersion("1.0.*")] 35 | [assembly: AssemblyVersion("1.0.0.0")] 36 | [assembly: AssemblyFileVersion("1.0.0.0")] 37 | -------------------------------------------------------------------------------- /tutorial/samples/MNIST/Readme.md: -------------------------------------------------------------------------------- 1 | [MNIST](http://yann.lecun.com/exdb/mnist/) is a computer vision dataset consisting of handwritten digits. It is a good dataset to try out machine learning techniques and pattern recognition methods on real-world data. 2 | This tutorial shows how to train a multinomial logistic regression, a multilayer perceptron and a convolutional neural network to predict the digit from an image. 3 | 4 | 5 | -------------------------------------------------------------------------------- /tutorial/samples/MNIST/paket.references: -------------------------------------------------------------------------------- 1 | Alea 2 | NUnit -------------------------------------------------------------------------------- /tutorial/samples/MonteCarloPi/Extended.md: -------------------------------------------------------------------------------- 1 | *** 2 | 3 | ### Theoretical Foundations 4 | 5 | Monte Carlo simulation methods are computational algorithms which rely on repeated random sampling to estimate a results. Monte Carlo simulation can be used to calculate the value of $\pi$ as follows 6 | 7 | $$ 8 | \dfrac{\text{area of unit circle}}{\text{area of } [-1,1]^2} = \dfrac{\pi}{4} = 9 | \dfrac{\text{hits in unit circle}}{\text{randomly generated points in } [-1,1]^2}. 10 | $$ 11 | 12 | More precisely if $A \subset \mathbb{R}^n$, $f : A \rightarrow \mathbb{R}$ is an integrable function and $x^{(i)} \in A$, $i=1, \ldots, n$ are uniformly distributed points then 13 | 14 | $$ 15 | \int_A f(x) dx \approx \frac{\mathrm{vol}(A)}{n} \sum_{i=1}^n f(x^{(i)}). 16 | $$ {#eq:mc-integral-approx} 17 | 18 | Now apply ({@eq:mc-integral-approx}) with $A = [-1,1]^2$ and $f(x) = \mathbb{1}_{\{x_1^2 + x_2^2 \leq 1\}}$ the indicator function of the unit circle. 19 | 20 | 21 | ### Implementation 22 | 23 | Random points in the unit square are generated and we calculate how many of them are inside the unit circle. 
Given an execution context, which is either a CPU or a GPU device, we allocate buffers for the generated points and a scalar to hold the simulated value of $\pi$. We define a transformation that checks whether a point lies inside the unit circle or not. The value 4.0 is used because we only simulate points in the positive quadrant. The actual computations happen in the `for` loop, where we iterate over multiple batches, generate random numbers and apply the transformation to count the points inside the unit circle, followed by a mean reduction. 24 | 25 | ```{.cs} 26 | var points = ctx.Device.Allocate(Shape.Create((long)batchSize)); 27 | var pi = ctx.Device.Allocate(Shape.Scalar); 28 | 29 | var pis = Map(points, point => (point.x * point.x + point.y * point.y) < 1.0 ? 4.0 : 0.0); 30 | 31 | for (var i = 0; i < batchs; ++i) 32 | { 33 | Console.WriteLine($"Batch {i}"); 34 | var offset = batchSize * (ulong)i; 35 | ctx.Assign(points, RandomUniform(seed, offset)); 36 | ctx.Assign(pi, i == 0 ? ReduceMean(pis) : (pi + ReduceMean(pis)) / 2.0); 37 | } 38 | ``` 39 | 40 | 41 | -------------------------------------------------------------------------------- /tutorial/samples/MonteCarloPi/MonteCarloPi.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using NUnit.Framework; 3 | 4 | using Alea; 5 | using Alea.Parallel; 6 | using AleaTK; 7 | using Context = AleaTK.Context; 8 | using static AleaTK.Library; 9 | 10 | namespace Tutorial.Samples 11 | { 12 | public class MonteCarloPi 13 | { 14 | private static void Main() 15 | { 16 | PiEstimationGpu(); 17 | } 18 | 19 | public static void EstimatePi(Context ctx, int batchs, ulong batchSize, double error) 20 | { 21 | const ulong seed = 0UL; 22 | 23 | // allocate buffers for the generated points and a scalar to hold the simulated value of pi 24 | var points = ctx.Device.Allocate(Shape.Create((long)batchSize)); 25 | var pi = ctx.Device.Allocate(Shape.Scalar); 26 | 27 | // transform that checks if a point is inside the unit circle or not 28 | // the value 4.0 is used because we only simulate points in the positive quadrant 29 | var pis = Map(points, point => (point.x * point.x + point.y * point.y) < 1.0 ? 4.0 : 0.0); 30 | 31 | // iterate over multiple batches 32 | for (var i = 0; i < batchs; ++i) 33 | { 34 | Console.WriteLine($"Batch {i}"); 35 | // generate random numbers, apply the mapping, followed by a mean reduction 36 | var offset = batchSize * (ulong)i; 37 | ctx.Assign(points, RandomUniform(seed: seed, offset: offset)); 38 | ctx.Assign(pi, i == 0 ?
ReduceMean(pis) : (pi + ReduceMean(pis)) / 2.0); 39 | } 40 | 41 | Console.WriteLine($"Pi = {pi.ToScalar()}"); 42 | Assert.That(pi.ToScalar(), Is.EqualTo(Math.PI).Within(error)); 43 | } 44 | 45 | [Test] 46 | public static void PiEstimationGpu() 47 | { 48 | EstimatePi(Context.GpuContext(0), 100, 10000000, 1e-3); 49 | } 50 | 51 | [Test] 52 | public static void PiEstimationCpu() 53 | { 54 | EstimatePi(Context.CpuContext, 5, 100000, 1e-2); 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /tutorial/samples/MonteCarloPi/MonteCarloPi.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | Debug 6 | AnyCPU 7 | {5F2ECB6E-7F39-4889-99C2-BBE6144CF7AC} 8 | Exe 9 | Properties 10 | Tutorial.Samples 11 | MonteCarloPi 12 | v4.5.2 13 | 512 14 | 15 | 16 | true 17 | full 18 | false 19 | ..\..\..\debug\ 20 | DEBUG;TRACE 21 | prompt 22 | 4 23 | false 24 | 25 | 26 | pdbonly 27 | true 28 | ..\..\..\release\ 29 | TRACE 30 | prompt 31 | 4 32 | false 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | {ec47c435-0d7d-4819-be05-11b9e57fb8f3} 54 | AleaTK 55 | 56 | 57 | 58 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | ..\..\..\packages\Alea\lib\net45\Alea.IL.dll 75 | True 76 | True 77 | 78 | 79 | ..\..\..\packages\Alea\lib\net45\Alea.Parallel.dll 80 | True 81 | True 82 | 83 | 84 | ..\..\..\packages\Alea\lib\net45\Alea.dll 85 | True 86 | True 87 | 88 | 89 | 90 | 91 | 92 | 93 | ..\..\..\packages\NUnit\lib\nunit.framework.dll 94 | True 95 | True 96 | 97 | 98 | 99 | -------------------------------------------------------------------------------- /tutorial/samples/MonteCarloPi/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 8 | [assembly: AssemblyTitle("MonteCarloPi")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("")] 12 | [assembly: AssemblyProduct("MonteCarloPi")] 13 | [assembly: AssemblyCopyright("Copyright © 2016")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 
20 | [assembly: ComVisible(false)] 21 | 22 | // The following GUID is for the ID of the typelib if this project is exposed to COM 23 | [assembly: Guid("5f2ecb6e-7f39-4889-99c2-bbe6144cf7ac")] 24 | 25 | // Version information for an assembly consists of the following four values: 26 | // 27 | // Major Version 28 | // Minor Version 29 | // Build Number 30 | // Revision 31 | // 32 | // You can specify all the values or you can default the Build and Revision Numbers 33 | // by using the '*' as shown below: 34 | // [assembly: AssemblyVersion("1.0.*")] 35 | [assembly: AssemblyVersion("1.0.0.0")] 36 | [assembly: AssemblyFileVersion("1.0.0.0")] 37 | -------------------------------------------------------------------------------- /tutorial/samples/MonteCarloPi/Readme.md: -------------------------------------------------------------------------------- 1 | This tutorial shows how to use the imperative tensor layer of Alea TK for general purpose computations. We program a Monte Carlo simulation to approximate $\pi$. 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /tutorial/samples/MonteCarloPi/paket.references: -------------------------------------------------------------------------------- 1 | NUnit 2 | Alea -------------------------------------------------------------------------------- /tutorial/samples/PTB/App.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /tutorial/samples/PTB/Extended.md: -------------------------------------------------------------------------------- 1 | *** 2 | 3 | ### Natural Language Modelling 4 | 5 | In this tutorial we implement a probabilistic model based on a recurrent neural network to predict the next word in a sentence, given the history of previous words. 6 | 7 | We use the [Penn Treebank dataset](http://www.cis.upenn.edu/~treebank). It has a vocabulary of 10k words and consists of 929k training words, 73k validation words, and 82k test words. We download a preprocessed version from Tomáš Mikolov's [web page](http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz). We follow the approach proposed in @DBLP:journals/corr/ZarembaSV14 [section 4.1]. 8 | 9 | The preprocessed input data is a sequence of words containing `<unk>` for words not in the vocabulary, `<eos>` to mark the end of the sequence and `N` to represent numbers. We translate the sequence of words into a sequence of integers, representing the indices of the words in the vocabulary of size $s$. We create minibatches of size $b$ by taking $b$ consecutive subsequences of fixed length $n$ from this sequence. Taking fixed length subsequences may break up a sentence into multiple subsequences. As we are mainly concerned with the history of words to predict the next word, this simplification is an acceptable compromise and it allows us to unroll the LSTM layers by a fixed number of steps.
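The batching scheme can be sketched in a few lines of C#. This is only a schematic illustration of the idea, not the sample's actual data pipeline; it assumes the corpus has already been converted to an array of word ids and only needs `System` and `System.Collections.Generic`:

```{.cs}
// Schematic sketch of the minibatching described above: the id sequence is cut into
// b parallel streams, and every minibatch is an n x b matrix of inputs together with
// the targets shifted by one position (the next word).
public static IEnumerable<Tuple<int[,], int[,]>> MiniBatches(int[] ids, int n, int b)
{
    var streamLength = ids.Length / b;
    for (var start = 0; start + n + 1 <= streamLength; start += n)
    {
        var inputs = new int[n, b];
        var targets = new int[n, b];
        for (var j = 0; j < b; ++j)
        {
            var offset = j * streamLength + start;
            for (var i = 0; i < n; ++i)
            {
                inputs[i, j] = ids[offset + i];
                targets[i, j] = ids[offset + i + 1];
            }
        }
        yield return Tuple.Create(inputs, targets);
    }
}
```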
10 | 11 | - Input data: integer matrix $x \in \mathbb{N}_0^{n \times b}$, where $n$ is the sequence length and $b$ the minibatch size 12 | - Embedding layer: translating $x$ into $y \in \mathbb{R}^{n \times b \times d}$, where $d$ is the dimension of the embedding, defined in terms of the embedding weights $w \in \mathbb{R}^{s \times d}$ by $y_{i,j,k} = w_{x_{i,j},k}$ 13 | - Two [LSTM layers](/ml_tools.html#eq:lstm2): input and hidden dimension $d$, unrolled $n$ steps along the first dimension of $y$, producing output $z \in \mathbb{R}^{n \times b \times d}$ 14 | - [Fully connected layer](/ml_tools.html#eq:fully-connected): the input is $z$ reshaped to a matrix $\mathbb{R}^{n b \times d}$ and transformed to the output $u \in \mathbb{R}^{n b \times s}$, with $s$ the size of the vocabulary 15 | - [Softmax with cross entropy layer](/ml_tools.html#eq:softmax-cross-entropy): transforms $u$ into probabilities $p \in \mathbb{R}^{n b \times s}$ and calculates the cross entropy $D(p \| q)$ from labels $q \in \mathbb{N}_0^{n \times b}$, given by the index in the vocabulary of the next word for each word in $x$, reshaped to an element of $\mathbb{N}_0^{n b}$. 16 | 17 | The hidden states of the LSTM layers are initialized to zero for the first minibatch. As we traverse the sequence sequentially with minibatches of size $n \times b$, we can take the final hidden states of a minibatch as the initial hidden states of the next minibatch. 18 | 19 | The stochastic gradient descent for training uses gradient clipping to cope with possible gradient explosion. 20 | 21 | @DBLP:journals/corr/ZarembaSV14 show how to apply dropout regularization to recurrent neural networks in order to reduce overfitting. 22 | They suggest applying the dropout operator only to the non-recurrent connections, including the input and the output connections. 23 | 24 | ### LSTM Implementations 25 | 26 | The sample provides a direct implementation of a single LSTM layer following [Karpathy](https://gist.github.com/karpathy/587454dc0146a6ae21fc). There is a basic version and an optimized version that reduces the number of kernel calls. The optimized version is used as a baseline to compare against the cuDNN accelerated implementation `Rnn` of Alea TK. @DBLP:journals/corr/AppleyardKB16 describe in more detail the cuDNN optimizations for multiple stacked LSTM layers. 27 | 28 | The following benchmarks have been executed on a GTX 970 with 4 GB device memory. The performance is measured in words per second.

| Model Size | Small | Medium | Large |
|---|---|---|---|
| Alea TK with cuDNN | 14465 | 9469 | 4257 |
| Direct LSTM implementation | 3371 | 3350 | 1732 |
| TensorFlow with GPU | 10493 | 6341 | out of memory |
36 | 37 | *** 38 | 39 | ### References -------------------------------------------------------------------------------- /tutorial/samples/PTB/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 8 | [assembly: AssemblyTitle("PTB")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("")] 12 | [assembly: AssemblyProduct("PTB")] 13 | [assembly: AssemblyCopyright("Copyright © 2016")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 20 | [assembly: ComVisible(false)] 21 | 22 | // The following GUID is for the ID of the typelib if this project is exposed to COM 23 | [assembly: Guid("ec890853-7bcf-4c35-a714-ffb81776e433")] 24 | 25 | // Version information for an assembly consists of the following four values: 26 | // 27 | // Major Version 28 | // Minor Version 29 | // Build Number 30 | // Revision 31 | // 32 | // You can specify all the values or you can default the Build and Revision Numbers 33 | // by using the '*' as shown below: 34 | // [assembly: AssemblyVersion("1.0.*")] 35 | [assembly: AssemblyVersion("1.0.0.0")] 36 | [assembly: AssemblyFileVersion("1.0.0.0")] 37 | -------------------------------------------------------------------------------- /tutorial/samples/PTB/Readme.md: -------------------------------------------------------------------------------- 1 | This tutorial performs word-level prediction using a 2 layer LSTM model trained on the [Penn Treebank](https://www.cis.upenn.edu/~treebank) (PTB) dataset. 
2 | 3 | 4 | -------------------------------------------------------------------------------- /tutorial/samples/PTB/lstm_small.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quantalea/AleaTK/1ab88eeb296c9a6835c0d72295e1b57838aa66de/tutorial/samples/PTB/lstm_small.mat -------------------------------------------------------------------------------- /tutorial/samples/PTB/paket.references: -------------------------------------------------------------------------------- 1 | Alea 2 | NUnit 3 | SharpZipLib 4 | CSMatIO -------------------------------------------------------------------------------- /tutorial/samples/WMT/App.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /tutorial/samples/WMT/BucketedData.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | 5 | namespace Tutorial.Samples 6 | { 7 | public class BucketedData 8 | { 9 | public Tuple[] BucketSequenceLengths { get; } 10 | 11 | public int NumBuckets { get; } 12 | 13 | public int MaxSourceSequenceLength { get; } 14 | 15 | public int MaxTargetSequenceLength { get; } 16 | 17 | public List[] SourceLanguage { get; } 18 | 19 | public List[] TargetLanguage { get; } 20 | 21 | public int Skipped { get; private set; } = 0; 22 | 23 | public int[] BucketSizes => SourceLanguage.Select(bucket => bucket.Count).ToArray(); 24 | 25 | public int NumDataPoints => BucketSizes.Sum(); 26 | 27 | public Random Random { get; } 28 | 29 | public BucketedData(IEnumerable> bucketSequenceLengths) 30 | { 31 | BucketSequenceLengths = bucketSequenceLengths.ToArray(); 32 | NumBuckets = BucketSequenceLengths.Length; 33 | SourceLanguage = Enumerable.Range(0, BucketSequenceLengths.Length).Select(i => new List()).ToArray(); 34 | TargetLanguage = Enumerable.Range(0, BucketSequenceLengths.Length).Select(i => new List()).ToArray(); 35 | MaxSourceSequenceLength = BucketSequenceLengths.Select(i => i.Item1).Max(); 36 | MaxTargetSequenceLength = BucketSequenceLengths.Select(i => i.Item2).Max() - 2; 37 | Random = new Random(0); 38 | } 39 | 40 | public static int[] PadSourceSequence(int[] indices, int paddedLength) 41 | { 42 | if (indices.Length > paddedLength) 43 | throw new ArgumentException("input array too long"); 44 | 45 | var padded = new int[paddedLength]; 46 | Array.Copy(indices, padded, indices.Length); 47 | for (var i = indices.Length; i < paddedLength; ++i) 48 | { 49 | padded[i] = Vocabulary.PadId; 50 | } 51 | return padded; 52 | } 53 | 54 | public static int[] PadTargetSequence(int[] indices, int paddedLength) 55 | { 56 | if (indices.Length > paddedLength - 2) 57 | throw new ArgumentException("input array too long, need space for at beginning and at end"); 58 | 59 | var padded = new int[paddedLength]; 60 | padded[0] = Vocabulary.GoId; 61 | padded[indices.Length + 1] = Vocabulary.EosId; 62 | Array.Copy(indices, 0, padded, 1, indices.Length); 63 | for (var i = indices.Length + 2; i < paddedLength; ++i) 64 | { 65 | padded[i] = Vocabulary.PadId; 66 | } 67 | return padded; 68 | } 69 | 70 | public void Add(int[] source, int[] target) 71 | { 72 | if (source.Length > MaxSourceSequenceLength || target.Length > MaxTargetSequenceLength) 73 | { 74 | Skipped++; 75 | return; 76 | } 77 | for (var i = 0; i < BucketSequenceLengths.Length; ++i) 78 | { 79 | var 
sourceBucketLength = BucketSequenceLengths[i].Item1; 80 | var targetBucketLength = BucketSequenceLengths[i].Item2; 81 | if (source.Length <= sourceBucketLength && target.Length < targetBucketLength - 1) 82 | { 83 | SourceLanguage[i].Add(PadSourceSequence(source, sourceBucketLength)); 84 | TargetLanguage[i].Add(PadTargetSequence(target, targetBucketLength)); 85 | break; 86 | } 87 | } 88 | } 89 | } 90 | } -------------------------------------------------------------------------------- /tutorial/samples/WMT/BucketedDataBatcher.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | 4 | namespace Tutorial.Samples 5 | { 6 | public class BucketedDataBatcher 7 | { 8 | public Random Random { get; } 9 | 10 | public BucketedData Data { get; } 11 | 12 | public double[] CumulativeProbabilities { get; } 13 | 14 | public int BatchSize { get; } 15 | 16 | public BucketedDataBatcher(BucketedData data, int batchSize, Random random) 17 | { 18 | Random = random; 19 | Data = data; 20 | BatchSize = batchSize; 21 | var sizes = data.BucketSizes; 22 | var total = (double)data.NumDataPoints; 23 | CumulativeProbabilities = new double[data.NumBuckets]; 24 | CumulativeProbabilities[0] = sizes[0]/total; 25 | for (var i = 1; i < data.NumBuckets; ++i) 26 | CumulativeProbabilities[i] = CumulativeProbabilities[i - 1] + sizes[i]/total; 27 | } 28 | 29 | public class Batch 30 | { 31 | public int[,] Source { get; set; } 32 | public int[,] Target { get; set; } 33 | public int[,] Mask { get; set; } 34 | } 35 | 36 | public Batch SampleNewBatch(int bucketId) 37 | { 38 | var bucketSize = Data.SourceLanguage[bucketId].Count; 39 | var sourceSequenceLength = Data.BucketSequenceLengths[bucketId].Item1; 40 | var targetSequenceLength = Data.BucketSequenceLengths[bucketId].Item2; 41 | var sourceLanguage = Data.SourceLanguage[bucketId]; 42 | var targetLanguage = Data.TargetLanguage[bucketId]; 43 | 44 | var source = new int[sourceSequenceLength, BatchSize]; 45 | var target = new int[targetSequenceLength, BatchSize]; 46 | var mask = new int[targetSequenceLength, BatchSize]; 47 | for (var i = 0; i < BatchSize; ++i) 48 | { 49 | var choice = Random.Next(bucketSize); 50 | for (var t = 0; t < sourceSequenceLength; ++t) 51 | { 52 | source[t, i] = sourceLanguage[choice][t]; 53 | } 54 | for (var t = 0; t < targetSequenceLength; ++t) 55 | { 56 | target[t, i] = targetLanguage[choice][t]; 57 | if (target[t, i] == Vocabulary.PadId) 58 | mask[t, i] = 0; 59 | else 60 | mask[t, i] = 1; 61 | } 62 | } 63 | 64 | return new Batch() { Source = source, Target = target, Mask = mask}; 65 | } 66 | 67 | public Batch SampleNewBatch() 68 | { 69 | var random = Random.NextDouble(); 70 | int bucketId; 71 | for (bucketId = 0; bucketId < Data.NumBuckets - 1; bucketId++) 72 | { 73 | if (random <= CumulativeProbabilities[bucketId]) break; 74 | } 75 | return SampleNewBatch(bucketId); 76 | } 77 | 78 | public IEnumerable Iterator(int numEpochs) 79 | { 80 | for (var i = 0; i < numEpochs; ++i) 81 | yield return SampleNewBatch(); 82 | } 83 | } 84 | } -------------------------------------------------------------------------------- /tutorial/samples/WMT/Model.cs: -------------------------------------------------------------------------------- 1 | using AleaTK; 2 | using AleaTK.ML; 3 | using AleaTK.ML.Operator; 4 | using Library = AleaTK.ML.Library; 5 | 6 | namespace Tutorial.Samples 7 | { 8 | public class Config 9 | { 10 | public double InitScale; 11 | public double LearningRate; 12 | public 
double MaxGradNorm; 13 | public int NumLayers; 14 | public int HiddenSize; 15 | public int ReduceLearningRateAfterEpoch; 16 | public int NumEpochs; 17 | public double DropoutProbability; 18 | public double LearningRateDecay; 19 | public int BatchSize; 20 | public int VocabularySize; 21 | } 22 | 23 | public class Model 24 | { 25 | public Model(Context ctx, int numInputSteps, Config cfg, bool isTraining = true) 26 | { 27 | var addDropout = isTraining && cfg.DropoutProbability > 0.0; 28 | 29 | EncoderInputs = Library.Variable(PartialShape.Create(numInputSteps, cfg.BatchSize)); 30 | Embedding = new Embedding(EncoderInputs, cfg.VocabularySize, cfg.HiddenSize, initScale: cfg.InitScale); 31 | 32 | EmbeddingOutput = addDropout ? new Dropout(Embedding.Output, cfg.DropoutProbability).Output : Embedding.Output; 33 | 34 | var rnnType = new LstmRnnType(); 35 | EncoderRnn = new Rnn(rnnType, EmbeddingOutput, cfg.NumLayers, cfg.HiddenSize, isTraining: isTraining, dropout: addDropout ? cfg.DropoutProbability : 0.0); 36 | EncoderRnnOutput = addDropout ? new Dropout(EncoderRnn.Y, cfg.DropoutProbability).Output : EncoderRnn.Y; 37 | 38 | // attention model 39 | 40 | 41 | } 42 | 43 | public Variable EncoderInputs { get; } 44 | 45 | public Embedding Embedding { get; } 46 | 47 | public Variable EmbeddingOutput { get; } 48 | 49 | public Rnn EncoderRnn { get; } 50 | 51 | public Variable EncoderRnnOutput { get; } 52 | } 53 | } -------------------------------------------------------------------------------- /tutorial/samples/WMT/Program.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using System.Text; 5 | using System.Threading.Tasks; 6 | 7 | namespace WMT 8 | { 9 | class Program 10 | { 11 | static void Main(string[] args) 12 | { 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /tutorial/samples/WMT/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 8 | [assembly: AssemblyTitle("WMT")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("")] 12 | [assembly: AssemblyProduct("WMT")] 13 | [assembly: AssemblyCopyright("Copyright © 2016")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 
20 | [assembly: ComVisible(false)] 21 | 22 | // The following GUID is for the ID of the typelib if this project is exposed to COM 23 | [assembly: Guid("b76bf3fa-4cc4-4d7a-ac49-557ad693cc06")] 24 | 25 | // Version information for an assembly consists of the following four values: 26 | // 27 | // Major Version 28 | // Minor Version 29 | // Build Number 30 | // Revision 31 | // 32 | // You can specify all the values or you can default the Build and Revision Numbers 33 | // by using the '*' as shown below: 34 | // [assembly: AssemblyVersion("1.0.*")] 35 | [assembly: AssemblyVersion("1.0.0.0")] 36 | [assembly: AssemblyFileVersion("1.0.0.0")] 37 | -------------------------------------------------------------------------------- /tutorial/samples/WMT/Vocabulary.cs: -------------------------------------------------------------------------------- 1 | using System.Collections.Generic; 2 | using System.IO; 3 | using System.Linq; 4 | using System.Text.RegularExpressions; 5 | 6 | namespace Tutorial.Samples 7 | { 8 | public class Vocabulary 9 | { 10 | public static readonly string Pad = "_PAD_"; 11 | public static readonly string Go = "_GO_"; 12 | public static readonly string Eos = "_EOS_"; 13 | public static readonly string Unk = "_UNK_"; 14 | public static string[] SpecialWords = { Pad, Go, Eos, Unk }; 15 | 16 | public static readonly int PadId = 0; 17 | public static readonly int GoId = 1; 18 | public static readonly int EosId = 2; 19 | public static readonly int UnkId = 3; 20 | 21 | public Dictionary WordHistogram { get; } 22 | 23 | public string[] Words { get; } 24 | 25 | public Dictionary TokenIds { get; } 26 | 27 | private static string[] SplitWord(string word) 28 | { 29 | return Regex.Split(word, "([.,!?\"':;)(])"); 30 | } 31 | 32 | public static string[] Tokenizer(string sentence) 33 | { 34 | var parts = sentence.Trim().Split(null); 35 | return parts.Select(SplitWord).SelectMany(i => i).ToArray(); 36 | } 37 | 38 | public static string NormalizeDigits(string token) 39 | { 40 | return Regex.Replace(token, @"\d+", "0"); 41 | } 42 | 43 | public Vocabulary(IEnumerable words) 44 | { 45 | var enumerable = words as string[] ?? words.ToArray(); 46 | WordHistogram = enumerable.ToDictionary(x => x, x => 1); 47 | Words = enumerable.ToArray(); 48 | TokenIds = Words.Select((w, i) => new KeyValuePair(w, i)).ToDictionary(x => x.Key, x => x.Value); 49 | } 50 | 51 | public Vocabulary(Dictionary wordHistogram, int maxVocabularySize) 52 | { 53 | var ordered = wordHistogram.OrderByDescending(kv => kv.Value); 54 | var special = SpecialWords.Select(w => new KeyValuePair(w, -1)); 55 | WordHistogram = special.Concat(ordered).Take(maxVocabularySize).ToDictionary(x => x.Key, x => x.Value); 56 | Words = WordHistogram.Keys.ToArray(); 57 | TokenIds = Words.Select((w, i) => new KeyValuePair(w, i)).ToDictionary(x => x.Key, x => x.Value); 58 | } 59 | 60 | public int TokenId(string word) 61 | { 62 | return TokenIds.ContainsKey(word) ? TokenIds[word] : UnkId; 63 | } 64 | 65 | public int[] SentenceToTokenIds(string sentence, bool normalizeDigits = true) 66 | { 67 | var tokens = Tokenizer(sentence); 68 | return tokens.Select(tok => normalizeDigits ? 
TokenId(NormalizeDigits(tok)) : TokenId(tok)).ToArray(); 69 | } 70 | 71 | public void Save(string filename) 72 | { 73 | using (var file = new StreamWriter(filename)) 74 | { 75 | foreach (var kv in WordHistogram) 76 | { 77 | file.WriteLine($"{kv.Key} {kv.Value}"); 78 | } 79 | } 80 | } 81 | 82 | public static Vocabulary Load(string filename) 83 | { 84 | var wordHistogram = new Dictionary(); 85 | using (var file = new StreamReader(filename)) 86 | { 87 | string line; 88 | while ((line = file.ReadLine()) != null) 89 | { 90 | var parts = line.Trim().Split(); 91 | if (!string.IsNullOrEmpty(parts[0])) wordHistogram.Add(parts[0], int.Parse(parts[1])); 92 | } 93 | } 94 | return new Vocabulary(wordHistogram, wordHistogram.Count); 95 | } 96 | } 97 | } -------------------------------------------------------------------------------- /tutorial/samples/WMT/paket.references: -------------------------------------------------------------------------------- 1 | NUnit 2 | Alea 3 | SharpZipLib -------------------------------------------------------------------------------- /version_list.html: -------------------------------------------------------------------------------- 1 | 9 | --------------------------------------------------------------------------------