├── bundles
│   ├── empty.txt
│   ├── Furnace-cpu
│   │   ├── Empty.fs
│   │   └── Furnace-cpu.fsproj
│   ├── Furnace-lite
│   │   ├── Empty.fs
│   │   └── Furnace-lite.fsproj
│   ├── Furnace-cuda
│   │   ├── Empty.fs
│   │   └── Furnace-cuda.fsproj
│   ├── Furnace-cuda-linux
│   │   ├── Empty.fs
│   │   └── Furnace-cuda-linux.fsproj
│   └── Furnace-cuda-windows
│       ├── Empty.fs
│       └── Furnace-cuda-windows.fsproj
├── docs
│   ├── _template.fsx
│   ├── _template.ipynb
│   ├── optimization.fsx
│   ├── tutorial-gan.fsx
│   ├── tutorial-vae.fsx
│   ├── nested-derivatives.fsx
│   ├── tutorial-classifier.fsx
│   ├── tutorial-language.fsx
│   ├── probability-distributions.fsx
│   ├── differentiable-programming.fsx
│   ├── test.png
│   ├── favicon.ico
│   ├── content
│   │   ├── img
│   │   │   ├── copy.png
│   │   │   ├── github.png
│   │   │   ├── copy-md.png
│   │   │   ├── copy-blue.png
│   │   │   ├── github-blue.png
│   │   │   └── copy-md-blue.png
│   │   └── fsdocs-custom.css
│   ├── img
│   │   ├── Furnace-logo.png
│   │   ├── anim-intro-1.gif
│   │   ├── anim-intro-2.gif
│   │   ├── Furnace-logo-text.png
│   │   ├── badge-script.svg
│   │   ├── badge-binder.svg
│   │   └── badge-notebook.svg
│   ├── models.fsx
│   ├── README.md
│   ├── tensors.fsx
│   └── Dockerfile
├── .idea
│   ├── .idea.DiffSharp
│   │   └── .idea
│   │       ├── .name
│   │       ├── encodings.xml
│   │       ├── vcs.xml
│   │       ├── indexLayout.xml
│   │       └── .gitignore
│   └── .idea.Furnace
│       └── .idea
│           ├── encodings.xml
│           ├── vcs.xml
│           ├── indexLayout.xml
│           └── .gitignore
├── .DS_Store
├── Dockerfile
├── RELEASE_NOTES.md
├── .gitattributes
├── fsharplint.json
├── .config
│   └── dotnet-tools.json
├── tests
│   ├── Furnace.Tests
│   │   ├── Program.fs
│   │   ├── TestRandom.fs
│   │   ├── TestModel.Sequential.fs
│   │   ├── TestUtils.fs
│   │   ├── TestModel.Linear.fs
│   │   ├── TestOp.Outer.fs
│   │   ├── TestModel.Dropout.fs
│   │   ├── TestModel.VAE.fs
│   │   ├── TestCombos.fs
│   │   ├── TestModel.Conv.fs
│   │   ├── TestModel.ConvTranspose.fs
│   │   ├── Furnace.Tests.fsproj
│   │   ├── TestModel.Recurrent.fs
│   │   ├── TestOptim.fs
│   │   ├── TestPlotHelpers.fs
│   │   └── TestOp.Det.fs
│   ├── Furnace.Benchmarks
│   │   ├── Program.fs
│   │   ├── PerfMatrix.fs
│   │   ├── analysis.md
│   │   └── Furnace.Benchmarks.fsproj
│   └── Furnace.Benchmarks.Python
│       ├── Furnace.Benchmarks.Python.fsproj
│       ├── Program.fs
│       ├── results.csv
│       └── BasicTensorOpsPerfPython.fs
├── src
│   ├── Furnace.Backends.Reference
│   │   └── Furnace.Backends.Reference.fsproj
│   ├── Furnace.Core
│   │   ├── Model.Sequential.fs
│   │   ├── Op.Inv.fs
│   │   ├── Op.Outer.fs
│   │   ├── Printer.fs
│   │   ├── Model.Linear.fs
│   │   ├── Op.Solve.fs
│   │   ├── Op.Det.fs
│   │   ├── Op.Norm.fs
│   │   ├── Furnace.Core.fsproj
│   │   ├── Model.Dropout.fs
│   │   ├── Op.BMM.fs
│   │   ├── Furnace.Shorten.fs
│   │   ├── Device.fs
│   │   ├── Backend.fs
│   │   ├── Scalar.fs
│   │   ├── Model.VAE.fs
│   │   ├── Model.Conv.fs
│   │   ├── Model.ConvTranspose.fs
│   │   ├── Dtype.fs
│   │   └── Data.fs
│   ├── Furnace.Backends.Torch
│   │   ├── Furnace.Backends.Torch.fsproj
│   │   └── Furnace.Torch.fs
│   └── Furnace.Data
│       ├── Furnace.Data.fsproj
│       └── Image.fs
├── codecov.yml
├── .github
│   ├── workflows
│   │   ├── pull-request-debug.yml
│   │   ├── pull-request-release.yml
│   │   └── build-test-docs-publish.yml
│   └── actions
│       └── daily-perf-improver
│           └── build-steps
│               └── action.yml
├── LICENSE
├── tools
│   └── generate_slice_code.py
├── examples
│   ├── differentiable_programming.fsx
│   ├── vae_cnn.fsx
│   ├── rnn.fsx
│   ├── classifier.fsx
│   └── vae.fsx
├── README.md
├── Directory.Build.props
├── .dockerignore
├── .gitignore
└── DEVGUIDE.md
/bundles/empty.txt:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/_template.fsx:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/_template.ipynb:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/optimization.fsx:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/tutorial-gan.fsx:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/tutorial-vae.fsx:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/nested-derivatives.fsx:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/tutorial-classifier.fsx:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/tutorial-language.fsx:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/probability-distributions.fsx:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.idea/.idea.DiffSharp/.idea/.name:
--------------------------------------------------------------------------------
1 | DiffSharp
--------------------------------------------------------------------------------
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/.DS_Store
--------------------------------------------------------------------------------
/docs/differentiable-programming.fsx:
--------------------------------------------------------------------------------
1 | // PyTorch style
2 |
3 | // Furnace style
--------------------------------------------------------------------------------
/docs/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/test.png
--------------------------------------------------------------------------------
/docs/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/favicon.ico
--------------------------------------------------------------------------------
/docs/content/img/copy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/content/img/copy.png
--------------------------------------------------------------------------------
/docs/content/img/github.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/content/img/github.png
--------------------------------------------------------------------------------
/docs/img/Furnace-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/img/Furnace-logo.png
--------------------------------------------------------------------------------
/docs/img/anim-intro-1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/img/anim-intro-1.gif
--------------------------------------------------------------------------------
/docs/img/anim-intro-2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/img/anim-intro-2.gif
--------------------------------------------------------------------------------
/docs/content/img/copy-md.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/content/img/copy-md.png
--------------------------------------------------------------------------------
/docs/content/img/copy-blue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/content/img/copy-blue.png
--------------------------------------------------------------------------------
/docs/content/img/github-blue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/content/img/github-blue.png
--------------------------------------------------------------------------------
/docs/img/Furnace-logo-text.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/img/Furnace-logo-text.png
--------------------------------------------------------------------------------
/docs/content/img/copy-md-blue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsprojects/Furnace/HEAD/docs/content/img/copy-md-blue.png
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/dotnet/sdk:6.0
2 | WORKDIR /code/Furnace
3 | COPY . /code/Furnace
4 | RUN dotnet build
5 | RUN dotnet test
6 |
--------------------------------------------------------------------------------
/RELEASE_NOTES.md:
--------------------------------------------------------------------------------
1 | See [Releases](https://github.com/DiffSharp/DiffSharp/releases) in the GitHub repository for the release notes and corresponding git tags of each release.
2 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | * text=auto eol=lf
2 | *.{cmd,[cC][mM][dD]} text eol=crlf
3 | *.{bat,[bB][aA][tT]} text eol=crlf
4 |
5 | .github/workflows/*.lock.yml linguist-generated=true merge=ours
--------------------------------------------------------------------------------
/bundles/Furnace-cpu/Empty.fs:
--------------------------------------------------------------------------------
1 | namespace Furnace
2 |
3 | // This project is to bundle Furnace.Core and some default backends into a single project
4 | // See Furnace.Core for main Furnace code
--------------------------------------------------------------------------------
/bundles/Furnace-lite/Empty.fs:
--------------------------------------------------------------------------------
1 | namespace Furnace
2 |
3 | // This project is to bundle Furnace.Core and some default backends into a single project
4 | // See Furnace.Core for main Furnace code
--------------------------------------------------------------------------------
/bundles/Furnace-cuda/Empty.fs:
--------------------------------------------------------------------------------
1 | namespace DiffSharp
2 |
3 | // This project is to bundle DiffSharp.Core and some default backends into a single project
4 | // See DiffSharp.Core for main DiffSharp code
--------------------------------------------------------------------------------
/bundles/Furnace-cuda-linux/Empty.fs:
--------------------------------------------------------------------------------
1 | namespace DiffSharp
2 |
3 | // This project is to bundle DiffSharp.Core and some default backends into a single project
4 | // See DiffSharp.Core for main DiffSharp code
--------------------------------------------------------------------------------
/bundles/Furnace-cuda-windows/Empty.fs:
--------------------------------------------------------------------------------
1 | namespace DiffSharp
2 |
3 | // This project is to bundle DiffSharp.Core and some default backends into a single project
4 | // See DiffSharp.Core for main DiffSharp code
--------------------------------------------------------------------------------
/fsharplint.json:
--------------------------------------------------------------------------------
1 | {
2 | "ignoreFiles": [
3 | "*assemblyinfo.*"
4 | ],
5 | "formatting": {
6 | "memberNames": {
7 | "enabled": false
8 | }
9 | }
10 | }
--------------------------------------------------------------------------------
/.idea/.idea.DiffSharp/.idea/encodings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/.idea.DiffSharp/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/.idea.Furnace/.idea/encodings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/.idea.Furnace/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/docs/content/fsdocs-custom.css:
--------------------------------------------------------------------------------
1 |
2 | /*--------------------------------------------------------------------------
3 | Customize your CSS here
4 | /*--------------------------------------------------------------------------*/
5 |
6 |
--------------------------------------------------------------------------------
/.config/dotnet-tools.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": 1,
3 | "isRoot": true,
4 | "tools": {
5 | "fsdocs-tool": {
6 | "version": "20.0.0",
7 | "commands": [
8 | "fsdocs"
9 | ]
10 | }
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/.idea/.idea.Furnace/.idea/indexLayout.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/.idea.DiffSharp/.idea/indexLayout.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/tests/Furnace.Tests/Program.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | module Program = let [] main _ = 0
--------------------------------------------------------------------------------
/.idea/.idea.Furnace/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Rider ignored files
5 | /.idea.Furnace.iml
6 | /contentModel.xml
7 | /modules.xml
8 | /projectSettingsUpdater.xml
9 | # Editor-based HTTP Client requests
10 | /httpRequests/
11 | # Datasource local storage ignored files
12 | /dataSources/
13 | /dataSources.local.xml
14 |
--------------------------------------------------------------------------------
/.idea/.idea.DiffSharp/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Rider ignored files
5 | /modules.xml
6 | /.idea.DiffSharp.iml
7 | /contentModel.xml
8 | /projectSettingsUpdater.xml
9 | # Editor-based HTTP Client requests
10 | /httpRequests/
11 | # Datasource local storage ignored files
12 | /dataSources/
13 | /dataSources.local.xml
14 |
--------------------------------------------------------------------------------
/tests/Furnace.Benchmarks/Program.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | open BenchmarkDotNet.Running
7 |
8 | []
9 | let main args =
10 | let _summary = BenchmarkSwitcher.FromAssembly(System.Reflection.Assembly.GetExecutingAssembly()).Run(args)
11 | 0
12 |
--------------------------------------------------------------------------------
/bundles/Furnace-cuda/Furnace-cuda.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | net6.0
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/src/Furnace.Backends.Reference/Furnace.Backends.Reference.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | netstandard2.1
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Model.Sequential.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace.Model
7 |
8 |
9 | type Sequential(models: seq) =
10 | inherit Model()
11 | do base.addModel(models |> Seq.toArray)
12 | override _.forward(value) =
13 | models |> Seq.fold (fun v m -> m.forward v) value
14 | override m.ToString() = sprintf "Sequential(%A)" m.children
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 | range: 50..75
3 | round: nearest
4 | precision: 2
5 | status:
6 | project:
7 | default:
8 | target: auto
9 | threshold: 10%
10 | patch:
11 | default:
12 | target: auto
13 | threshold: 10%
14 |
15 | comment:
16 | layout: "reach, diff, flags, files"
17 | behavior: default
18 | require_changes: false # if true: only post the comment if coverage changes
19 | require_base: no # [yes :: must have a base report to post]
20 | require_head: yes # [yes :: must have a head report to post]
21 |
--------------------------------------------------------------------------------
/docs/models.fsx:
--------------------------------------------------------------------------------
1 | (*** condition: prepare ***)
2 | #I "../tests/Furnace.Tests/bin/Debug/net6.0"
3 | #r "Furnace.Core.dll"
4 | #r "Furnace.Data.dll"
5 | #r "Furnace.Backends.Reference.dll"
6 | #r "Furnace.Backends.Torch.dll"
7 | // These are needed to make fsdocs --eval work. If we don't select a backend like this in the beginning, we get erratic behavior.
8 | Furnace.FurnaceImage.config(backend=Furnace.Backend.Reference)
9 | Furnace.FurnaceImage.seed(123)
10 |
11 | (**
12 | Test
13 | *)
14 |
15 | open Furnace
16 |
17 | FurnaceImage.config(backend=Backend.Reference)
18 |
19 | let a = FurnaceImage.tensor([1,2,3])
20 | printfn "%A" a
21 | (*** include-fsi-output ***)
--------------------------------------------------------------------------------
/src/Furnace.Backends.Torch/Furnace.Backends.Torch.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | net6.0
5 | x64
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/bundles/Furnace-lite/Furnace-lite.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | net6.0
5 | x64
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/.github/workflows/pull-request-debug.yml:
--------------------------------------------------------------------------------
1 | name: Build and test (Debug)
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - '**'
7 |
8 | jobs:
9 | build_windows:
10 |
11 | runs-on: windows-latest
12 |
13 | steps:
14 | - uses: actions/checkout@v2
15 | - name: Setup dotnet
16 | uses: actions/setup-dotnet@v1
17 | with:
18 | dotnet-version: 6.0.x
19 | - name: Install dependencies
20 | run: dotnet restore
21 | - name: Install tool dependencies
22 | run: dotnet tool restore
23 | - name: Build
24 | run: dotnet build --configuration Debug --no-restore --verbosity normal
25 | - name: Test
26 | run: dotnet test --configuration Debug --no-restore --verbosity normal
27 | - name: Run fsdocs
28 | run: dotnet fsdocs build --eval --strict
29 |
--------------------------------------------------------------------------------
/src/Furnace.Data/Furnace.Data.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | netstandard2.1
5 | true
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/bundles/Furnace-cuda-linux/Furnace-cuda-linux.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | net6.0
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/bundles/Furnace-cuda-windows/Furnace-cuda-windows.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | net6.0
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Op.Inv.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace
7 |
8 | []
9 | module OpInvExtensions =
10 |
11 | type Tensor with
12 | member a.inv() =
13 | Shape.checkCanInvert a.shape
14 | Tensor.Op
15 | { new UnaryOp("inv") with
16 | member _.fRaw(a) = a.InverseT()
17 | member _.ad_dfda(a,ad,f) = -f.matmul(ad).matmul(f)
18 | member _.fd_dfda(a,f,fd) = let ft = f.transpose(-1, -2) in -ft.matmul(fd).matmul(ft)
19 | }
20 | (a)
21 |
22 | type FurnaceImage with
23 | static member inv(a:Tensor) = a.inv()
24 |
--------------------------------------------------------------------------------
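A short usage sketch for the `inv` extension above. The tensor values are illustrative, and it assumes the auto-opened extension module is brought into scope by `open Furnace` with a backend already configured:

```fsharp
open Furnace

// Illustrative 2x2 matrix
let a = FurnaceImage.tensor([[4.0; 7.0]; [2.0; 6.0]])
let aInv = a.inv()

printfn "%A" (a.matmul(aInv))       // approximately the 2x2 identity matrix
printfn "%A" (FurnaceImage.inv(a))  // equivalent static form
```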
/bundles/Furnace-cpu/Furnace-cpu.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | net6.0
5 | x64
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestRandom.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open NUnit.Framework
9 | open Furnace.Util
10 |
11 | []
12 | type TestRandom () =
13 |
14 | []
15 | member _.TestRandomSeed () =
16 | Random.Seed(1)
17 | let a1 = Random.Uniform()
18 | Random.Seed(1)
19 | let a2 = Random.Uniform()
20 | let a3 = Random.Uniform()
21 |
22 | Assert.AreEqual(a1, a2)
23 | Assert.AreNotEqual(a2, a3)
24 |
25 | []
26 | member _.TestRandomUUID () =
27 | Random.Seed(1)
28 | let a1 = Random.UUID()
29 | Random.Seed(1)
30 | let a2 = Random.UUID()
31 | let a3 = Random.UUID()
32 |
33 | Assert.AreEqual(a1, a2)
34 | Assert.AreNotEqual(a2, a3)
35 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Op.Outer.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace
7 |
8 | []
9 | module OpOuterExtensions =
10 |
11 | type Tensor with
12 | /// Outer product of two tensors.
13 | /// The second tensor.
14 | member a.outer(b:Tensor) =
15 | match a.dim, b.dim with
16 | | 1, 1 -> a.unsqueeze(1).matmul(b.unsqueeze(0))
17 | | 2, 2 when a.shape[0] = b.shape[0] -> a.unsqueeze(2).bmm(b.unsqueeze(1)) // Batched outer product
18 | | _ -> failwithf "Outer product unsupported for tensor shapes %A %A" a.shape b.shape
19 |
20 | type FurnaceImage with
21 | /// Outer product of two tensors.
22 | /// The first tensor.
23 | /// The second tensor.
24 | static member outer(a:Tensor, b:Tensor) = a.outer(b)
25 |
--------------------------------------------------------------------------------
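A usage sketch for `outer`, covering the two cases the implementation above handles (illustrative shapes; assumes `open Furnace` and a configured backend):

```fsharp
open Furnace

// 1-d case: outer product of a length-2 and a length-3 vector gives a 2x3 matrix
let u = FurnaceImage.tensor([1.0; 2.0])
let v = FurnaceImage.tensor([3.0; 4.0; 5.0])
printfn "%A" (u.outer(v))

// 2-d case: batched outer product over a shared leading batch dimension
let ub = FurnaceImage.randn([8; 2])
let vb = FurnaceImage.randn([8; 3])
printfn "%A" (FurnaceImage.outer(ub, vb).shape)   // expected [8; 2; 3]
```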
/src/Furnace.Core/Printer.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace
7 |
8 | type Printer =
9 | | Default
10 | | Short
11 | | Full
12 | | Custom of threshold: int * edgeItems: int * precision: int
13 |
14 | member p.threshold =
15 | match p with
16 | | Default -> 100
17 | | Short -> 10
18 | | Full -> System.Int32.MaxValue
19 | | Custom(t, _, _) -> t
20 |
21 | member p.edgeItems =
22 | match p with
23 | | Default -> 3
24 | | Short -> 2
25 | | Full -> -1
26 | | Custom(_, e, _) -> e
27 |
28 | member p.precision =
29 | match p with
30 | | Default -> 4
31 | | Short -> 2
32 | | Full -> 4
33 | | Custom(_, _, p) -> p
34 |
35 | /// Contains functions and settings related to print options.
36 | module Printer =
37 |
38 | /// Get or set the default printer used when printing tensors. Note, use FurnaceImage.config(...) instead.
39 | let mutable Default : Printer = Printer.Default
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Running notebooks in MyBinder
4 |
5 | The `Dockerfile` and `NuGet.config` allow us to run generated notebooks in [MyBinder](https://mybinder.org)
6 |
7 | * `master` branch of fsprojects/Furnace: [](https://mybinder.org/v2/gh/fsprojects/Furnace/master)
8 |
9 | # Generating docs
10 |
11 |
12 | To iterate on docs (requires evaluation off since DLLs get locked)
13 |
14 | dotnet fsdocs watch
15 |
16 | To use a local build of FSharp.Formatting:
17 |
18 | git clone https://github.com/fsprojects/FSharp.Formatting ../FSharp.Formatting
19 | pushd ..\FSharp.Formatting
20 | .\build
21 | popd
22 |
23 |
24 | Then:
25 |
26 | ..\FSharp.Formatting\src\FSharp.Formatting.CommandTool\bin\Debug\net6.0\fsdocs.exe watch
27 | ..\FSharp.Formatting\src\FSharp.Formatting.CommandTool\bin\Debug\net6.0\fsdocs.exe build --clean --eval
28 |
29 | ## Generated Notebooks
30 |
31 | Notebooks are generated for all .md and .fsx files under docs as part of the build.
32 |
33 | * Dockerfile - see https://github.com/dotnet/interactive/blob/master/docs/CreateBinder.md
34 |
35 | * NuGet.config - likewise
36 |
37 | See MyBinder for creating URLs
38 |
--------------------------------------------------------------------------------
/src/Furnace.Backends.Torch/Furnace.Torch.fs:
--------------------------------------------------------------------------------
1 | namespace Furnace
2 |
3 | open Furnace
4 | open Furnace.Backends.Torch
5 | open TorchSharp
6 |
7 | []
8 | module TorchExtensions =
9 |
10 | type FurnaceImage with
11 |
12 | ///
13 | /// Creates a new Furnace tensor from the torch tensor.
14 | ///
15 | static member fromTorch(tt: torch.Tensor) =
16 | Tensor.ofRawTensor(TorchRawTensor(tt))
17 |
18 | type Tensor with
19 | ///
20 | /// Converts the primal of a tensor to a torch tensor.
21 | ///
22 | ///
23 | /// If the tensor does not use the Torch backend an exception is raised.
24 | ///
25 | /// Note that this operation takes the primal of the tensor. This means
26 | /// code that converts to Torch tensors will not be differentiable using
27 | /// Furnace differentiation capabilities.
28 | ///
29 | member t.toTorch() =
30 | match t.primalRaw with
31 | | :? TorchRawTensor as trt -> trt.TorchTensor
32 | | _ -> failwith $"toTorch: the input is not a Furnace.Backends.Torch tensor, its backend is {t.backend}"
33 |
34 |
--------------------------------------------------------------------------------
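A round-trip sketch for the Torch interop extensions above. It assumes the Torch backend and its native TorchSharp packages are installed, and that `Backend.Torch` selects that backend (as in upstream DiffSharp):

```fsharp
open Furnace

FurnaceImage.config(backend=Backend.Torch)   // assumed backend selector

let t = FurnaceImage.tensor([1.0; 2.0; 3.0])

// Furnace tensor -> TorchSharp tensor; this takes the primal, so it is not
// differentiable through Furnace's AD (see the remarks above)
let tt = t.toTorch()

// TorchSharp tensor -> Furnace tensor
let t2 = FurnaceImage.fromTorch(tt)
printfn "%A" t2
```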
/tests/Furnace.Benchmarks.Python/Furnace.Benchmarks.Python.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | false
6 |
7 |
8 |
9 | Exe
10 | net6.0
11 | false
12 | $(DefineConstants);TINY
13 | x64
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/tests/Furnace.Benchmarks/PerfMatrix.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace.Benchmarks
7 |
8 | open BenchmarkDotNet.Attributes
9 | open BenchmarkDotNet.Configs
10 | open BenchmarkDotNet.Columns
11 | open BenchmarkDotNet.Running
12 | open BenchmarkDotNet.Order
13 |
14 | type BasicTensorTestMatrix() =
15 |
16 | member val public workloadSize = pown 2 18
17 |
18 | #if TINY
19 | []
20 | #else
21 | []
22 | #endif
23 | member val public tensorSize = 0 with get, set
24 |
25 | #if TINY
26 | []
27 | #else
28 | []
29 | #endif
30 | member val public dtypeName = "" with get, set
31 |
32 | #if TINY
33 | []
34 | #else
35 | []
36 | #endif
37 | member val public deviceName = "" with get, set
38 |
39 | member perf.numIterations(factor) = factor * perf.workloadSize / perf.tensorSize
40 | member perf.caseId = sprintf "tensorSize=%d,dtypeName=%s,deviceName=%s" perf.tensorSize perf.dtypeName perf.deviceName
41 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Model.Linear.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace.Model
7 |
8 | open Furnace
9 |
10 | /// A model that applies a linear transformation to the incoming data: \(y = xA^T + b\)
11 | type Linear(inFeatures, outFeatures, ?bias:bool) =
12 | inherit Model()
13 | let biasv = defaultArg bias true
14 | let w = Parameter(Weight.kaiming(inFeatures, outFeatures))
15 | let k = 1./sqrt (float outFeatures)
16 | let b = Parameter(if biasv then Weight.uniform([|outFeatures|], k) else FurnaceImage.tensor([]))
17 | do base.addParameter((w, "Linear-weight"), (b, "Linear-bias"))
18 |
19 | /// Get or set the weight parameter of the model
20 | member _.weight
21 | with get() = w.value
22 | and set v = w.value <- v
23 |
24 | /// Get or set the bias parameter of the model
25 | member _.bias
26 | with get() = b.value
27 | and set v = b.value <- v
28 |
29 | /// TBD
30 | override _.ToString() = sprintf "Linear(%A, %A)" inFeatures outFeatures
31 |
32 | /// TBD
33 | override _.forward(value) =
34 | let f = FurnaceImage.matmul(value, w.value)
35 | if biasv then f + b.value else f
--------------------------------------------------------------------------------
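A minimal usage sketch for `Linear` (illustrative sizes; assumes `open Furnace` and `open Furnace.Model` with a configured backend):

```fsharp
open Furnace
open Furnace.Model

// A linear layer mapping 3 input features to 2 output features (bias on by default)
let layer = Linear(3, 2)

let x = FurnaceImage.randn([4; 3])   // a batch of 4 samples
let y = layer.forward(x)             // expected shape [4; 2]
printfn "%A" y.shape

// The weight and bias parameters are exposed as settable tensors
printfn "%A" layer.weight.shape
printfn "%A" layer.bias.shape
```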
/tests/Furnace.Tests/TestModel.Sequential.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open NUnit.Framework
9 | open Furnace
10 | open Furnace.Compose
11 | open Furnace.Model
12 | open Furnace.Data
13 | open Furnace.Optim
14 |
15 |
16 | []
17 | type TestModelSequential () =
18 |
19 | []
20 | member _.TestModelSequential () =
21 | let m1 = Linear(1, 2)
22 | let m2 = Linear(2, 3)
23 | let m3 = Linear(3, 4)
24 |
25 | let m = m1 --> m2 --> m3
26 | let mSequential = Sequential([m1;m2;m3])
27 |
28 | let x = FurnaceImage.randn([1;1])
29 | let y = x --> m
30 | let ySequential = x --> mSequential
31 |
32 | Assert.True(ySequential.allclose(y))
33 |
34 | []
35 | member _.TestModelSequentialSaveLoadState () =
36 | let batchSize = 4
37 | let inFeatures = 1
38 | let m1 = Linear(1, 2)
39 | let m2 = Linear(2, 3)
40 | let m3 = Linear(3, 4)
41 | let net = Sequential([m1;m2;m3])
42 |
43 | let fileName = System.IO.Path.GetTempFileName()
44 | FurnaceImage.save(net.state, fileName)
45 | let _ = FurnaceImage.randn([batchSize; inFeatures]) --> net
46 | net.state <- FurnaceImage.load(fileName)
47 | Assert.True(true)
--------------------------------------------------------------------------------
/src/Furnace.Core/Op.Solve.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace
7 |
8 | []
9 | module OpSolveExtensions =
10 |
11 | type Tensor with
12 | member a.solve(b:Tensor) =
13 | let _ = Shape.checkCanSolve a.shape b.shape
14 | Tensor.Op
15 | { new BinaryOp("solve") with
16 | member _.fRaw(a,b) = a.SolveTT(b)
17 | member _.ad_dfda(a,ad,b,f) =
18 | match a.dim, b.dim with
19 | | 3, 2 -> let aa:Tensor = a.solve(-ad.matmul(f.unsqueeze(-1))) in aa.squeeze(-1)
20 | | _ -> a.solve(-ad.matmul(f))
21 | member _.bd_dfdb(a,b,bd,f) = a.solve(bd)
22 | member _.fd_dfda(a,b,f,fd) =
23 | let ba = a.transpose(-2, -1).solve(fd)
24 | match a.dim, b.dim with
25 | | 2, 1 -> -ba.outer(f)
26 | | 3, 2 -> -ba.unsqueeze(-1).matmul(f.unsqueeze(-1).transpose(-2, -1))
27 | | _ -> -ba.matmul(f.transpose(-2, -1))
28 | member _.fd_dfdb(a,b,f,fd) = a.transpose(-2, -1).solve(fd)
29 | }
30 | (a,b)
31 |
32 | type FurnaceImage with
33 | static member solve(a:Tensor, b:Tensor) = a.solve(b)
34 |
--------------------------------------------------------------------------------
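A usage sketch for `solve` (illustrative values; assumes `open Furnace` and a configured backend):

```fsharp
open Furnace

// Solve the linear system a x = b for x; the expected solution is roughly [2; 3]
let a = FurnaceImage.tensor([[3.0; 1.0]; [1.0; 2.0]])
let b = FurnaceImage.tensor([9.0; 8.0])
let x = a.solve(b)

printfn "%A" x
printfn "%A" (a.matmul(x))   // should be close to b
```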
/src/Furnace.Core/Op.Det.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace
7 |
8 | []
9 | module OpDetExtensions =
10 |
11 | type Tensor with
12 | member a.det() =
13 | Shape.checkCanDet a.shape
14 | Tensor.Op
15 | { new UnaryOp("det") with
16 | member _.fRaw(a) = a.DetT()
17 | member _.ad_dfda(a:Tensor,ad,f) =
18 | if a.dim = 2 then
19 | // The following differs from Jacobi's formula which has a trace instead of a sum
20 | // But it is confirmed to be correct by reverse-mode-based forward-mode eval and also finite differences
21 | f * (a.inv().transpose() * ad).sum()
22 | else
23 | f * (a.inv().transpose(-1, -2) * ad).flatten(1).sum(-1)
24 | member _.fd_dfda(a,f,fd) =
25 | if a.dim = 2 then
26 | fd * f * a.inv().transpose()
27 | else
28 | // Ugly but correct
29 | fd.unsqueeze(1).unsqueeze(1) * f.unsqueeze(1).unsqueeze(1) * a.inv().transpose(-1, -2)
30 | }
31 | (a)
32 |
33 | type FurnaceImage with
34 | static member det(a:Tensor) = a.det()
35 |
--------------------------------------------------------------------------------
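A usage sketch for `det` (illustrative values; the batched call assumes `Shape.checkCanDet` accepts a batch of square matrices, which the derivative code above implies):

```fsharp
open Furnace

// Determinant of a 2x2 matrix: 4*6 - 7*2 = 10
let a = FurnaceImage.tensor([[4.0; 7.0]; [2.0; 6.0]])
printfn "%A" (a.det())

// Batched form: a 3d tensor is treated as a batch of square matrices
let batch = FurnaceImage.randn([5; 3; 3])
printfn "%A" (FurnaceImage.det(batch).shape)   // expected [5]
```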
/.github/actions/daily-perf-improver/build-steps/action.yml:
--------------------------------------------------------------------------------
1 | name: 'Daily Performance Improver Build Steps'
2 | description: 'Sets up environment for performance improvement work in Furnace'
3 | author: 'Daily Perf Improver'
4 |
5 | runs:
6 | using: 'composite'
7 | steps:
8 | - name: Setup .NET 6.0
9 | uses: actions/setup-dotnet@v4
10 | with:
11 | dotnet-version: 6.0.x
12 |
13 | - name: Install dependencies
14 | shell: bash
15 | run: dotnet restore
16 |
17 | - name: Install tool dependencies
18 | shell: bash
19 | run: dotnet tool restore
20 |
21 | - name: Build project in Debug configuration
22 | shell: bash
23 | run: dotnet build --configuration Debug --no-restore --verbosity normal
24 |
25 | - name: Build project in Release configuration
26 | shell: bash
27 | run: dotnet build --configuration Release --no-restore --verbosity normal
28 |
29 | - name: Install ReportGenerator for coverage analysis
30 | shell: bash
31 | run: dotnet tool install -g dotnet-reportgenerator-globaltool --version 5.1.26
32 |
33 | - name: Verify benchmark projects build
34 | shell: bash
35 | run: |
36 | dotnet build tests/Furnace.Benchmarks/Furnace.Benchmarks.fsproj --configuration Release --no-restore
37 | dotnet build tests/Furnace.Benchmarks.Python/Furnace.Benchmarks.Python.fsproj --configuration Release --no-restore
38 |
39 | - name: Run basic tests to ensure correctness
40 | shell: bash
41 | run: dotnet test --configuration Release --no-build --verbosity normal
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestUtils.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open System
9 | open Furnace
10 | open NUnit.Framework
11 |
12 | []
13 | module TestUtils =
14 | let isException f = Assert.Throws(TestDelegate(fun () -> f() |> ignore)) |> ignore
15 | let isInvalidOp f = Assert.Throws(TestDelegate(fun () -> f() |> ignore)) |> ignore
16 | let isAnyException f = Assert.Catch(TestDelegate(fun () -> f() |> ignore)) |> ignore
17 |
18 | type Assert with
19 |
20 | /// Like Assert.AreEqual but requires that the actual and expected are the same type
21 | static member CheckEqual (expected: 'T, actual: 'T) = Assert.AreEqual(box expected, box actual)
22 |
23 | type FurnaceImage with
24 | /// Locally use the given default configuration, returning an IDisposable to revert to the previous configuration.
25 | /// The new default element type.
26 | /// The new default device.
27 | /// The new default backend.
28 | static member useConfig(?dtype: Dtype, ?device: Device, ?backend: Backend) =
29 | let prevConfig = FurnaceImage.config()
30 | FurnaceImage.config(?dtype=dtype, ?device=device, ?backend=backend)
31 | { new System.IDisposable with member _.Dispose() = FurnaceImage.config(prevConfig) }
32 |
33 |
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestModel.Linear.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open NUnit.Framework
9 | open Furnace
10 | open Furnace.Compose
11 | open Furnace.Model
12 | open Furnace.Data
13 | open Furnace.Optim
14 |
15 |
16 | []
17 | type TestModelLinear () =
18 |
19 | []
20 | member _.TestModelLinear () =
21 | // Trains a linear regressor
22 | let n, din, dout = 4, 100, 10
23 | let inputs = FurnaceImage.randn([n; din])
24 | let targets = FurnaceImage.randn([n; dout])
25 | let net = Linear(din, dout)
26 |
27 | let lr, steps = 1e-2, 1000
28 | let loss inputs p = net.asFunction p inputs |> FurnaceImage.mseLoss targets
29 | for _ in 0..steps do
30 | let g = FurnaceImage.grad (loss inputs) net.parametersVector
31 | net.parametersVector <- net.parametersVector - lr * g
32 | let y = net.forward inputs
33 | Assert.True(targets.allclose(y, 0.01))
34 |
35 | []
36 | member _.TestModelLinearSaveLoadState () =
37 | let inFeatures = 4
38 | let outFeatures = 4
39 | let batchSize = 2
40 | let net = Linear(inFeatures, outFeatures)
41 |
42 | let fileName = System.IO.Path.GetTempFileName()
43 | FurnaceImage.save(net.state, fileName)
44 | let _ = FurnaceImage.randn([batchSize; inFeatures]) --> net
45 | net.state <- FurnaceImage.load(fileName)
46 | Assert.True(true)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 2-Clause License
2 |
3 | Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
4 | Copyright (c) 2017- Microsoft Research, Cambridge, UK (Don Syme )
5 | Copyright (c) 2014- National University of Ireland Maynooth (Barak A. Pearlmutter )
6 | Copyright (c) 2014-2016 National University of Ireland Maynooth (Atılım Güneş Baydin)
7 | All rights reserved.
8 |
9 | Redistribution and use in source and binary forms, with or without
10 | modification, are permitted provided that the following conditions are met:
11 |
12 | * Redistributions of source code must retain the above copyright notice, this
13 | list of conditions and the following disclaimer.
14 |
15 | * Redistributions in binary form must reproduce the above copyright notice,
16 | this list of conditions and the following disclaimer in the documentation
17 | and/or other materials provided with the distribution.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Op.Norm.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace
7 |
8 | []
9 | module OpNormExtensions =
10 |
11 | type Tensor with
12 | member a.norm(?order:float, ?dim:int, ?keepDim:bool) =
13 | if not (a.dtype = Dtype.Float32 || a.dtype = Dtype.Float64) then failwithf "Vector norm is only supported for Float32 and Float64 dtypes."
14 | let order = defaultArg order 2.
15 | match order, dim with
16 | | 1., None -> a.flatten().abs().sum()
17 | | 1., Some(dim) -> a.abs().sum(dim=dim, ?keepDim=keepDim)
18 | | 2., None -> let aa = a.flatten() in (aa * aa).sum().sqrt()
19 | | 2., Some(dim) -> (a * a).sum(dim=dim, ?keepDim=keepDim).sqrt()
20 | | System.Double.PositiveInfinity, None -> a.flatten().abs().max()
21 | | System.Double.PositiveInfinity, Some(dim) -> a.abs().max(dim=dim, ?keepDim=keepDim)
22 | | System.Double.NegativeInfinity, None -> a.flatten().abs().min()
23 | | System.Double.NegativeInfinity, Some(dim) -> a.abs().min(dim=dim, ?keepDim=keepDim)
24 | | 0., None -> a.ne(a.zerosLike()).cast(dtype=a.dtype).sum()
25 | | 0., Some(dim) -> a.ne(a.zerosLike()).cast(dtype=a.dtype).sum(dim=dim, ?keepDim=keepDim)
26 | | order, None -> a.abs().pow(order).sum().pow(1./order)
27 | | order, Some(dim) -> a.abs().pow(order).sum(dim=dim, ?keepDim=keepDim).pow(1./order)
28 |
29 | type FurnaceImage with
30 | static member norm(a:Tensor, ?order:float, ?dim:int, ?keepDim:bool) = a.norm(?order=order, ?dim=dim, ?keepDim=keepDim)
31 |
--------------------------------------------------------------------------------
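A usage sketch for `norm`, exercising the main `order` cases handled above (illustrative values; assumes `open Furnace` and a configured backend):

```fsharp
open Furnace

let a = FurnaceImage.tensor([[3.0; -4.0]; [0.0; 12.0]])

printfn "%A" (a.norm())                  // L2 norm over all elements: 13.0
printfn "%A" (a.norm(order=1.))          // sum of absolute values: 19.0
printfn "%A" (a.norm(order=infinity))    // largest absolute value: 12.0
printfn "%A" (a.norm(order=2., dim=1))   // per-row L2 norms: [5.0; 12.0]
```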
/src/Furnace.Core/Furnace.Core.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 | netstandard2.1
4 | true
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Model.Dropout.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace.Model
7 |
8 |
9 | /// A model which, during training, randomly zeroes some of the elements of the input tensor with probability p using samples from a Bernoulli distribution.
10 | type Dropout(?p:double) =
11 | inherit Model()
12 |
13 | /// TBD
14 | override _.ToString() = sprintf "Dropout()"
15 |
16 | /// TBD
17 | override m.forward(value) =
18 | if m.mode = Mode.Train then value.dropout(?p=p) else value
19 |
20 |
21 | /// A model which, during training, randomly zeroes out entire channels. Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution.
22 | type Dropout2d(?p:double) =
23 | inherit Model()
24 |
25 | /// TBD
26 | override _.ToString() = sprintf "Dropout2d()"
27 |
28 | /// TBD
29 | override m.forward(value) =
30 | if m.mode = Mode.Train then value.dropout2d(?p=p) else value
31 |
32 |
33 | /// A model which, during training, randomly zeroes out entire channels. Each channel will be zeroed out independently on every forward call with probability p using samples from a Bernoulli distribution.
34 | type Dropout3d(?p:double) =
35 | inherit Model()
36 |
37 | /// TBD
38 | override _.ToString() = sprintf "Dropout3d()"
39 |
40 | /// TBD
41 | override m.forward(value) =
42 | if m.mode = Mode.Train then value.dropout3d(?p=p) else value
43 |
--------------------------------------------------------------------------------
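A usage sketch for `Dropout`. The `train()`/`eval()` calls are assumed to be the usual `Model` mode switches that toggle `Mode.Train`; the rest follows the code above:

```fsharp
open Furnace
open Furnace.Model

let drop = Dropout(0.5)
let x = FurnaceImage.randn([2; 6])

// Training mode: roughly half of the elements are zeroed on each forward call
drop.train()   // assumed mode switch, see lead-in
printfn "%A" (drop.forward(x))

// Evaluation mode: the input passes through unchanged
drop.eval()    // assumed mode switch
printfn "%A" (drop.forward(x))
```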
/tools/generate_slice_code.py:
--------------------------------------------------------------------------------
1 | def generate(num_dims, fixed_dims):
2 | str = '[]'
3 | str += '\nmember t.GetSlice('
4 | prefix = ''
5 | for i in range(num_dims):
6 | if fixed_dims[i]:
7 | str += '{}i{}:int'.format(prefix, i)
8 | else:
9 | str += '{}i{}min:int option, i{}max:int option'.format(prefix, i, i)
10 | prefix = ', '
11 | str += ') ='
12 | str += '\n // Dims: {}'.format(num_dims)
13 | # str += '\n // Specified dims: {}'.format(fixed_dims)
14 | for i in range(num_dims):
15 | if fixed_dims[i]:
16 | str += '\n let i{}given = 1'.format(i)
17 | str += '\n let i{}min = i{}'.format(i, i)
18 | str += '\n let i{}max = i{}'.format(i, i)
19 | else:
20 | str += '\n let i{}given = if i{}min.IsSome && i{}max.IsSome then 1 else 0'.format(i, i, i)
21 | str += '\n let i{}min = defaultArg i{}min 0'.format(i, i)
22 | str += '\n let i{}max = defaultArg i{}max (t.shape[{}] - 1)'.format(i, i, i)
23 | str += '\n let bounds = array2D ['
24 | prefix = ''
25 | for i in range(num_dims):
26 | str += '{}[i{}min; i{}max; i{}given]'.format(prefix, i, i, i)
27 | prefix = '; '
28 | str += ']'
29 | str += '\n t.GetSlice(bounds)\n'
30 | return str
31 |
32 | def per(n):
33 | ret = []
34 | for i in range(1<
--------------------------------------------------------------------------------
/src/Furnace.Core/Op.BMM.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace
7 |
8 | []
9 | module OpBMMExtensions =
10 |
11 | type Tensor with
12 | /// Batched matrix product of two tensors. Tensors must be 3d tensors each containing the same number of matrices. If the tensor is a \(b \times n \times m\) tensor, and is a \(b \times m \times p\) tensor, the result will be a \(b \times n \times p\) tensor.
13 | /// The second tensor.
14 | member a.bmm(b:Tensor) =
15 | Shape.checkCanBMM a.shape b.shape |> ignore
16 | Tensor.Op
17 | { new BinaryOp("bmm") with
18 | member _.fRaw(a,b) = a.BMMTT(b)
19 | member _.ad_dfda(a,ad,b,f) = ad.bmm(b)
20 | member _.bd_dfdb(a,b,bd,f) = a.bmm(bd)
21 | member _.fd_dfda(a,b,f,fd) = fd.bmm(b.transpose(1, 2))
22 | member _.fd_dfdb(a,b,f,fd) = a.transpose(1, 2).bmm(fd)
23 | }
24 | (a,b)
25 |
26 | type FurnaceImage with
27 | /// Batched matrix product of two tensors. Tensors and must be 3d tensors each containing the same number of matrices. If is a \(b \times n \times m\) tensor, is a \(b \times m \times p\) tensor, the result will be a \(b \times n \times p\) tensor.
28 | /// The first tensor.
29 | /// The second tensor.
30 | static member bmm(a:Tensor, b:Tensor) = a.bmm(b)
31 |
--------------------------------------------------------------------------------
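A usage sketch for `bmm` (illustrative shapes; assumes `open Furnace` and a configured backend):

```fsharp
open Furnace

// Batched matrix product: [b; n; m] bmm [b; m; p] -> [b; n; p]
let a = FurnaceImage.randn([4; 2; 3])
let b = FurnaceImage.randn([4; 3; 5])

printfn "%A" (a.bmm(b).shape)                 // expected [4; 2; 5]
printfn "%A" (FurnaceImage.bmm(a, b).shape)   // equivalent static form
```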
/tests/Furnace.Benchmarks.Python/Program.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | open System.IO
7 | open BenchmarkDotNet.Running
8 | open Furnace.Benchmarks.Python
9 |
10 | []
11 | let main args =
12 |
13 | let summaries = BenchmarkSwitcher.FromAssembly(System.Reflection.Assembly.GetExecutingAssembly()).Run(args)
14 |
15 | let lines =
16 | [ for summary in summaries do
17 | for case in summary.BenchmarksCases do
18 | let v =
19 | try
20 | if not (isNull case.Descriptor) &&
21 | (not(isNull case.Descriptor.Categories)) &&
22 | case.Descriptor.Categories.Length > 0 then
23 | if (not (isNull summary)) && (try (summary[case] |> ignore); true with _ -> false) then
24 | let report = summary[case]
25 | let tensorSize = case.Parameters["tensorSize"] :?> int
26 | let dtypeName = case.Parameters["dtypeName"] :?> string
27 | let deviceName = case.Parameters["deviceName"] :?> string
28 | // get the time in milliseconds
29 | let runtime = report.ResultStatistics.Mean / 1000000.0 |> int64
30 | let nm = case.Descriptor.Categories[0]
31 | let key = nm + string tensorSize + dtypeName + deviceName
32 | Some (sprintf "%s,%d" key runtime)
33 | else
34 | None
35 | else
36 | None
37 | with _ -> None
38 | match v with
39 | | None -> ()
40 | | Some r -> yield r
41 | ]
42 |
43 | File.WriteAllLines(Path.Combine(__SOURCE_DIRECTORY__, "results.csv"), lines)
44 |
45 | 0
46 |
--------------------------------------------------------------------------------
/docs/img/badge-script.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/Furnace.Benchmarks/analysis.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | This project is for benchmarking; do as you want with it.
4 |
5 |
6 |
7 | ### Analysis 1 (8/9/2020)
8 |
9 | I took some measurements of repeated "tensor + tensor" and approximately fitted them to a cost model based on
10 |
11 | * N = overall number of primitive additions
12 | * n = number of elements in the tensor
13 | * t = c1 * N + c2 * (N/n)
14 | * c1 = u/s per primitive addition
15 | * c2 = u/s per overall tensor addition operation (not including creating tensors nor getting data to the GPU)
16 |
17 | Very approximate figures
18 |
19 | * Torch CPU, best c1 = 0.0010, c2 = approx 8.0
20 |
21 | * Torch GPU, best c1 = 0.000056, c2 = approx 75.0
22 |
23 | * Reference CPU, best c1 = 0.0025, c2 = approx 1.0
24 |
25 | These are pretty much as you'd expect:
26 |
27 | * Setting up operations on the GPU is expensive (c2 = 75.0) then very fast (c1 = 0.000056)
28 |
29 | * Setting up Torch operations on the CPU is non-zero cost (c2 = 8.0) then fast (c1 = 0.0010)
30 |
31 | * Setting up Reference implementation operations on the CPU is low cost (c2) then slow (c1 = 0.0025)
32 |
33 | * The reference backend has low overhead to reach down into the actual tensor data but is significantly slower on actual floating point performance
34 |
35 | * Overall we don't expect DiffSharp to be fast on tiny tensors. Indeed for this particular operation it's not until you have tensors of about size 10,000 that the Torch CPU or GPU backends become faster than the current reference backend
36 |
37 | * Note that the reference backend addition operation is implemented in a tight loop and .NET will do an OK job on this - many other operations in the reference backend are implemented far less efficiently.
38 |
39 | The above does argue for the value of a fast purely .NET backend for problems dominated by smallish tensors.
40 |
41 | I also separately checked the cost of adding dummy fields to "RawTensor" - each dummy copy of "shape" seemed to add about 3% cost to Torch CPU for tiny tensors;
42 | this obviously becomes less important as tensors grow in size.
43 |
44 |
45 |
46 |
47 |
--------------------------------------------------------------------------------
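The crossover figure quoted in the analysis can be sanity-checked from the fitted constants. A small F# sketch (the `crossover` helper and the bindings are illustrative, not part of the benchmark code), assuming the cost of one tensor addition of n elements is t ≈ c1*n + c2 with the approximate c1/c2 values listed above:

```fsharp
// Equate c1a*n + c2a = c1b*n + c2b and solve for n to get the crossover tensor size
let crossover (c1a: float, c2a: float) (c1b, c2b) = (c2b - c2a) / (c1a - c1b)

let referenceCpu = 0.0025, 1.0
let torchCpu     = 0.0010, 8.0
let torchGpu     = 0.000056, 75.0

printfn "Reference CPU vs Torch CPU: %.0f elements" (crossover referenceCpu torchCpu)  // ~4700
printfn "Reference CPU vs Torch GPU: %.0f elements" (crossover referenceCpu torchGpu)  // ~30000
```

These two crossovers (roughly 4,700 and 30,000 elements) bracket the "about 10,000" figure quoted in the analysis.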
/.github/workflows/pull-request-release.yml:
--------------------------------------------------------------------------------
1 | name: Build and test (Release)
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - '**'
7 |
8 | jobs:
9 | build_linux:
10 |
11 | runs-on: ubuntu-latest
12 |
13 | steps:
14 | - uses: actions/checkout@v2
15 | - name: Setup dotnet
16 | uses: actions/setup-dotnet@v1
17 | with:
18 | dotnet-version: 6.0.x
19 | - name: Install dependencies
20 | run: dotnet restore
21 | - name: Install tool dependencies
22 | run: dotnet tool restore
23 | - name: Build (Debug, for doc scripts)
24 | run: dotnet build --configuration Debug --no-restore --verbosity normal
25 | - name: Build (Release)
26 | run: dotnet build --configuration Release --no-restore --verbosity normal
27 | - name: Test
28 | run: dotnet test --configuration Release --no-restore --verbosity normal
29 | - name: Run fsdocs
30 | run: dotnet fsdocs build --eval --strict --properties Configuration=Release
31 |
32 | build_windows:
33 |
34 | runs-on: windows-latest
35 |
36 | steps:
37 | - uses: actions/checkout@v2
38 | - name: Setup dotnet
39 | uses: actions/setup-dotnet@v1
40 | with:
41 | dotnet-version: 6.0.x
42 | - name: Install dependencies
43 | run: dotnet restore
44 | - name: Install tool dependencies
45 | run: dotnet tool restore
46 | - name: Build (Release)
47 | run: dotnet build --configuration Release --no-restore --verbosity normal
48 | - name: Test
49 | run: dotnet test --configuration Release --no-restore --verbosity normal
50 |
51 | # build_macos:
52 | #
53 | # runs-on: macos-latest
54 | #
55 | # steps:
56 | # - uses: actions/checkout@v2
57 | # - name: Setup dotnet
58 | # uses: actions/setup-dotnet@v1
59 | # with:
60 | # dotnet-version: 6.0.x
61 | # - name: Install dependencies
62 | # run: dotnet restore
63 | # - name: Install tool dependencies
64 | # run: dotnet tool restore
65 | # - name: Build (Release)
66 | # run: dotnet build --configuration Release --no-restore --verbosity normal
67 | # - name: Test
68 | # run: dotnet test --configuration Release --no-restore --verbosity normal
69 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Furnace.Shorten.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | module Furnace.Shorten
7 |
8 | // Functional automatic differentiation API shorthand names
9 | type FurnaceImage with
10 |
11 | /// TBD
12 | static member gvp f x v = FurnaceImage.gradv f x v
13 |
14 | /// TBD
15 | static member g f x = FurnaceImage.grad f x
16 |
17 | /// TBD
18 | static member hvp f x v = FurnaceImage.hessianv f x v
19 |
20 | /// TBD
21 | static member h f x = FurnaceImage.hessian f x
22 |
23 | /// TBD
24 | static member gh f x = FurnaceImage.gradhessian f x
25 |
26 | /// TBD
27 | static member ghvp f x v = FurnaceImage.gradhessianv f x v
28 |
29 | /// TBD
30 | static member jvp f x v = FurnaceImage.jacobianv f x v
31 |
32 | /// TBD
33 | static member vjp f x v = FurnaceImage.jacobianTv f x v
34 |
35 | /// TBD
36 | static member j f x = FurnaceImage.jacobian f x
37 |
38 | /// TBD
39 | static member fgvp f x v = FurnaceImage.fgradv f x v
40 |
41 | /// TBD
42 | static member fg f x = FurnaceImage.fgrad f x
43 |
44 | /// TBD
45 | static member fgh f x = FurnaceImage.fgradhessian f x
46 |
47 | /// TBD
48 | static member fhvp f x v = FurnaceImage.fhessianv f x v
49 |
50 | /// TBD
51 | static member fh f x = FurnaceImage.fhessian f x
52 |
53 | /// TBD
54 | static member fghvp f x v = FurnaceImage.fgradhessianv f x v
55 |
56 | /// TBD
57 | static member fjvp f x v = FurnaceImage.fjacobianv f x v
58 |
59 | /// TBD
60 | static member fvjp f x v = FurnaceImage.fjacobianTv f x v
61 |
62 | /// TBD
63 | static member fj f x = FurnaceImage.fjacobian f x
64 |
65 |
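
To make the shorthand definitions above easier to read, here is a small usage sketch. It assumes the underlying `FurnaceImage.grad`, `gradv`, `jacobianv` and `jacobianTv` members behave as their DiffSharp counterparts; the function and value names are illustrative.

```fsharp
open Furnace
open Furnace.Shorten

// A scalar-valued function of a vector: f(x) = sum(x * x)
let f (x: Tensor) = (x * x).sum()

let x = FurnaceImage.tensor [ 1.0; 2.0; 3.0 ]
let v = FurnaceImage.tensor [ 1.0; 0.0; 0.0 ]

// Shorthand and full names are intended to agree
let g1 = FurnaceImage.g f x        // gradient, same as FurnaceImage.grad f x
let gv = FurnaceImage.gvp f x v    // gradient-vector product, same as FurnaceImage.gradv f x v

// A vector-valued function for the Jacobian shorthands
let h (x: Tensor) = FurnaceImage.stack([ x.sum(); (x * x).sum() ])
let jv = FurnaceImage.jvp h x v                                   // Jacobian-vector product (forward mode)
let vj = FurnaceImage.vjp h x (FurnaceImage.tensor [ 1.0; 0.0 ])  // vector-Jacobian product (reverse mode)
```
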
--------------------------------------------------------------------------------
/docs/tensors.fsx:
--------------------------------------------------------------------------------
1 | (*** condition: prepare ***)
2 | #I "../tests/Furnace.Tests/bin/Debug/net6.0"
3 | #r "Furnace.Core.dll"
4 | #r "Furnace.Data.dll"
5 | #r "Furnace.Backends.Reference.dll"
6 | #r "Furnace.Backends.Torch.dll"
7 | // These are needed to make fsdocs --eval work. If we don't select a backend like this in the beginning, we get erratic behavior.
8 | Furnace.FurnaceImage.config(backend=Furnace.Backend.Reference)
9 | Furnace.FurnaceImage.seed(123)
10 |
11 | (*** condition: fsx ***)
12 | #if FSX
13 | #r "nuget: Furnace-lite,{{fsdocs-package-version}}"
14 | #endif // FSX
15 | (*** condition: ipynb ***)
16 | #if IPYNB
17 | // Google Colab only: uncomment and run the following to install dotnet and the F# kernel
18 | // !bash <(curl -Ls https://raw.githubusercontent.com/gbaydin/scripts/main/colab_dotnet6.sh)
19 | #endif // IPYNB
20 | (*** condition: ipynb ***)
21 | #if IPYNB
22 | // Import Furnace package
23 | #r "nuget: Furnace-lite,{{fsdocs-package-version}}"
24 |
25 | // Set dotnet interactive formatter to plaintext
26 | Formatter.SetPreferredMimeTypesFor(typeof<obj>, "text/plain")
27 | Formatter.Register(fun (x:obj) (writer: TextWriter) -> fprintfn writer "%120A" x )
28 | #endif // IPYNB
29 |
30 | (**
31 | [](https://colab.research.google.com/github/fsprojects/Furnace/blob/master/{{fsdocs-source-basename}}.ipynb)
32 | [](https://mybinder.org/v2/gh/fsprojects/Furnace/master?filepath={{fsdocs-source-basename}}.ipynb)
33 | []({{fsdocs-source-basename}}.fsx)
34 | []({{fsdocs-source-basename}}.ipynb)
35 |
36 | * The `cref:T:Furnace.FurnaceImage` API
37 |
38 | * The `cref:T:Furnace.Tensor` type
39 |
40 | Saving tensors as image and loading images as tensors
41 |
42 |
43 | ## Converting between Tensors and arrays
44 |
45 | System.Array and F# arrays
46 |
47 | *)
48 |
49 | open Furnace
50 |
51 | // Tensor
52 | let t1 = FurnaceImage.tensor [ 0.0 .. 0.2 .. 1.0 ]
53 |
54 | // System.Array
55 | let a1 = t1.toArray()
56 |
57 | // float32[]
58 | let a1b = t1.toArray() :?> float32[]
59 |
60 | // Tensor
61 | let t2 = FurnaceImage.randn([3;3;3])
62 |
63 | // float32[,,]
64 | let a2 = t2.toArray() :?> float32[,,]
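
As a complement to the conversions shown above, a short round-trip sketch going from a .NET array back into a tensor; this assumes `FurnaceImage.tensor` accepts arrays the same way it accepts F# lists.

```fsharp
// Array -> Tensor -> Array round trip (illustrative sketch)
let arr = array2D [ [ 1.0f; 2.0f ]; [ 3.0f; 4.0f ] ]
let t3 = FurnaceImage.tensor arr          // 2x2 tensor built from a float32[,]
let back = t3.toArray() :?> float32[,]    // and back to a float32[,]
printfn "%A" back
```
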
--------------------------------------------------------------------------------
/tests/Furnace.Benchmarks/Furnace.Benchmarks.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | false
6 |
7 |
8 |
9 | Exe
10 | net6.0
11 | false
12 | $(DefineConstants);TINY
13 | false
14 | x64
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Device.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace
7 |
8 | /// <summary>
9 | ///   Represents the type of a device.
10 | /// </summary>
11 | ///
12 | /// <remarks>
13 | ///   The numeric values used are as for LibTorch.
14 | /// </remarks>
15 | ///
16 | /// <namespacedoc>
17 | ///   <summary>Contains fundamental types for the tensor programming model, including Tensor, Shape and FurnaceImage.</summary>
18 | /// </namespacedoc>
19 | type DeviceType =
20 | | CPU = 0
21 | | CUDA = 1 // CUDA.
22 | | MKLDNN = 2 // Reserved for explicit MKLDNN
23 | | OPENGL = 3 // OpenGL
24 | | OPENCL = 4 // OpenCL
25 | | IDEEP = 5 // IDEEP.
26 | | HIP = 6 // AMD HIP
27 | | FPGA = 7 // FPGA
28 | | MSNPU = 8 // MSNPU
29 | | XLA = 9 // XLA / TPU
30 |
31 | /// Represents a device specification.
32 | [<Struct>]
33 | type Device =
34 | | Device of DeviceType * int
35 | member x.DeviceType = (let (Device(a,_)) = x in a)
36 | member x.DeviceIndex = (let (Device(_,b)) = x in b)
37 | static member CPU = Device(DeviceType.CPU, -1)
38 | static member GPU = Device(DeviceType.CUDA, 0)
39 |
40 | member internal x.Code = (int x.DeviceType <<< 4) + x.DeviceIndex
41 |
42 | member internal x.Name =
43 | (match x.DeviceType with
44 | | DeviceType.CPU -> "cpu"
45 | | DeviceType.CUDA -> "cuda"
46 | | DeviceType.MKLDNN -> "mkldnn"
47 | | DeviceType.OPENGL -> "opengl"
48 | | DeviceType.OPENCL -> "opencl"
49 | | DeviceType.IDEEP -> "ideep"
50 | | DeviceType.HIP -> "hip"
51 | | DeviceType.FPGA -> "fpga"
52 | | DeviceType.MSNPU -> "msnpu"
53 | | DeviceType.XLA -> "xla"
54 | | _ -> failwith "unknown device type") + string x.DeviceIndex
55 |
56 | override x.ToString() = x.Name
57 |
58 | /// Contains functions and settings related to device specifications.
59 | module Device =
60 |
61 | /// Get or set the default device used when creating tensors. Note, use FurnaceImage.config(...) instead.
62 | let mutable Default : Device = Device.CPU
63 |
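
A brief illustrative sketch of the device specification API defined above; the expected string and the bit-packing arithmetic in the comments are inferred from the listing itself rather than separate documentation.

```fsharp
open Furnace

let cpu  = Device.CPU                 // Device(DeviceType.CPU, -1)
let gpu0 = Device.GPU                 // Device(DeviceType.CUDA, 0)
let gpu1 = Device(DeviceType.CUDA, 1) // a second CUDA device

// ToString concatenates the device-type name and the device index
printfn "%s" (string gpu1)            // expected: "cuda1"

// The internal Code packs the device type into the high bits and the index into the
// low bits: (int DeviceType.CUDA <<< 4) + 1 = (1 <<< 4) + 1 = 17.
// Device.Default holds the default device for new tensors; the note above recommends
// changing it via FurnaceImage.config(device=...) rather than setting it directly.
```
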
--------------------------------------------------------------------------------
/examples/differentiable_programming.fsx:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S dotnet fsi
2 |
3 | #I "../tests/Furnace.Tests/bin/Debug/net6.0"
4 | #r "Furnace.Core.dll"
5 | #r "Furnace.Data.dll"
6 | #r "Furnace.Backends.Torch.dll"
7 |
8 | // Libtorch binaries
9 | // Option A: you can use a platform-specific nuget package
10 | #r "nuget: TorchSharp-cpu, 0.96.5"
11 | // #r "nuget: TorchSharp-cuda-linux, 0.96.5"
12 | // #r "nuget: TorchSharp-cuda-windows, 0.96.5"
13 | // Option B: you can use a local libtorch installation
14 | // System.Runtime.InteropServices.NativeLibrary.Load("/home/gunes/anaconda3/lib/python3.8/site-packages/torch/lib/libtorch.so")
15 |
16 |
17 | open Furnace
18 | open Furnace.Compose
19 | open Furnace.Model
20 | open Furnace.Data
21 | open Furnace.Optim
22 | open Furnace.Util
23 | open Furnace.Distributions
24 |
25 | open System.IO
26 |
27 | FurnaceImage.config(backend=Backend.Torch, device=Device.CPU)
28 | FurnaceImage.seed(1)
29 |
30 | type Model<'In, 'Out> with
31 | member m.run = m.forward
32 | type DiffProg<'In, 'Out> = Model<'In, 'Out>
33 |
34 |
35 | let diffprog parameters (f:'In->'Out) : DiffProg<'In, 'Out> =
36 | DiffProg<'In, 'Out>.create [] parameters [] f
37 |
38 | let param (x:Tensor) = Parameter(x)
39 |
40 | // Learn a differentiable program given an objective
41 | // DiffProg<'a,'b> -> (DiffProg<'a,'b> -> Tensor) -> DiffProg<'a,'b>
42 | let learn (diffprog:DiffProg<_,_>) loss =
43 | let lr = 0.001
44 | for i=0 to 10 do
45 | diffprog.reverseDiff()
46 | let l:Tensor = loss diffprog
47 | l.reverse()
48 | let p = diffprog.parametersVector
49 | diffprog.parametersVector <- p.primal - lr * p.derivative
50 | printfn "iteration %A, loss %A" i (float l)
51 | diffprog
52 |
53 | // A linear model as a differentiable program
54 | // DiffProg<Tensor, Tensor>
55 | let dp =
56 | let w = param (FurnaceImage.randn([5; 1]))
57 | diffprog [w]
58 | (fun (x:Tensor) -> x.matmul(w.value))
59 |
60 | // Data
61 | let x = FurnaceImage.randn([1024; 5])
62 | let y = FurnaceImage.randn([1024; 1])
63 |
64 | // let a = diffprog.run x
65 | // printfn "%A %A %A " a.shape y.shape (FurnaceImage.mseLoss(a, y))
66 |
67 | // Objective
68 | // DiffProg<Tensor, Tensor> -> Tensor
69 | let loss (diffprog:DiffProg<Tensor, Tensor>) = FurnaceImage.mseLoss(diffprog.run x, y)
70 |
71 | // Learned differentiable program
72 | // DiffProg<Tensor, Tensor>
73 | let dpLearned = learn dp loss
74 |
75 | // Function that runs the differentiable program with new data
76 | // Tensor -> Tensor
77 | dpLearned.run
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestOp.Outer.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open NUnit.Framework
9 | open Furnace
10 |
11 |
12 | [<TestFixture>]
13 | type TestTensorOuter () =
14 | [<Test>]
15 | member _.TestTensorOuter () =
16 | for combo in Combos.FloatingPointExcept16s do
17 | let a1 = combo.tensor([ 1.7865, 1.2723, 0.2065, -0.4601, 0.3218])
18 | let b1 = combo.tensor([ 2.1136, 1.0551, -0.4575])
19 |
20 | let a1outerb1 = a1.outer(b1)
21 | let a1outerb1Correct = combo.tensor([[ 3.7759, 1.8849, -0.8173],
22 | [ 2.6891, 1.3424, -0.5820],
23 | [ 0.4365, 0.2179, -0.0945],
24 | [-0.9725, -0.4854, 0.2105],
25 | [ 0.6801, 0.3395, -0.1472]])
26 |
27 | Assert.True(a1outerb1Correct.allclose(a1outerb1, 0.01))
28 |
29 | let a2 = combo.tensor([[ 1.7865, 1.2723, 0.2065, -0.4601, 0.3218],
30 | [-0.2400, -0.1650, -1.1463, 0.0578, 1.5240]])
31 | let b2 = combo.tensor([[ 2.1136, 1.0551, -0.4575],
32 | [ 1.1928, -2.3803, 0.3160]])
33 |
34 | let a2outerb2 = a2.outer(b2)
35 | let a2outerb2Correct = combo.tensor([[[ 3.7759, 1.8849, -0.8173],
36 | [ 2.6891, 1.3424, -0.5820],
37 | [ 0.4365, 0.2179, -0.0945],
38 | [-0.9725, -0.4854, 0.2105],
39 | [ 0.6801, 0.3395, -0.1472]],
40 |
41 | [[-0.2863, 0.5713, -0.0758],
42 | [-0.1968, 0.3927, -0.0521],
43 | [-1.3672, 2.7284, -0.3622],
44 | [ 0.0690, -0.1376, 0.0183],
45 | [ 1.8177, -3.6275, 0.4816]]])
46 |
47 | Assert.True(a2outerb2Correct.allclose(a2outerb2, 0.01))
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestModel.Dropout.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open NUnit.Framework
9 | open Furnace
10 | open Furnace.Compose
11 | open Furnace.Model
12 | open Furnace.Data
13 | open Furnace.Optim
14 |
15 |
16 | [<TestFixture>]
17 | type TestModelDropout () =
18 |
19 | [<Test>]
20 | member _.TestModelDropout () =
21 | let m = Dropout(1.)
22 | let x = FurnaceImage.randn([10;10])
23 | Assert.CheckEqual(m.parametersVector.shape, [| 0 |])
24 | m.train()
25 | let xtrain = x --> m
26 | Assert.CheckEqual(x.zerosLike(), xtrain)
27 | m.eval()
28 | let xeval = x --> m
29 | Assert.CheckEqual(x, xeval)
30 |
31 | [<Test>]
32 | member _.TestModelDropout2d () =
33 | let m = Dropout2d(1.)
34 | let x = FurnaceImage.randn([10;4;10;10])
35 |
36 | m.train()
37 | let xtrain = x --> m
38 | Assert.CheckEqual(x.zerosLike(), xtrain)
39 | m.eval()
40 | let xeval = x --> m
41 | Assert.CheckEqual(x, xeval)
42 |
43 | [<Test>]
44 | member _.TestModelDropout3d () =
45 | let m = Dropout3d(1.)
46 | let x = FurnaceImage.randn([10;4;10;10;10])
47 |
48 | m.train()
49 | let xtrain = x --> m
50 | Assert.CheckEqual(x.zerosLike(), xtrain)
51 | m.eval()
52 | let xeval = x --> m
53 | Assert.CheckEqual(x, xeval)
54 |
55 | [<Test>]
56 | member _.TestModelDropoutSaveLoadState () =
57 | let net = Dropout(0.5)
58 |
59 | let fileName = System.IO.Path.GetTempFileName()
60 | FurnaceImage.save(net.state, fileName) // Save pre-use
61 | let _ = FurnaceImage.randn([10; 10]) --> net // Use
62 | net.state <- FurnaceImage.load(fileName) // Load after-use
63 |
64 | Assert.True(true)
65 |
66 | [<Test>]
67 | member _.TestModelDropout2dSaveLoadState () =
68 | let net = Dropout2d(0.5)
69 |
70 | let fileName = System.IO.Path.GetTempFileName()
71 | FurnaceImage.save(net.state, fileName) // Save pre-use
72 | let _ = FurnaceImage.randn([10; 10; 10; 10]) --> net // Use
73 | net.state <- FurnaceImage.load(fileName) // Load after-use
74 |
75 | Assert.True(true)
76 |
77 | [<Test>]
78 | member _.TestModelDropout3dSaveLoadState () =
79 | let net = Dropout3d(0.5)
80 |
81 | let fileName = System.IO.Path.GetTempFileName()
82 | FurnaceImage.save(net.state, fileName) // Save pre-use
83 | let _ = FurnaceImage.randn([10; 10; 10; 10; 10]) --> net // Use
84 | net.state <- FurnaceImage.load(fileName) // Load after-use
85 |
86 | Assert.True(true)
87 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | -----------------------------------------
6 |
7 | > **NOTE: This repository is undergoing revision and updating. It has incomplete code, functionality, and design that are likely to change without notice. When using the TorchSharp backend, only the x64 platform is currently supported out of the box; see [DEVGUIDE.md](DEVGUIDE.md) for more details.**
8 |
9 | Furnace is a tensor library with support for [differentiable programming](https://en.wikipedia.org/wiki/Differentiable_programming). It is designed for use in machine learning, probabilistic programming, optimization and other domains.
10 |
11 | **Key features**
12 |
13 | * Nested and mixed-mode differentiation
14 | * Common optimizers, model elements, differentiable probability distributions
15 | * F# for robust functional programming
16 | * Familiar PyTorch naming and idioms, with efficient LibTorch CUDA/C++ tensors and GPU support
17 | * Linux, macOS, Windows supported
18 | * Use interactive notebooks in Jupyter and Visual Studio Code
19 | * 100% open source
20 |
21 | ## Documentation
22 |
23 | You can find the documentation [here](https://fsprojects.github.io/Furnace/), including information on installation and getting started.
24 |
25 | Release notes can be found [here](https://github.com/fsprojects/Furnace/blob/dev/RELEASE_NOTES.md).
26 |
27 | ## Communication
28 |
29 | Please use [GitHub issues](https://github.com/fsprojects/Furnace/issues) to share bug reports, feature requests, installation issues, suggestions etc.
30 |
31 | ## Contributing
32 |
33 | We welcome all contributions.
34 |
35 | * Bug fixes: if you encounter a bug, please open an [issue](https://github.com/fsprojects/Furnace/issues) describing the bug. If you are planning to contribute a bug fix, please feel free to do so in a pull request.
36 | * New features: if you plan to contribute new features, please first open an [issue](https://github.com/fsprojects/Furnace/issues) to discuss the feature before creating a pull request.
37 |
38 | ## Background
39 |
40 | Furnace is a hard fork of [DiffSharp](https://github.com/DiffSharp/DiffSharp).
41 |
42 | The original DiffSharp library was developed by [Atılım Güneş Baydin](http://www.robots.ox.ac.uk/~gunes/), [Don Syme](https://www.microsoft.com/en-us/research/people/dsyme/) and other contributors, having started as a project supervised by the automatic differentiation wizards [Barak Pearlmutter](https://scholar.google.com/citations?user=AxFrw0sAAAAJ&hl=en) and [Jeffrey Siskind](https://scholar.google.com/citations?user=CgSBtPYAAAAJ&hl=en).
43 |
44 | ## License
45 |
46 | Furnace is licensed under the BSD 2-Clause "Simplified" License, which you can find in the [LICENSE](https://github.com/fsprojects/Furnace/blob/dev/LICENSE) file in this repository.
47 |
48 |
49 |
50 |
51 |
--------------------------------------------------------------------------------
/examples/vae_cnn.fsx:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S dotnet fsi
2 |
3 | #I "../tests/Furnace.Tests/bin/Debug/net6.0"
4 | #r "Furnace.Core.dll"
5 | #r "Furnace.Data.dll"
6 | #r "Furnace.Backends.Torch.dll"
7 |
8 | // Libtorch binaries
9 | // Option A: you can use a platform-specific nuget package
10 | #r "nuget: TorchSharp-cpu, 0.96.5"
11 | // #r "nuget: TorchSharp-cuda-linux, 0.96.5"
12 | // #r "nuget: TorchSharp-cuda-windows, 0.96.5"
13 | // Option B: you can use a local libtorch installation
14 | // System.Runtime.InteropServices.NativeLibrary.Load("/home/gunes/anaconda3/lib/python3.8/site-packages/torch/lib/libtorch.so")
15 |
16 |
17 | open Furnace
18 | open Furnace.Compose
19 | open Furnace.Model
20 | open Furnace.Optim
21 | open Furnace.Data
22 |
23 |
24 | FurnaceImage.config(backend=Backend.Torch, device=Device.CPU)
25 | FurnaceImage.seed(0)
26 |
27 | let epochs = 2
28 | let batchSize = 32
29 | let validInterval = 250
30 | let numSamples = 32
31 |
32 | let urls = ["https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz";
33 | "https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz";
34 | "https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz";
35 | "https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz"]
36 |
37 | let trainSet = MNIST("../data", urls=urls, train=true, transform=id)
38 | let trainLoader = trainSet.loader(batchSize=batchSize, shuffle=true)
39 | let validSet = MNIST("../data", urls=urls, train=false, transform=id)
40 | let validLoader = validSet.loader(batchSize=batchSize, shuffle=false)
41 |
42 | let encoder =
43 | Conv2d(1, 32, 4, 2)
44 | --> FurnaceImage.relu
45 | --> Conv2d(32, 64, 4, 2)
46 | --> FurnaceImage.relu
47 | --> Conv2d(64, 128, 4, 2)
48 | --> FurnaceImage.flatten(1)
49 |
50 | let decoder =
51 | FurnaceImage.unflatten(1, [128;1;1])
52 | --> ConvTranspose2d(128, 64, 4, 2)
53 | --> FurnaceImage.relu
54 | --> ConvTranspose2d(64, 32, 4, 3)
55 | --> FurnaceImage.relu
56 | --> ConvTranspose2d(32, 1, 4, 2)
57 | --> FurnaceImage.sigmoid
58 |
59 | let model = VAE([1;28;28], 64, encoder, decoder)
60 |
61 | printfn "Model\n%s" (model.summary())
62 |
63 | let optimizer = Adam(model, lr=FurnaceImage.tensor(0.001))
64 |
65 | for epoch = 1 to epochs do
66 | for i, x, _ in trainLoader.epoch() do
67 | model.reverseDiff()
68 | let l = model.loss(x)
69 | l.reverse()
70 | optimizer.step()
71 | printfn "Epoch: %A/%A minibatch: %A/%A loss: %A" epoch epochs i trainLoader.length (float(l))
72 |
73 | if i % validInterval = 0 then
74 | let mutable validLoss = FurnaceImage.zero()
75 | for _, x, _ in validLoader.epoch() do
76 | validLoss <- validLoss + model.loss(x, normalize=false)
77 | validLoss <- validLoss / validSet.length
78 | printfn "Validation loss: %A" (float validLoss)
79 | let fileName = sprintf "vae_cnn_samples_epoch_%A_minibatch_%A.png" epoch i
80 | printfn "Saving %A samples to %A" numSamples fileName
81 | let samples = model.sample(numSamples).view([-1; 1; 28; 28])
82 | samples.saveImage(fileName)
83 |
84 |
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestModel.VAE.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open NUnit.Framework
9 | open Furnace
10 | open Furnace.Compose
11 | open Furnace.Model
12 | open Furnace.Data
13 | open Furnace.Optim
14 |
15 |
16 | [<TestFixture>]
17 | type TestModelVAE() =
18 |
19 | [<Test>]
20 | member _.TestModelVAEMLP () =
21 | // Fits a little VAEMLP to structured noise
22 | let xdim, zdim, n = 8, 4, 16
23 | let m = VAEMLP(xdim*xdim, zdim)
24 | let x = FurnaceImage.stack(Array.init n (fun _ -> FurnaceImage.eye(xdim)*FurnaceImage.rand([xdim;xdim])))
25 |
26 | let lr, steps = 1e-2, 50
27 | let optimizer = Adam(m, lr=FurnaceImage.tensor(lr))
28 | let loss0 = float <| m.loss(x)
29 | let mutable loss = loss0
30 | for _ in 0..steps do
31 | m.reverseDiff()
32 | let l = m.loss(x)
33 | l.reverse()
34 | optimizer.step()
35 | loss <- float l
36 |
37 | Assert.Less(loss, loss0/2.)
38 |
39 | [<Test>]
40 | member _.TestModelVAE () =
41 | // Fits a little VAE to structured noise
42 | let xdim, zdim, n = 28, 4, 16
43 | let encoder = FurnaceImage.flatten(1) --> Linear(xdim*xdim, 8) --> FurnaceImage.relu
44 | let decoder = Linear(8, xdim*xdim) --> FurnaceImage.sigmoid
45 |
46 | let m = VAE([xdim;xdim], zdim, encoder, decoder)
47 | let x = FurnaceImage.stack(Array.init n (fun _ -> FurnaceImage.eye(xdim)*FurnaceImage.rand([xdim;xdim])))
48 |
49 | let lr, steps = 1e-2, 25
50 | let optimizer = Adam(m, lr=FurnaceImage.tensor(lr))
51 | let loss0 = float <| m.loss(x)
52 | let mutable loss = loss0
53 | for _ in 0..steps do
54 | m.reverseDiff()
55 | let l = m.loss(x)
56 | l.reverse()
57 | optimizer.step()
58 | loss <- float l
59 |
60 | Assert.Less(loss, loss0/2.)
61 |
62 | [<Test>]
63 | member _.TestModelVAEMLPSaveLoadState () =
64 | let xdim, zdim, n = 8, 4, 16
65 | let net = VAEMLP(xdim*xdim, zdim)
66 |
67 | let fileName = System.IO.Path.GetTempFileName()
68 | FurnaceImage.save(net.state, fileName) // Save pre-use
69 | let _ = FurnaceImage.randn([n; xdim*xdim]) --> net // Use
70 | net.state <- FurnaceImage.load(fileName) // Load after-use
71 |
72 | Assert.True(true)
73 |
74 | [<Test>]
75 | member _.TestModelVAESaveLoadState () =
76 | let xdim, zdim, n = 28, 4, 16
77 | let encoder = FurnaceImage.flatten(1) --> Linear(xdim*xdim, 8) --> FurnaceImage.relu
78 | let decoder = Linear(8, xdim*xdim) --> FurnaceImage.sigmoid
79 | let net = VAE([xdim;xdim], zdim, encoder, decoder)
80 |
81 | let fileName = System.IO.Path.GetTempFileName()
82 | FurnaceImage.save(net.state, fileName) // Save pre-use
83 | let _ = FurnaceImage.randn([n; xdim; xdim]) --> net // Use
84 | net.state <- FurnaceImage.load(fileName) // Load after-use
85 |
86 | Assert.True(true)
--------------------------------------------------------------------------------
/docs/img/badge-binder.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Backend.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace
7 |
8 | /// Represents a backend for Furnace tensors
9 | [<RequireQualifiedAccess>]
10 | type Backend =
11 | /// The reference backend
12 | | Reference
13 | /// The LibTorch backend
14 | | Torch
15 | /// Reserved for future use
16 | | Other of name: string * code: int
17 |
18 | member internal x.Code =
19 | match x with
20 | | Reference -> 0x000
21 | | Torch -> 0x0100
22 | | Other (_name, code) -> (code + 3) <<< 8
23 |
24 | /// Get the name of the backend
25 | member x.Name =
26 | match x with
27 | | Reference -> "Reference"
28 | | Torch -> "Torch"
29 | | Other (name, _) -> name
30 |
31 | override x.ToString() = x.Name
32 |
33 | /// Contains functions and settings related to backend specifications.
34 | module Backend =
35 | let mutable internal count = 0
36 | let internal codes = System.Collections.Concurrent.ConcurrentDictionary<string, Backend>()
37 |
38 | /// Register a new backend
39 | let Register name =
40 | codes.GetOrAdd(name, (fun _ ->
41 | count <- count + 1
42 | Backend.Other(name, count)))
43 |
44 | /// Get or set the default backend used when creating tensors. Note, use FurnaceImage.config(...) instead.
45 | let mutable Default = Backend.Reference
46 |
47 | type BackendFunctionality<'T>() =
48 | let mutable last = None
49 | let backends = System.Collections.Concurrent.ConcurrentDictionary<int, 'T>()
50 |
51 | member _.Get(?backend: Backend) =
52 | let backend = defaultArg backend Backend.Default
53 | let code = backend.Code
54 | match last with
55 | | Some (code2, v) when code = code2 -> v
56 | | _ ->
57 | match backends.TryGetValue(code) with
58 | | true, v -> v
59 | | false, _ ->
60 | let res =
61 | backends.GetOrAdd(code, fun _ ->
62 | let name = "Furnace.Backends." + backend.Name
63 | let fullName = System.Reflection.Assembly.GetExecutingAssembly().FullName.Replace("Furnace.Core", name)
64 | let asm =
65 | try System.Reflection.Assembly.Load(fullName)
66 | with e -> failwithf "Couldn't find assembly '%s', error = %s" fullName (e.ToString())
67 | let typeName = sprintf "Furnace.Backends.%s.%s%s" backend.Name backend.Name typeof<'T>.Name
68 | let theType = asm.GetType(typeName)
69 | if isNull theType then failwithf "Couldn't find type '%s' in assembly '%s'" typeName fullName
70 | let b =
71 | match System.Activator.CreateInstance(theType) with
72 | | :? 'T as b -> b
73 | | _ -> failwith "activation failed to return correct type"
74 | b
75 | )
76 | last <- Some (code, res)
77 | res
78 |
79 | member _.Backends = backends
80 |
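
A short illustrative sketch of registering a third-party backend with the module above. The backend name is made up for the example, and the code-packing arithmetic in the comment is read off the listing rather than taken from separate documentation.

```fsharp
open Furnace

// Registering the same name twice returns the same Backend.Other value
let myBackend  = Backend.Register "MyBackend"
let myBackend2 = Backend.Register "MyBackend"
printfn "%b" (myBackend = myBackend2)   // expected: true
printfn "%s" myBackend.Name             // "MyBackend"

// The internal Code places third-party backends above Reference (0x000) and
// Torch (0x0100): for the first registered backend, (1 + 3) <<< 8 = 0x0400.
// BackendFunctionality<'T> later uses that code to locate and instantiate the
// backend implementation from an assembly named "Furnace.Backends.MyBackend".
```
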
--------------------------------------------------------------------------------
/docs/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM jupyter/base-notebook:latest
2 |
3 | # Install .NET CLI dependencies
4 |
5 | ARG NB_USER=fsdocs-user
6 | ARG NB_UID=1000
7 | ENV USER ${NB_USER}
8 | ENV NB_UID ${NB_UID}
9 | ENV HOME /home/${NB_USER}
10 |
11 | WORKDIR ${HOME}
12 |
13 | USER root
14 |
15 | ENV \
16 | # Enable detection of running in a container
17 | DOTNET_RUNNING_IN_CONTAINER=true \
18 | # Enable correct mode for dotnet watch (only mode supported in a container)
19 | DOTNET_USE_POLLING_FILE_WATCHER=true \
20 | # Skip extraction of XML docs - generally not useful within an image/container - helps performance
21 | NUGET_XMLDOC_MODE=skip \
22 | # Opt out of telemetry until after we install jupyter when building the image, this prevents caching of machine id
23 | DOTNET_INTERACTIVE_CLI_TELEMETRY_OPTOUT=true \
24 | DOTNET_SDK_VERSION=5.0.202
25 |
26 | # Install .NET CLI dependencies
27 | RUN apt-get update \
28 | && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
29 | libc6 \
30 | libgcc1 \
31 | libgssapi-krb5-2 \
32 | libicu66 \
33 | libssl1.1 \
34 | libstdc++6 \
35 | zlib1g \
36 | curl \
37 | git \
38 | && rm -rf /var/lib/apt/lists/*
39 |
40 | # When updating the SDK version, the sha512 value a few lines down must also be updated.
41 | # Install .NET SDK
42 | RUN curl -SL --output dotnet.tar.gz https://dotnetcli.azureedge.net/dotnet/Sdk/$DOTNET_SDK_VERSION/dotnet-sdk-$DOTNET_SDK_VERSION-linux-x64.tar.gz \
43 | && dotnet_sha512='01ed59f236184987405673d24940d55ce29d830e7dbbc19556fdc03893039e6046712de6f901dc9911047a0dee4fd15319b7e94f8a31df6b981fa35bd93d9838' \
44 | && echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \
45 | && mkdir -p /usr/share/dotnet \
46 | && tar -ozxf dotnet.tar.gz -C /usr/share/dotnet \
47 | && rm dotnet.tar.gz \
48 | && ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet \
49 | # Trigger first run experience by running arbitrary cmd
50 | && dotnet help
51 |
52 | # Copy notebooks
53 | COPY ./ ${HOME}/notebooks/
54 |
55 | # Copy package sources
56 | # COPY ./NuGet.config ${HOME}/nuget.config
57 |
58 | RUN chown -R ${NB_UID} ${HOME}
59 | USER ${USER}
60 |
61 | # Clone and build Furnace-cpu bundle to get the latest TorchSharp and libtorch-cpu packages downloaded and cached by nuget within the Docker image
62 | # This makes the user experience faster when running #r "nuget: Furnace-cpu"
63 | RUN git clone --depth 1 https://github.com/Furnace/Furnace.git \
64 | && dotnet build Furnace/bundles/Furnace-cpu
65 |
66 | # Install nteract
67 | RUN pip install nteract_on_jupyter
68 |
69 | # Install latest build from master branch of Microsoft.DotNet.Interactive
70 | RUN dotnet tool install -g --add-source "https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/index.json" Microsoft.dotnet-interactive
71 |
72 | #latest stable from nuget.org
73 | #RUN dotnet tool install -g Microsoft.dotnet-interactive --add-source "https://api.nuget.org/v3/index.json"
74 |
75 | ENV PATH="${PATH}:${HOME}/.dotnet/tools"
76 | RUN echo "$PATH"
77 |
78 | # Install kernel specs
79 | RUN dotnet interactive jupyter install
80 |
81 | # Enable telemetry once we install jupyter for the image
82 | ENV DOTNET_INTERACTIVE_CLI_TELEMETRY_OPTOUT=false
83 |
84 | # Set root to notebooks
85 | WORKDIR ${HOME}/notebooks/
--------------------------------------------------------------------------------
/src/Furnace.Core/Scalar.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace
7 |
8 | open System
9 | open System.Reflection
10 |
11 | /// Represents a scalar in the Furnace programming model
12 | type scalar = System.IConvertible
13 |
14 | [<AutoOpen>]
15 | module ScalarExtensions =
16 | type System.IConvertible with
17 | member inline x.toSingle() = x.ToSingle(null)
18 | member inline x.toDouble() = x.ToDouble(null)
19 | member inline x.toInt64() = x.ToInt64(null)
20 | member inline x.toInt32() = x.ToInt32(null)
21 | member inline x.toInt16() = x.ToInt16(null)
22 | member inline x.toSByte() = x.ToSByte(null)
23 | member inline x.toByte() = x.ToByte(null)
24 | member inline x.toBool() = x.toInt32() <> 0
25 | member inline x.sub(y:scalar) : scalar = (x.toDouble() - y.toDouble()) :> scalar
26 | member inline x.log() : scalar = x.toDouble() |> log :> scalar
27 | member inline x.neg() : scalar = -x.toDouble() :> scalar
28 | member inline x.dtype =
29 | let ti = x.GetTypeCode()
30 | match ti with
31 | | TypeCode.Double -> Dtype.Float64
32 | | TypeCode.Single -> Dtype.Float32
33 | | TypeCode.Int32 -> Dtype.Int32
34 | | TypeCode.Int64 -> Dtype.Int64
35 | | TypeCode.SByte -> Dtype.Int8
36 | | TypeCode.Byte -> Dtype.Byte
37 | | TypeCode.Int16 -> Dtype.Int16
38 | | TypeCode.Boolean -> Dtype.Bool
39 | | _ -> failwithf "unknown scalar type '%A'" x
40 |
41 | member inline x.cast(dtype) =
42 | match dtype with
43 | | Dtype.Float16 -> x.toSingle() :> scalar
44 | | Dtype.BFloat16 -> x.toSingle() :> scalar
45 | | Dtype.Float32 -> x.toSingle() :> scalar
46 | | Dtype.Float64 -> x.toDouble() :> scalar
47 | | Dtype.Int8 -> x.toSByte() :> scalar
48 | | Dtype.Byte -> x.toByte() :> scalar
49 | | Dtype.Int32 -> x.toInt32() :> scalar
50 | | Dtype.Int64 -> x.toInt64() :> scalar
51 | | Dtype.Int16 -> x.toInt16() :> scalar
52 | | Dtype.Bool -> x.toBool() :> scalar
53 |
54 | // Floating point scalars force integers to widen to the default floating point type
55 | //
56 | // For example:
57 | // >>> import torch
58 | // >>> (torch.tensor([1], dtype=torch.int32) * 2.5).dtype
59 | // torch.float32
60 | // >>> torch.set_default_dtype(torch.float16)
61 | // >>> (torch.tensor([1], dtype=torch.int32) * 2.5).dtype
62 | // torch.float16
63 | // >>> (torch.tensor([1], dtype=torch.int32) * 2).dtype
64 | // torch.int32
65 | let tryWidenScalar (tensorDtype: Dtype) (scalar: scalar) =
66 | match tensorDtype, scalar.GetTypeCode() with
67 | | Dtype.Integral, (TypeCode.Double | TypeCode.Single) -> ValueSome Dtype.Default
68 | | _, _ -> ValueNone
69 |
70 | let widenScalarForDivision (tensorDtype: Dtype) (scalarDtype: Dtype) =
71 | match tensorDtype.IsFloatingPoint, scalarDtype.IsFloatingPoint with
72 | | false, false -> Dtype.Default
73 | | false, true -> Dtype.Default
74 | | true, false -> tensorDtype
75 | | true, true -> tensorDtype
76 |
77 |
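
A small sketch of what the widening helpers above are expected to return; it assumes the `ScalarExtensions` module is auto-opened by `open Furnace` and that `Dtype.Default` is the usual Float32 unless reconfigured.

```fsharp
open Furnace

// A floating-point scalar combined with an integral tensor widens to the default float dtype
let widened = tryWidenScalar Dtype.Int32 (2.5 :> scalar)
// expected: ValueSome Dtype.Default (typically Float32)

// An integer scalar leaves an integral tensor dtype alone
let notWidened = tryWidenScalar Dtype.Int32 (2 :> scalar)
// expected: ValueNone

// Division involving an integral tensor always moves to the default floating-point dtype
let divDtype = widenScalarForDivision Dtype.Int32 Dtype.Int32
// expected: Dtype.Default
```
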
--------------------------------------------------------------------------------
/examples/rnn.fsx:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S dotnet fsi
2 |
3 | #I "../tests/Furnace.Tests/bin/Debug/net6.0"
4 | #r "Furnace.Core.dll"
5 | #r "Furnace.Data.dll"
6 | #r "Furnace.Backends.Torch.dll"
7 |
8 | // Libtorch binaries
9 | // Option A: you can use a platform-specific nuget package
10 | #r "nuget: TorchSharp-cpu, 0.96.5"
11 | // #r "nuget: TorchSharp-cuda-linux, 0.96.5"
12 | // #r "nuget: TorchSharp-cuda-windows, 0.96.5"
13 | // Option B: you can use a local libtorch installation
14 | // System.Runtime.InteropServices.NativeLibrary.Load("/home/gunes/anaconda3/lib/python3.8/site-packages/torch/lib/libtorch.so")
15 |
16 |
17 | open Furnace
18 | open Furnace.Compose
19 | open Furnace.Model
20 | open Furnace.Data
21 | open Furnace.Optim
22 | open Furnace.Util
23 | open Furnace.Distributions
24 |
25 | open System.IO
26 |
27 | FurnaceImage.config(backend=Backend.Torch, device=Device.GPU)
28 | FurnaceImage.seed(1)
29 |
30 |
31 | // let corpus = "A merry little surge of electricity piped by automatic alarm from the mood organ beside his bed awakened Rick Deckard."
32 | download "https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt" "./shakespeare.txt"
33 | let corpus = System.IO.File.ReadAllText("./shakespeare.txt")
34 |
35 | let seqLen = 32
36 | let batchSize = 16
37 | let hiddenSize = 128
38 | let numLayers = 2
39 |
40 | let dataset = TextDataset(corpus, seqLen)
41 | let loader = dataset.loader(batchSize=batchSize, shuffle=true)
42 |
43 | let rnn = RNN(dataset.numChars, hiddenSize, numLayers=numLayers, batchFirst=true)
44 | let decoder = FurnaceImage.view([-1; hiddenSize]) --> Linear(hiddenSize, dataset.numChars)
45 | let languageModel = rnn --> decoder
46 |
47 | printfn "%s" (languageModel.summary())
48 |
49 | let modelFileName = "rnn_language_model.params"
50 | if File.Exists(modelFileName) then
51 | printfn "Resuming training from existing model params found: %A" modelFileName
52 | languageModel.state <- FurnaceImage.load(modelFileName)
53 |
54 | let predict (text:string) len =
55 | let mutable hidden = rnn.newHidden(1)
56 | let mutable prediction = text
57 | let mutable last = text
58 | for _ in 1..len do
59 | let lastTensor = last |> dataset.textToTensor
60 | let newOut, newHidden = rnn.forwardWithHidden(lastTensor.unsqueeze(0), hidden)
61 | hidden <- newHidden
62 | let nextCharProbs = newOut --> decoder --> FurnaceImage.slice([-1]) --> FurnaceImage.softmax(-1)
63 | last <- Categorical(nextCharProbs).sample() |> int |> dataset.indexToChar |> string
64 | prediction <- prediction + last
65 | prediction
66 |
67 | let optimizer = Adam(languageModel, lr=FurnaceImage.tensor(0.001))
68 |
69 | let losses = ResizeArray()
70 |
71 | let epochs = 10
72 | let validInterval = 100
73 |
74 | let start = System.DateTime.Now
75 | for epoch = 1 to epochs do
76 | for i, x, t in loader.epoch() do
77 | let input = x[*,..seqLen-2]
78 | let target = t[*,1..]
79 | languageModel.reverseDiff()
80 | let output = input --> languageModel
81 | let loss = FurnaceImage.crossEntropyLoss(output, target.view(-1))
82 | loss.reverse()
83 | optimizer.step()
84 | losses.Add(float loss)
85 | printfn "%A Epoch: %A/%A minibatch: %A/%A loss: %A" (System.DateTime.Now - start) epoch epochs (i+1) loader.length (float loss)
86 |
87 | if i % validInterval = 0 then
88 | printfn "\nSample from language model:\n%A\n" (predict "We " 512)
89 |
90 | FurnaceImage.save(languageModel.state, modelFileName)
91 |
92 | let plt = Pyplot()
93 | plt.plot(losses |> FurnaceImage.tensor)
94 | plt.xlabel("Iterations")
95 | plt.ylabel("Loss")
96 | plt.tightLayout()
97 | plt.savefig (sprintf "rnn_loss_epoch_%A_minibatch_%A.pdf" epoch (i+1))
--------------------------------------------------------------------------------
/Directory.Build.props:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | 0.100.3
6 | 6.0.3
7 |
8 |
9 | https://api.nuget.org/v3/index.json
10 | true
11 |
12 |
13 |
14 | $(RestoreSources);$(MSBuildThisFileDirectory)../TorchSharp/bin/packages/Release;
15 |
16 |
17 |
18 |
19 | $(RestoreSources);$(MSBuildThisFileDirectory)../TorchSharp/bin/packages/Debug;
20 |
21 |
22 |
23 | --warnon:1182 $(OtherFlags)
24 |
25 | --warnon:3390 $(OtherFlags)
26 |
27 | --nowarn:57 $(OtherFlags)
28 |
29 |
30 |
31 | 1.0.9
32 | Atılım Güneş Baydin, Don Syme, Barak A. Pearlmutter, Jeffrey Siskind, and Furnace contributors
33 | Furnace maintainers
34 | https://fsprojects.github.io/Furnace
35 | https://github.com/fsprojects/Furnace/
36 | dev
37 | BSD-2-Clause
38 | git
39 | Copyright 2014-2025 DiffSharp and Furnace contributors
40 | F# fsharp ML AI Machine Learning PyTorch Tensor Differentiation Gradients GPU Programming
41 | Furnace is a production-grade machine learning library bringing the power and precision of F# to Torch GPU acceleration
42 |
43 |
44 | $(Version)-local-$([System.DateTime]::Now.ToString(`yyMMdd`))
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 | $(Version)
53 |
54 |
55 | $(MSBuildThisFileDirectory)bin/packages
56 | Authors=$(Authors);Owners=$(Owners);ProjectId=$(MSBuildProjectName);PackageVersion=$(PackageVersion);TorchSharpVersion=$(TorchSharpVersion)
57 |
58 |
59 |
61 |
62 |
63 |
64 |
65 |
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestCombos.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open Furnace
9 |
10 | module Dtypes =
11 |
12 | // We run most tests at all these tensor types
13 | let Bool = [ Dtype.Bool ]
14 | let SignedIntegral = [ Dtype.Int8; Dtype.Int16; Dtype.Int32; Dtype.Int64 ]
15 | let UnsignedIntegral = [ Dtype.Byte ]
16 | let Integral = SignedIntegral @ UnsignedIntegral
17 | let Float16s = [ Dtype.Float16; Dtype.BFloat16 ]
18 | let FloatingPointExcept16s = [ Dtype.Float32; Dtype.Float64 ]
19 | let FloatingPoint = Float16s @ FloatingPointExcept16s
20 | let Float32 = [ Dtype.Float32 ]
21 |
22 | // Some operations have quirky behaviour on bool types, we pin these down manually
23 | let SignedIntegralAndFloatingPointExcept16s = FloatingPointExcept16s @ SignedIntegral
24 | let SignedIntegralAndFloatingPoint = FloatingPoint @ SignedIntegral
25 | let IntegralAndFloatingPointExcept16s = FloatingPointExcept16s @ Integral
26 | let IntegralAndFloatingPoint = FloatingPoint @ Integral
27 | let IntegralAndBool = Integral @ Bool
28 | let AllExceptFloat16s = FloatingPointExcept16s @ Integral @ Bool
29 | let All = FloatingPoint @ Integral @ Bool
30 |
31 | module Combos =
32 |
33 | // Use these to experiment in your local branch
34 | //let backends = [ Backend.Reference ]
35 | //let backends = [ Backend.Torch ]
36 | //let backends = [ Backend.Reference; Backend.Torch; Backend.Register("TestDuplicate") ]
37 | //let backends = [ Backend.Reference; Backend.Torch ]
38 | //let backends = [ Backend.Reference; Backend.Register("TestDuplicate") ]
39 | //let backends = [ Backend.Register("TestDuplicate") ]
40 | //let getDevices _ = [ Device.CPU ]
41 | //let getDevices _ = [ Device.GPU ]
42 |
43 | //Use this in committed code
44 | let backends = [ Backend.Reference; Backend.Torch ]
45 | let getDevices (deviceType: DeviceType option, backend: Backend option) =
46 | FurnaceImage.devices(?deviceType=deviceType, ?backend=backend)
47 |
48 | let makeCombos dtypes =
49 | [ for backend in backends do
50 | let ds = getDevices (None, Some backend)
51 | for device in ds do
52 | for dtype in dtypes do
53 | yield ComboInfo(defaultBackend=backend, defaultDevice=device, defaultDtype=dtype, defaultFetchDevices=getDevices) ]
54 |
55 | /// These run through all devices, backends and various Dtypes
56 | let Float32 = makeCombos Dtypes.Float32
57 | let Integral = makeCombos Dtypes.Integral
58 | let FloatingPointExcept16s = makeCombos Dtypes.FloatingPointExcept16s
59 | let FloatingPoint = makeCombos Dtypes.FloatingPoint
60 | let UnsignedIntegral = makeCombos Dtypes.UnsignedIntegral
61 | let SignedIntegral = makeCombos Dtypes.SignedIntegral
62 | let SignedIntegralAndFloatingPointExcept16s = makeCombos Dtypes.SignedIntegralAndFloatingPointExcept16s
63 | let SignedIntegralAndFloatingPoint = makeCombos Dtypes.SignedIntegralAndFloatingPoint
64 | let IntegralAndFloatingPointExcept16s = makeCombos Dtypes.IntegralAndFloatingPointExcept16s
65 | let IntegralAndFloatingPoint = makeCombos Dtypes.IntegralAndFloatingPoint
66 | let Bool = makeCombos Dtypes.Bool
67 | let IntegralAndBool = makeCombos Dtypes.IntegralAndBool
68 | let All = makeCombos Dtypes.All
69 | let AllExcept16s = makeCombos Dtypes.AllExceptFloat16s
70 |
71 | /// This runs through all devices and backends with the default Dtype fixed to Float32
72 | let AllDevicesAndBackendsFloat32 =
73 | [ for backend in backends do
74 | let ds = getDevices (None, Some backend)
75 | for device in ds do
76 | yield ComboInfo(defaultDtype=Dtype.Float32, defaultBackend=backend, defaultDevice=device, defaultFetchDevices=getDevices) ]
77 |
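
For orientation, a hedged sketch of how these combos are typically consumed in the test suite, following the same pattern as TestOp.Outer.fs elsewhere in this listing (the fixture and member names here are invented for illustration):

```fsharp
open NUnit.Framework
open Furnace

[<TestFixture>]
type TestExampleUsingCombos () =

    [<Test>]
    member _.TestAddAcrossCombos () =
        // Each combo fixes a backend, device and dtype; combo.tensor creates
        // tensors using those defaults.
        for combo in Combos.FloatingPointExcept16s do
            let a = combo.tensor([ 1.0; 2.0 ])
            let b = combo.tensor([ 3.0; 4.0 ])
            let c = a + b
            Assert.True(combo.tensor([ 4.0; 6.0 ]).allclose(c, 0.01))
```
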
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestModel.Conv.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open NUnit.Framework
9 | open Furnace
10 | open Furnace.Compose
11 | open Furnace.Model
12 | open Furnace.Data
13 | open Furnace.Optim
14 |
15 |
16 | [<TestFixture>]
17 | type TestModelConv () =
18 |
19 | [<Test>]
20 | member _.TestModelConvTranspose1d () =
21 | let x = FurnaceImage.randn([5; 3; 12])
22 | let m = ConvTranspose1d(3, 4, 3)
23 | let y = x --> m
24 | let yShape = y.shape
25 | let yShapeCorrect = [|5; 4; 14|]
26 | Assert.CheckEqual(yShapeCorrect, yShape)
27 |
28 | let x = FurnaceImage.randn([3; 3; 12])
29 | let m = ConvTranspose1d(3, 5, 2, dilation=5)
30 | let y = x --> m
31 | let yShape = y.shape
32 | let yShapeCorrect = [|3; 5; 17|]
33 | Assert.CheckEqual(yShapeCorrect, yShape)
34 |
35 | [<Test>]
36 | member _.TestModelConvTranspose2d () =
37 | let x = FurnaceImage.randn([3; 3; 12; 6])
38 | let m = ConvTranspose2d(3, 5, 3)
39 | let y = x --> m
40 | let yShape = y.shape
41 | let yShapeCorrect = [|3; 5; 14; 8|]
42 | Assert.CheckEqual(yShapeCorrect, yShape)
43 |
44 | let x = FurnaceImage.randn([2; 3; 12; 6])
45 | let m = ConvTranspose2d(3, 1, 5, stride=2)
46 | let y = x --> m
47 | let yShape = y.shape
48 | let yShapeCorrect = [|2; 1; 27; 15|]
49 | Assert.CheckEqual(yShapeCorrect, yShape)
50 |
51 | [<Test>]
52 | member _.TestModelConvTranspose3d () =
53 | let x = FurnaceImage.randn([2; 3; 12; 6; 6])
54 | let m = ConvTranspose3d(3, 2, 3)
55 | let y = x --> m
56 | let yShape = y.shape
57 | let yShapeCorrect = [|2; 2; 14; 8; 8|]
58 | Assert.CheckEqual(yShapeCorrect, yShape)
59 |
60 | let x = FurnaceImage.randn([2; 3; 12; 6; 6])
61 | let m = ConvTranspose3d(3, 2, 2, padding=1)
62 | let y = x --> m
63 | let yShape = y.shape
64 | let yShapeCorrect = [|2; 2; 11; 5; 5|]
65 | Assert.CheckEqual(yShapeCorrect, yShape)
66 |
67 | [<Test>]
68 | member _.TestModelConvTranspose1dSaveLoadState () =
69 | let inChannels = 4
70 | let outChannels = 4
71 | let kernelSize = 3
72 | let batchSize = 2
73 | let d = 5
74 | let net = ConvTranspose1d(inChannels, outChannels, kernelSize)
75 |
76 | let fileName = System.IO.Path.GetTempFileName()
77 | FurnaceImage.save(net.state, fileName)
78 | let _ = FurnaceImage.randn([batchSize; inChannels; d]) --> net
79 | net.state <- FurnaceImage.load(fileName)
80 | Assert.True(true)
81 |
82 | [<Test>]
83 | member _.TestModelConvTranspose2dSaveLoadState () =
84 | let inChannels = 4
85 | let outChannels = 4
86 | let kernelSize = 3
87 | let batchSize = 2
88 | let d = 5
89 | let net = ConvTranspose2d(inChannels, outChannels, kernelSize)
90 |
91 | let fileName = System.IO.Path.GetTempFileName()
92 | FurnaceImage.save(net.state, fileName)
93 | let _ = FurnaceImage.randn([batchSize; inChannels; d; d]) --> net
94 | net.state <- FurnaceImage.load(fileName)
95 | Assert.True(true)
96 |
97 | [<Test>]
98 | member _.TestModelConvTranspose3dSaveLoadState () =
99 | let inChannels = 4
100 | let outChannels = 4
101 | let kernelSize = 3
102 | let batchSize = 2
103 | let d = 5
104 | let net = ConvTranspose3d(inChannels, outChannels, kernelSize)
105 |
106 | let fileName = System.IO.Path.GetTempFileName()
107 | FurnaceImage.save(net.state, fileName)
108 | let _ = FurnaceImage.randn([batchSize; inChannels; d; d; d]) --> net
109 | net.state <- FurnaceImage.load(fileName)
110 | Assert.True(true)
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestModel.ConvTranspose.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open NUnit.Framework
9 | open Furnace
10 | open Furnace.Compose
11 | open Furnace.Model
12 | open Furnace.Data
13 | open Furnace.Optim
14 |
15 |
16 | [<TestFixture>]
17 | type TestModelConvTranspose () =
18 |
19 | [<Test>]
20 | member _.TestModelConvTranspose1d () =
21 | let x = FurnaceImage.randn([5; 3; 12])
22 | let m = ConvTranspose1d(3, 4, 3)
23 | let y = x --> m
24 | let yShape = y.shape
25 | let yShapeCorrect = [|5; 4; 14|]
26 | Assert.CheckEqual(yShapeCorrect, yShape)
27 |
28 | let x = FurnaceImage.randn([3; 3; 12])
29 | let m = ConvTranspose1d(3, 5, 2, dilation=5)
30 | let y = x --> m
31 | let yShape = y.shape
32 | let yShapeCorrect = [|3; 5; 17|]
33 | Assert.CheckEqual(yShapeCorrect, yShape)
34 |
35 | [<Test>]
36 | member _.TestModelConvTranspose2d () =
37 | let x = FurnaceImage.randn([3; 3; 12; 6])
38 | let m = ConvTranspose2d(3, 5, 3)
39 | let y = x --> m
40 | let yShape = y.shape
41 | let yShapeCorrect = [|3; 5; 14; 8|]
42 | Assert.CheckEqual(yShapeCorrect, yShape)
43 |
44 | let x = FurnaceImage.randn([2; 3; 12; 6])
45 | let m = ConvTranspose2d(3, 1, 5, stride=2)
46 | let y = x --> m
47 | let yShape = y.shape
48 | let yShapeCorrect = [|2; 1; 27; 15|]
49 | Assert.CheckEqual(yShapeCorrect, yShape)
50 |
51 | [<Test>]
52 | member _.TestModelConvTranspose3d () =
53 | let x = FurnaceImage.randn([2; 3; 12; 6; 6])
54 | let m = ConvTranspose3d(3, 2, 3)
55 | let y = x --> m
56 | let yShape = y.shape
57 | let yShapeCorrect = [|2; 2; 14; 8; 8|]
58 | Assert.CheckEqual(yShapeCorrect, yShape)
59 |
60 | let x = FurnaceImage.randn([2; 3; 12; 6; 6])
61 | let m = ConvTranspose3d(3, 2, 2, padding=1)
62 | let y = x --> m
63 | let yShape = y.shape
64 | let yShapeCorrect = [|2; 2; 11; 5; 5|]
65 | Assert.CheckEqual(yShapeCorrect, yShape)
66 |
67 | [<Test>]
68 | member _.TestModelConvTranspose1dSaveLoadState () =
69 | let inChannels = 4
70 | let outChannels = 4
71 | let kernelSize = 3
72 | let batchSize = 2
73 | let d = 5
74 | let net = ConvTranspose1d(inChannels, outChannels, kernelSize)
75 |
76 | let fileName = System.IO.Path.GetTempFileName()
77 | FurnaceImage.save(net.state, fileName)
78 | let _ = FurnaceImage.randn([batchSize; inChannels; d]) --> net
79 | net.state <- FurnaceImage.load(fileName)
80 | Assert.True(true)
81 |
82 | [<Test>]
83 | member _.TestModelConvTranspose2dSaveLoadState () =
84 | let inChannels = 4
85 | let outChannels = 4
86 | let kernelSize = 3
87 | let batchSize = 2
88 | let d = 5
89 | let net = ConvTranspose2d(inChannels, outChannels, kernelSize)
90 |
91 | let fileName = System.IO.Path.GetTempFileName()
92 | FurnaceImage.save(net.state, fileName)
93 | let _ = FurnaceImage.randn([batchSize; inChannels; d; d]) --> net
94 | net.state <- FurnaceImage.load(fileName)
95 | Assert.True(true)
96 |
97 | [<Test>]
98 | member _.TestModelConvTranspose3dSaveLoadState () =
99 | let inChannels = 4
100 | let outChannels = 4
101 | let kernelSize = 3
102 | let batchSize = 2
103 | let d = 5
104 | let net = ConvTranspose3d(inChannels, outChannels, kernelSize)
105 |
106 | let fileName = System.IO.Path.GetTempFileName()
107 | FurnaceImage.save(net.state, fileName)
108 | let _ = FurnaceImage.randn([batchSize; inChannels; d; d; d]) --> net
109 | net.state <- FurnaceImage.load(fileName)
110 | Assert.True(true)
--------------------------------------------------------------------------------
/tests/Furnace.Tests/Furnace.Tests.fsproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | net6.0
5 | false
6 | false
7 | x64
8 |
9 |
10 |
11 |
12 | runtime; build; native; contentfiles; analyzers; buildtransitive
13 | all
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
--------------------------------------------------------------------------------
/docs/img/badge-notebook.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/Furnace.Benchmarks.Python/results.csv:
--------------------------------------------------------------------------------
1 | fromCpuData16float32cpu,571
2 | fromCpuData16float32cuda,3543
3 | fromCpuData16float64cpu,539
4 | fromCpuData16float64cuda,3565
5 | fromCpuData16int32cpu,515
6 | fromCpuData16int32cuda,3374
7 | fromCpuData2048float32cpu,28
8 | fromCpuData2048float32cuda,53
9 | fromCpuData2048float64cpu,29
10 | fromCpuData2048float64cuda,52
11 | fromCpuData2048int32cpu,22
12 | fromCpuData2048int32cuda,48
13 | fromCpuData65536float32cpu,30
14 | fromCpuData65536float32cuda,31
15 | fromCpuData65536float64cpu,31
16 | fromCpuData65536float64cuda,38
17 | fromCpuData65536int32cpu,24
18 | fromCpuData65536int32cuda,25
19 | zeros16float32cpu,1625
20 | zeros16float32cuda,5444
21 | zeros16float64cpu,1673
22 | zeros16float64cuda,5246
23 | zeros16int32cpu,1656
24 | zeros16int32cuda,5128
25 | zeros2048float32cpu,15
26 | zeros2048float32cuda,40
27 | zeros2048float64cpu,17
28 | zeros2048float64cuda,40
29 | zeros2048int32cpu,15
30 | zeros2048int32cuda,42
31 | zeros65536float32cpu,1
32 | zeros65536float32cuda,1
33 | zeros65536float64cpu,2
34 | zeros65536float64cuda,1
35 | zeros65536int32cpu,1
36 | zeros65536int32cuda,1
37 | ones16float32cpu,1759
38 | ones16float32cuda,5160
39 | ones16float64cpu,1667
40 | ones16float64cuda,5088
41 | ones16int32cpu,1637
42 | ones16int32cuda,4960
43 | ones2048float32cpu,14
44 | ones2048float32cuda,41
45 | ones2048float64cpu,18
46 | ones2048float64cuda,40
47 | ones2048int32cpu,13
48 | ones2048int32cuda,39
49 | ones65536float32cpu,1
50 | ones65536float32cuda,1
51 | ones65536float64cpu,3
52 | ones65536float64cuda,1
53 | ones65536int32cpu,1
54 | ones65536int32cuda,1
55 | rand16float32cpu,1863
56 | rand16float32cuda,5611
57 | rand16float64cpu,1932
58 | rand16float64cuda,5458
59 | rand2048float32cpu,32
60 | rand2048float32cuda,45
61 | rand2048float64cpu,54
62 | rand2048float64cuda,42
63 | rand65536float32cpu,18
64 | rand65536float32cuda,1
65 | rand65536float64cpu,38
66 | rand65536float64cuda,1
67 | addition16float32cpu,752
68 | addition16float32cuda,3475
69 | addition16float64cpu,756
70 | addition16float64cuda,3187
71 | addition16int32cpu,725
72 | addition16int32cuda,3065
73 | addition2048float32cpu,9
74 | addition2048float32cuda,27
75 | addition2048float64cpu,13
76 | addition2048float64cuda,26
77 | addition2048int32cpu,8
78 | addition2048int32cuda,26
79 | addition65536float32cpu,7
80 | addition65536float32cuda,5
81 | addition65536float64cpu,11
82 | addition65536float64cuda,5
83 | addition65536int32cpu,6
84 | addition65536int32cuda,4
85 | addInPlace16float32cpu,400
86 | addInPlace16float32cuda,1769
87 | addInPlace16float64cpu,391
88 | addInPlace16float64cuda,1797
89 | addInPlace16int32cpu,392
90 | addInPlace16int32cuda,1699
91 | addInPlace2048float32cpu,5
92 | addInPlace2048float32cuda,14
93 | addInPlace2048float64cpu,7
94 | addInPlace2048float64cuda,14
95 | addInPlace2048int32cpu,5
96 | addInPlace2048int32cuda,14
97 | addInPlace65536float32cpu,6
98 | addInPlace65536float32cuda,4
99 | addInPlace65536float64cpu,10
100 | addInPlace65536float64cuda,5
101 | addInPlace65536int32cpu,5
102 | addInPlace65536int32cuda,4
103 | addWithAlpha16float32cpu,894
104 | addWithAlpha16float32cuda,3633
105 | addWithAlpha16float64cpu,898
106 | addWithAlpha16float64cuda,3708
107 | addWithAlpha16int32cpu,882
108 | addWithAlpha16int32cuda,3662
109 | addWithAlpha2048float32cpu,10
110 | addWithAlpha2048float32cuda,28
111 | addWithAlpha2048float64cpu,14
112 | addWithAlpha2048float64cuda,27
113 | addWithAlpha2048int32cpu,9
114 | addWithAlpha2048int32cuda,28
115 | addWithAlpha65536float32cpu,7
116 | addWithAlpha65536float32cuda,5
117 | addWithAlpha65536float64cpu,9
118 | addWithAlpha65536float64cuda,5
119 | addWithAlpha65536int32cpu,6
120 | addWithAlpha65536int32cuda,4
121 | addScalar16float32cpu,1854
122 | addScalar16float32cuda,4497
123 | addScalar16float64cpu,1919
124 | addScalar16float64cuda,4454
125 | addScalar16int32cpu,1924
126 | addScalar16int32cuda,4234
127 | addScalar2048float32cpu,18
128 | addScalar2048float32cuda,34
129 | addScalar2048float64cpu,24
130 | addScalar2048float64cuda,34
131 | addScalar2048int32cpu,18
132 | addScalar2048int32cuda,36
133 | addScalar65536float32cpu,7
134 | addScalar65536float32cuda,5
135 | addScalar65536float64cpu,11
136 | addScalar65536float64cuda,5
137 | addScalar65536int32cpu,7
138 | addScalar65536int32cuda,5
139 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 |
4 | # User-specific files
5 | *.suo
6 | *.user
7 | *.userosscache
8 | *.sln.docstates
9 |
10 | # User-specific files (MonoDevelop/Xamarin Studio)
11 | *.userprefs
12 |
13 | # Build results
14 | [Dd]ebug/
15 | [Dd]ebugPublic/
16 | [Rr]elease/
17 | [Rr]eleases/
18 | x64/
19 | x86/
20 | build/
21 | bld/
22 | [Bb]in/
23 | [Oo]bj/
24 |
25 | # Visual Studio 2015 cache/options directory
26 | .vs/
27 |
28 | # MSTest test Results
29 | [Tt]est[Rr]esult*/
30 | [Bb]uild[Ll]og.*
31 |
32 | # NUNIT
33 | *.VisualState.xml
34 | TestResult.xml
35 |
36 | # Build Results of an ATL Project
37 | [Dd]ebugPS/
38 | [Rr]eleasePS/
39 | dlldata.c
40 |
41 | # DNX
42 | project.lock.json
43 | artifacts/
44 |
45 | *_i.c
46 | *_p.c
47 | *_i.h
48 | *.ilk
49 | *.meta
50 | *.obj
51 | *.pch
52 | *.pdb
53 | *.pgc
54 | *.pgd
55 | *.rsp
56 | *.sbr
57 | *.tlb
58 | *.tli
59 | *.tlh
60 | *.tmp
61 | *.tmp_proj
62 | *.log
63 | *.vspscc
64 | *.vssscc
65 | .builds
66 | *.pidb
67 | *.svclog
68 | *.scc
69 |
70 | # Chutzpah Test files
71 | _Chutzpah*
72 |
73 | # Visual C++ cache files
74 | ipch/
75 | *.aps
76 | *.ncb
77 | *.opensdf
78 | *.sdf
79 | *.cachefile
80 |
81 | # Visual Studio profiler
82 | *.psess
83 | *.vsp
84 | *.vspx
85 |
86 | # TFS 2012 Local Workspace
87 | $tf/
88 |
89 | # Guidance Automation Toolkit
90 | *.gpState
91 |
92 | # ReSharper is a .NET coding add-in
93 | _ReSharper*/
94 | *.[Rr]e[Ss]harper
95 | *.DotSettings.user
96 |
97 | # JustCode is a .NET coding add-in
98 | .JustCode
99 |
100 | # TeamCity is a build add-in
101 | _TeamCity*
102 |
103 | # DotCover is a Code Coverage Tool
104 | *.dotCover
105 |
106 | # NCrunch
107 | _NCrunch_*
108 | .*crunch*.local.xml
109 |
110 | # MightyMoose
111 | *.mm.*
112 | AutoTest.Net/
113 |
114 | # Web workbench (sass)
115 | .sass-cache/
116 |
117 | # Installshield output folder
118 | [Ee]xpress/
119 |
120 | # DocProject is a documentation generator add-in
121 | DocProject/buildhelp/
122 | DocProject/Help/*.HxT
123 | DocProject/Help/*.HxC
124 | DocProject/Help/*.hhc
125 | DocProject/Help/*.hhk
126 | DocProject/Help/*.hhp
127 | DocProject/Help/Html2
128 | DocProject/Help/html
129 |
130 | # Click-Once directory
131 | publish/
132 |
133 | # Publish Web Output
134 | *.[Pp]ublish.xml
135 | *.azurePubxml
136 | ## TODO: Comment the next line if you want to checkin your
137 | ## web deploy settings but do note that will include unencrypted
138 | ## passwords
139 | #*.pubxml
140 |
141 | *.publishproj
142 |
143 | # NuGet Packages
144 | *.nupkg
145 | # The packages folder can be ignored because of Package Restore
146 | **/packages/*
147 | # except build/, which is used as an MSBuild target.
148 | !**/packages/build/
149 | # Uncomment if necessary however generally it will be regenerated when needed
150 | #!**/packages/repositories.config
151 |
152 | # Windows Azure Build Output
153 | csx/
154 | *.build.csdef
155 |
156 | # Windows Store app package directory
157 | AppPackages/
158 |
159 | # Visual Studio cache files
160 | # files ending in .cache can be ignored
161 | *.[Cc]ache
162 | # but keep track of directories ending in .cache
163 | !*.[Cc]ache/
164 |
165 | # Others
166 | ClientBin/
167 | [Ss]tyle[Cc]op.*
168 | ~$*
169 | *~
170 | *.dbmdl
171 | *.dbproj.schemaview
172 | *.pfx
173 | *.publishsettings
174 | node_modules/
175 | orleans.codegen.cs
176 |
177 | # RIA/Silverlight projects
178 | Generated_Code/
179 |
180 | # Backup & report files from converting an old project file
181 | # to a newer Visual Studio version. Backup files are not needed,
182 | # because we have git ;-)
183 | _UpgradeReport_Files/
184 | Backup*/
185 | UpgradeLog*.XML
186 | UpgradeLog*.htm
187 |
188 | # SQL Server files
189 | *.mdf
190 | *.ldf
191 |
192 | # Business Intelligence projects
193 | *.rdl.data
194 | *.bim.layout
195 | *.bim_*.settings
196 |
197 | # Microsoft Fakes
198 | FakesAssemblies/
199 |
200 | # Node.js Tools for Visual Studio
201 | .ntvs_analysis.dat
202 |
203 | # Visual Studio 6 build log
204 | *.plg
205 |
206 | # Visual Studio 6 workspace options file
207 | *.opt
208 |
209 | # LightSwitch generated files
210 | GeneratedArtifacts/
211 | _Pvt_Extensions/
212 | ModelManifest.xml
213 |
214 | .ionide
215 | .git
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 |
4 | # User-specific files
5 | *.suo
6 | *.user
7 | *.userosscache
8 | *.sln.docstates
9 |
10 | # User-specific files (MonoDevelop/Xamarin Studio)
11 | *.userprefs
12 |
13 | # Build results
14 | [Dd]ebug/
15 | [Dd]ebugPublic/
16 | [Rr]elease/
17 | [Rr]eleases/
18 | x64/
19 | x86/
20 | build/
21 | bld/
22 | [Bb]in/
23 | [Oo]bj/
24 |
25 | # Visual Studio 2015 cache/options directory
26 | .vs/
27 |
28 | # MSTest test Results
29 | [Tt]est[Rr]esult*/
30 | [Bb]uild[Ll]og.*
31 |
32 | # NUNIT
33 | *.VisualState.xml
34 | TestResult.xml
35 |
36 | # Build Results of an ATL Project
37 | [Dd]ebugPS/
38 | [Rr]eleasePS/
39 | dlldata.c
40 |
41 | # DNX
42 | project.lock.json
43 | artifacts/
44 |
45 | *_i.c
46 | *_p.c
47 | *_i.h
48 | *.ilk
49 | *.meta
50 | *.obj
51 | *.pch
52 | *.pdb
53 | *.pgc
54 | *.pgd
55 | *.rsp
56 | *.sbr
57 | *.tlb
58 | *.tli
59 | *.tlh
60 | *.tmp
61 | *.tmp_proj
62 | *.log
63 | *.vspscc
64 | *.vssscc
65 | .builds
66 | *.pidb
67 | *.svclog
68 | *.scc
69 |
70 | # Chutzpah Test files
71 | _Chutzpah*
72 |
73 | # Visual C++ cache files
74 | ipch/
75 | *.aps
76 | *.ncb
77 | *.opensdf
78 | *.sdf
79 | *.cachefile
80 |
81 | # Visual Studio profiler
82 | *.psess
83 | *.vsp
84 | *.vspx
85 |
86 | # TFS 2012 Local Workspace
87 | $tf/
88 |
89 | # Guidance Automation Toolkit
90 | *.gpState
91 |
92 | # ReSharper is a .NET coding add-in
93 | _ReSharper*/
94 | *.[Rr]e[Ss]harper
95 | *.DotSettings.user
96 |
97 | # JustCode is a .NET coding add-in
98 | .JustCode
99 |
100 | # TeamCity is a build add-in
101 | _TeamCity*
102 |
103 | # DotCover is a Code Coverage Tool
104 | *.dotCover
105 |
106 | # NCrunch
107 | _NCrunch_*
108 | .*crunch*.local.xml
109 |
110 | # MightyMoose
111 | *.mm.*
112 | AutoTest.Net/
113 |
114 | # Web workbench (sass)
115 | .sass-cache/
116 |
117 | # Installshield output folder
118 | [Ee]xpress/
119 |
120 | # DocProject is a documentation generator add-in
121 | DocProject/buildhelp/
122 | DocProject/Help/*.HxT
123 | DocProject/Help/*.HxC
124 | DocProject/Help/*.hhc
125 | DocProject/Help/*.hhk
126 | DocProject/Help/*.hhp
127 | DocProject/Help/Html2
128 | DocProject/Help/html
129 |
130 | # Click-Once directory
131 | publish/
132 |
133 | # Publish Web Output
134 | *.[Pp]ublish.xml
135 | *.azurePubxml
136 | ## TODO: Comment the next line if you want to checkin your
137 | ## web deploy settings but do note that will include unencrypted
138 | ## passwords
139 | #*.pubxml
140 |
141 | *.publishproj
142 |
143 | # NuGet Packages
144 | *.nupkg
145 | # The packages folder can be ignored because of Package Restore
146 | **/packages/*
147 | # except build/, which is used as an MSBuild target.
148 | !**/packages/build/
149 | # Uncomment if necessary however generally it will be regenerated when needed
150 | #!**/packages/repositories.config
151 |
152 | # Windows Azure Build Output
153 | csx/
154 | *.build.csdef
155 |
156 | # Windows Store app package directory
157 | AppPackages/
158 |
159 | # Visual Studio cache files
160 | # files ending in .cache can be ignored
161 | *.[Cc]ache
162 | # but keep track of directories ending in .cache
163 | !*.[Cc]ache/
164 |
165 | # Others
166 | ClientBin/
167 | [Ss]tyle[Cc]op.*
168 | ~$*
169 | *~
170 | *.dbmdl
171 | *.dbproj.schemaview
172 | *.pfx
173 | *.publishsettings
174 | node_modules/
175 | orleans.codegen.cs
176 |
177 | # RIA/Silverlight projects
178 | Generated_Code/
179 |
180 | # Backup & report files from converting an old project file
181 | # to a newer Visual Studio version. Backup files are not needed,
182 | # because we have git ;-)
183 | _UpgradeReport_Files/
184 | Backup*/
185 | UpgradeLog*.XML
186 | UpgradeLog*.htm
187 |
188 | # SQL Server files
189 | *.mdf
190 | *.ldf
191 |
192 | # Business Intelligence projects
193 | *.rdl.data
194 | *.bim.layout
195 | *.bim_*.settings
196 |
197 | # Microsoft Fakes
198 | FakesAssemblies/
199 |
200 | # Node.js Tools for Visual Studio
201 | .ntvs_analysis.dat
202 |
203 | # Visual Studio 6 build log
204 | *.plg
205 |
206 | # Visual Studio 6 workspace options file
207 | *.opt
208 |
209 | # LightSwitch generated files
210 | GeneratedArtifacts/
211 | _Pvt_Extensions/
212 | ModelManifest.xml
213 |
214 | *opencover.xml
215 |
216 | .ionide
217 | .vscode
218 | .fake
219 |
220 | data/
221 | output/
222 | .fsdocs/
223 | tmp/
224 | docs/.ipynb_checkpoints/
225 | examples/*.pdf
226 | examples/*.params
227 | examples/*.txt
--------------------------------------------------------------------------------
/tests/Furnace.Benchmarks.Python/BasicTensorOpsPerfPython.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atilim Gunes Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace.Benchmarks.Python
7 |
8 | open BenchmarkDotNet.Attributes
9 | open BenchmarkDotNet.Configs
10 | open Furnace.Benchmarks
11 |
12 | open System
13 | open Python.Runtime
14 |
15 | [<AutoOpen>]
16 | module PythonHelpers =
17 | // take the lock
18 | let gil = Py.GIL()
19 | let scope = Py.CreateScope()
20 | // your mileage may differ
21 | if Environment.GetEnvironmentVariable("COMPUTERNAME") = "MSRC-3617253" then
22 | Environment.SetEnvironmentVariable("PYTHONHOME", @"C:\ProgramData\Anaconda3\", EnvironmentVariableTarget.User)
23 | if isNull (Environment.GetEnvironmentVariable "PYTHONHOME") then failwith "expect PYTHONHOME to be set"
24 | let _prepPython = scope.Exec("import torch")
25 |
26 | let execPython(code) =
27 | scope.Exec(code) |> ignore
28 |
29 | //[]
30 | []
31 | []
32 | []
33 | type BasicTensorOps() =
34 |
35 | inherit BasicTensorTestMatrix()
36 |
37 | // The tests here must match the corresponding ones in the main Furnace.Benchmarks project
38 | [<Benchmark>]
39 | member perf.fromCpuData_PyTorch() =
40 | let n = perf.numIterations(2)
41 | execPython(sprintf """
42 | for x in range(%d):
43 | torch.tensor(range(%d), dtype=torch.%s, device="%s")
44 | """ n perf.tensorSize perf.dtypeName perf.deviceName )
45 |
46 | #if !TINY
47 | [<Benchmark>]
48 | member perf.zeros_PyTorch() =
49 | let n = perf.numIterations(10)
50 | execPython(sprintf """
51 | res = torch.tensor(1)
52 | for x in range(%d):
53 | res = torch.zeros(%d, dtype=torch.%s, device="%s")
54 | """ n perf.tensorSize perf.dtypeName perf.deviceName )
55 |
56 | [<Benchmark>]
57 | member perf.ones_PyTorch() =
58 | let n = perf.numIterations(10)
59 | execPython(sprintf """
60 | import torch
61 | res = torch.tensor(1)
62 | for x in range(%d):
63 | res = torch.ones(%d, dtype=torch.%s, device="%s")
64 | """ n perf.tensorSize perf.dtypeName perf.deviceName )
65 |
66 |
67 | [<Benchmark>]
68 | member perf.rand_PyTorch() =
69 | let n = perf.numIterations(10)
70 | execPython(sprintf """
71 | import torch
72 | res = torch.tensor(1)
73 | for x in range(%d):
74 | res = torch.rand(%d, dtype=torch.%s, device="%s")
75 | """ n perf.tensorSize perf.dtypeName perf.deviceName )
76 |
77 |
78 | [<Benchmark>]
79 | member perf.addition_PyTorch() =
80 | let n = perf.numIterations(10)
81 | execPython(sprintf """
82 | t = torch.tensor(range(%d), dtype=torch.%s, device="%s")
83 | res = t
84 | for x in range(%d):
85 | res = t + t
86 | """ perf.tensorSize perf.dtypeName perf.deviceName n )
87 |
88 | [<Benchmark>]
89 | member perf.addInPlace_PyTorch() =
90 | let n = perf.numIterations(10)
91 | execPython(sprintf """
92 | import torch
93 | t = torch.tensor(range(%d), dtype=torch.%s, device="%s")
94 | res = t
95 | for x in range(%d):
96 | res = t.add_(t)
97 | """ perf.tensorSize perf.dtypeName perf.deviceName n )
98 |
99 |
100 | [<Benchmark>]
101 | member perf.addWithAlpha_PyTorch() =
102 | let n = perf.numIterations(10)
103 | execPython(sprintf """
104 | import torch
105 | t = torch.tensor(range(%d), dtype=torch.%s, device="%s")
106 | res = t
107 | for x in range(%d):
108 | res = t.add(t, alpha=3)
109 | """ perf.tensorSize perf.dtypeName perf.deviceName n )
110 |
111 | [<Benchmark>]
112 | member perf.addScalar_PyTorch() =
113 | let n = perf.numIterations(10)
114 | execPython(sprintf """
115 | import torch
116 | t = torch.tensor(range(%d), dtype=torch.%s, device="%s")
117 | res = t
118 | for x in range(%d):
119 | res = t + 1
120 | """ perf.tensorSize perf.dtypeName perf.deviceName n )
121 |
122 |
123 | #endif
124 |
125 |
--------------------------------------------------------------------------------
/examples/classifier.fsx:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S dotnet fsi
2 |
3 | #I "../tests/Furnace.Tests/bin/Debug/net6.0"
4 | #r "Furnace.Core.dll"
5 | #r "Furnace.Data.dll"
6 | #r "Furnace.Backends.Torch.dll"
7 |
8 | // Libtorch binaries
9 | // Option A: you can use a platform-specific nuget package
10 | #r "nuget: TorchSharp-cpu, 0.96.5"
11 | // #r "nuget: TorchSharp-cuda-linux, 0.96.5"
12 | // #r "nuget: TorchSharp-cuda-windows, 0.96.5"
13 | // Option B: you can use a local libtorch installation
14 | // System.Runtime.InteropServices.NativeLibrary.Load("/home/gunes/anaconda3/lib/python3.8/site-packages/torch/lib/libtorch.so")
15 |
16 |
17 | open Furnace
18 | open Furnace.Model
19 | open Furnace.Compose
20 | open Furnace.Optim
21 | open Furnace.Data
22 | open Furnace.Util
23 |
24 | FurnaceImage.config(backend=Backend.Torch, device=Device.CPU)
25 | FurnaceImage.seed(0)
26 |
27 | // PyTorch style
28 | // type Classifier() =
29 | // inherit Model()
30 | // let conv1 = Conv2d(1, 32, 3, 2)
31 | // let conv2 = Conv2d(32, 64, 3, 2)
32 | // let fc1 = Linear(576, 128)
33 | // let fc2 = Linear(128, 10)
34 | // do base.add([conv1; conv2; fc1; fc2])
35 | // override self.forward(x) =
36 | // x
37 | // |> conv1.forward
38 | // |> FurnaceImage.relu
39 | // |> conv2.forward
40 | // |> FurnaceImage.relu
41 | // |> FurnaceImage.maxpool2d(2)
42 | // |> FurnaceImage.dropout(0.25)
43 | // |> FurnaceImage.flatten(1)
44 | // |> fc1.forward
45 | // |> FurnaceImage.relu
46 | // |> FurnaceImage.dropout(0.5)
47 | // |> fc2.forward
48 | // |> FurnaceImage.logsoftmax(dim=1)
49 | // let classifier = Classifier()
50 |
51 | // Furnace compositional style
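   | // Layer sizing note: for 28x28 MNIST inputs, the two stride-2, kernel-3 convolutions give
   | // spatial sizes 28 -> 13 -> 6, maxpool2d(2) gives 3x3, so flatten(1) yields 64*3*3 = 576
   | // features, which is why the first Linear below is Linear(576, 128).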
52 | let classifier =
53 | Conv2d(1, 32, 3, 2)
54 | --> FurnaceImage.relu
55 | --> Conv2d(32, 64, 3, 2)
56 | --> FurnaceImage.relu
57 | --> FurnaceImage.maxpool2d(2)
58 | --> FurnaceImage.dropout(0.25)
59 | --> FurnaceImage.flatten(1)
60 | --> Linear(576, 128)
61 | --> FurnaceImage.relu
62 | --> FurnaceImage.dropout(0.5)
63 | --> Linear(128, 10)
64 | --> FurnaceImage.logsoftmax(dim=1)
65 |
66 | let epochs = 20
67 | let batchSize = 64
68 | let numSamples = 4
69 |
70 | let urls = ["https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz";
71 | "https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz";
72 | "https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz";
73 | "https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz"]
74 |
75 | let trainSet = MNIST("../data", urls=urls, train=true)
76 | let trainLoader = trainSet.loader(batchSize=batchSize, shuffle=true)
77 | let validSet = MNIST("../data", urls=urls, train=false)
78 | let validLoader = validSet.loader(batchSize=batchSize, shuffle=false)
79 |
80 |
81 | printfn "Model:\n%s" (classifier.summary())
82 |
83 | let optimizer = Adam(classifier, lr=FurnaceImage.tensor(0.001))
84 |
85 | for epoch = 1 to epochs do
86 | for i, data, target in trainLoader.epoch() do
87 | classifier.reverseDiff()
88 | let output = data --> classifier
89 | let l = FurnaceImage.nllLoss(output, target)
90 | l.reverse()
91 | optimizer.step()
92 | if i % 10 = 0 then
93 | printfn "Epoch: %A/%A, minibatch: %A/%A, loss: %A" epoch epochs i trainLoader.length (float(l))
94 |
95 |
96 | printfn "Computing validation loss"
97 | classifier.noDiff()
98 | let mutable validLoss = FurnaceImage.zero()
99 | let mutable correct = 0
100 | for j, data, target in validLoader.epoch() do
101 | let output = data --> classifier
102 | validLoss <- validLoss + FurnaceImage.nllLoss(output, target, reduction="sum")
103 | let pred = output.argmax(1)
104 | correct <- correct + int (pred.eq(target).sum())
105 | validLoss <- validLoss / validSet.length
106 | let accuracy = 100.*(float correct) / (float validSet.length)
107 | printfn "\nValidation loss: %A, accuracy: %.2f%%" (float validLoss) accuracy
108 |
109 | let samples, sampleLabels = validLoader.batch(numSamples)
110 | printfn "Sample predictions:\n%s" (samples.toImageString(gridCols=4))
111 | printfn "True labels : %A " (sampleLabels.int())
112 | let predictedLabels = (samples --> classifier).argmax(dim=1)
113 | printfn "Predicted labels: %A\n" predictedLabels
114 |
115 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Model.VAE.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace.Model
7 |
8 | open Furnace
9 | open Furnace.Util
10 |
11 | /// Variational auto-encoder base
12 | [<AbstractClass>]
13 | type VAEBase(zDim:int) =
14 | inherit Model()
15 |
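   |     // Reparameterization trick: z = mu + std * eps with eps ~ N(0, I) and std = exp(0.5 * logVar),
   |     // keeping the sampling differentiable with respect to mu and logVar.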
16 | let sampleLatent mu (logVar:Tensor) =
17 | let std = FurnaceImage.exp(0.5*logVar)
18 | let eps = FurnaceImage.randnLike(std)
19 | eps.mul(std).add(mu)
20 |
21 | abstract member encode: Tensor -> Tensor * Tensor
22 | abstract member decode: Tensor -> Tensor
23 |
24 | member m.encodeDecode(x:Tensor) =
25 | let mu, logVar = m.encode x
26 | let z = sampleLatent mu logVar
27 | m.decode z, mu, logVar
28 |
29 | override m.forward(x) =
30 | let x, _, _ = m.encodeDecode(x) in x
31 |
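   |     // Negative ELBO: summed reconstruction binary cross-entropy plus the KL divergence between the
   |     // diagonal Gaussian posterior N(mu, exp(logVar)) and the standard normal prior,
   |     // KL = -0.5 * sum(1 + logVar - mu^2 - exp(logVar)).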
32 | static member loss(xRecon:Tensor, x:Tensor, mu:Tensor, logVar:Tensor) =
33 | let bce = FurnaceImage.bceLoss(xRecon, x.viewAs(xRecon), reduction="sum")
34 | let kl = -0.5 * FurnaceImage.sum(1. + logVar - mu.pow(2.) - logVar.exp())
35 | bce + kl
36 |
37 | member m.loss(x, ?normalize:bool) =
38 | let normalize = defaultArg normalize true
39 | let xRecon, mu, logVar = m.encodeDecode x
40 | let loss = VAEBase.loss(xRecon, x, mu, logVar)
41 | if normalize then loss / x.shape[0] else loss
42 |
43 | member m.sample(?numSamples:int) =
44 | let numSamples = defaultArg numSamples 1
45 | FurnaceImage.randn([|numSamples; zDim|]) |> m.decode
46 |
47 |
48 | /// Variational auto-encoder
49 | type VAE(xShape:seq<int>, zDim:int, encoder:Model, decoder:Model) =
50 | inherit VAEBase(zDim)
51 | // TODO: check if encoder can accept input with xShape
52 | let encoderOutputDim = encoder.forward(FurnaceImage.zeros(xShape).unsqueeze(0)).flatten().nelement
53 | let prez = Linear(encoderOutputDim, zDim*2)
54 | let postz = Linear(zDim, encoderOutputDim)
55 | do
56 | // TODO: check if decoder can accept input with (-1, zDim)
57 | // let decodedExample = xExample --> encoder --> decoder
58 | // if decodedExample.shape <> xShape then failwithf "Expecting decoder's output shape (%A) to be xShape (%A)" decodedExample.shape xShape
59 | base.addModel(encoder,decoder,prez,postz)
60 |
61 | override _.encode x =
62 | let mulogvar = x --> encoder --> prez
63 | let h = mulogvar.split([zDim; zDim], dim=1)
64 | let mu, logVar = h[0], h[1]
65 | mu, logVar
66 |
67 | override _.decode z =
68 |         z --> postz --> decoder
69 |
70 | override _.ToString() = sprintf "VAE(%A, %A, %A, %A)" xShape zDim encoder decoder
71 |
72 |
73 | /// Variational auto-encoder with multilayer perceptron (MLP) encoder and decoder.
74 | type VAEMLP(xDim:int, zDim:int, ?hDims:seq<int>, ?nonlinearity:Tensor->Tensor, ?nonlinearityLast:Tensor->Tensor) =
75 | inherit VAEBase(zDim)
76 | let hDims = defaultArg hDims (let d = (xDim+zDim)/2 in seq [d; d]) |> Array.ofSeq
77 | let nonlinearity = defaultArg nonlinearity FurnaceImage.relu
78 | let nonlinearityLast = defaultArg nonlinearityLast FurnaceImage.sigmoid
79 | let dims =
80 | if hDims.Length = 0 then
81 | [|xDim; zDim|]
82 | else
83 | Array.append (Array.append [|xDim|] hDims) [|zDim|]
84 |
85 | let enc:Model[] = Array.append [|for i in 0..dims.Length-2 -> Linear(dims[i], dims[i+1])|] [|Linear(dims[dims.Length-2], dims[dims.Length-1])|]
86 | let dec:Model[] = Array.rev [|for i in 0..dims.Length-2 -> Linear(dims[i+1], dims[i])|]
87 | do
88 | base.addModel(enc)
89 | base.addModel(dec)
90 |
91 | override _.encode (x:Tensor) =
92 | let batchSize = x.shape[0]
93 | let mutable x = x.view([batchSize; xDim])
94 | for i in 0..enc.Length-3 do
95 | x <- nonlinearity <| enc[i].forward(x)
96 | let mu = enc[enc.Length-2].forward(x)
97 | let logVar = enc[enc.Length-1].forward(x)
98 | mu, logVar
99 |
100 | override _.decode z =
101 | let mutable h = z
102 | for i in 0..dec.Length-2 do
103 | h <- nonlinearity <| dec[i].forward(h)
104 | nonlinearityLast <| dec[dec.Length-1].forward(h)
105 |
106 | override _.ToString() = sprintf "VAEMLP(%A, %A, %A)" xDim hDims zDim
107 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Model.Conv.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace.Model
7 |
8 | open Furnace
9 |
10 | /// A model that applies a 1D convolution over an input signal composed of several input planes
11 | type Conv1d(inChannels:int, outChannels:int, kernelSize:int, ?stride:int, ?padding:int, ?dilation:int, ?bias:bool) =
12 | inherit Model()
13 | let biasv = defaultArg bias true
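   |     // Init bound k = 1/sqrt(fan_in) with fan_in = inChannels * kernelSize; the weight and bias
   |     // are initialized by Weight.uniform with this bound (the usual fan-in-scaled uniform init).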
14 | let k = 1./ sqrt (float (inChannels*kernelSize))
15 | let w = Parameter <| Weight.uniform([|outChannels; inChannels; kernelSize|], k)
16 | let b = Parameter <| if biasv then Weight.uniform([|outChannels|], k) else FurnaceImage.tensor([])
17 | do base.addParameter((w, "Conv1d-weight"), (b, "Conv1d-bias"))
18 |
19 | /// Get or set the weight parameter of the model
20 | member _.weight
21 | with get() = w.value
22 | and set v = w.value <- v
23 |
24 | /// Get or set the bias parameter of the model
25 | member _.bias
26 | with get() = b.value
27 | and set v = b.value <- v
28 |
29 | /// TBD
30 | override _.ToString() = sprintf "Conv1d(%A, %A, %A)" inChannels outChannels kernelSize
31 |
32 | /// TBD
33 | override _.forward(value) =
34 | let f = FurnaceImage.conv1d(value, w.value, ?stride=stride, ?padding=padding, ?dilation=dilation)
35 | if biasv then f + b.value.expand([value.shape[0]; outChannels]).view([value.shape[0]; outChannels; 1]) else f
36 |
37 | /// A model that applies a 2D convolution over an input signal composed of several input planes
38 | type Conv2d(inChannels:int, outChannels:int, ?kernelSize:int, ?stride:int, ?padding:int, ?dilation:int, ?kernelSizes:seq<int>, ?strides:seq<int>, ?paddings:seq<int>, ?dilations:seq<int>, ?bias:bool) =
39 | inherit Model()
40 | let kernelSizes = Shape.resolve2dKernelSizes kernelSize kernelSizes
41 | let biasv = defaultArg bias true
42 | let k = 1./ sqrt (float (inChannels*kernelSizes[0]*kernelSizes[1]))
43 | let w = Parameter <| Weight.uniform([|outChannels; inChannels; kernelSizes[0]; kernelSizes[1]|], k)
44 | let b = Parameter <| if biasv then Weight.uniform([|outChannels|], k) else FurnaceImage.tensor([])
45 | do base.addParameter((w, "Conv2d-weight"), (b, "Conv2d-bias"))
46 |
47 | /// Get or set the weight parameter of the model
48 | member _.weight
49 | with get() = w.value
50 | and set v = w.value <- v
51 |
52 | /// Get or set the bias parameter of the model
53 | member _.bias
54 | with get() = b.value
55 | and set v = b.value <- v
56 |
57 | /// TBD
58 | override _.ToString() = sprintf "Conv2d(%A, %A, %A)" inChannels outChannels kernelSizes
59 |
60 | /// TBD
61 | override _.forward(value) =
62 | let f = FurnaceImage.conv2d(value, w.value, ?stride=stride, ?strides=strides, ?padding=padding, ?paddings=paddings, ?dilation=dilation, ?dilations=dilations)
63 | if biasv then f + b.value.expand([value.shape[0]; outChannels]).view([value.shape[0]; outChannels; 1; 1]) else f
64 |
65 | /// A model that applies a 3D convolution over an input signal composed of several input planes
66 | type Conv3d(inChannels:int, outChannels:int, ?kernelSize:int, ?stride:int, ?padding:int, ?dilation:int, ?kernelSizes:seq<int>, ?strides:seq<int>, ?paddings:seq<int>, ?dilations:seq<int>, ?bias:bool) =
67 | inherit Model()
68 | let kernelSizes = Shape.resolve3dKernelSizes kernelSize kernelSizes
69 | let biasv = defaultArg bias true
70 | let k = 1./ sqrt (float (inChannels*kernelSizes[0]*kernelSizes[1]*kernelSizes[2]))
71 | let w = Parameter <| Weight.uniform([|outChannels; inChannels; kernelSizes[0]; kernelSizes[1]; kernelSizes[2]|], k)
72 | let b = Parameter <| if biasv then Weight.uniform([|outChannels|], k) else FurnaceImage.tensor([])
73 | do base.addParameter((w, "Conv3d-weight"), (b, "Conv3d-bias"))
74 |
75 | /// Get or set the weight parameter of the model
76 | member _.weight
77 | with get() = w.value
78 | and set v = w.value <- v
79 |
80 | /// Get or set the bias parameter of the model
81 | member _.bias
82 | with get() = b.value
83 | and set v = b.value <- v
84 |
85 | /// TBD
86 | override _.ToString() = sprintf "Conv3d(%A, %A, %A)" inChannels outChannels kernelSizes
87 |
88 | /// TBD
89 | override _.forward(value) =
90 | let f = FurnaceImage.conv3d(value, w.value, ?stride=stride, ?strides=strides, ?padding=padding, ?paddings=paddings, ?dilation=dilation, ?dilations=dilations)
91 | if biasv then f + b.value.expand([value.shape[0]; outChannels]).view([value.shape[0]; outChannels; 1; 1; 1]) else f
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestModel.Recurrent.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open NUnit.Framework
9 | open Furnace
10 | open Furnace.Compose
11 | open Furnace.Model
12 | open Furnace.Data
13 | open Furnace.Optim
14 |
15 |
16 | [<TestFixture>]
17 | type TestModelRecurrent () =
18 |
19 | [<Test>]
20 | member _.TestModelRNN () =
21 | let din = 8
22 | let dout = 10
23 | let seqLen = 4
24 | let batchSize = 16
25 | let numLayers = 3
26 | let numDirections = 1
27 |
28 | // Seq first
29 | let input = FurnaceImage.randn([seqLen; batchSize; din])
30 | let rnn = RNN(din, dout, numLayers=numLayers, bidirectional=false)
31 | let output = input --> rnn
32 | let outputShape = output.shape
33 | let outputShapeCorrect = [|seqLen; batchSize; dout|]
34 | Assert.AreEqual(outputShapeCorrect, outputShape)
35 |
36 | // Batch first
37 | let input = FurnaceImage.randn([batchSize; seqLen; din])
38 | let rnn = RNN(din, dout, numLayers=numLayers, batchFirst=true, bidirectional=false)
39 | let output = input --> rnn
40 | let outputShape = output.shape
41 | let outputShapeCorrect = [|batchSize; seqLen; dout|]
42 | Assert.AreEqual(outputShapeCorrect, outputShape)
43 |
44 | let hiddenShape = rnn.newHidden(batchSize).shape
45 | let hiddenShapeCorrect = [|numLayers*numDirections; batchSize; dout|]
46 | Assert.AreEqual(hiddenShapeCorrect, hiddenShape)
47 |
48 | let steps = 64
49 | let lr = 0.01
50 | let optimizer = Adam(rnn, lr=FurnaceImage.tensor(lr))
51 | let target = FurnaceImage.randn([batchSize; seqLen; dout])
52 | let output = input --> rnn
53 | let mutable loss = FurnaceImage.mseLoss(output, target)
54 | let loss0 = float loss
55 |
56 | for i in 1..steps do
57 | rnn.reverseDiff()
58 | let output = input --> rnn
59 | loss <- FurnaceImage.mseLoss(output, target)
60 | loss.reverse()
61 | optimizer.step()
62 | let lossFinal = float loss
63 |
64 | Assert.Less(lossFinal, loss0/2.)
65 |
66 | [<Test>]
67 | member _.TestModelLSTM () =
68 | let din = 8
69 | let dout = 10
70 | let seqLen = 4
71 | let batchSize = 16
72 | let numLayers = 2
73 | let numDirections = 1
74 |
75 | // Seq first
76 | let input = FurnaceImage.randn([seqLen; batchSize; din])
77 | let lstm = LSTM(din, dout, numLayers=numLayers, bidirectional=false)
78 | let output = input --> lstm
79 | let outputShape = output.shape
80 | let outputShapeCorrect = [|seqLen; batchSize; dout|]
81 | Assert.AreEqual(outputShapeCorrect, outputShape)
82 |
83 | // Batch first
84 | let input = FurnaceImage.randn([batchSize; seqLen; din])
85 | let lstm = LSTM(din, dout, numLayers=numLayers, batchFirst=true, bidirectional=false)
86 | let output = input --> lstm
87 | let outputShape = output.shape
88 | let outputShapeCorrect = [|batchSize; seqLen; dout|]
89 | Assert.AreEqual(outputShapeCorrect, outputShape)
90 |
91 | let hiddenShape = lstm.newHidden(batchSize).shape
92 | let hiddenShapeCorrect = [|numLayers*numDirections; batchSize; dout|]
93 | Assert.AreEqual(hiddenShapeCorrect, hiddenShape)
94 |
95 | let steps = 128
96 | let lr = 0.01
97 | let optimizer = Adam(lstm, lr=FurnaceImage.tensor(lr))
98 | let target = FurnaceImage.randn([batchSize; seqLen; dout])
99 | let output = input --> lstm
100 | let mutable loss = FurnaceImage.mseLoss(output, target)
101 | let loss0 = float loss
102 |
103 | for i in 1..steps do
104 | lstm.reverseDiff()
105 | let output = input --> lstm
106 | loss <- FurnaceImage.mseLoss(output, target)
107 | loss.reverse()
108 | optimizer.step()
109 | let lossFinal = float loss
110 |
111 | Assert.Less(lossFinal, loss0/2.)
112 |
113 | [<Test>]
114 | member _.TestModelRNNSaveLoadState () =
115 | let net = RNN(10, 10)
116 |
117 | let fileName = System.IO.Path.GetTempFileName()
118 | FurnaceImage.save(net.state, fileName) // Save pre-use
119 | let _ = FurnaceImage.randn([10; 10; 10]) --> net // Use
120 | net.state <- FurnaceImage.load(fileName) // Load after-use
121 |
122 | Assert.True(true)
123 |
124 | [<Test>]
125 | member _.TestModelLSTMSaveLoadState () =
126 | let net = LSTM(10, 10)
127 |
128 | let fileName = System.IO.Path.GetTempFileName()
129 | FurnaceImage.save(net.state, fileName) // Save pre-use
130 | let _ = FurnaceImage.randn([10; 10; 10]) --> net // Use
131 | net.state <- FurnaceImage.load(fileName) // Load after-use
132 |
133 | Assert.True(true)
134 |
--------------------------------------------------------------------------------
/src/Furnace.Core/Model.ConvTranspose.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace.Model
7 |
8 | open Furnace
9 |
10 |
11 | /// A model that applies a 1D transposed convolution operator over an input image composed of several input planes.
12 | type ConvTranspose1d(inChannels:int, outChannels:int, kernelSize:int, ?stride:int, ?padding:int, ?dilation:int, ?bias:bool) =
13 | inherit Model()
14 | let biasv = defaultArg bias true
15 | let k = 1./ sqrt (float (inChannels*kernelSize))
16 | let w = Parameter <| Weight.uniform([|inChannels; outChannels; kernelSize|], k)
17 | let b = Parameter <| if biasv then Weight.uniform([|outChannels|], k) else FurnaceImage.tensor([])
18 | do base.addParameter((w, "ConvTranspose1d-weight"), (b, "ConvTranspose1d-bias"))
19 |
20 | /// Get or set the weight parameter of the model
21 | member _.weight
22 | with get() = w.value
23 | and set v = w.value <- v
24 |
25 | /// Get or set the bias parameter of the model
26 | member _.bias
27 | with get() = b.value
28 | and set v = b.value <- v
29 |
30 | /// TBD
31 | override _.ToString() = sprintf "ConvTranspose1d(%A, %A, %A)" inChannels outChannels kernelSize
32 |
33 | /// TBD
34 | override _.forward(value) =
35 | let f = FurnaceImage.convTranspose1d(value, w.value, ?stride=stride, ?padding=padding, ?dilation=dilation)
36 | if biasv then f + b.value.expand([value.shape[0]; outChannels]).view([value.shape[0]; outChannels; 1]) else f
37 |
38 |
39 | /// A model that applies a 2D transposed convolution operator over an input image composed of several input planes.
40 | type ConvTranspose2d(inChannels:int, outChannels:int, ?kernelSize:int, ?stride:int, ?padding:int, ?dilation:int, ?kernelSizes:seq<int>, ?strides:seq<int>, ?paddings:seq<int>, ?dilations:seq<int>, ?bias:bool) =
41 | inherit Model()
42 | let kernelSizes = Shape.resolve2dKernelSizes kernelSize kernelSizes
43 | let biasv = defaultArg bias true
44 | let k = 1./ sqrt (float (inChannels*kernelSizes[0]*kernelSizes[1]))
45 | let w = Parameter <| Weight.uniform([|inChannels; outChannels; kernelSizes[0]; kernelSizes[1]|], k)
46 | let b = Parameter <| if biasv then Weight.uniform([|outChannels|], k) else FurnaceImage.tensor([])
47 | do base.addParameter((w, "ConvTranspose2d-weight"), (b, "ConvTranspose2d-bias"))
48 |
49 | /// Get or set the weight parameter of the model
50 | member _.weight
51 | with get() = w.value
52 | and set v = w.value <- v
53 |
54 | /// Get or set the bias parameter of the model
55 | member _.bias
56 | with get() = b.value
57 | and set v = b.value <- v
58 |
59 | /// TBD
60 | override _.ToString() = sprintf "ConvTranspose2d(%A, %A, %A)" inChannels outChannels kernelSizes
61 |
62 | /// TBD
63 | override _.forward(value) =
64 | let f = FurnaceImage.convTranspose2d(value, w.value, ?stride=stride, ?strides=strides, ?padding=padding, ?paddings=paddings, ?dilation=dilation, ?dilations=dilations)
65 | if biasv then f + b.value.expand([value.shape[0]; outChannels]).view([value.shape[0]; outChannels; 1; 1]) else f
66 |
67 | /// A model that applies a 3D transposed convolution operator over an input image composed of several input planes.
68 | type ConvTranspose3d(inChannels:int, outChannels:int, ?kernelSize:int, ?stride:int, ?padding:int, ?dilation:int, ?kernelSizes:seq<int>, ?strides:seq<int>, ?paddings:seq<int>, ?dilations:seq<int>, ?bias:bool) =
69 | inherit Model()
70 | let kernelSizes = Shape.resolve3dKernelSizes kernelSize kernelSizes
71 | let biasv = defaultArg bias true
72 | let k = 1./ sqrt (float (inChannels*kernelSizes[0]*kernelSizes[1]*kernelSizes[2]))
73 | let w = Parameter <| Weight.uniform([|inChannels; outChannels; kernelSizes[0]; kernelSizes[1]; kernelSizes[2]|], k)
74 | let b = Parameter <| if biasv then Weight.uniform([|outChannels|], k) else FurnaceImage.tensor([])
75 | do base.addParameter((w, "ConvTranspose3d-weight"), (b, "ConvTranspose3d-bias"))
76 |
77 | /// Get or set the weight parameter of the model
78 | member _.weight
79 | with get() = w.value
80 | and set v = w.value <- v
81 |
82 | /// Get or set the bias parameter of the model
83 | member _.bias
84 | with get() = b.value
85 | and set v = b.value <- v
86 |
87 | /// TBD
88 | override _.ToString() = sprintf "ConvTranspose3d(%A, %A, %A)" inChannels outChannels kernelSizes
89 |
90 | /// TBD
91 | override _.forward(value) =
92 | let f = FurnaceImage.convTranspose3d(value, w.value, ?stride=stride, ?strides=strides, ?padding=padding, ?paddings=paddings, ?dilation=dilation, ?dilations=dilations)
93 | if biasv then f + b.value.expand([value.shape[0]; outChannels]).view([value.shape[0]; outChannels; 1; 1; 1]) else f
--------------------------------------------------------------------------------
/DEVGUIDE.md:
--------------------------------------------------------------------------------
1 | # Furnace - Development Guide
2 |
3 | You can clone this repository to your machine as follows:
4 | ```
5 | git clone --branch dev https://github.com/Furnace/Furnace.git
6 | cd Furnace
7 | ```
8 |
9 | ## Run tests
10 |
11 | Required:
12 | - Install [.NET Core SDK](https://dotnet.microsoft.com/download) for your system
13 |
14 | Use the following command in the root directory of this repository:
15 | ```
16 | dotnet test
17 | ```
18 |
19 | ## Build Furnace in Docker
20 |
21 | Required:
22 | - Install [Docker](https://hub.docker.com/search/?type=edition&offering=community) for your system
23 |
24 | Build a Docker image called `Furnace`. This will work without any local .NET Core installation and build Furnace inside the image.
25 | ```
26 | docker build -t Furnace .
27 | ```
28 |
29 | Use the following to instantiate a Docker container from the `Furnace` image and run the tests inside:
30 | ```
31 | docker run --rm Furnace dotnet test
32 | ```
33 |
34 | ## Building against locally built TorchSharp packages
35 |
36 | To add features you may have to extend TorchSharp to make extra features of LibTorch available.
37 |
38 | The build is set up to look for a parallel build of TorchSharp, e.g.
39 |
40 | C:\GitHub\dsyme\Furnace
41 | C:\GitHub\dsyme\TorchSharp
42 |
43 | To build, test and pack TorchSharp in that repo do this:
44 |
45 | .\build build
46 | .\build test
47 | .\build pack
48 |
49 | You will see something like this
50 |
51 | Successfully created package 'C:\GitHub\dsyme\TorchSharp\bin/packages/Debug/TorchSharp.0.3.0-local-Debug-20200520.nupkg'.
52 | Successfully created package 'C:\GitHub\dsyme\TorchSharp\bin/packages/Debug/LibTorch.Redist.0.3.0-local-Debug-20200520.nupkg'.
53 |
54 | with warning:
55 |
56 | warning : Packages will be incomplete and unusable on other platforms...
57 |
58 | To consume these packages in Furnace, adjust the `TorchSharpVersion` property in `Directory.Build.props`.
59 |
60 | When rebuilding TorchSharp you will need to clear your package cache to pick up the new NuGet package with the same version id, e.g.
61 |
62 | rmdir /q /s %USERPROFILE%\.nuget\packages\torchsharp
63 | rmdir /q /s %USERPROFILE%\.nuget\packages\LibTorch.Redist
64 | dotnet restore
65 |
66 | The LibTorch packages are quite large and you may need to watch disk space.
67 |
68 | ## The Reference Backend
69 |
70 | The "Reference" backend defines the semantics we expect of the Torch backend.
71 |
72 | Sometimes configurations of Torch expose small differences in semantics (e.g. when using CUDA, or functionality not supported for integer tensors). We generally seek to paper
73 | over those cracks by working around the problems in the Torch backend.
74 |
75 | ## Developing and Testing on GPU
76 |
77 | By default, in-branch testing is done only on the CPU. To enable testing on GPU/CUDA you must:
78 |
79 | 1. Make sure you have a device eligible for CUDA 11.1 and all device drivers installed (e.g. install the appropriate NVIDIA CUDA SDK)
80 |
81 | 2. Use `dotnet test /p:Furnace_TESTGPU=true`
82 |
83 | 3. Verify that `FurnaceImage.isCudaEnabled(Device.GPU)` returns true and that GPU testing is enabled in `TestUtils.fs` (see the sketch below).
84 |
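   | As a minimal sketch (assuming the `FurnaceImage.isCudaEnabled` check from step 3 exists with that signature; adjust to whatever `TestUtils.fs` actually uses), a script or test can guard GPU-only work like this:
   |
   |     open Furnace
   |
   |     let runGpuChecks () =
   |         if FurnaceImage.isCudaEnabled(Device.GPU) then
   |             FurnaceImage.config(device=Device.GPU, backend=Backend.Torch)
   |             // ... GPU-specific assertions go here ...
   |         else
   |             printfn "CUDA not available; skipping GPU checks"
   |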
85 | ## Micro Performance Benchmarking
86 |
87 | Python numbers must be collected in a separate run; they are currently injected back into the source code (ugh)
88 | to get the figures into one report. There are better ways to do this.
89 |
90 | To update the Python benchmarks on your machine (note: this writes results back into the source code):
91 |
92 | dotnet run --project tests\Furnace.Benchmarks.Python\Furnace.Benchmarks.Python.fsproj -c Release --filter "*"
93 |
94 | This takes a while to run.
95 |
96 | To run benchmarks:
97 |
98 | dotnet run --project tests\Furnace.Benchmarks\Furnace.Benchmarks.fsproj -c Release --filter "*"
99 |
100 | To filter the benchmark selection and more, see `--help`. For example:
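   |
   |     dotnet run --project tests\Furnace.Benchmarks\Furnace.Benchmarks.fsproj -c Release --filter "*addScalar*"
   |
   | The glob is a BenchmarkDotNet name filter; `addScalar` is assumed here because those benchmarks appear in `results.csv`, but any pattern over benchmark names works.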
101 |
102 | ## TorchSharp backend on macOS arm64
103 |
104 | In order to use the TorchSharp backend on the macOS arm64 platform:
105 |
106 | * you need to build TorchSharp from this PR: https://github.com/dotnet/TorchSharp/pull/903
107 | * you need to adjust your setup as described in the "Building against locally built TorchSharp packages" section of this document
108 |
109 | At the time of writing, there is one failing test (327/328 passing):
110 |
111 | ```
112 | The active test run was aborted. Reason: Test host process crashed : libc++abi: terminating due to uncaught exception of type c10::Error: "addmm_impl_cpu_" not implemented for 'Half'
113 | Exception raised from operator() at /tmp/pytorch-20230625-11028-1u2efwj/aten/src/ATen/native/LinearAlgebra.cpp:1433 (most recent call first):
114 | frame #0: c10::detail::torchCheckFail(char const*, char const*, unsigned int, std::__1::basic_string, std::__1::allocator> const&) + 92 (0x104c861c0 in libc10.dylib)
115 | frame #1: at::native::addmm_impl_cpu_(at::Tensor&, at::Tensor const&, at::Tensor, at::Tensor, c10::Scalar const&, c10::Scalar const&) + 4484 (0x3003161f4 in libtorch_cpu.dylib)
116 | frame #2: at::native::structured_mm_out_cpu::impl(at::Tensor const&, at::Tensor const&, at::Tensor const&) + 184 (0x300316704 in libtorch_cpu.dylib)
117 | frame #3: at::(anonymous namespace)::wrapper_CPU_mm(at::Tensor const&, at::Tensor const&) + 96 (0x300d0c7f8 in libtorch_cpu.dylib)
118 | ...
119 | ```
120 |
--------------------------------------------------------------------------------
/examples/vae.fsx:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S dotnet fsi
2 |
3 | #I "../tests/Furnace.Tests/bin/Debug/net6.0"
4 | #r "Furnace.Core.dll"
5 | #r "Furnace.Data.dll"
6 | #r "Furnace.Backends.Torch.dll"
7 |
8 | // Libtorch binaries
9 | // Option A: you can use a platform-specific nuget package
10 | #r "nuget: TorchSharp-cpu, 0.96.5"
11 | // #r "nuget: TorchSharp-cuda-linux, 0.96.5"
12 | // #r "nuget: TorchSharp-cuda-windows, 0.96.5"
13 | // Option B: you can use a local libtorch installation
14 | // System.Runtime.InteropServices.NativeLibrary.Load("/home/gunes/anaconda3/lib/python3.8/site-packages/torch/lib/libtorch.so")
15 |
16 |
17 | open Furnace
18 | open Furnace.Model
19 | open Furnace.Optim
20 | open Furnace.Data
21 |
22 |
23 | type VAE(xDim:int, zDim:int, ?hDims:seq<int>, ?nonlinearity:Tensor->Tensor, ?nonlinearityLast:Tensor->Tensor) =
24 | inherit Model()
25 | let hDims = defaultArg hDims (let d = (xDim+zDim)/2 in seq [d; d]) |> Array.ofSeq
26 | let nonlinearity = defaultArg nonlinearity FurnaceImage.relu
27 | let nonlinearityLast = defaultArg nonlinearityLast FurnaceImage.sigmoid
28 | let dims =
29 | if hDims.Length = 0 then
30 | [|xDim; zDim|]
31 | else
32 | Array.append (Array.append [|xDim|] hDims) [|zDim|]
33 |
34 | let enc:Model[] = Array.append [|for i in 0..dims.Length-2 -> Linear(dims[i], dims[i+1])|] [|Linear(dims[dims.Length-2], dims[dims.Length-1])|]
35 | let dec:Model[] = Array.rev [|for i in 0..dims.Length-2 -> Linear(dims[i+1], dims[i])|]
36 | do
37 | base.addModel(enc)
38 | base.addModel(dec)
39 |
40 | let encode x =
41 | let mutable x = x
42 | for i in 0..enc.Length-3 do
43 | x <- nonlinearity <| enc[i].forward(x)
44 | let mu = enc[enc.Length-2].forward(x)
45 | let logVar = enc[enc.Length-1].forward(x)
46 | mu, logVar
47 |
48 | let sampleLatent mu (logVar:Tensor) =
49 | let std = FurnaceImage.exp(0.5*logVar)
50 | let eps = FurnaceImage.randnLike(std)
51 | eps.mul(std).add(mu)
52 |
53 | let decode z =
54 | let mutable h = z
55 | for i in 0..dec.Length-2 do
56 | h <- nonlinearity <| dec[i].forward(h)
57 | nonlinearityLast <| dec[dec.Length-1].forward(h)
58 |
59 | member _.encodeDecode(x:Tensor) =
60 | let mu, logVar = encode (x.view([-1; xDim]))
61 | let z = sampleLatent mu logVar
62 | decode z, mu, logVar
63 |
64 | override m.forward(x) =
65 | let x, _, _ = m.encodeDecode(x) in x
66 |
67 | override _.ToString() = sprintf "VAE(%A, %A, %A)" xDim hDims zDim
68 |
69 | static member loss(xRecon:Tensor, x:Tensor, mu:Tensor, logVar:Tensor) =
70 | let bce = FurnaceImage.bceLoss(xRecon, x.viewAs(xRecon), reduction="sum")
71 | let kl = -0.5 * FurnaceImage.sum(1. + logVar - mu.pow(2.) - logVar.exp())
72 | bce + kl
73 |
74 | member m.loss(x, ?normalize:bool) =
75 | let normalize = defaultArg normalize true
76 | let xRecon, mu, logVar = m.encodeDecode x
77 | let loss = VAE.loss(xRecon, x, mu, logVar)
78 | if normalize then loss / x.shape[0] else loss
79 |
80 | member _.sample(?numSamples:int) =
81 | let numSamples = defaultArg numSamples 1
82 | FurnaceImage.randn([|numSamples; zDim|]) |> decode
83 |
84 |
85 | FurnaceImage.config(backend=Backend.Torch, device=Device.CPU)
86 | FurnaceImage.seed(0)
87 |
88 | let epochs = 2
89 | let batchSize = 32
90 | let validInterval = 250
91 | let numSamples = 32
92 |
93 | let urls = ["https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz";
94 | "https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz";
95 | "https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz";
96 | "https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz"]
97 |
98 | let trainSet = MNIST("../data", urls=urls, train=true, transform=id)
99 | let trainLoader = trainSet.loader(batchSize=batchSize, shuffle=true)
100 | let validSet = MNIST("../data", urls=urls, train=false, transform=id)
101 | let validLoader = validSet.loader(batchSize=batchSize, shuffle=false)
102 |
103 | let model = VAE(28*28, 20, [400])
104 | printfn "Model\n%s" (model.summary())
105 |
106 | let optimizer = Adam(model, lr=FurnaceImage.tensor(0.001))
107 |
108 | for epoch = 1 to epochs do
109 | for i, x, _ in trainLoader.epoch() do
110 | model.reverseDiff()
111 | let l = model.loss(x)
112 | l.reverse()
113 | optimizer.step()
114 | printfn "Epoch: %A/%A minibatch: %A/%A loss: %A" epoch epochs i trainLoader.length (float(l))
115 |
116 | if i % validInterval = 0 then
117 | let mutable validLoss = FurnaceImage.zero()
118 | for _, x, _ in validLoader.epoch() do
119 | validLoss <- validLoss + model.loss(x, normalize=false)
120 | validLoss <- validLoss / validSet.length
121 | printfn "Validation loss: %A" (float validLoss)
122 | let fileName = sprintf "vae_samples_epoch_%A_minibatch_%A.png" epoch i
123 | printfn "Saving %A samples to %A" numSamples fileName
124 | let samples = model.sample(numSamples).view([-1; 1; 28; 28])
125 | samples.saveImage(fileName)
126 |
127 |
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestOptim.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open NUnit.Framework
9 | open Furnace
10 | open Furnace.Compose
11 | open Furnace.Model
12 | open Furnace.Data
13 | open Furnace.Optim
14 |
15 | [<TestFixture>]
16 | type TestOptim () =
17 | do FurnaceImage.seed(123)
18 | let n, din, dout = 64, 100, 10
19 | let inputs = FurnaceImage.randn([n; din])
20 | let targets = FurnaceImage.randn([n; dout])
21 | let dataset = TensorDataset(inputs, targets)
22 | let dataloader = dataset.loader(8, shuffle=true)
23 |
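   |     // Rosenbrock function, global minimum f(1, 1) = 0; the optimizer tests below check
   |     // convergence towards that point.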
24 | let rosenbrock (x:Tensor) =
25 | let x, y = x[0], x[1]
26 | (1. - x)**2 + 100. * (y - x**2)**2
27 |
28 | [<Test>]
29 | member _.TestOptimizerStep () =
30 | let net = Linear(din, dout)
31 | let optimizer = SGD(net)
32 | let step0 = optimizer.stateStep
33 | let step0Correct = 0
34 | net.reverseDiff()
35 | let y = net.forward(inputs)
36 | let loss = FurnaceImage.mseLoss(y, targets)
37 | loss.reverse()
38 | optimizer.step()
39 | let step1 = optimizer.stateStep
40 | let step1Correct = 1
41 | Assert.AreEqual(step0Correct, step0)
42 | Assert.AreEqual(step1Correct, step1)
43 |
44 | [<Test>]
45 | member _.TestOptimModelSGDStyle1 () =
46 | // Trains a linear regressor
47 | let net = Linear(din, dout)
48 | let lr, mom, epochs = 1e-2, 0.9, 250
49 | let optimizer = SGD(net, lr=FurnaceImage.tensor(lr), momentum=FurnaceImage.tensor(mom), nesterov=true)
50 | for _ in 0..epochs do
51 | for _, inputs, targets in dataloader.epoch() do
52 | net.reverseDiff()
53 | let y = net.forward(inputs)
54 | let loss = FurnaceImage.mseLoss(y, targets)
55 | loss.reverse()
56 | optimizer.step()
57 | let y = net.forward inputs
58 | Assert.True(targets.allclose(y, 0.1, 0.1))
59 |
60 | [<Test>]
61 | member _.TestOptimModelSGDStyle2 () =
62 | // Trains a linear regressor
63 | let net = Linear(din, dout)
64 | let lr, mom, epochs = 1e-2, 0.9, 250
65 | optim.sgd(net, dataloader, FurnaceImage.mseLoss, lr=FurnaceImage.tensor(lr), momentum=FurnaceImage.tensor(mom), nesterov=true, threshold=1e-4, epochs=epochs)
66 | let y = net.forward inputs
67 | Assert.True(targets.allclose(y, 0.1, 0.1))
68 |
69 | [<Test>]
70 | member _.TestOptimModelSGDStyle3 () =
71 | // Trains a linear regressor
72 | let net = Linear(din, dout)
73 | let lr, epochs = 1e-1, 250
74 | for _ in 0..epochs do
75 | for _, inputs, targets in dataloader.epoch() do
76 | let loss p = net.asFunction p inputs |> FurnaceImage.mseLoss targets
77 | let g = FurnaceImage.grad loss net.parametersVector
78 | net.parametersVector <- net.parametersVector - lr * g
79 |
80 | let y = net.forward inputs
81 | Assert.True(targets.allclose(y, 0.1, 0.1))
82 |
83 | [<Test>]
84 | member _.TestOptimModelAdamStyle1 () =
85 | // Trains a linear regressor
86 | let net = Linear(din, dout)
87 | let lr, epochs = 1e-2, 50
88 | let optimizer = Adam(net, lr=FurnaceImage.tensor(lr))
89 | for _ in 0..epochs do
90 | for _, inputs, targets in dataloader.epoch() do
91 | net.reverseDiff()
92 | let y = net.forward(inputs)
93 | let loss = FurnaceImage.mseLoss(y, targets)
94 | loss.reverse()
95 | optimizer.step()
96 | // printfn "%A" (float loss)
97 | let y = net.forward inputs
98 | Assert.True(targets.allclose(y, 0.1, 0.1))
99 |
100 | [<Test>]
101 | member _.TestOptimModelAdamStyle2 () =
102 | // Trains a linear regressor
103 | let net = Linear(din, dout)
104 | let lr, epochs = 1e-2, 50
105 | optim.adam(net, dataloader, FurnaceImage.mseLoss, lr=FurnaceImage.tensor(lr), threshold=1e-4, epochs=epochs)
106 | let y = net.forward inputs
107 | Assert.True(targets.allclose(y, 0.1, 0.1))
108 |
109 | [<Test>]
110 | member _.TestOptimFunSGD () =
111 | let x0 = FurnaceImage.tensor([1.5, 1.5])
112 | let lr, momentum, iters, threshold = 1e-3, 0.5, 1000, 1e-3
113 | let fx, x = optim.sgd(rosenbrock, x0, lr=FurnaceImage.tensor(lr), momentum=FurnaceImage.tensor(momentum), nesterov=true, iters=iters, threshold=threshold)
114 | let fxOpt = FurnaceImage.tensor(0.)
115 | let xOpt = FurnaceImage.tensor([1., 1.])
116 | Assert.True(fxOpt.allclose(fx, 0.1, 0.1))
117 | Assert.True(xOpt.allclose(x, 0.1, 0.1))
118 |
119 | [<Test>]
120 | member _.TestOptimFunAdam () =
121 | let x0 = FurnaceImage.tensor([1.5, 1.5])
122 | let lr, iters, threshold = 1., 1000, 1e-3
123 | let fx, x = optim.adam(rosenbrock, x0, lr=FurnaceImage.tensor(lr), iters=iters, threshold=threshold)
124 | let fxOpt = FurnaceImage.tensor(0.)
125 | let xOpt = FurnaceImage.tensor([1., 1.])
126 | Assert.True(fxOpt.allclose(fx, 0.1, 0.1))
127 | Assert.True(xOpt.allclose(x, 0.1, 0.1))
--------------------------------------------------------------------------------
/tests/Furnace.Tests/TestPlotHelpers.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open System
9 | open System.IO
10 | open NUnit.Framework
11 | open Furnace
12 | open Furnace.Util
13 |
14 | [<TestFixture>]
15 | type TestPlotHelpers() =
16 |
17 | [<Test>]
18 | member _.TestHelpersPrintValFloat32() =
19 | // Test printVal with Float32 values
20 | let result1 = printVal (3.14f :> scalar)
21 | let result2 = printVal (Single.NaN :> scalar)
22 | let result3 = printVal (Single.PositiveInfinity :> scalar)
23 |
24 | Assert.IsTrue(result1.Contains("3.14"))
25 | Assert.AreEqual("float('nan')", result2)
26 | Assert.AreEqual("float('inf')", result3)
27 |
28 | [<Test>]
29 | member _.TestHelpersPrintValFloat64() =
30 | // Test printVal with Float64 values
31 | let result1 = printVal (2.718 :> scalar)
32 | let result2 = printVal (Double.NaN :> scalar)
33 | let result3 = printVal (Double.PositiveInfinity :> scalar)
34 |
35 | Assert.IsTrue(result1.Contains("2.718"))
36 | Assert.AreEqual("float('nan')", result2)
37 | Assert.AreEqual("float('inf')", result3)
38 |
39 | [<Test>]
40 | member _.TestHelpersPrintValIntegers() =
41 | // Test printVal with various integer types
42 | let result1 = printVal (42 :> scalar) // Int32
43 | let result2 = printVal (42L :> scalar) // Int64
44 | let result3 = printVal (42s :> scalar) // Int16
45 | let result4 = printVal (42uy :> scalar) // Byte
46 | let result5 = printVal (42y :> scalar) // SByte
47 |
48 | Assert.AreEqual("42", result1)
49 | Assert.AreEqual("42", result2)
50 | Assert.AreEqual("42", result3)
51 | Assert.AreEqual("42", result4)
52 | Assert.AreEqual("42", result5)
53 |
54 | [<Test>]
55 | member _.TestHelpersPrintValBoolean() =
56 | // Test printVal with Boolean values
57 | let resultTrue = printVal (true :> scalar)
58 | let resultFalse = printVal (false :> scalar)
59 |
60 | Assert.AreEqual("True", resultTrue)
61 | Assert.AreEqual("False", resultFalse)
62 |
63 | [<Test>]
64 | member _.TestHelpersToPythonBool() =
65 | // Test toPython with boolean values
66 | let resultTrue = toPython true
67 | let resultFalse = toPython false
68 |
69 | Assert.AreEqual("True", resultTrue)
70 | Assert.AreEqual("False", resultFalse)
71 |
72 | [<Test>]
73 | member _.TestHelpersToPythonScalarTensor() =
74 | // Test toPython with scalar tensor
75 | let t = FurnaceImage.scalar(42.0f)
76 | let result = toPython t
77 |
78 | Assert.IsTrue(result.Contains("42"))
79 |
80 | [<Test>]
81 | member _.TestHelpersToPython1DTensor() =
82 | // Test toPython with 1D tensor
83 | let t = FurnaceImage.tensor([1.0f; 2.0f; 3.0f])
84 | let result = toPython t
85 |
86 | // Should be in Python list format: [1.000000, 2.000000, 3.000000]
87 | Assert.IsTrue(result.StartsWith("["))
88 | Assert.IsTrue(result.EndsWith("]"))
89 | Assert.IsTrue(result.Contains("1."))
90 | Assert.IsTrue(result.Contains("2."))
91 | Assert.IsTrue(result.Contains("3."))
92 |
93 | [<Test>]
94 | member _.TestHelpersToPython2DTensor() =
95 | // Test toPython with 2D tensor
96 | let t = FurnaceImage.tensor([[1.0f; 2.0f]; [3.0f; 4.0f]])
97 | let result = toPython t
98 |
99 | // Should be nested list format: [[1., 2.], [3., 4.]]
100 | Assert.IsTrue(result.StartsWith("["))
101 | Assert.IsTrue(result.EndsWith("]"))
102 | // Should contain at least two opening brackets for nested structure
103 | let openBrackets = result.ToCharArray() |> Array.filter (fun c -> c = '[') |> Array.length
104 | Assert.GreaterOrEqual(openBrackets, 2)
105 |
106 | [<Test>]
107 | member _.TestHelpersToPythonOtherTypes() =
108 | // Test toPython with other types (should fall back to ToString)
109 | let result = toPython "hello world"
110 | Assert.AreEqual("hello world", result)
111 |
112 | let result2 = toPython 123
113 | Assert.AreEqual("123", result2)
114 |
115 | [<Test>]
116 | member _.TestHelpersRunScriptSuccess() =
117 | // Test runScript with successful execution (echo command)
118 | let tempDir = Path.GetTempPath()
119 | let lines = [| "echo 'test'" |]
120 |
121 | // This should not throw an exception
122 | Assert.DoesNotThrow(fun () -> runScript "echo" lines 1000)
123 |
124 | [<Test>]
125 | member _.TestHelpersRunScriptTimeout() =
126 | // Test runScript with timeout (should handle gracefully)
127 | let lines = [| "sleep 5" |] // Command that takes longer than timeout
128 |
129 | // This should not throw an exception, just print warning
130 | Assert.DoesNotThrow(fun () -> runScript "sleep" lines 100)
131 |
132 | [<Test>]
133 | member _.TestHelpersRunScriptInvalidExecutable() =
134 | // Test runScript with invalid executable (should handle gracefully)
135 | let lines = [| "test" |]
136 |
137 | // This should not throw an exception, just print warning
138 | Assert.DoesNotThrow(fun () -> runScript "nonexistent_executable_12345" lines 1000)
--------------------------------------------------------------------------------
/src/Furnace.Data/Image.fs:
--------------------------------------------------------------------------------
1 | namespace Furnace
2 |
3 | open SkiaSharp
4 |
5 | [<AutoOpen>]
6 | module ImageUtil =
7 | /// Saves the given pixel array to a file and optionally resizes it in the process. Supports .png format.
8 |     let saveImage (pixels: float32[,,]) (fileName: string) (resize: option<int*int>) : unit =
9 | let c, h, w = pixels.GetLength 0, pixels.GetLength 1, pixels.GetLength 2
10 |
11 | use bitmap = new SKBitmap(w, h, SKColorType.Bgra8888, SKAlphaType.Premul)
12 | use surface = SKSurface.Create(bitmap.Info)
13 | use canvas = surface.Canvas
14 |
15 | for y in 0 .. h - 1 do
16 | for x in 0 .. w - 1 do
17 | let r, g, b =
18 | if c = 1 then
19 | let gray = int (pixels.[0, y, x] * 255.0f)
20 | gray, gray, gray
21 | else
22 | let r = int (pixels.[0, y, x] * 255.0f)
23 | let g = int (pixels.[1, y, x] * 255.0f)
24 | let b = int (pixels.[2, y, x] * 255.0f)
25 | r, g, b
26 | bitmap.SetPixel(x, y, SKColor(byte r, byte g, byte b))
27 |
28 | use resized =
29 | match resize with
30 | | Some (width, height) ->
31 | let info = new SKImageInfo(width, height)
32 | use resizedBitmap = bitmap.Resize(info, SKFilterQuality.High)
33 | resizedBitmap.Encode(SKEncodedImageFormat.Png, 100)
34 | | None ->
35 | bitmap.Encode(SKEncodedImageFormat.Png, 100)
36 |
37 | use stream = System.IO.File.OpenWrite(fileName)
38 | resized.SaveTo(stream)
39 |
40 | /// Loads a pixel array from a file and optionally resizes it in the process.
41 |     let loadImage (fileName: string) (resize: option<int*int>) : float32[,,] =
42 | use stream = System.IO.File.OpenRead(fileName)
43 | use codec = SKCodec.Create(stream)
44 | use bitmap = SKBitmap.Decode(codec)
45 |
46 | let bitmap =
47 | match resize with
48 | | Some (width, height) ->
49 | let info = new SKImageInfo(width, height)
50 | bitmap.Resize(info, SKFilterQuality.High)
51 | | None -> bitmap
52 |
53 | let w, h = bitmap.Width, bitmap.Height
54 | let pixels = Array3D.create 3 h w 0.0f
55 |
56 | for y in 0 .. h - 1 do
57 | for x in 0 .. w - 1 do
58 | let color = bitmap.GetPixel(x, y)
59 | pixels.[0, y, x] <- float32 color.Red / 255.0f
60 | pixels.[1, y, x] <- float32 color.Green / 255.0f
61 | pixels.[2, y, x] <- float32 color.Blue / 255.0f
62 |
63 | pixels
64 |
65 |
66 | [<AutoOpen>]
67 | module ImageExtensions =
68 | type Tensor with
69 | /// Save tensor to an image file using png or jpg format
70 | member t.saveImage(fileName:string, ?pixelMin:double, ?pixelMax:double, ?normalize:bool, ?resize:int*int, ?gridCols:int) =
71 | let pixels:Tensor = t.move(Device.CPU).toImage(?pixelMin=pixelMin, ?pixelMax=pixelMax, ?normalize=normalize, ?gridCols=gridCols)
72 | saveImage (pixels.float32().toArray() :?> float32[,,]) fileName resize
73 |
74 | /// Load an image file and return it as a tensor
75 | static member loadImage(fileName:string, ?normalize:bool, ?resize:int*int, ?device: Device, ?dtype: Dtype, ?backend: Backend) =
76 | let normalize = defaultArg normalize false
77 | let pixels = loadImage fileName resize
78 | let pixels:Tensor = Tensor.create(pixels, ?device=device, ?dtype=dtype, ?backend=backend)
79 | if normalize then pixels.normalize() else pixels
80 |
81 |
82 | type FurnaceImage with
83 |         /// <summary>Load an image file as a tensor.</summary>
84 |         /// <param name="fileName">The file name of the image to load.</param>
85 |         /// <param name="normalize">If True, shift the image to the range (0, 1).</param>
86 |         /// <param name="resize">An optional new size for the image.</param>
87 |         /// <param name="device">The desired device of returned tensor. Default: if None, uses Device.Default.</param>
88 |         /// <param name="dtype">The desired element type of returned tensor. Default: if None, uses Dtype.Default.</param>
89 |         /// <param name="backend">The desired backend of returned tensor. Default: if None, uses Backend.Default.</param>
90 | static member loadImage(fileName:string, ?normalize:bool, ?resize:int*int, ?device: Device, ?dtype: Dtype, ?backend: Backend) =
91 | Tensor.loadImage(fileName=fileName, ?normalize=normalize, ?resize=resize, ?device=device, ?dtype=dtype, ?backend=backend)
92 |
93 |         /// <summary>Save a given Tensor into an image file.</summary>
94 |         /// <remarks>If the input tensor has 4 dimensions, then make a single image grid.</remarks>
95 |         /// <param name="input">The input tensor.</param>
96 |         /// <param name="fileName">The name of the file to save to.</param>
97 |         /// <param name="pixelMin">The minimum pixel value.</param>
98 |         /// <param name="pixelMax">The maximum pixel value.</param>
99 |         /// <param name="normalize">If True, shift the image to the range (0, 1), by the min and max values specified by range.</param>
100 |         /// <param name="resize">An optional new size for the image.</param>
101 |         /// <param name="gridCols">Number of columns of images in the grid.</param>
102 | static member saveImage(input:Tensor, fileName:string, ?pixelMin:double, ?pixelMax:double, ?normalize:bool, ?resize:int*int, ?gridCols:int) =
103 | input.saveImage(fileName=fileName, ?pixelMin=pixelMin, ?pixelMax=pixelMax, ?normalize=normalize, ?resize=resize, ?gridCols=gridCols)
104 |
--------------------------------------------------------------------------------
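A minimal usage sketch of the image extensions defined above, assuming a script that references the Furnace packages and opens the namespaces exposing Tensor, FurnaceImage and these extensions; the file names are placeholders.

    open Furnace
    open Furnace.Data

    // Load an image file as a CHW float32 tensor, resized and shifted to the range (0, 1).
    // "input.png" and "thumb.png" are hypothetical file names used only for illustration.
    let img : Tensor = FurnaceImage.loadImage("input.png", normalize=true, resize=(64, 64))

    // Write the tensor back out as an image file; a 3d tensor is saved as a single image.
    FurnaceImage.saveImage(img, "thumb.png", normalize=true)
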
/src/Furnace.Core/Dtype.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Furnace
7 |
8 | /// Represents a storage type for elements of a tensor
9 | [<Struct>]
10 | type Dtype =
11 | /// Store elements as 16-bit floating point numbers (bfloat16 variation)
12 | | []
13 | BFloat16
14 | /// Store elements as 16-bit floating point numbers
15 | | []
16 | Float16
17 | /// Store elements as 32-bit floating point numbers
18 | | Float32
19 | /// Store elements as 64-bit floating point numbers
20 | | Float64
21 | /// Store elements as 8-bit integers
22 | | Int8
23 | /// Store elements as 8-bit unsigned integers
24 | | Byte
25 | /// Store elements as 16-bit signed integers
26 | | Int16
27 | /// Store elements as 32-bit signed integers
28 | | Int32
29 | /// Store elements as 64-bit signed integers
30 | | Int64
31 | /// Store elements as booleans
32 | | Bool
33 |
34 | member internal x.Name =
35 | match x with
36 | | BFloat16 -> "BFloat16"
37 | | Float16 -> "Float16"
38 | | Float32 -> "Float32"
39 | | Float64 -> "Float64"
40 | | Int8 -> "Int8"
41 | | Byte -> "Byte"
42 | | Int16 -> "Int16"
43 | | Int32 -> "Int32"
44 | | Int64 -> "Int64"
45 | | Bool -> "Bool"
46 |
47 | /// Gets the natural result of the Sum(), SumToSize() and Sum(dim) operation on this dtype
48 | member t.SummationType =
49 | match t with
50 | | Bool | Byte | Int8 | Int16 | Int32 | Int64 -> Dtype.Int64
51 | | dt -> dt
52 |
53 | override x.ToString() = x.Name
54 |
55 | /// Contains global functions and settings related to tensor element types, used when writing backends.
56 | [<AutoOpen>]
57 | module DtypeAutoOpens =
58 |
59 | type Dtype with
60 | /// Matches all floating point tensor element types
61 | member x.IsFloatingPoint =
62 | match x with
63 | | Float16 | BFloat16 | Float32 | Float64 -> true
64 | | _ -> false
65 |
66 | /// Matches all integral tensor element types
67 | member x.IsIntegral =
68 | match x with
69 | | Byte | Int8 | Int16 | Int32 | Int64 -> true
70 | | _ -> false
71 |
72 | /// Raise an exception indicating the given operation is not supported for the given tensor element type.
73 | let opNotSupported msg (dtype: Dtype) =
74 | invalidOp (sprintf "operation '%s' not permitted on tensors of type %A" msg dtype)
75 |
76 | /// Raise an exception indicating the given operation is not supported for the given tensor device type.
77 | let opNotSupportedOnDeviceType msg (dtype: Dtype) (deviceType: DeviceType) =
78 | invalidOp (sprintf "operation '%s' not permitted on tensors of type %A on device type %A" msg dtype deviceType)
79 |
80 | /// Raise an exception indicating the given binary operation is not supported for the two given tensor element types.
81 | let opNotSupported2 msg (dtype1: Dtype) (dtype2: Dtype) =
82 | invalidOp (sprintf "operation '%s' not permitted on tensors of type (%A, %A)" msg dtype1 dtype2)
83 |
84 | /// Contains functions and settings related to tensor element types
85 | module Dtype =
86 |
87 | /// Matches all floating point tensor element types
88 | [<return: Struct>]
89 | let (|FloatingPoint|_|) (x: Dtype) = if x.IsFloatingPoint then ValueSome() else ValueNone
90 |
91 | /// Matches all integral tensor element types
92 | [<return: Struct>]
93 | let (|Integral|_|) (x: Dtype) = if x.IsIntegral then ValueSome() else ValueNone
94 |
95 | /// Matches all integral or boolean tensor element types
96 | [<return: Struct>]
97 | let (|IntegralOrBool|_|) x =
98 | match x with
99 | | Integral | Bool -> ValueSome()
100 | | _ -> ValueNone
101 |
102 | /// Find the Dtype into which dtype1 and dtype2 can be widened
103 | let widen (dtype1: Dtype) (dtype2: Dtype) =
104 | if dtype1 = dtype2 then ValueSome dtype1
105 | else
106 | match dtype1, dtype2 with
107 | | Float64, _ | _, Float64 -> ValueSome Float64
108 | | Float32, _ | _, Float32 -> ValueSome Float32
109 | | BFloat16, _ | _, BFloat16 -> ValueSome BFloat16
110 | | Float16, _ | _, Float16 -> ValueSome Float16
111 | | Int64, _ | _, Int64 -> ValueSome Int64
112 | | Int32, _ | _, Int32 -> ValueSome Int32
113 | | Int16, _ | _, Int16 -> ValueSome Int16
114 | | Int8, Bool | Bool, Int8 -> ValueSome Int8
115 | | Byte, Bool | Bool, Byte -> ValueSome Byte
116 | | Int8, Int8 -> ValueSome Int8
117 | | Byte, Byte -> ValueSome Byte
118 | | Bool, Bool -> ValueSome Bool
119 | | Int8, Byte | Byte, Int8 -> ValueNone
120 |
121 | /// Get or set the default element type used when creating tensors. Only floating point types are supported as the default type. Note, use FurnaceImage.config(...) instead.
122 | let mutable Default = Dtype.Float32
123 |
124 | /// Find the Dtype which would result from dividing tensors with dtype1 and dtype2
125 | let divisionType (dtype1: Dtype) (dtype2: Dtype) =
126 | match dtype1.IsFloatingPoint, dtype2.IsFloatingPoint with
127 | | false, false -> Default
128 | | false, true -> dtype2
129 | | true, false -> dtype1
130 | | true, true -> (widen dtype1 dtype2).Value
131 |
132 |
133 |
--------------------------------------------------------------------------------
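A small sketch of how the widening and division rules above combine, assuming the Dtype module is used with qualified names as elsewhere in the codebase; the results noted in comments follow directly from the match cases in widen and divisionType.

    open Furnace

    // Widening: floating point beats integral, wider beats narrower,
    // and Int8/Byte have no common widening (ValueNone).
    let w1 = Dtype.widen Dtype.Int32 Dtype.Float32      // ValueSome Float32
    let w2 = Dtype.widen Dtype.Int8 Dtype.Byte          // ValueNone

    // Division: two integral dtypes fall back to Dtype.Default (Float32 unless reconfigured),
    // otherwise the floating point side, or the widened type, wins.
    let d1 = Dtype.divisionType Dtype.Int64 Dtype.Int32       // Float32
    let d2 = Dtype.divisionType Dtype.Float64 Dtype.Float32   // Float64
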
/tests/Furnace.Tests/TestOp.Det.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace Tests
7 |
8 | open NUnit.Framework
9 | open Furnace
10 |
11 |
12 | [<TestFixture>]
13 | type TestTensorDet () =
14 | [<Test>]
15 | member _.TestTensorDet () =
16 | for combo in Combos.FloatingPointExcept16s do
17 | let t3x3 = combo.tensor([[ 1.3038, -0.8699, 1.2059],
18 | [ 1.0837, -1.5076, -0.1286],
19 | [-0.9857, 0.3633, -1.0049]])
20 | let t3x3Det = t3x3.det()
21 | let t3x3DetCorrect = combo.tensor(-0.3387)
22 |
23 | Assert.True(t3x3DetCorrect.allclose(t3x3Det, 0.01))
24 |
25 | let t4x2x2 = combo.tensor([[[-2.1301, -1.4122],
26 | [-0.4353, -0.6708]],
27 |
28 | [[ 0.0696, -1.3661],
29 | [ 0.4162, 0.0663]],
30 |
31 | [[-1.3677, -0.6721],
32 | [ 0.6547, 0.5127]],
33 |
34 | [[-1.1081, 1.0203],
35 | [-0.1355, 0.0641]]])
36 | let t4x2x2Det = t4x2x2.det()
37 | let t4x2x2DetCorrect = combo.tensor([ 0.8141, 0.5732, -0.2612, 0.0672])
38 |
39 | Assert.True(t4x2x2DetCorrect.allclose(t4x2x2Det, 0.01))
40 |
41 |
42 | [<TestFixture>]
43 | type TestDerivativesDet () =
44 | [<Test>]
45 | member _.TestDerivativeDet () =
46 | for combo in Combos.FloatingPointExcept16s do
47 | let fwdx = combo.tensor([[-0.1219, 1.4357, 0.3839],
48 | [-1.2608, -0.5778, -0.8679],
49 | [ 0.2116, -1.1607, -0.4967]])
50 | let fwdx = fwdx.forwardDiff(combo.tensor([[ 0.6779, -0.0532, 0.1049],
51 | [-0.0534, -0.3002, -0.7770],
52 | [-1.3737, -0.4547, 0.1911]]))
53 | let fwdz = FurnaceImage.det(fwdx)
54 | let fwdzCorrect = combo.tensor(-0.4662)
55 | let fwdzd = fwdz.derivative
56 | let fwdzdCorrect = combo.tensor(1.6214)
57 |
58 | let revx = combo.tensor([[-0.1219, 1.4357, 0.3839],
59 | [-1.2608, -0.5778, -0.8679],
60 | [ 0.2116, -1.1607, -0.4967]]).reverseDiff()
61 | let revz = FurnaceImage.det(revx)
62 | let revzCorrect = combo.tensor(-0.4662)
63 | revz.reverse(combo.tensor(1.3444))
64 | let revxd = revx.derivative
65 | let revxdCorrect = combo.tensor([[-0.9685, -1.0888, 2.1318],
66 | [ 0.3596, -0.0278, 0.2182],
67 | [-1.3770, -0.7929, 2.5283]])
68 |
69 | Assert.True(fwdz.allclose(fwdzCorrect, 0.01))
70 | Assert.True(fwdzd.allclose(fwdzdCorrect, 0.01))
71 | Assert.True(revz.allclose(revzCorrect, 0.01))
72 | Assert.True(revxd.allclose(revxdCorrect, 0.01))
73 |
74 | [<Test>]
75 | member _.TestDerivativeDetBatched () =
76 | for combo in Combos.FloatingPointExcept16s do
77 | let fwdx = combo.tensor([[[ 1.2799, -0.6491],
78 | [-1.4575, 2.0789]],
79 |
80 | [[-1.0350, 0.8558],
81 | [ 1.3920, 1.4445]],
82 |
83 | [[-2.0709, 0.2865],
84 | [ 1.0892, 0.5796]]])
85 | let fwdx = fwdx.forwardDiff(combo.tensor([[[ 2.2755, -1.2585],
86 | [ 0.6867, 0.9552]],
87 |
88 | [[-0.6031, -0.1197],
89 | [-1.5058, -0.3416]],
90 |
91 | [[-1.1658, 0.4657],
92 | [ 1.1314, 0.8895]]]))
93 | let fwdz = FurnaceImage.det(fwdx)
94 | let fwdzCorrect = combo.tensor([ 1.7147, -2.6862, -1.5123])
95 | let fwdzd = fwdz.derivative
96 | let fwdzdCorrect = combo.tensor([ 4.5646, 0.9376, -3.3492])
97 |
98 | let revx = combo.tensor([[[ 1.2799, -0.6491],
99 | [-1.4575, 2.0789]],
100 |
101 | [[-1.0350, 0.8558],
102 | [ 1.3920, 1.4445]],
103 |
104 | [[-2.0709, 0.2865],
105 | [ 1.0892, 0.5796]]]).reverseDiff()
106 | let revz = FurnaceImage.det(revx)
107 | let revzCorrect = combo.tensor([ 1.7147, -2.6862, -1.5123])
108 | revz.reverse(combo.tensor([-0.1814, 1.2643, 1.5553]))
109 | let revxd = revx.derivative
110 | let revxdCorrect = combo.tensor([[[-0.3771, -0.2644],
111 | [-0.1177, -0.2321]],
112 |
113 | [[ 1.8262, -1.7598],
114 | [-1.0819, -1.3085]],
115 |
116 | [[ 0.9014, -1.6940],
117 | [-0.4456, -3.2208]]])
118 |
119 | Assert.True(fwdz.allclose(fwdzCorrect, 0.01))
120 | Assert.True(fwdzd.allclose(fwdzdCorrect, 0.01))
121 | Assert.True(revz.allclose(revzCorrect, 0.01))
122 | Assert.True(revxd.allclose(revxdCorrect, 0.01))
--------------------------------------------------------------------------------
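The derivative tests above assert against precomputed values; the standard identity behind them (not stated in the source) is that for an invertible square matrix A, d(det A)/dA = det(A) * inv(A)^T, so a reverse-mode seed v yields the gradient v * det(A) * inv(A)^T. A cross-check sketch follows, assuming Tensor exposes det, inv and transpose members as suggested by Op.Det.fs and Op.Inv.fs elsewhere in the repository.

    open Furnace

    // Reverse-mode gradient of det, checked against the closed form det(A) * inv(A)^T.
    let a = FurnaceImage.tensor([[-0.1219,  1.4357,  0.3839],
                                 [-1.2608, -0.5778, -0.8679],
                                 [ 0.2116, -1.1607, -0.4967]])
    let v = FurnaceImage.tensor(1.3444)

    let x = a.reverseDiff()
    let z = x.det()
    z.reverse(v)
    let gradAutodiff = x.derivative                          // value the test asserts against
    let gradClosedForm = v * a.det() * a.inv().transpose()   // det(A) * inv(A)^T, scaled by the seed
    let agree = gradAutodiff.allclose(gradClosedForm, 0.01)
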
/.github/workflows/build-test-docs-publish.yml:
--------------------------------------------------------------------------------
1 | # note, the nuget org token expires around June 2021
2 |
3 | name: Build, test, generate docs, publish
4 |
5 | on:
6 | push:
7 | branches:
8 | - dev
9 | tags:
10 | - v* # Push events to v1.2.3 tag
11 | workflow_dispatch:
12 |
13 | jobs:
14 | build:
15 |
16 | runs-on: ubuntu-latest
17 |
18 | steps:
19 | - uses: actions/checkout@v2
20 | - name: Install SkiaSharp Dependencies
21 | run: |
22 | sudo apt-get update
23 | sudo apt-get install -y libfontconfig1
24 | sudo apt-get install -y libharfbuzz0b
25 | sudo apt-get install -y libicu-dev
26 | sudo apt-get install -y libfreetype6
27 | sudo apt-get install -y libgif-dev
28 | sudo apt-get install -y libjpeg-dev
29 | sudo apt-get install -y libpng-dev
30 | sudo apt-get install -y libtiff-dev
31 | - name: Setup dotnet 6.0
32 | uses: actions/setup-dotnet@v1
33 | with:
34 | dotnet-version: 6.0.x
35 | - name: Install dependencies
36 | run: dotnet restore
37 | - name: Install tool dependencies
38 | run: dotnet tool restore
39 | # Release build is used to prepare packages
40 | - name: Build (Release)
41 | run: dotnet build --configuration Release --no-restore --verbosity normal
42 | # Debug build is used to prepare docs
43 | - name: Build (Debug)
44 | run: dotnet build --configuration Debug --no-restore --verbosity normal
45 | - name: Get git commit info
46 | run: |
47 | echo "GIT_COMMIT_AUTHOR=$(git log -1 --pretty=%cn)" >> $GITHUB_ENV
48 | echo "GIT_COMMIT_AUTHOR_EMAIL=$(git log -1 --pretty=%ce)" >> $GITHUB_ENV
49 | echo "GIT_COMMIT_MESSAGE=$(git log -1 --pretty=%s)" >> $GITHUB_ENV
50 | - name: Install ReportGenerator
51 | run: dotnet tool install -g dotnet-reportgenerator-globaltool --version 5.1.26
52 | - name: Test with coverage
53 | run: |
54 | # Create coverage directory
55 | mkdir -p coverage
56 |
57 | # Run tests with coverage and debug output
58 | dotnet test --configuration Release --no-build \
59 | /p:CollectCoverage=true \
60 | /p:CoverletOutputFormat=opencover \
61 | /p:CoverletOutput="$PWD/coverage/coverage.opencover.xml" \
62 |
63 | - name: Process coverage
64 | run: |
65 | # Convert to LCOV format
66 | reportgenerator \
67 | -reports:"$PWD/coverage/coverage.opencover.xml" \
68 | -targetdir:"$PWD/coverage" \
69 | -reporttypes:lcov
70 |
71 | - name: Upload coverage
72 | uses: coverallsapp/github-action@v2
73 | with:
74 | file: coverage/lcov.info
75 | - name: Run fsdocs
76 | run: dotnet fsdocs build --eval --strict --properties Configuration=Release
77 |
78 | - name: Deploy docs
79 | uses: peaceiris/actions-gh-pages@v3
80 | with:
81 | personal_token: ${{ secrets.GH_PAT }}
82 | publish_dir: ./output
83 | publish_branch: gh-pages
84 | force_orphan: true
85 | pack_cpu:
86 |
87 | runs-on: ubuntu-latest
88 | needs: build
89 |
90 | steps:
91 | - uses: actions/checkout@v2
92 | - name: Setup dotnet
93 | uses: actions/setup-dotnet@v1
94 | with:
95 | dotnet-version: 6.0.x
96 | - name: Install dependencies
97 | run: dotnet restore
98 | - name: Pack
99 | run: dotnet pack --configuration Release --verbosity normal
100 | - name: Publish NuGets
101 | run: dotnet nuget push "bin/packages/*.nupkg" -s https://api.nuget.org/v3/index.json -k ${{ secrets.NUGET_KEY }} --skip-duplicate
102 |
103 | # Done in a separate job because it downloads the massive Windows CUDA packages (though only for reference
104 | # during the package build, it doesn't actually use them)
105 | pack_cuda_windows:
106 |
107 | runs-on: ubuntu-latest
108 | needs: build
109 |
110 | steps:
111 | - uses: actions/checkout@v2
112 | - name: Setup dotnet
113 | uses: actions/setup-dotnet@v1
114 | with:
115 | dotnet-version: 6.0.x
116 | - name: Install dependencies
117 | run: dotnet restore
118 | - name: Pack (Furnace-cuda-windows)
119 | run: dotnet pack --configuration Release --verbosity normal bundles/Furnace-cuda-windows
120 | - name: Publish NuGets
121 | run: dotnet nuget push "bin/packages/*.nupkg" -s https://api.nuget.org/v3/index.json -k ${{ secrets.NUGET_KEY }} --skip-duplicate
122 |
123 | # Done in a separate job because it downloads the massive Linux CUDA packages (though only for reference
124 | # during the package build, it doesn't actually use them)
125 | pack_cuda_linux:
126 |
127 | runs-on: ubuntu-latest
128 | needs: build
129 |
130 | steps:
131 | - uses: actions/checkout@v2
132 | - name: Setup dotnet
133 | uses: actions/setup-dotnet@v1
134 | with:
135 | dotnet-version: 6.0.102
136 | - name: Disable global.json
137 | run: mv ./global.json ./global.bak || true
138 | - name: Install dependencies
139 | run: dotnet restore
140 | - name: Pack (Furnace-cuda-linux)
141 | run: dotnet pack --configuration Release --verbosity normal bundles/Furnace-cuda-linux
142 | - name: Publish NuGets
143 | run: dotnet nuget push "bin/packages/*.nupkg" -s https://api.nuget.org/v3/index.json -k ${{ secrets.NUGET_KEY }} --skip-duplicate
144 |
145 | # Done in a separate job because it downloads the massive Linux and Windows CUDA packages (though only for reference
146 | # during the package build, it doesn't actually use them)
147 | pack_cuda:
148 |
149 | runs-on: ubuntu-latest
150 | needs: build
151 |
152 | steps:
153 | - uses: actions/checkout@v2
154 | - name: Setup dotnet
155 | uses: actions/setup-dotnet@v1
156 | with:
157 | dotnet-version: 6.0.102
158 | - name: Disable global.json
159 | run: mv ./global.json ./global.bak || true
160 | - name: Install dependencies
161 | run: dotnet restore
162 | - name: Pack (Furnace-cuda)
163 | run: dotnet pack --configuration Release --verbosity normal bundles/Furnace-cuda
164 | - name: Publish NuGets
165 | run: dotnet nuget push "bin/packages/*.nupkg" -s https://api.nuget.org/v3/index.json -k ${{ secrets.NUGET_KEY }} --skip-duplicate
--------------------------------------------------------------------------------
/src/Furnace.Core/Data.fs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2016- University of Oxford (Atılım Güneş Baydin )
2 | // and other contributors, see LICENSE in root of repository.
3 | //
4 | // BSD 2-Clause License. See LICENSE in root of repository.
5 |
6 | namespace rec Furnace.Data
7 |
8 | open Furnace
9 | open Furnace.Compose
10 | open Furnace.Util
11 |
12 |
13 | /// <namespacedoc>
14 | ///   <summary>Contains datasets and components related to data loading.</summary>
15 | /// </namespacedoc>
16 | ///
17 | /// <summary>Represents a dataset.</summary>
18 | [<AbstractClass>]
19 | type Dataset() =
20 | abstract member length: int
21 | abstract member item: int -> Tensor * Tensor
22 | member d.loader(batchSize:int, ?shuffle:bool, ?dropLast:bool, ?device:Device, ?dtype:Dtype, ?backend:Backend, ?targetDevice:Device, ?targetDtype:Dtype, ?targetBackend:Backend) = DataLoader(d, batchSize=batchSize, ?shuffle=shuffle, ?dropLast=dropLast, ?device=device, ?dtype=dtype, ?backend=backend, ?targetDevice=targetDevice, ?targetDtype=targetDtype, ?targetBackend=targetBackend)
23 | override d.ToString() = sprintf "Dataset(%A)" d.length
24 | member d.Item
25 | with get(i:int) =
26 | d.item(i)
27 | member d.GetSlice(imin:int option, imax:int option) =
28 | let imin = defaultArg imin 0
29 | let imax = defaultArg imax d.length
30 | if imin >= imax then failwithf "Expecting imin (%A) < imax (%A)" imin imax
31 | DatasetSubset(d, [|imin..imax|])
32 | member d.filter(predicate:Tensor->Tensor->bool) =
33 | let indices = ResizeArray()
34 | for i in 0..d.length-1 do
35 | let data, target = d.item(i)
36 | if predicate data target then
37 | indices.Add(i)
38 | if indices.Count = 0 then failwithf "Could not find any data items for which the predicate is true"
39 | DatasetSubset(d, indices.ToArray())
40 |
41 |
42 | type DatasetSubset(dataset:Dataset, indices:int[]) =
43 | inherit Dataset()
44 | override d.length = indices.Length
45 | override d.item(i) = dataset.item(indices[i])
46 |
47 |
48 | type DataLoader(dataset:Dataset, batchSize:int, ?shuffle:bool, ?dropLast:bool, ?device:Device, ?dtype:Dtype, ?backend:Backend, ?targetDevice:Device, ?targetDtype:Dtype, ?targetBackend:Backend) =
49 | let batchSize = min batchSize dataset.length
50 | let shuffle = defaultArg shuffle false
51 | let dropLast = defaultArg dropLast true
52 | let device = defaultArg device Device.Default
53 | let dtype = defaultArg dtype Dtype.Default
54 | let backend = defaultArg backend Backend.Default
55 | let targetDevice = defaultArg targetDevice device
56 | let targetDtype = defaultArg targetDtype dtype
57 | let targetBackend = defaultArg targetBackend backend
58 | let datalength = if dropLast then batchSize*(dataset.length/batchSize) else dataset.length
59 | member d.length = ((float datalength)/(float batchSize)) |> ceil |> int
60 | member d.epoch(?numBatches:int) =
61 | let numBatches = defaultArg numBatches d.length
62 | if numBatches < 1 || numBatches > d.length then failwithf "Expecting 1 <= numBatches (%A) <= %A" numBatches d.length
63 | let indexer = if shuffle then Random.shuffledIndices datalength else id
64 | let indices = Seq.init datalength id |> Seq.map indexer
65 | let batchIndices = indices |> Seq.chunkBySize batchSize
66 | let batches = batchIndices |> Seq.map (Array.map dataset.item >> Array.unzip)
67 | batches |> Seq.mapi (fun i (data, target) -> i, data |> FurnaceImage.stack |> FurnaceImage.move(device, dtype, backend), target |> FurnaceImage.stack |> FurnaceImage.move(targetDevice, targetDtype, targetBackend))
68 | |> Seq.truncate numBatches
69 | member d.batch(?batchSize:int) =
70 | let _, data, target = d.epoch() |> Seq.head
71 | match batchSize with
72 | | Some(b) when b <= 0 -> failwithf "Expecting batchSize > 0"
73 | | Some(b) when b < data.shape[0]-> data[..b-1], target[..b-1]
74 | | _ -> data, target
75 |
76 |
77 | type TensorDataset(data:Tensor, target:Tensor) =
78 | inherit Dataset()
79 | do if data.shape[0] <> target.shape[0] then failwith "Expecting data and target to have the same size in the first dimension"
80 | override d.length = data.shape[0]
81 | override d.item(i) = data[i], target[i]
82 |
83 |
84 | type TextDataset(text:string, seqLength, ?chars) =
85 | inherit Dataset()
86 | // """0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;?@[\\]^_`{|}~ """
87 | let _chars = (defaultArg chars text) |> Seq.distinct |> Seq.toArray |> Array.sort
88 | let onehot = memoize (fun (length, hot) -> FurnaceImage.onehot(length, hot, device=Device.CPU))
89 | let _charToIndex = memoize (fun c -> try Array.findIndex ((=) c) _chars with _ -> failwithf "Character %A not found in this TextDataset (chars: %A)" c _chars)
90 | let _indexToChar(index) = _chars[index]
91 | let textToIndices(text:string) = text |> Seq.map _charToIndex |> Seq.toArray
92 | let indicesToTensor(indices) = indices |> Array.map (fun i -> onehot(_chars.Length, i)) |> FurnaceImage.stack
93 | let sequences =
94 | if seqLength > text.Length then failwithf "Expecting text.Length (%A) >= seqLength (%A)" text.Length seqLength
95 | [|for i in 0..(text.Length - seqLength + 1)-1 do text.Substring(i, seqLength)|] |> Array.map textToIndices
96 |
97 | member d.indexToChar(i) = _indexToChar(i)
98 | member d.charToIndex(c) = _charToIndex(c)
99 | member d.textToTensor(text:string) = text |> textToIndices |> indicesToTensor
100 | member d.tensorToText(tensor:Tensor) =
101 | if tensor.dim <> 2 then failwithf "Expecting a 2d tensor with shape seqLen x features, received tensor with shape %A" tensor.shape
102 | let t2text (tens:Tensor) = [|for i in 0..tens.shape[0]-1 do tens[i].argmax()[0]|] |> Array.map _indexToChar |> System.String |> string
103 | tensor |> t2text
104 |
105 | member d.chars = _chars
106 | member d.numChars = _chars.Length
107 | override d.length = sequences.Length
108 | override d.item(i) =
109 | let data = sequences[i] |> indicesToTensor
110 | let target = sequences[i] |> FurnaceImage.tensor(dtype=Dtype.Default, device=Device.CPU)
111 | data, target
112 |
113 | // More datasets (MNIST, CIFAR, etc.) are implemented in Furnace.Data project
--------------------------------------------------------------------------------
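A minimal end-to-end sketch of the Dataset/DataLoader API defined above. FurnaceImage.randn and FurnaceImage.randint are assumed tensor constructors (not shown in this file) used here only to fabricate example data; shapes and batch size are arbitrary.

    open Furnace
    open Furnace.Data

    // 100 examples of shape 1x28x28 with integer class targets in [0, 10).
    let inputs  = FurnaceImage.randn([100; 1; 28; 28])
    let targets = FurnaceImage.randint(0, 10, [100])
    let dataset = TensorDataset(inputs, targets)

    // Shuffled minibatches of 16; epoch() yields (index, data, target) triples.
    let loader = dataset.loader(batchSize=16, shuffle=true)
    for i, data, target in loader.epoch() do
        printfn "batch %d: data %A, target %A" i data.shape target.shape
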