├── .gitattributes ├── luarocks ├── fun │ └── scm-1 │ │ ├── tests │ │ └── .gitignore │ │ ├── rock_manifest │ │ └── fun-scm-1.rockspec ├── gnuplot │ └── scm-1 │ │ ├── doc │ │ ├── plot_x.png │ │ ├── plot_raw.png │ │ ├── plot_xyf.png │ │ ├── scatter3.png │ │ ├── plot_filled.png │ │ ├── plot_hist.png │ │ ├── plot_labels.png │ │ ├── plot_sincos.png │ │ ├── plot_splot.png │ │ ├── plot_splot2.png │ │ ├── plot_imagesc.png │ │ ├── scatter3_helix.png │ │ ├── plothistogram.md │ │ ├── plotmatrix.md │ │ ├── plot3dpoints.md │ │ └── plotsurface.md │ │ └── gnuplot-scm-1.rockspec ├── nn │ └── scm-1 │ │ ├── doc │ │ └── image │ │ │ ├── abs.png │ │ │ ├── elu.png │ │ │ ├── exp.png │ │ │ ├── htanh.png │ │ │ ├── lena.jpg │ │ │ ├── lenap.jpg │ │ │ ├── power.png │ │ │ ├── prelu.png │ │ │ ├── relu.png │ │ │ ├── relu6.png │ │ │ ├── rrelu.png │ │ │ ├── sqrt.png │ │ │ ├── tanh.png │ │ │ ├── hshrink.png │ │ │ ├── sigmoid.png │ │ │ ├── softmax.png │ │ │ ├── softmin.png │ │ │ ├── square.png │ │ │ ├── sshrink.png │ │ │ ├── logsigmoid.png │ │ │ ├── logsoftmax.png │ │ │ ├── sigmmoid.png │ │ │ ├── softplus.png │ │ │ └── softsign.png │ │ └── nn-scm-1.rockspec ├── torch │ └── scm-1 │ │ ├── doc │ │ ├── gather.png │ │ └── pipefile.md │ │ └── torch-scm-1.rockspec ├── nnx │ └── 0.1-1 │ │ ├── doc │ │ └── image │ │ │ ├── Lenna.png │ │ │ └── Lenna-150x150-bilinear.png │ │ └── nnx-0.1-1.rockspec ├── optim │ └── 1.0.5-0 │ │ ├── doc │ │ ├── logger_plot.png │ │ └── image │ │ │ ├── parameterflattening.png │ │ │ └── parameterflattening.svg.png │ │ └── optim-1.0.5-0.rockspec ├── strict │ └── 0-0 │ │ ├── doc │ │ ├── README.md │ │ ├── CONTRIBUTING.md │ │ └── LICENSE │ │ ├── rock_manifest │ │ └── strict-0-0.rockspec ├── luafilesystem │ └── 1.6.3-2 │ │ ├── doc │ │ └── us │ │ │ └── luafilesystem.png │ │ ├── rock_manifest │ │ └── luafilesystem-1.6.3-2.rockspec ├── sundown │ └── scm-1 │ │ ├── bin │ │ └── mdcat │ │ ├── rock_manifest │ │ └── sundown-scm-1.rockspec ├── dok │ └── scm-1 │ │ ├── rock_manifest │ │ └── 
dok-scm-1.rockspec ├── inspect │ └── 3.1.0-1 │ │ ├── rock_manifest │ │ ├── inspect-3.1.0-1.rockspec │ │ └── doc │ │ └── CHANGELOG.md ├── luaffi │ └── scm-1 │ │ ├── rock_manifest │ │ ├── luaffi-scm-1.rockspec │ │ └── doc │ │ └── CONTRIBUTING.md ├── xlua │ └── 1.1-0 │ │ ├── rock_manifest │ │ ├── xlua-1.1-0.rockspec │ │ └── doc │ │ └── README.md ├── sys │ └── 1.1-0 │ │ ├── rock_manifest │ │ └── sys-1.1-0.rockspec ├── penlight │ └── 1.5.4-1 │ │ └── doc │ │ └── config.ld ├── paths │ └── scm-1 │ │ ├── rock_manifest │ │ ├── doc │ │ └── index.md │ │ └── paths-scm-1.rockspec ├── cwrap │ └── scm-1 │ │ ├── cwrap-scm-1.rockspec │ │ ├── rock_manifest │ │ └── doc │ │ └── index.md ├── config.lua ├── moses │ └── 1.6.1-1 │ │ ├── moses-1.6.1-1.rockspec │ │ ├── spec │ │ ├── chaining_spec.lua │ │ └── import_spec.lua │ │ └── rock_manifest └── image │ └── 1.1.alpha-0 │ ├── image-1.1.alpha-0.rockspec │ └── doc │ └── index.md ├── bin ├── TH.dll ├── ffi.dll ├── lfs.dll ├── luaT.dll ├── libnnx.dll ├── libppm.dll ├── libsys.dll ├── lua51.dll ├── luajit.exe ├── concrt140.dll ├── libTHNN.dll ├── libblas.dll ├── libimage.dll ├── liblapack.dll ├── libluajit.dll ├── libpaths.dll ├── libtmglib.dll ├── libtorch.dll ├── mime │ └── core.dll ├── msvcp140.dll ├── vcomp140.dll ├── libsundown.dll ├── socket │ └── core.dll ├── vccorlib140.dll ├── libgcc_s_seh-1.dll ├── libgfortran-3.dll ├── libquadmath-0.dll ├── vcruntime140.dll ├── libwinpthread-1.dll ├── cmake.cmd ├── mdcat.bat ├── luarocks-admin.bat └── luarocks.bat ├── lib ├── TH.lib ├── luaT.lib └── libluajit.lib ├── tools ├── 7z.dll ├── 7z.exe ├── cp.exe ├── ls.exe ├── mv.exe ├── find.exe ├── pwd.exe ├── test.exe ├── wget.exe ├── md5sum.exe ├── mkdir.exe ├── rmdir.exe ├── uname.exe ├── libeay32.dll ├── libiconv2.dll ├── libintl3.dll └── libssl32.dll ├── lapack ├── bin │ ├── libblas.dll │ ├── liblapack.dll │ └── libtmglib.dll └── lib │ ├── libblas.lib │ ├── libblas.dll.a │ ├── liblapack.lib │ ├── libtmglib.lib │ ├── liblapack.dll.a │ ├── 
libtmglib.dll.a │ ├── pkgconfig │ ├── blas.pc │ └── lapack.pc │ └── cmake │ └── lapack-3.7.1 │ └── lapack-config.cmake ├── lua ├── image │ ├── assets │ │ ├── P4.pbm │ │ ├── P5.pgm │ │ ├── P6.ppm │ │ ├── fabio.jpg │ │ ├── fabio.png │ │ ├── foobar.png │ │ ├── rgb2x1.png │ │ ├── gray3x1.png │ │ ├── bmp-without-ext │ │ ├── gray16-1x2.png │ │ ├── rectangle.png │ │ ├── rgb16-2x1.png │ │ ├── corrupt-ihdr.png │ │ ├── grace_hopper_512.jpg │ │ ├── grace_hopper_512.png │ │ └── P2.pgm │ └── win.ui ├── nn │ ├── doc │ │ └── image │ │ │ ├── abs.png │ │ │ ├── elu.png │ │ │ ├── exp.png │ │ │ ├── htanh.png │ │ │ ├── lena.jpg │ │ │ ├── lenap.jpg │ │ │ ├── power.png │ │ │ ├── prelu.png │ │ │ ├── relu.png │ │ │ ├── relu6.png │ │ │ ├── rrelu.png │ │ │ ├── sqrt.png │ │ │ ├── square.png │ │ │ ├── tanh.png │ │ │ ├── hshrink.png │ │ │ ├── sigmmoid.png │ │ │ ├── sigmoid.png │ │ │ ├── softmax.png │ │ │ ├── softmin.png │ │ │ ├── softplus.png │ │ │ ├── softsign.png │ │ │ ├── sshrink.png │ │ │ ├── logsigmoid.png │ │ │ └── logsoftmax.png │ ├── ReLU.lua │ ├── Clamp.lua │ ├── VolumetricBatchNormalization.lua │ ├── Exp.lua │ ├── Mean.lua │ ├── ErrorMessages.lua │ ├── Tanh.lua │ ├── Sigmoid.lua │ ├── SoftMax.lua │ ├── Log.lua │ ├── Abs.lua │ ├── LogSoftMax.lua │ ├── SpatialSoftMax.lua │ ├── Square.lua │ ├── SpatialLogSoftMax.lua │ ├── CriterionTable.lua │ ├── Contiguous.lua │ ├── Power.lua │ ├── TanhShrink.lua │ ├── Sqrt.lua │ ├── HardShrink.lua │ ├── SoftShrink.lua │ ├── SoftSign.lua │ ├── GatedLinearUnit.lua │ ├── LogSigmoid.lua │ ├── CSubTable.lua │ ├── Identity.lua │ ├── L1Cost.lua │ ├── SoftMarginCriterion.lua │ ├── CDivTable.lua │ ├── ReLU6.lua │ ├── Transpose.lua │ ├── GradientReversal.lua │ ├── SoftMin.lua │ ├── MarginCriterion.lua │ ├── AbsCriterion.lua │ ├── MSECriterion.lua │ ├── Select.lua │ ├── CAddTable.lua │ ├── Index.lua │ ├── SmoothL1Criterion.lua │ ├── Mul.lua │ ├── SpatialAdaptiveAveragePooling.lua │ ├── SoftPlus.lua │ ├── LeakyReLU.lua │ ├── HardTanh.lua │ ├── SpatialLPPooling.lua 
│ ├── DistKLDivCriterion.lua │ ├── ELU.lua │ ├── MulConstant.lua │ ├── L1Penalty.lua │ ├── Copy.lua │ ├── L1HingeEmbeddingCriterion.lua │ ├── TemporalMaxPooling.lua │ ├── MultiCriterion.lua │ ├── NarrowTable.lua │ ├── SplitTable.lua │ ├── SpatialAdaptiveMaxPooling.lua │ ├── RReLU.lua │ ├── MultiLabelMarginCriterion.lua │ ├── WeightedMSECriterion.lua │ ├── Squeeze.lua │ ├── HingeEmbeddingCriterion.lua │ ├── SpatialBatchNormalization.lua │ ├── ParallelCriterion.lua │ └── SpatialMaxUnpooling.lua ├── torch │ ├── doc │ │ ├── gather.png │ │ └── pipefile.md │ └── TestSuite.lua ├── gnuplot │ ├── doc │ │ ├── plot_x.png │ │ ├── plot_hist.png │ │ ├── plot_xyf.png │ │ ├── plot_filled.png │ │ ├── plot_imagesc.png │ │ ├── plot_sincos.png │ │ ├── plot_splot.png │ │ └── plot_splot2.png │ └── init.lua ├── optim │ ├── doc │ │ ├── logger_plot.png │ │ └── image │ │ │ ├── parameterflattening.png │ │ │ └── parameterflattening.svg.png │ ├── README.md │ └── init.lua ├── luarocks │ ├── require.lua │ ├── fetch │ │ ├── hg_ssh.lua │ │ ├── hg_https.lua │ │ ├── git_https.lua │ │ ├── git_file.lua │ │ ├── hg_http.lua │ │ └── git_http.lua │ ├── site_config.lua │ ├── build │ │ └── command.lua │ └── refresh_cache.lua ├── cwrap │ ├── init.lua │ └── README.md ├── dok │ └── init.lua ├── sundown │ ├── init.lua │ ├── env.lua │ ├── html.lua │ └── htmlcdefs.lua ├── nnx │ ├── Minus.lua │ ├── SparseCriterion.lua │ ├── DistMarginCriterion.lua │ ├── Tic.lua │ ├── Toc.lua │ ├── FunctionWrapper.lua │ ├── SpatialMaxSampling.lua │ ├── SaturatedLU.lua │ ├── TreeNLLCriterion.lua │ ├── LA.lua │ └── SpatialMatching.lua ├── pl │ ├── init.lua │ ├── url.lua │ └── MultiMap.lua ├── sys │ ├── fpath.lua │ └── colors.lua ├── jit │ ├── dis_x64.lua │ ├── dis_mipsel.lua │ └── zone.lua └── strict.lua ├── include ├── lua.hpp ├── TH │ ├── THBlas.h │ ├── THLogAdd.h │ ├── THMemoryFile.h │ ├── THMath.h │ ├── TH.h │ ├── THStorage.h │ ├── THDiskFile.h │ ├── THGenerateFloatTypes.h │ ├── generic │ │ ├── THTensorCopy.h │ │ ├── 
THTensorCopy.c │ │ ├── THStorageCopy.h │ │ ├── THBlas.h │ │ ├── THStorageCopy.c │ │ └── THTensorRandom.h │ ├── THTensor.h │ ├── THLapack.h │ └── THAllocator.h ├── THNN │ └── THNN.h └── lualib.h ├── setpaths.cmd └── share └── cmake └── torch ├── THConfig.cmake ├── luaTConfig.cmake ├── TorchWrap.cmake ├── TorchExports-release.cmake └── TorchConfig.cmake /.gitattributes: -------------------------------------------------------------------------------- 1 | * binary 2 | -------------------------------------------------------------------------------- /luarocks/fun/scm-1/tests/.gitignore: -------------------------------------------------------------------------------- 1 | *.new 2 | -------------------------------------------------------------------------------- /bin/TH.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/TH.dll -------------------------------------------------------------------------------- /bin/ffi.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/ffi.dll -------------------------------------------------------------------------------- /bin/lfs.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/lfs.dll -------------------------------------------------------------------------------- /bin/luaT.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/luaT.dll -------------------------------------------------------------------------------- /lib/TH.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lib/TH.lib -------------------------------------------------------------------------------- 
/lib/luaT.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lib/luaT.lib -------------------------------------------------------------------------------- /tools/7z.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/7z.dll -------------------------------------------------------------------------------- /tools/7z.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/7z.exe -------------------------------------------------------------------------------- /tools/cp.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/cp.exe -------------------------------------------------------------------------------- /tools/ls.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/ls.exe -------------------------------------------------------------------------------- /tools/mv.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/mv.exe -------------------------------------------------------------------------------- /bin/libnnx.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libnnx.dll -------------------------------------------------------------------------------- /bin/libppm.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libppm.dll -------------------------------------------------------------------------------- 
/bin/libsys.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libsys.dll -------------------------------------------------------------------------------- /bin/lua51.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/lua51.dll -------------------------------------------------------------------------------- /bin/luajit.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/luajit.exe -------------------------------------------------------------------------------- /tools/find.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/find.exe -------------------------------------------------------------------------------- /tools/pwd.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/pwd.exe -------------------------------------------------------------------------------- /tools/test.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/test.exe -------------------------------------------------------------------------------- /tools/wget.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/wget.exe -------------------------------------------------------------------------------- /bin/concrt140.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/concrt140.dll 
-------------------------------------------------------------------------------- /bin/libTHNN.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libTHNN.dll -------------------------------------------------------------------------------- /bin/libblas.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libblas.dll -------------------------------------------------------------------------------- /bin/libimage.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libimage.dll -------------------------------------------------------------------------------- /bin/liblapack.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/liblapack.dll -------------------------------------------------------------------------------- /bin/libluajit.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libluajit.dll -------------------------------------------------------------------------------- /bin/libpaths.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libpaths.dll -------------------------------------------------------------------------------- /bin/libtmglib.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libtmglib.dll -------------------------------------------------------------------------------- /bin/libtorch.dll: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libtorch.dll -------------------------------------------------------------------------------- /bin/mime/core.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/mime/core.dll -------------------------------------------------------------------------------- /bin/msvcp140.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/msvcp140.dll -------------------------------------------------------------------------------- /bin/vcomp140.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/vcomp140.dll -------------------------------------------------------------------------------- /lib/libluajit.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lib/libluajit.lib -------------------------------------------------------------------------------- /tools/md5sum.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/md5sum.exe -------------------------------------------------------------------------------- /tools/mkdir.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/mkdir.exe -------------------------------------------------------------------------------- /tools/rmdir.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/rmdir.exe -------------------------------------------------------------------------------- /tools/uname.exe: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/uname.exe -------------------------------------------------------------------------------- /bin/libsundown.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libsundown.dll -------------------------------------------------------------------------------- /bin/socket/core.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/socket/core.dll -------------------------------------------------------------------------------- /bin/vccorlib140.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/vccorlib140.dll -------------------------------------------------------------------------------- /tools/libeay32.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/libeay32.dll -------------------------------------------------------------------------------- /tools/libiconv2.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/libiconv2.dll -------------------------------------------------------------------------------- /tools/libintl3.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/libintl3.dll -------------------------------------------------------------------------------- /tools/libssl32.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/tools/libssl32.dll 
-------------------------------------------------------------------------------- /bin/libgcc_s_seh-1.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libgcc_s_seh-1.dll -------------------------------------------------------------------------------- /bin/libgfortran-3.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libgfortran-3.dll -------------------------------------------------------------------------------- /bin/libquadmath-0.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libquadmath-0.dll -------------------------------------------------------------------------------- /bin/vcruntime140.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/vcruntime140.dll -------------------------------------------------------------------------------- /lapack/bin/libblas.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lapack/bin/libblas.dll -------------------------------------------------------------------------------- /lapack/lib/libblas.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lapack/lib/libblas.lib -------------------------------------------------------------------------------- /bin/libwinpthread-1.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/bin/libwinpthread-1.dll -------------------------------------------------------------------------------- /lapack/bin/liblapack.dll: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lapack/bin/liblapack.dll -------------------------------------------------------------------------------- /lapack/bin/libtmglib.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lapack/bin/libtmglib.dll -------------------------------------------------------------------------------- /lapack/lib/libblas.dll.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lapack/lib/libblas.dll.a -------------------------------------------------------------------------------- /lapack/lib/liblapack.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lapack/lib/liblapack.lib -------------------------------------------------------------------------------- /lapack/lib/libtmglib.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lapack/lib/libtmglib.lib -------------------------------------------------------------------------------- /lua/image/assets/P4.pbm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/P4.pbm -------------------------------------------------------------------------------- /lua/image/assets/P5.pgm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/P5.pgm -------------------------------------------------------------------------------- /lua/image/assets/P6.ppm: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/P6.ppm -------------------------------------------------------------------------------- /lua/nn/doc/image/abs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/abs.png -------------------------------------------------------------------------------- /lua/nn/doc/image/elu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/elu.png -------------------------------------------------------------------------------- /lua/nn/doc/image/exp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/exp.png -------------------------------------------------------------------------------- /lua/torch/doc/gather.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/torch/doc/gather.png -------------------------------------------------------------------------------- /lapack/lib/liblapack.dll.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lapack/lib/liblapack.dll.a -------------------------------------------------------------------------------- /lapack/lib/libtmglib.dll.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lapack/lib/libtmglib.dll.a -------------------------------------------------------------------------------- /lua/gnuplot/doc/plot_x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/gnuplot/doc/plot_x.png 
-------------------------------------------------------------------------------- /lua/image/assets/fabio.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/fabio.jpg -------------------------------------------------------------------------------- /lua/image/assets/fabio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/fabio.png -------------------------------------------------------------------------------- /lua/image/assets/foobar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/foobar.png -------------------------------------------------------------------------------- /lua/image/assets/rgb2x1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/rgb2x1.png -------------------------------------------------------------------------------- /lua/nn/doc/image/htanh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/htanh.png -------------------------------------------------------------------------------- /lua/nn/doc/image/lena.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/lena.jpg -------------------------------------------------------------------------------- /lua/nn/doc/image/lenap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/lenap.jpg 
-------------------------------------------------------------------------------- /lua/nn/doc/image/power.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/power.png -------------------------------------------------------------------------------- /lua/nn/doc/image/prelu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/prelu.png -------------------------------------------------------------------------------- /lua/nn/doc/image/relu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/relu.png -------------------------------------------------------------------------------- /lua/nn/doc/image/relu6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/relu6.png -------------------------------------------------------------------------------- /lua/nn/doc/image/rrelu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/rrelu.png -------------------------------------------------------------------------------- /lua/nn/doc/image/sqrt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/sqrt.png -------------------------------------------------------------------------------- /lua/nn/doc/image/square.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/square.png 
-------------------------------------------------------------------------------- /lua/nn/doc/image/tanh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/tanh.png -------------------------------------------------------------------------------- /lua/gnuplot/doc/plot_hist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/gnuplot/doc/plot_hist.png -------------------------------------------------------------------------------- /lua/gnuplot/doc/plot_xyf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/gnuplot/doc/plot_xyf.png -------------------------------------------------------------------------------- /lua/image/assets/gray3x1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/gray3x1.png -------------------------------------------------------------------------------- /lua/nn/doc/image/hshrink.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/hshrink.png -------------------------------------------------------------------------------- /lua/nn/doc/image/sigmmoid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/sigmmoid.png -------------------------------------------------------------------------------- /lua/nn/doc/image/sigmoid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/sigmoid.png 
-------------------------------------------------------------------------------- /lua/nn/doc/image/softmax.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/softmax.png -------------------------------------------------------------------------------- /lua/nn/doc/image/softmin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/softmin.png -------------------------------------------------------------------------------- /lua/nn/doc/image/softplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/softplus.png -------------------------------------------------------------------------------- /lua/nn/doc/image/softsign.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/softsign.png -------------------------------------------------------------------------------- /lua/nn/doc/image/sshrink.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/sshrink.png -------------------------------------------------------------------------------- /lua/optim/doc/logger_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/optim/doc/logger_plot.png -------------------------------------------------------------------------------- /lua/gnuplot/doc/plot_filled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/gnuplot/doc/plot_filled.png 
-------------------------------------------------------------------------------- /lua/gnuplot/doc/plot_imagesc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/gnuplot/doc/plot_imagesc.png -------------------------------------------------------------------------------- /lua/gnuplot/doc/plot_sincos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/gnuplot/doc/plot_sincos.png -------------------------------------------------------------------------------- /lua/gnuplot/doc/plot_splot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/gnuplot/doc/plot_splot.png -------------------------------------------------------------------------------- /lua/gnuplot/doc/plot_splot2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/gnuplot/doc/plot_splot2.png -------------------------------------------------------------------------------- /lua/image/assets/bmp-without-ext: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/bmp-without-ext -------------------------------------------------------------------------------- /lua/image/assets/gray16-1x2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/gray16-1x2.png -------------------------------------------------------------------------------- /lua/image/assets/rectangle.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/rectangle.png -------------------------------------------------------------------------------- /lua/image/assets/rgb16-2x1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/rgb16-2x1.png -------------------------------------------------------------------------------- /lua/nn/doc/image/logsigmoid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/logsigmoid.png -------------------------------------------------------------------------------- /lua/nn/doc/image/logsoftmax.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/nn/doc/image/logsoftmax.png -------------------------------------------------------------------------------- /lua/image/assets/corrupt-ihdr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/corrupt-ihdr.png -------------------------------------------------------------------------------- /lua/image/assets/grace_hopper_512.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/grace_hopper_512.jpg -------------------------------------------------------------------------------- /lua/image/assets/grace_hopper_512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/image/assets/grace_hopper_512.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plot_x.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/plot_x.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/abs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/abs.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/elu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/elu.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/exp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/exp.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/htanh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/htanh.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/lena.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/lena.jpg -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/lenap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/lenap.jpg 
-------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/power.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/power.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/prelu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/prelu.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/relu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/relu.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/relu6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/relu6.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/rrelu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/rrelu.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/sqrt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/sqrt.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/tanh.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/tanh.png -------------------------------------------------------------------------------- /luarocks/torch/scm-1/doc/gather.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/torch/scm-1/doc/gather.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plot_raw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/plot_raw.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plot_xyf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/plot_xyf.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/scatter3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/scatter3.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/hshrink.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/hshrink.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/sigmoid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/sigmoid.png -------------------------------------------------------------------------------- 
/luarocks/nn/scm-1/doc/image/softmax.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/softmax.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/softmin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/softmin.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/square.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/square.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/sshrink.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/sshrink.png -------------------------------------------------------------------------------- /luarocks/nnx/0.1-1/doc/image/Lenna.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nnx/0.1-1/doc/image/Lenna.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plot_filled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/plot_filled.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plot_hist.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/plot_hist.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plot_labels.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/plot_labels.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plot_sincos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/plot_sincos.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plot_splot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/plot_splot.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plot_splot2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/plot_splot2.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/logsigmoid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/logsigmoid.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/logsoftmax.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/logsoftmax.png 
-------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/sigmmoid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/sigmmoid.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/softplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/softplus.png -------------------------------------------------------------------------------- /luarocks/nn/scm-1/doc/image/softsign.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nn/scm-1/doc/image/softsign.png -------------------------------------------------------------------------------- /luarocks/optim/1.0.5-0/doc/logger_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/optim/1.0.5-0/doc/logger_plot.png -------------------------------------------------------------------------------- /lua/luarocks/require.lua: -------------------------------------------------------------------------------- 1 | --- Retained for compatibility reasons only. Use luarocks.loader instead. 
2 | return require("luarocks.loader") 3 | -------------------------------------------------------------------------------- /lua/optim/doc/image/parameterflattening.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/optim/doc/image/parameterflattening.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plot_imagesc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/plot_imagesc.png -------------------------------------------------------------------------------- /lua/optim/doc/image/parameterflattening.svg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/lua/optim/doc/image/parameterflattening.svg.png -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/scatter3_helix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/gnuplot/scm-1/doc/scatter3_helix.png -------------------------------------------------------------------------------- /lua/gnuplot/init.lua: -------------------------------------------------------------------------------- 1 | require 'torch' 2 | 3 | gnuplot = {} 4 | require('gnuplot.gnuplot') 5 | require('gnuplot.hist') 6 | 7 | return gnuplot 8 | -------------------------------------------------------------------------------- /lua/nn/ReLU.lua: -------------------------------------------------------------------------------- 1 | local ReLU, Parent = torch.class('nn.ReLU', 'nn.Threshold') 2 | 3 | function ReLU:__init(p) 4 | Parent.__init(self,0,0,p) 5 | end 6 | 
-------------------------------------------------------------------------------- /luarocks/strict/0-0/doc/README.md: -------------------------------------------------------------------------------- 1 | strict 2 | ====== 3 | 4 | A Lua package to detect reading of undeclared variables and creating of global variables. 5 | -------------------------------------------------------------------------------- /luarocks/luafilesystem/1.6.3-2/doc/us/luafilesystem.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/luafilesystem/1.6.3-2/doc/us/luafilesystem.png -------------------------------------------------------------------------------- /luarocks/nnx/0.1-1/doc/image/Lenna-150x150-bilinear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/nnx/0.1-1/doc/image/Lenna-150x150-bilinear.png -------------------------------------------------------------------------------- /luarocks/optim/1.0.5-0/doc/image/parameterflattening.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/optim/1.0.5-0/doc/image/parameterflattening.png -------------------------------------------------------------------------------- /bin/cmake.cmd: -------------------------------------------------------------------------------- 1 | if %1 == -E ( 2 | cmake.exe %* 3 | ) else ( 4 | cmake.exe -G "NMake Makefiles" -DCMAKE_LINK_FLAGS:implib=libluajit.lib -DLUALIB=libluajit %* 5 | ) 6 | -------------------------------------------------------------------------------- /luarocks/optim/1.0.5-0/doc/image/parameterflattening.svg.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hiili/WindowsTorch/HEAD/luarocks/optim/1.0.5-0/doc/image/parameterflattening.svg.png -------------------------------------------------------------------------------- /lua/nn/Clamp.lua: -------------------------------------------------------------------------------- 1 | local Clamp, Parent = torch.class('nn.Clamp', 'nn.HardTanh') 2 | 3 | function Clamp:__init(min_value, max_value) 4 | Parent.__init(self, min_value, max_value) 5 | end 6 | -------------------------------------------------------------------------------- /lua/nn/VolumetricBatchNormalization.lua: -------------------------------------------------------------------------------- 1 | local BN, parent = torch.class('nn.VolumetricBatchNormalization', 'nn.BatchNormalization') 2 | 3 | -- expected dimension of input 4 | BN.nDim = 5 5 | -------------------------------------------------------------------------------- /include/lua.hpp: -------------------------------------------------------------------------------- 1 | // C++ wrapper for LuaJIT header files. 
2 | 3 | extern "C" { 4 | #include "lua.h" 5 | #include "lauxlib.h" 6 | #include "lualib.h" 7 | #include "luajit.h" 8 | } 9 | 10 | -------------------------------------------------------------------------------- /lua/cwrap/init.lua: -------------------------------------------------------------------------------- 1 | local cwrap = {} 2 | 3 | cwrap.types = require 'cwrap.types' 4 | cwrap.CInterface = require 'cwrap.cinterface' 5 | cwrap.CInterface.argtypes = cwrap.types 6 | 7 | return cwrap 8 | -------------------------------------------------------------------------------- /luarocks/sundown/scm-1/bin/mdcat: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env lua 2 | 3 | local ascii = require 'sundown.ascii' 4 | assert(#arg == 1, 'usage: mdcat ') 5 | print(ascii.render(io.open(arg[1]):read('*all'))) 6 | -------------------------------------------------------------------------------- /setpaths.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | set PATH=C:\torch\bin;%PATH% 4 | 5 | set LUA_CPATH=C:/torch/bin/?.dll;; 6 | set LUA_DEV=C:/torch 7 | set LUA_PATH=C:/torch/lua/?;C:/torch/lua/?.lua;C:/torch/lua/?/init.lua;; 8 | -------------------------------------------------------------------------------- /include/TH/THBlas.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_BLAS_INC 2 | #define TH_BLAS_INC 3 | 4 | #include "THGeneral.h" 5 | 6 | #define THBlas_(NAME) TH_CONCAT_4(TH,Real,Blas_,NAME) 7 | 8 | #include "generic/THBlas.h" 9 | #include "THGenerateAllTypes.h" 10 | 11 | #endif 12 | -------------------------------------------------------------------------------- /lua/nn/Exp.lua: -------------------------------------------------------------------------------- 1 | local Exp = torch.class('nn.Exp', 'nn.Module') 2 | 3 | function Exp:updateOutput(input) 4 | return self.output:exp(input) 5 | end 6 | 7 | function 
Exp:updateGradInput(input, gradOutput) 8 | return self.gradInput:cmul(self.output, gradOutput) 9 | end 10 | -------------------------------------------------------------------------------- /lua/dok/init.lua: -------------------------------------------------------------------------------- 1 | dok = {} 2 | 3 | require 'dok.inline' 4 | 5 | local ok,sd = pcall(require, 'sundown') 6 | if ok then 7 | dok.markdown2html = sd.render 8 | else 9 | dok.markdown2html = function() return '

Error: Sundown could not be loaded

' end 10 | end 11 | -------------------------------------------------------------------------------- /lapack/lib/pkgconfig/blas.pc: -------------------------------------------------------------------------------- 1 | libdir=C:/torch/lapack/lib 2 | includedir=C:/torch/lapack/include 3 | 4 | Name: BLAS 5 | Description: FORTRAN reference implementation of BLAS Basic Linear Algebra Subprograms 6 | Version: 3.7.1 7 | URL: http://www.netlib.org/blas/ 8 | Libs: -L${libdir} -lblas 9 | -------------------------------------------------------------------------------- /lua/sundown/init.lua: -------------------------------------------------------------------------------- 1 | local sundown = require 'sundown.env' 2 | local html = require 'sundown.html' 3 | local ascii = require 'sundown.ascii' 4 | 5 | sundown.render = html.render 6 | sundown.renderHTML = html.render 7 | sundown.renderASCII = ascii.render 8 | 9 | return sundown 10 | -------------------------------------------------------------------------------- /luarocks/dok/scm-1/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | ["dok-scm-1.rockspec"] = "c34026a282aeaacc0772fb2876749403", 3 | lua = { 4 | dok = { 5 | ["init.lua"] = "5ad6cb8b15a18de694c83026f8ca481f", 6 | ["inline.lua"] = "1163f7c75696a5ceae589d3426e24ff4" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /lapack/lib/pkgconfig/lapack.pc: -------------------------------------------------------------------------------- 1 | libdir=C:/torch/lapack/lib 2 | includedir=C:/torch/lapack/include 3 | 4 | Name: LAPACK 5 | Description: FORTRAN reference implementation of LAPACK Linear Algebra PACKage 6 | Version: 3.7.1 7 | URL: http://www.netlib.org/lapack/ 8 | Libs: -L${libdir} -llapack 9 | Requires.private: blas 10 | -------------------------------------------------------------------------------- /lua/optim/README.md: 
-------------------------------------------------------------------------------- 1 | 2 | # Optimization package 3 | 4 | This package contains several optimization routines and a logger for [Torch](https://github.com/torch/torch7/blob/master/README.md): 5 | 6 | * [Overview](doc/intro.md); 7 | * [Optimization algorithms](doc/algos.md); 8 | * [Logger](doc/logger.md). 9 | -------------------------------------------------------------------------------- /share/cmake/torch/THConfig.cmake: -------------------------------------------------------------------------------- 1 | # Find the TH includes and library 2 | # 3 | # TH_INCLUDE_DIR -- where to find the includes 4 | # TH_LIBRARIES -- list of libraries to link against 5 | # TH_FOUND -- set to 1 if found 6 | 7 | SET(TH_FOUND 1) 8 | SET(TH_INCLUDE_DIR "C:/torch/include/TH") 9 | SET(TH_LIBRARIES "C:/torch/lib/TH.dll") 10 | -------------------------------------------------------------------------------- /share/cmake/torch/luaTConfig.cmake: -------------------------------------------------------------------------------- 1 | # Find the luaT includes and library 2 | # 3 | # LUAT_INCLUDE_DIR -- where to find the includes 4 | # LUAT_LIBRARIES -- list of libraries to link against 5 | # LUAT_FOUND -- set to 1 if found 6 | 7 | SET(LUAT_FOUND 1) 8 | SET(LUAT_INCLUDE_DIR "C:/torch/include") 9 | SET(LUAT_LIBRARIES "C:/torch/lib/luaT.dll") 10 | -------------------------------------------------------------------------------- /lua/nn/Mean.lua: -------------------------------------------------------------------------------- 1 | local Mean, parent = torch.class('nn.Mean', 'nn.Sum') 2 | 3 | --[[ 4 | 5 | This file is still here because of backward compatibility. 
6 | 7 | Please use instead "nn.Sum(dimension, nInputDims, sizeAverage)" 8 | 9 | ]]-- 10 | 11 | 12 | function Mean:__init(dimension, nInputDims) 13 | parent.__init(self, dimension, nInputDims, true) 14 | end 15 | -------------------------------------------------------------------------------- /lua/nnx/Minus.lua: -------------------------------------------------------------------------------- 1 | local Minus, parent = torch.class('nn.Minus', 'nn.Module') 2 | 3 | function Minus:updateOutput(input) 4 | self.output:resizeAs(input):copy(input):mul(-1) 5 | return self.output 6 | end 7 | 8 | function Minus:updateGradInput(input, gradOutput) 9 | self.gradInput:resizeAs(input):copy(gradOutput):mul(-1) 10 | return self.gradInput 11 | end 12 | -------------------------------------------------------------------------------- /luarocks/inspect/3.1.0-1/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | doc = { 3 | ["CHANGELOG.md"] = "045171ea5b0dc992583f3a91a2efd1b0", 4 | ["README.md"] = "0f9d098e97ea44160ffd92d6fb5884a5" 5 | }, 6 | ["inspect-3.1.0-1.rockspec"] = "50c0f238a459ec3ef9d880faf4613689", 7 | lua = { 8 | ["inspect.lua"] = "e8cc5e77d2cd86124854b51de28ed149" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /lua/luarocks/fetch/hg_ssh.lua: -------------------------------------------------------------------------------- 1 | 2 | --- Fetch back-end for retrieving sources from hg repositories 3 | -- that use ssh:// transport. 
For example, for fetching a repository 4 | -- that requires the following command line: 5 | -- `hg clone ssh://example.com/foo` 6 | -- you can use this in the rockspec: 7 | -- source = { url = "hg+ssh://example.com/foo" } 8 | return require "luarocks.fetch.hg_http" 9 | -------------------------------------------------------------------------------- /lua/luarocks/fetch/hg_https.lua: -------------------------------------------------------------------------------- 1 | 2 | --- Fetch back-end for retrieving sources from hg repositories 3 | -- that use https:// transport. For example, for fetching a repository 4 | -- that requires the following command line: 5 | -- `hg clone https://example.com/foo` 6 | -- you can use this in the rockspec: 7 | -- source = { url = "hg+https://example.com/foo" } 8 | return require "luarocks.fetch.hg_http" 9 | -------------------------------------------------------------------------------- /lua/luarocks/fetch/git_https.lua: -------------------------------------------------------------------------------- 1 | --- Fetch back-end for retrieving sources from Git repositories 2 | -- that use https:// transport. 
For example, for fetching a repository 3 | -- that requires the following command line: 4 | -- `git clone https://example.com/foo.git` 5 | -- you can use this in the rockspec: 6 | -- source = { url = "git+https://example.com/foo.git" } 7 | return require "luarocks.fetch.git_http" 8 | -------------------------------------------------------------------------------- /include/TH/THLogAdd.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_LOG_ADD_INC 2 | #define TH_LOG_ADD_INC 3 | 4 | #include "THGeneral.h" 5 | 6 | TH_API const double THLog2Pi; 7 | TH_API const double THLogZero; 8 | TH_API const double THLogOne; 9 | 10 | TH_API double THLogAdd(double log_a, double log_b); 11 | TH_API double THLogSub(double log_a, double log_b); 12 | TH_API double THExpMinusApprox(const double x); 13 | 14 | #endif 15 | -------------------------------------------------------------------------------- /luarocks/luaffi/scm-1/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | doc = { 3 | ["CONTRIBUTING.md"] = "a87615e43a40144fc712ad8b3979cc71", 4 | LICENSE = "04c1a943abc036ee6eb4ce74ab815565", 5 | ["README.md"] = "112a69f1c92b1535cdbb6c0b6b9f337a" 6 | }, 7 | lib = { 8 | ["ffi.dll"] = "86a79ddb4934d5bb2d0e581ec5bf2a21" 9 | }, 10 | ["luaffi-scm-1.rockspec"] = "3a8431b1a69a46cf8b44f73a950a8de0" 11 | } 12 | -------------------------------------------------------------------------------- /luarocks/strict/0-0/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | doc = { 3 | ["CONTRIBUTING.md"] = "3eaa395a12f4bf5861913ac7d88fa28c", 4 | LICENSE = "9ebede79bd70cf9c467fa4fb49e5b1e6", 5 | ["README.md"] = "bbc8111968fcaaaf8cb2b6e01e273b39" 6 | }, 7 | lua = { 8 | ["strict.lua"] = "e4864b1f7d2df2560a281f7c12ac7030" 9 | }, 10 | ["strict-0-0.rockspec"] = "ebea27a10e08d302ad3c0ea4e6df3b8d" 11 | } 12 | 
-------------------------------------------------------------------------------- /lua/nn/ErrorMessages.lua: -------------------------------------------------------------------------------- 1 | 2 | local mt = { 3 | __index = function(table, key) 4 | error("nn."..key.." is only supported for Float or Double Tensors.") 5 | end 6 | } 7 | 8 | local tensors = { 9 | torch.ByteTensor, 10 | torch.CharTensor, 11 | torch.ShortTensor, 12 | torch.IntTensor, 13 | torch.LongTensor, 14 | } 15 | 16 | for _, t in ipairs(tensors) do 17 | t.nn = {} 18 | setmetatable(t.nn, mt) 19 | end 20 | -------------------------------------------------------------------------------- /include/TH/THMemoryFile.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_MEMORY_FILE_INC 2 | #define TH_MEMORY_FILE_INC 3 | 4 | #include "THFile.h" 5 | #include "THStorage.h" 6 | 7 | TH_API THFile *THMemoryFile_newWithStorage(THCharStorage *storage, const char *mode); 8 | TH_API THFile *THMemoryFile_new(const char *mode); 9 | 10 | TH_API THCharStorage *THMemoryFile_storage(THFile *self); 11 | TH_API void THMemoryFile_longSize(THFile *self, int size); 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /lua/pl/init.lua: -------------------------------------------------------------------------------- 1 | -------------- 2 | -- Entry point for loading all PL libraries only on demand, into the global space. 3 | -- Requiring 'pl' means that whenever a module is implicitly accesssed 4 | -- (e.g. `utils.split`) 5 | -- then that module is dynamically loaded. The submodules are all brought into 6 | -- the global space. 
7 | --Updated to use @{pl.import_into} 8 | -- @module pl 9 | require'pl.import_into'(_G) 10 | 11 | if rawget(_G,'PENLIGHT_STRICT') then require 'pl.strict' end 12 | -------------------------------------------------------------------------------- /lua/sys/fpath.lua: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 2 | -- always returns the path of the file running 3 | -------------------------------------------------------------------------------- 4 | local function fpath() 5 | local fpath = _G.debug.getinfo(2).source:gsub('@','') 6 | if fpath:find('/') ~= 1 then fpath = paths.concat(paths.cwd(),fpath) end 7 | return paths.dirname(fpath),paths.basename(fpath) 8 | end 9 | 10 | return fpath 11 | -------------------------------------------------------------------------------- /luarocks/strict/0-0/strict-0-0.rockspec: -------------------------------------------------------------------------------- 1 | package = 'strict' 2 | version = '0-0' 3 | 4 | source = { 5 | url = 'git://github.com/deepmind/strict.git', 6 | branch = 'master' 7 | } 8 | 9 | description = { 10 | summary = 'Detect reading of undeclared variables and creating of global variables', 11 | homepage = 'http://github.com/deepmind/strict.git' 12 | } 13 | 14 | dependencies = {} 15 | 16 | build = { 17 | type = "builtin", 18 | modules = { 19 | ['strict'] = 'strict.lua' 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /include/TH/THMath.h: -------------------------------------------------------------------------------- 1 | #ifndef _THMATH_H 2 | #define _THMATH_H 3 | 4 | static inline double TH_sigmoid(double value) { 5 | return 1.0 / (1.0 + exp(-value)); 6 | } 7 | 8 | static inline double TH_frac(double x) { 9 | return x - trunc(x); 10 | } 11 | 12 | static inline double TH_rsqrt(double x) { 13 | return 1.0 / sqrt(x); 14 | } 15 | 16 | static inline 
double TH_lerp(double a, double b, double weight) { 17 | return a + weight * (b-a); 18 | } 19 | 20 | #endif // _THMATH_H 21 | 22 | -------------------------------------------------------------------------------- /lua/nn/Tanh.lua: -------------------------------------------------------------------------------- 1 | local Tanh = torch.class('nn.Tanh', 'nn.Module') 2 | 3 | function Tanh:updateOutput(input) 4 | input.THNN.Tanh_updateOutput( 5 | input:cdata(), 6 | self.output:cdata() 7 | ) 8 | return self.output 9 | end 10 | 11 | function Tanh:updateGradInput(input, gradOutput) 12 | input.THNN.Tanh_updateGradInput( 13 | input:cdata(), 14 | gradOutput:cdata(), 15 | self.gradInput:cdata(), 16 | self.output:cdata() 17 | ) 18 | return self.gradInput 19 | end 20 | -------------------------------------------------------------------------------- /include/TH/TH.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_INC 2 | #define TH_INC 3 | 4 | #include "THGeneral.h" 5 | 6 | #include "THBlas.h" 7 | #ifdef USE_LAPACK 8 | #include "THLapack.h" 9 | #endif 10 | 11 | #include "THAtomic.h" 12 | #include "THVector.h" 13 | #include "THLogAdd.h" 14 | #include "THRandom.h" 15 | #include "THStorage.h" 16 | #include "THTensor.h" 17 | #include "THTensorApply.h" 18 | #include "THTensorDimApply.h" 19 | 20 | #include "THFile.h" 21 | #include "THDiskFile.h" 22 | #include "THMemoryFile.h" 23 | 24 | #endif 25 | -------------------------------------------------------------------------------- /lua/nnx/SparseCriterion.lua: -------------------------------------------------------------------------------- 1 | local SparseCriterion, parent = torch.class('nn.SparseCriterion', 'nn.Criterion') 2 | 3 | function SparseCriterion:__init() 4 | parent.__init(self) 5 | self.sizeAverage = true 6 | end 7 | 8 | function SparseCriterion:updateOutput(input) 9 | input.nn.SparseCriterion_updateOutput(self, input) 10 | return self.output 11 | end 12 | 13 | function 
SparseCriterion:updateGradInput(input) 14 | input.nn.SparseCriterion_updateGradInput(self, input) 15 | return self.gradInput 16 | end 17 | -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plothistogram.md: -------------------------------------------------------------------------------- 1 | 2 | ## Histograms ## 3 | 4 | Given a tensor, the distribution of values can be plotted using 5 | `gnuplot.hist` function. 6 | 7 | 8 | ### gnuplot.hist(x, [nbins, min, max]) ### 9 | 10 | Plot the histogram of values in `N-D` tensor `x`, optionally using `nbins` 11 | number of bins and only using values between `min` and `max`. 12 | 13 | ```lua 14 | gnuplot.hist(torch.randn(100000),100) 15 | ``` 16 | ![](plot_hist.png) 17 | -------------------------------------------------------------------------------- /luarocks/xlua/1.1-0/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | doc = { 3 | LICENSE = "84ffc07cc327f00d5bfa198ff2fdc7be", 4 | ["README.md"] = "88fc5107373e00bcd6dc56d48cc57ff4" 5 | }, 6 | lua = { 7 | xlua = { 8 | ["OptionParser.lua"] = "393c85890219f44c3f1462f028c82cee", 9 | ["Profiler.lua"] = "dd5e542766336107ccdb27b44b208ce7", 10 | ["init.lua"] = "e7055102b770f315e49d29b091963869" 11 | } 12 | }, 13 | ["xlua-1.1-0.rockspec"] = "19513d7292162c01a19d6955fbb67dd9" 14 | } 15 | -------------------------------------------------------------------------------- /lua/nn/Sigmoid.lua: -------------------------------------------------------------------------------- 1 | local Sigmoid = torch.class('nn.Sigmoid', 'nn.Module') 2 | 3 | function Sigmoid:updateOutput(input) 4 | input.THNN.Sigmoid_updateOutput( 5 | input:cdata(), 6 | self.output:cdata() 7 | ) 8 | return self.output 9 | end 10 | 11 | function Sigmoid:updateGradInput(input, gradOutput) 12 | input.THNN.Sigmoid_updateGradInput( 13 | input:cdata(), 14 | gradOutput:cdata(), 15 | self.gradInput:cdata(), 16 
| self.output:cdata() 17 | ) 18 | return self.gradInput 19 | end 20 | -------------------------------------------------------------------------------- /lua/nn/SoftMax.lua: -------------------------------------------------------------------------------- 1 | local SoftMax, _ = torch.class('nn.SoftMax', 'nn.Module') 2 | 3 | function SoftMax:updateOutput(input) 4 | input.THNN.SoftMax_updateOutput( 5 | input:cdata(), 6 | self.output:cdata() 7 | ) 8 | return self.output 9 | end 10 | 11 | function SoftMax:updateGradInput(input, gradOutput) 12 | input.THNN.SoftMax_updateGradInput( 13 | input:cdata(), 14 | gradOutput:cdata(), 15 | self.gradInput:cdata(), 16 | self.output:cdata() 17 | ) 18 | return self.gradInput 19 | end 20 | -------------------------------------------------------------------------------- /lua/nnx/DistMarginCriterion.lua: -------------------------------------------------------------------------------- 1 | local DistMarginCriterion, parent = torch.class('nn.DistMarginCriterion', 'nn.Criterion') 2 | 3 | function DistMarginCriterion:__init() 4 | parent.__init(self) 5 | self.sizeAverage = true 6 | end 7 | 8 | function DistMarginCriterion:updateOutput(input, target) 9 | return input.nn.DistMarginCriterion_updateOutput(self, input, target) 10 | end 11 | 12 | function DistMarginCriterion:updateGradInput(input, target) 13 | return input.nn.DistMarginCriterion_updateGradInput(self, input, target) 14 | end 15 | -------------------------------------------------------------------------------- /lua/image/assets/P2.pgm: -------------------------------------------------------------------------------- 1 | P2 2 | # feep.ascii.pgm 3 | 24 7 4 | 15 5 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 | 0 3 3 3 3 0 0 7 7 7 7 0 0 11 11 11 11 0 0 15 15 15 15 0 7 | 0 3 0 0 0 0 0 7 0 0 0 0 0 11 0 0 0 0 0 15 0 0 15 0 8 | 0 3 3 3 0 0 0 7 7 7 0 0 0 11 11 11 0 0 0 15 15 15 15 0 9 | 0 3 0 0 0 0 0 7 0 0 0 0 0 11 0 0 0 0 0 15 0 0 0 0 10 | 0 3 0 0 0 0 0 7 7 7 7 0 0 11 11 11 11 0 0 15 0 0 
0 0 11 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -------------------------------------------------------------------------------- /lua/nn/Log.lua: -------------------------------------------------------------------------------- 1 | local Log, parent = torch.class('nn.Log', 'nn.Module') 2 | 3 | function Log:__init() 4 | parent.__init(self) 5 | end 6 | 7 | function Log:updateOutput(input) 8 | self.output:resizeAs(input) 9 | self.output:copy(input) 10 | self.output:log() 11 | return self.output 12 | end 13 | 14 | function Log:updateGradInput(input, gradOutput) 15 | self.gradInput:resizeAs(input) 16 | self.gradInput:fill(1) 17 | self.gradInput:cdiv(input) 18 | self.gradInput:cmul(gradOutput) 19 | return self.gradInput 20 | end 21 | -------------------------------------------------------------------------------- /lua/nn/Abs.lua: -------------------------------------------------------------------------------- 1 | local Abs, parent = torch.class('nn.Abs', 'nn.Module') 2 | 3 | function Abs:__init() 4 | parent.__init(self) 5 | end 6 | 7 | function Abs:updateOutput(input) 8 | input.THNN.Abs_updateOutput( 9 | input:cdata(), 10 | self.output:cdata() 11 | ) 12 | return self.output 13 | end 14 | 15 | function Abs:updateGradInput(input, gradOutput) 16 | input.THNN.Abs_updateGradInput( 17 | input:cdata(), 18 | gradOutput:cdata(), 19 | self.gradInput:cdata() 20 | ) 21 | return self.gradInput 22 | end 23 | -------------------------------------------------------------------------------- /lua/nn/LogSoftMax.lua: -------------------------------------------------------------------------------- 1 | local LogSoftMax = torch.class('nn.LogSoftMax', 'nn.Module') 2 | 3 | function LogSoftMax:updateOutput(input) 4 | input.THNN.LogSoftMax_updateOutput( 5 | input:cdata(), 6 | self.output:cdata() 7 | ) 8 | return self.output 9 | end 10 | 11 | function LogSoftMax:updateGradInput(input, gradOutput) 12 | input.THNN.LogSoftMax_updateGradInput( 13 | input:cdata(), 14 | gradOutput:cdata(), 15 | 
self.gradInput:cdata(), 16 | self.output:cdata() 17 | ) 18 | return self.gradInput 19 | end 20 | -------------------------------------------------------------------------------- /lua/nnx/Tic.lua: -------------------------------------------------------------------------------- 1 | require 'nn' 2 | local Tic, parent = torch.class('nn.Tic', 'nn.Module') 3 | 4 | function Tic:__init(name) 5 | parent.__init(self) 6 | self.name = name or 'default' 7 | tic_modules = tic_modules or {} 8 | tic_modules[self.name] = torch.Timer() 9 | end 10 | 11 | function Tic:updateOutput(input) 12 | tic_modules[self.name]:reset() 13 | self.output = input 14 | return self.output 15 | end 16 | 17 | function Tic:updateGradInput(input, gradOutput) 18 | self.gradInput = gradOutput 19 | return self.gradInput 20 | end 21 | -------------------------------------------------------------------------------- /lua/nn/SpatialSoftMax.lua: -------------------------------------------------------------------------------- 1 | local SpatialSoftMax, _ = torch.class('nn.SpatialSoftMax', 'nn.Module') 2 | 3 | function SpatialSoftMax:updateOutput(input) 4 | input.THNN.SoftMax_updateOutput( 5 | input:cdata(), 6 | self.output:cdata() 7 | ) 8 | return self.output 9 | end 10 | 11 | function SpatialSoftMax:updateGradInput(input, gradOutput) 12 | input.THNN.SoftMax_updateGradInput( 13 | input:cdata(), 14 | gradOutput:cdata(), 15 | self.gradInput:cdata(), 16 | self.output:cdata() 17 | ) 18 | return self.gradInput 19 | end 20 | -------------------------------------------------------------------------------- /bin/mdcat.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | "C:\torch\bin\luajit" -e "package.path=\"C:\\Users\\pwagner.increment\\AppData\\Roaming/.luarocks/share/lua/5.1/?.lua;C:\\Users\\pwagner.increment\\AppData\\Roaming/.luarocks/share/lua/5.1/?/init.lua;C:/torch/lua/?.lua;C:/torch/lua/?/init.lua;C:/torch/\\lua\\?.lua;\"..package.path; 
package.cpath=\"C:\\Users\\pwagner.increment\\AppData\\Roaming/.luarocks/lib/lua/5.1/?.dll;C:/torch/bin/?.dll;\"..package.cpath" -e "local k,l,_=pcall(require,\"luarocks.loader\") _=k and l.add_context(\"sundown\",\"scm-1\")" "C:\torch\luarocks\sundown\scm-1\bin\mdcat" %* 3 | exit /b %ERRORLEVEL% 4 | -------------------------------------------------------------------------------- /lua/nn/Square.lua: -------------------------------------------------------------------------------- 1 | local Square, parent = torch.class('nn.Square', 'nn.Module') 2 | 3 | function Square:__init(args) 4 | parent.__init(self) 5 | end 6 | 7 | function Square:updateOutput(input) 8 | input.THNN.Square_updateOutput( 9 | input:cdata(), 10 | self.output:cdata() 11 | ) 12 | return self.output 13 | end 14 | 15 | function Square:updateGradInput(input, gradOutput) 16 | input.THNN.Square_updateGradInput( 17 | input:cdata(), 18 | gradOutput:cdata(), 19 | self.gradInput:cdata() 20 | ) 21 | return self.gradInput 22 | end 23 | -------------------------------------------------------------------------------- /lua/nnx/Toc.lua: -------------------------------------------------------------------------------- 1 | require 'nn' 2 | local Toc, parent = torch.class('nn.Toc', 'nn.Module') 3 | 4 | function Toc:__init(name, comment) 5 | parent.__init(self) 6 | self.name = name or 'default' 7 | self.comment = comment or '' 8 | end 9 | 10 | function Toc:updateOutput(input) 11 | print("Toc '"..self.name.."' ("..self.comment..") : "..tic_modules[self.name]:time()['real']) 12 | self.output = input 13 | return self.output 14 | end 15 | 16 | function Toc:updateGradInput(input, gradOutput) 17 | self.gradInput = gradOutput 18 | return self.gradInput 19 | end 20 | -------------------------------------------------------------------------------- /lua/nn/SpatialLogSoftMax.lua: -------------------------------------------------------------------------------- 1 | local SpatialLogSoftMax = torch.class('nn.SpatialLogSoftMax', 
'nn.Module') 2 | 3 | function SpatialLogSoftMax:updateOutput(input) 4 | input.THNN.LogSoftMax_updateOutput( 5 | input:cdata(), 6 | self.output:cdata() 7 | ) 8 | return self.output 9 | end 10 | 11 | function SpatialLogSoftMax:updateGradInput(input, gradOutput) 12 | input.THNN.LogSoftMax_updateGradInput( 13 | input:cdata(), 14 | gradOutput:cdata(), 15 | self.gradInput:cdata(), 16 | self.output:cdata() 17 | ) 18 | return self.gradInput 19 | end 20 | -------------------------------------------------------------------------------- /lua/nn/CriterionTable.lua: -------------------------------------------------------------------------------- 1 | local CriterionTable, parent = torch.class('nn.CriterionTable', 'nn.Module') 2 | 3 | function CriterionTable:__init(criterion) 4 | parent.__init(self) 5 | self.criterion = criterion 6 | self.gradInput = {criterion.gradInput} 7 | end 8 | 9 | function CriterionTable:updateOutput(input) 10 | self.output = self.criterion:updateOutput(table.unpack(input)) 11 | return self.output 12 | end 13 | 14 | function CriterionTable:updateGradInput(input, gradOutput) 15 | self.criterion:updateGradInput(table.unpack(input)) 16 | return self.gradInput 17 | end 18 | -------------------------------------------------------------------------------- /lua/nn/Contiguous.lua: -------------------------------------------------------------------------------- 1 | local Contiguous, parent = torch.class('nn.Contiguous', 'nn.Module') 2 | 3 | function Contiguous:updateOutput(input) 4 | if not input:isContiguous() then 5 | self.output:resizeAs(input):copy(input) 6 | else 7 | self.output:set(input) 8 | end 9 | return self.output 10 | end 11 | 12 | function Contiguous:updateGradInput(input, gradOutput) 13 | if not gradOutput:isContiguous() then 14 | self.gradInput:resizeAs(gradOutput):copy(gradOutput) 15 | else 16 | self.gradInput:set(gradOutput) 17 | end 18 | return self.gradInput 19 | end 20 | 
-------------------------------------------------------------------------------- /luarocks/sys/1.1-0/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | doc = { 3 | LICENSE = "84ffc07cc327f00d5bfa198ff2fdc7be", 4 | ["README.md"] = "af4a68a8beff45e32ce4cac2fdef7d50" 5 | }, 6 | lib = { 7 | ["libsys.dll"] = "4a3070ac2e535ea6efda7bda498768fa" 8 | }, 9 | lua = { 10 | sys = { 11 | ["colors.lua"] = "06c8f512e3a4b4befbecd60c2e4a172c", 12 | ["fpath.lua"] = "64e14e0ff3eb0dd34cec0172a1f5c6ac", 13 | ["init.lua"] = "619249306dcdc022e7951f95aba88cb4" 14 | } 15 | }, 16 | ["sys-1.1-0.rockspec"] = "9788cf51f56e5245abe5e83195bf5329" 17 | } 18 | -------------------------------------------------------------------------------- /include/TH/THStorage.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_STORAGE_INC 2 | #define TH_STORAGE_INC 3 | 4 | #include "THGeneral.h" 5 | #include "THAllocator.h" 6 | 7 | #define THStorage TH_CONCAT_3(TH,Real,Storage) 8 | #define THStorage_(NAME) TH_CONCAT_4(TH,Real,Storage_,NAME) 9 | 10 | /* fast access methods */ 11 | #define TH_STORAGE_GET(storage, idx) ((storage)->data[(idx)]) 12 | #define TH_STORAGE_SET(storage, idx, value) ((storage)->data[(idx)] = (value)) 13 | 14 | #include "generic/THStorage.h" 15 | #include "THGenerateAllTypes.h" 16 | 17 | #include "generic/THStorageCopy.h" 18 | #include "THGenerateAllTypes.h" 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- /lapack/lib/cmake/lapack-3.7.1/lapack-config.cmake: -------------------------------------------------------------------------------- 1 | # Compute locations from /lib/cmake/lapack-/.cmake 2 | get_filename_component(_LAPACK_SELF_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) 3 | 4 | # Load lapack targets from the install tree if necessary. 
5 | set(_LAPACK_TARGET "blas") 6 | if(_LAPACK_TARGET AND NOT TARGET "${_LAPACK_TARGET}") 7 | include("${_LAPACK_SELF_DIR}/lapack-targets.cmake") 8 | endif() 9 | unset(_LAPACK_TARGET) 10 | 11 | # Report the blas and lapack raw or imported libraries. 12 | set(LAPACK_blas_LIBRARIES "blas") 13 | set(LAPACK_lapack_LIBRARIES "lapack") 14 | 15 | unset(_LAPACK_SELF_DIR) 16 | -------------------------------------------------------------------------------- /lua/nn/Power.lua: -------------------------------------------------------------------------------- 1 | local Power, parent = torch.class('nn.Power','nn.Module') 2 | 3 | function Power:__init(p) 4 | parent.__init(self) 5 | self.pow = p 6 | if not p then 7 | error('nn.Power(power)') 8 | end 9 | end 10 | 11 | function Power:updateOutput(input) 12 | self.output:resizeAs(input):copy(input) 13 | self.output:pow(self.pow) 14 | return self.output 15 | end 16 | 17 | function Power:updateGradInput(input, gradOutput) 18 | self.gradInput:resizeAs(input):copy(input) 19 | self.gradInput:pow(self.pow - 1) 20 | self.gradInput:cmul(gradOutput):mul(self.pow) 21 | return self.gradInput 22 | end 23 | -------------------------------------------------------------------------------- /luarocks/penlight/1.5.4-1/doc/config.ld: -------------------------------------------------------------------------------- 1 | project = 'Penlight' 2 | description = 'Penlight Lua Libraries 1.5.4' 3 | full_description = 'The documentation is available @{01-introduction.md|here}.' 
4 | title = 'Penlight Documentation' 5 | dir = 'api' 6 | style = '!fixed' 7 | use_markdown_titles = true 8 | topics = 'manual' 9 | examples = {'../examples','../tests/test-data.lua'} 10 | package = 'pl' 11 | format = 'discount' 12 | sort_modules=true 13 | file = '../lua/pl' 14 | kind_names={topic='Manual',module='Libraries'} 15 | tparam_alias('array','array') 16 | tparam_alias('array2d','array') 17 | alias('ret',{'return',modifiers={type="$1"}}) 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /lua/sundown/env.lua: -------------------------------------------------------------------------------- 1 | local ffi = require 'ffi' 2 | 3 | local sundown = {} 4 | 5 | -- Compat function from https://github.com/stevedonovan/Penlight/blob/master/lua/pl/compat.lua 6 | if not package.searchpath then 7 | local sep = package.config:sub(1,1) 8 | function package.searchpath (mod,path) 9 | mod = mod:gsub('%.',sep) 10 | for m in path:gmatch('[^;]+') do 11 | local nm = m:gsub('?',mod) 12 | local f = io.open(nm,'r') 13 | if f then f:close(); return nm end 14 | end 15 | end 16 | end 17 | 18 | sundown.C = ffi.load(package.searchpath('libsundown', package.cpath)) 19 | 20 | return sundown 21 | -------------------------------------------------------------------------------- /lua/nn/TanhShrink.lua: -------------------------------------------------------------------------------- 1 | local TanhShrink, parent = torch.class('nn.TanhShrink','nn.Module') 2 | 3 | function TanhShrink:__init() 4 | parent.__init(self) 5 | self.tanh = nn.Tanh() 6 | end 7 | 8 | function TanhShrink:updateOutput(input) 9 | local th = self.tanh:updateOutput(input) 10 | self.output:resizeAs(input):copy(input) 11 | self.output:add(-1,th) 12 | return self.output 13 | end 14 | 15 | function TanhShrink:updateGradInput(input, gradOutput) 16 | local dth = self.tanh:updateGradInput(input,gradOutput) 17 | self.gradInput:resizeAs(input):copy(gradOutput) 18 | 
self.gradInput:add(-1,dth) 19 | return self.gradInput 20 | end 21 | -------------------------------------------------------------------------------- /lua/nn/Sqrt.lua: -------------------------------------------------------------------------------- 1 | local Sqrt, parent = torch.class('nn.Sqrt','nn.Module') 2 | 3 | function Sqrt:__init(b) 4 | parent.__init(self) 5 | self.eps = b or 0 6 | end 7 | 8 | function Sqrt:updateOutput(input) 9 | self.eps = self.eps or 0 10 | input.THNN.Sqrt_updateOutput( 11 | input:cdata(), 12 | self.output:cdata(), 13 | self.eps 14 | ) 15 | return self.output 16 | end 17 | 18 | function Sqrt:updateGradInput(input, gradOutput) 19 | input.THNN.Sqrt_updateGradInput( 20 | input:cdata(), 21 | gradOutput:cdata(), 22 | self.gradInput:cdata(), 23 | self.output:cdata() 24 | ) 25 | return self.gradInput 26 | end 27 | -------------------------------------------------------------------------------- /luarocks/dok/scm-1/dok-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "dok" 2 | version = "scm-1" 3 | 4 | source = { 5 | url = "git://github.com/torch/dok.git", 6 | } 7 | 8 | description = { 9 | summary = "Support for the old torch7 dok system", 10 | detailed = [[ 11 | ]], 12 | homepage = "https://github.com/torch/dok", 13 | license = "BSD" 14 | } 15 | 16 | dependencies = { 17 | "lua >= 5.1", 18 | "sundown >= 1.0" 19 | } 20 | 21 | build = { 22 | type = "builtin", 23 | modules = { 24 | ["dok.init"] = "init.lua", 25 | ["dok.inline"] = "inline.lua", 26 | }, 27 | install = { 28 | lua = { 29 | -- ["dok.README"] = "README.md" 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /lua/luarocks/site_config.lua: -------------------------------------------------------------------------------- 1 | local site_config = {} 2 | 3 | site_config.LUAROCKS_PREFIX=[[C:/torch/]] 4 | site_config.LUA_INCDIR=[[C:/torch/include]] 5 | 
site_config.LUA_LIBDIR=[[C:/torch/lib]] 6 | site_config.LUA_BINDIR=[[C:/torch/bin]] 7 | site_config.LUA_INTERPRETER = [[luajit]] 8 | site_config.LUAROCKS_SYSCONFDIR=[[C:/torch/luarocks]] 9 | site_config.LUAROCKS_ROCKS_TREE=[[C:/torch/]] 10 | site_config.LUAROCKS_ROCKS_SUBDIR=[[luarocks]] 11 | site_config.LUA_DIR_SET = true 12 | site_config.LUAROCKS_UNAME_S=[[Windows]] 13 | site_config.LUAROCKS_UNAME_M=[[x64]] 14 | site_config.LUAROCKS_DOWNLOADER=[[wget]] 15 | site_config.LUAROCKS_MD5CHECKER=[[md5sum]] 16 | 17 | return site_config 18 | -------------------------------------------------------------------------------- /luarocks/paths/scm-1/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | doc = { 3 | ["dirfunctions.md"] = "0cc35a10ad5c3a08a3d7df7da7824574", 4 | ["dirpaths.md"] = "240a11d10b8c173ed740b5684eb96c1d", 5 | ["filenames.md"] = "7541a43eeab429d5fb6beb3649935406", 6 | ["index.md"] = "ce09c877eab96ca904bc0cb14f0f2236", 7 | ["misc.md"] = "df9bb28918c392bca94af81fe6bfaf24" 8 | }, 9 | lib = { 10 | ["libpaths.dll"] = "c31ce8e645f834f0a61b9afa13fbfb34" 11 | }, 12 | lua = { 13 | paths = { 14 | ["init.lua"] = "4d15a6dcb29e85b0183ca19b2dca558b" 15 | } 16 | }, 17 | ["paths-scm-1.rockspec"] = "f826068bc349e672e4300964c6122670" 18 | } 19 | -------------------------------------------------------------------------------- /lua/nn/HardShrink.lua: -------------------------------------------------------------------------------- 1 | local HardShrink, parent = torch.class('nn.HardShrink', 'nn.Module') 2 | 3 | function HardShrink:__init(lam) 4 | parent.__init(self) 5 | self.lambda = lam or 0.5 6 | end 7 | 8 | function HardShrink:updateOutput(input) 9 | input.THNN.HardShrink_updateOutput( 10 | input:cdata(), 11 | self.output:cdata(), 12 | self.lambda 13 | ) 14 | return self.output 15 | end 16 | 17 | function HardShrink:updateGradInput(input, gradOutput) 18 | input.THNN.HardShrink_updateGradInput( 19 | 
input:cdata(), 20 | gradOutput:cdata(), 21 | self.gradInput:cdata(), 22 | self.lambda 23 | ) 24 | return self.gradInput 25 | end 26 | -------------------------------------------------------------------------------- /lua/nn/SoftShrink.lua: -------------------------------------------------------------------------------- 1 | local SoftShrink, parent = torch.class('nn.SoftShrink', 'nn.Module') 2 | 3 | function SoftShrink:__init(lam) 4 | parent.__init(self) 5 | self.lambda = lam or 0.5 6 | end 7 | 8 | function SoftShrink:updateOutput(input) 9 | input.THNN.SoftShrink_updateOutput( 10 | input:cdata(), 11 | self.output:cdata(), 12 | self.lambda 13 | ) 14 | return self.output 15 | end 16 | 17 | function SoftShrink:updateGradInput(input, gradOutput) 18 | input.THNN.SoftShrink_updateGradInput( 19 | input:cdata(), 20 | gradOutput:cdata(), 21 | self.gradInput:cdata(), 22 | self.lambda 23 | ) 24 | return self.gradInput 25 | end 26 | -------------------------------------------------------------------------------- /luarocks/luaffi/scm-1/luaffi-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "luaffi" 2 | version = "scm-1" 3 | 4 | source = { 5 | url = "git://github.com/facebook/luaffifb.git", 6 | } 7 | 8 | description = { 9 | summary = "FFI library for calling C functions from lua", 10 | detailed = [[ 11 | ]], 12 | homepage = "https://github.com/facebook/luaffifb", 13 | license = "BSD" 14 | } 15 | 16 | dependencies = { 17 | "lua >= 5.1", 18 | } 19 | 20 | build = { 21 | type = "builtin", 22 | modules = { 23 | ffi = { 24 | incdirs = { 25 | "dynasm" 26 | }, 27 | sources = { 28 | "call.c", "ctype.c", "ffi.c", "parser.c", 29 | } 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /luarocks/paths/scm-1/doc/index.md: -------------------------------------------------------------------------------- 1 | 2 | # Filename Manipulation Package # 3 | 4 | This package provides 
portable functions and variables to manipulate the file system : 5 | 6 | * [Manipulating filenames](filenames.md) : functions for manipulating filenames ; 7 | * [Directory functions](dirfunctions.md) : functions for listing and manipulating directories ; 8 | * [Directory paths](dirpaths.md) : paths to well known directories ; 9 | * [Miscellaneous](misc.md) : uncategorized functions ; 10 | 11 | When this package is loaded, it also computes a number of useful 12 | variables indicating where the various Torch components are installed. 13 | Do not change their values. 14 | 15 | -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plotmatrix.md: -------------------------------------------------------------------------------- 1 | 2 | ## Plotting Matrices ## 3 | 4 | A given matrix can be plotted using 2D contour plot on a surface. 5 | 6 | 7 | ### gnuplot.imagesc(z, ['color' or 'gray']) ### 8 | 9 | Plot surface ` z ` using contour plot. The second argument defines 10 | the color palette for the display. By default, grayscale colors are 11 | used, however, one can also use any color palette available in 12 | `Gnuplot`. 13 | 14 | ```lua 15 | x = torch.linspace(-1,1) 16 | xx = torch.Tensor(x:size(1),x:size(1)):zero():addr(1,x,x) 17 | xx = xx*math.pi*6 18 | gnuplot.imagesc(torch.sin(xx),'color') 19 | ``` 20 | ![](plot_imagesc.png) 21 | -------------------------------------------------------------------------------- /luarocks/sys/1.1-0/sys-1.1-0.rockspec: -------------------------------------------------------------------------------- 1 | package = "sys" 2 | version = "1.1-0" 3 | 4 | source = { 5 | url = "git://github.com/torch/sys" 6 | } 7 | 8 | description = { 9 | summary = "A system library for Torch", 10 | detailed = [[ 11 | Provides system functionalities for Torch. 
12 | ]], 13 | homepage = "https://github.com/torch/sys", 14 | license = "BSD" 15 | } 16 | 17 | dependencies = { 18 | "torch >= 7.0", 19 | } 20 | 21 | build = { 22 | type = "command", 23 | build_command = [[cmake -E make_directory build && cd build && cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="$(LUA_BINDIR)/.." -DCMAKE_INSTALL_PREFIX="$(PREFIX)" && $(MAKE)]], 24 | install_command = "cd build && $(MAKE) install" 25 | } 26 | -------------------------------------------------------------------------------- /luarocks/cwrap/scm-1/cwrap-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "cwrap" 2 | version = "scm-1" 3 | 4 | source = { 5 | url = "git://github.com/torch/cwrap.git", 6 | } 7 | 8 | description = { 9 | summary = "Advanced automatic wrapper for C functions", 10 | detailed = [[ 11 | ]], 12 | homepage = "https://github.com/torch/cwrap", 13 | license = "BSD" 14 | } 15 | 16 | dependencies = { 17 | "lua >= 5.1", 18 | } 19 | 20 | build = { 21 | type = "builtin", 22 | modules = { 23 | ["cwrap.init"] = "init.lua", 24 | ["cwrap.cinterface"] = "cinterface.lua", 25 | ["cwrap.types"] = "types.lua", 26 | }, 27 | install = { 28 | lua = { 29 | ["cwrap.README"] = "README.md" 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /luarocks/nn/scm-1/nn-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "nn" 2 | version = "scm-1" 3 | 4 | source = { 5 | url = "git://github.com/torch/nn.git", 6 | } 7 | 8 | description = { 9 | summary = "Neural Network package for Torch", 10 | detailed = [[ 11 | ]], 12 | homepage = "https://github.com/torch/nn", 13 | license = "BSD" 14 | } 15 | 16 | dependencies = { 17 | "torch >= 7.0", 18 | "luaffi", 19 | "moses" 20 | } 21 | 22 | build = { 23 | type = "command", 24 | build_command = [[ 25 | cmake -E make_directory build && cd build && cmake .. 
-DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="$(LUA_BINDIR)/.." -DCMAKE_INSTALL_PREFIX="$(PREFIX)" && $(MAKE) 26 | ]], 27 | install_command = "cd build && $(MAKE) install" 28 | } 29 | -------------------------------------------------------------------------------- /lua/nnx/FunctionWrapper.lua: -------------------------------------------------------------------------------- 1 | local FunctionWrapper, parent = torch.class('nn.FunctionWrapper', 'nn.Module') 2 | 3 | local help_desc = [[ 4 | Dummy module that takes a forward and a backward function as argument. 5 | ]] 6 | 7 | function FunctionWrapper:__init(init, updateOutput, updateGradInput) 8 | init(self) 9 | self.fn_updateOutput = updateOutput 10 | self.fn_updateGradInput = updateGradInput 11 | end 12 | 13 | function FunctionWrapper:updateOutput(input) 14 | self.output = self.fn_updateOutput(self, input) 15 | return self.output 16 | end 17 | 18 | function FunctionWrapper:updateGradInput(input, gradOutput) 19 | self.gradInput = self.fn_updateGradInput(self, input, gradOutput) 20 | return self.gradInput 21 | end -------------------------------------------------------------------------------- /lua/optim/init.lua: -------------------------------------------------------------------------------- 1 | 2 | require 'torch' 3 | 4 | optim = {} 5 | 6 | -- optimizations 7 | require('optim.sgd') 8 | require('optim.cg') 9 | require('optim.asgd') 10 | require('optim.nag') 11 | require('optim.fista') 12 | require('optim.lbfgs') 13 | require('optim.adagrad') 14 | require('optim.rprop') 15 | require('optim.adam') 16 | require('optim.adamax') 17 | require('optim.rmsprop') 18 | require('optim.adadelta') 19 | require('optim.cmaes') 20 | require('optim.de') 21 | 22 | -- line search functions 23 | require('optim.lswolfe') 24 | 25 | -- helpers 26 | require('optim.polyinterp') 27 | require('optim.checkgrad') 28 | 29 | -- tools 30 | require('optim.ConfusionMatrix') 31 | require('optim.Logger') 32 | 33 | return optim 34 | 
-------------------------------------------------------------------------------- /luarocks/inspect/3.1.0-1/inspect-3.1.0-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "inspect" 2 | version = "3.1.0-1" 3 | source = { 4 | url = "https://github.com/kikito/inspect.lua/archive/v3.1.0.tar.gz", 5 | dir = "inspect.lua-3.1.0" 6 | } 7 | description = { 8 | summary = "Lua table visualizer, ideal for debugging", 9 | detailed = [[ 10 | inspect will print out your lua tables nicely so you can debug your programs quickly. It sorts keys by type and name and handles recursive tables properly. 11 | ]], 12 | homepage = "https://github.com/kikito/inspect.lua", 13 | license = "MIT " 14 | } 15 | dependencies = { 16 | "lua >= 5.1" 17 | } 18 | build = { 19 | type = "builtin", 20 | modules = { 21 | inspect = "inspect.lua" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /include/TH/THDiskFile.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_DISK_FILE_INC 2 | #define TH_DISK_FILE_INC 3 | 4 | #include "THFile.h" 5 | 6 | TH_API THFile *THDiskFile_new(const char *name, const char *mode, int isQuiet); 7 | TH_API THFile *THPipeFile_new(const char *name, const char *mode, int isQuiet); 8 | 9 | TH_API const char *THDiskFile_name(THFile *self); 10 | 11 | TH_API int THDiskFile_isLittleEndianCPU(void); 12 | TH_API int THDiskFile_isBigEndianCPU(void); 13 | TH_API void THDiskFile_nativeEndianEncoding(THFile *self); 14 | TH_API void THDiskFile_littleEndianEncoding(THFile *self); 15 | TH_API void THDiskFile_bigEndianEncoding(THFile *self); 16 | TH_API void THDiskFile_longSize(THFile *self, int size); 17 | TH_API void THDiskFile_noBuffer(THFile *self); 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /lua/sundown/html.lua: 
-------------------------------------------------------------------------------- 1 | local sundown = require 'sundown.env' 2 | local ffi = require 'ffi' 3 | local C = sundown.C 4 | 5 | require 'sundown.sdcdefs' 6 | require 'sundown.htmlcdefs' 7 | 8 | local function render(txt) 9 | local callbacks = ffi.new('struct sd_callbacks') 10 | local options = ffi.new('struct sd_html_renderopt') 11 | C.sd_html_renderer(callbacks, options, 0) 12 | local markdown = C.sd_markdown_new(0xfff, 16, callbacks, options) 13 | 14 | local outbuf = C.sd_bufnew(64) 15 | C.sd_markdown_render(outbuf, ffi.cast('const char*', txt), #txt, markdown) 16 | C.sd_markdown_free(markdown) 17 | txt = ffi.string(outbuf.data, outbuf.size) 18 | C.sd_bufrelease(outbuf) 19 | 20 | return txt 21 | end 22 | 23 | return {render=render} 24 | -------------------------------------------------------------------------------- /luarocks/paths/scm-1/paths-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "paths" 2 | version = "scm-1" 3 | 4 | source = { 5 | url = "git://github.com/torch/paths.git", 6 | } 7 | 8 | description = { 9 | summary = "Paths manipulations", 10 | detailed = [[ 11 | ]], 12 | homepage = "https://github.com/torch/paths", 13 | license = "BSD" 14 | } 15 | 16 | dependencies = { 17 | "lua >= 5.1", 18 | } 19 | 20 | build = { 21 | type = "command", 22 | build_command = [[ 23 | cmake -E make_directory build && cd build && cmake .. 
-DCMAKE_BUILD_TYPE=Release -DLUALIB=$(LUALIB) -DLUA_INCDIR="$(LUA_INCDIR)" -DLUA_LIBDIR="$(LUA_LIBDIR)" -DLUADIR="$(LUADIR)" -DLIBDIR="$(LIBDIR)" -DCMAKE_INSTALL_PREFIX="$(PREFIX)" && $(MAKE) 24 | ]], 25 | install_command = "cd build && $(MAKE) install" 26 | } 27 | -------------------------------------------------------------------------------- /luarocks/config.lua: -------------------------------------------------------------------------------- 1 | -- I do not know what luarocks guys have in mind 2 | -- If they depend on external commands, it seems 3 | -- completely irrelevant to install packages to 4 | -- replace those commands (as you need these freaking 5 | -- commands anyways to install the first modules!!) 6 | -- On top of that it breaks features. BOUH. 7 | fs_use_modules = false 8 | 9 | rocks_trees = { 10 | home..[[/.luarocks]], 11 | {root="C:/torch", bin_dir="C:/torch/bin", lib_dir="C:/torch/bin", lua_dir="C:/torch/lua"} 12 | } 13 | 14 | rocks_servers = { 15 | [[https://raw.githubusercontent.com/torch/rocks/master]], 16 | [[https://raw.githubusercontent.com/rocks-moonscript-org/moonrocks-mirror/master]] 17 | } 18 | 19 | variables = { 20 | LUALIB = [[libluajit.lib]] 21 | } 22 | -------------------------------------------------------------------------------- /lua/nn/SoftSign.lua: -------------------------------------------------------------------------------- 1 | local SoftSign, parent = torch.class('nn.SoftSign', 'nn.Module') 2 | 3 | function SoftSign:updateOutput(input) 4 | self.temp = self.temp or input.new() 5 | self.temp:resizeAs(input):copy(input):abs():add(1) 6 | self.output:resizeAs(input):copy(input):cdiv(self.temp) 7 | return self.output 8 | end 9 | 10 | function SoftSign:updateGradInput(input, gradOutput) 11 | self.tempgrad = self.tempgrad or input.new() 12 | self.tempgrad:resizeAs(self.output):copy(input):abs():add(1):cmul(self.tempgrad) 13 | self.gradInput:resizeAs(input):copy(gradOutput):cdiv(self.tempgrad) 14 | return self.gradInput 15 | end 16 
| 17 | function SoftSign:clearState() 18 | nn.utils.clear(self, 'temp', 'tempgrad') 19 | return parent.clearState(self) 20 | end 21 | -------------------------------------------------------------------------------- /luarocks/luafilesystem/1.6.3-2/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | doc = { 3 | us = { 4 | ["doc.css"] = "d0a913514fb190240b3b4033d105cbc0", 5 | ["examples.html"] = "5832f72021728374cf57b621d62ce0ff", 6 | ["index.html"] = "96885bdda963939f0a363b5fa6b16b59", 7 | ["license.html"] = "e3a756835cb7c8ae277d5e513c8e32ee", 8 | ["luafilesystem.png"] = "81e923e976e99f894ea0aa8b52baff29", 9 | ["manual.html"] = "d6473799b73ce486c3ea436586cb3b34" 10 | } 11 | }, 12 | lib = { 13 | ["lfs.dll"] = "c7ce7bd75fda1301e1e26b00446976dd" 14 | }, 15 | ["luafilesystem-1.6.3-2.rockspec"] = "eb0ef7c190516892eb8357af799eea5f", 16 | tests = { 17 | ["test.lua"] = "7b4ddb5bdb7e0b1b1ed0150d473535c9" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /luarocks/optim/1.0.5-0/optim-1.0.5-0.rockspec: -------------------------------------------------------------------------------- 1 | package = "optim" 2 | version = "1.0.5-0" 3 | 4 | source = { 5 | url = "git://github.com/torch/optim", 6 | } 7 | 8 | description = { 9 | summary = "An optimization library for Torch.", 10 | detailed = [[ 11 | This package contains several optimization routines for Torch. 12 | ]], 13 | homepage = "https://github.com/torch/optim", 14 | license = "BSD" 15 | } 16 | 17 | dependencies = { 18 | "torch >= 7.0", 19 | } 20 | 21 | build = { 22 | type = "command", 23 | build_command = [[ 24 | cmake -E make_directory build && cd build && cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="$(LUA_BINDIR)/.." 
-DCMAKE_INSTALL_PREFIX="$(PREFIX)" && $(MAKE) 25 | ]], 26 | install_command = "cd build && $(MAKE) install" 27 | } 28 | -------------------------------------------------------------------------------- /include/TH/THGenerateFloatTypes.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_GENERIC_FILE 2 | #error "You must define TH_GENERIC_FILE before including THGenerateAllTypes.h" 3 | #endif 4 | 5 | #define real float 6 | #define accreal double 7 | #define Real Float 8 | #define THInf FLT_MAX 9 | #define TH_REAL_IS_FLOAT 10 | #line 1 TH_GENERIC_FILE 11 | #include TH_GENERIC_FILE 12 | #undef accreal 13 | #undef real 14 | #undef Real 15 | #undef THInf 16 | #undef TH_REAL_IS_FLOAT 17 | 18 | #define real double 19 | #define accreal double 20 | #define Real Double 21 | #define THInf DBL_MAX 22 | #define TH_REAL_IS_DOUBLE 23 | #line 1 TH_GENERIC_FILE 24 | #include TH_GENERIC_FILE 25 | #undef accreal 26 | #undef real 27 | #undef Real 28 | #undef THInf 29 | #undef TH_REAL_IS_DOUBLE 30 | 31 | #undef TH_GENERIC_FILE 32 | -------------------------------------------------------------------------------- /lua/nn/GatedLinearUnit.lua: -------------------------------------------------------------------------------- 1 | local GatedLinearUnit, parent = torch.class('nn.GatedLinearUnit', 'nn.Module') 2 | 3 | function GatedLinearUnit:__init(dim) 4 | parent.__init(self) 5 | self.dim = dim 6 | end 7 | 8 | function GatedLinearUnit:updateOutput(input) 9 | local dim = self.dim or input:dim() 10 | input.THNN.GatedLinear_updateOutput( 11 | input:cdata(), 12 | self.output:cdata(), 13 | dim 14 | ) 15 | return self.output 16 | end 17 | 18 | function GatedLinearUnit:updateGradInput(input, gradOutput) 19 | local dim = self.dim or input:dim() 20 | input.THNN.GatedLinear_updateGradInput( 21 | input:cdata(), 22 | gradOutput:cdata(), 23 | self.gradInput:cdata(), 24 | dim 25 | ) 26 | return self.gradInput 27 | end 28 | 
-------------------------------------------------------------------------------- /lua/jit/dis_x64.lua: -------------------------------------------------------------------------------- 1 | ---------------------------------------------------------------------------- 2 | -- LuaJIT x64 disassembler wrapper module. 3 | -- 4 | -- Copyright (C) 2005-2017 Mike Pall. All rights reserved. 5 | -- Released under the MIT license. See Copyright Notice in luajit.h 6 | ---------------------------------------------------------------------------- 7 | -- This module just exports the 64 bit functions from the combined 8 | -- x86/x64 disassembler module. All the interesting stuff is there. 9 | ------------------------------------------------------------------------------ 10 | 11 | local dis_x86 = require((string.match(..., ".*%.") or "").."dis_x86") 12 | return { 13 | create = dis_x86.create64, 14 | disass = dis_x86.disass64, 15 | regname = dis_x86.regname64 16 | } 17 | 18 | -------------------------------------------------------------------------------- /lua/nn/LogSigmoid.lua: -------------------------------------------------------------------------------- 1 | local LogSigmoid, parent = torch.class('nn.LogSigmoid', 'nn.Module') 2 | 3 | function LogSigmoid:updateOutput(input) 4 | self.buffer = self.buffer or input.new() 5 | input.THNN.LogSigmoid_updateOutput( 6 | input:cdata(), 7 | self.output:cdata(), 8 | self.buffer:cdata() 9 | ) 10 | return self.output 11 | end 12 | 13 | function LogSigmoid:updateGradInput(input, gradOutput) 14 | input.THNN.LogSigmoid_updateGradInput( 15 | input:cdata(), 16 | gradOutput:cdata(), 17 | self.gradInput:cdata(), 18 | self.buffer:cdata() 19 | ) 20 | return self.gradInput 21 | end 22 | 23 | function LogSigmoid:clearState() 24 | if self.buffer then self.buffer:set() end 25 | return parent.clearState(self) 26 | end 27 | 28 | -------------------------------------------------------------------------------- /luarocks/xlua/1.1-0/xlua-1.1-0.rockspec: 
-------------------------------------------------------------------------------- 1 | package = "xlua" 2 | version = "1.1-0" 3 | 4 | source = { 5 | url = "git://github.com/torch/xlua", 6 | } 7 | 8 | description = { 9 | summary = "Extra Lua functions.", 10 | detailed = [[ 11 | Lua is pretty compact in terms of built-in functionalities: 12 | this package extends the table and string libraries, 13 | and provide other general purpose tools (progress bar, ...). 14 | ]], 15 | homepage = "https://github.com/torch/xlua", 16 | license = "BSD" 17 | } 18 | 19 | dependencies = { 20 | "torch >= 7.0", 21 | "sys >= 1.0" 22 | } 23 | 24 | build = { 25 | type = "builtin", 26 | modules = { 27 | ['xlua.init'] = 'init.lua', 28 | ['xlua.OptionParser'] = 'OptionParser.lua', 29 | ['xlua.Profiler'] = 'Profiler.lua' 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /lua/jit/dis_mipsel.lua: -------------------------------------------------------------------------------- 1 | ---------------------------------------------------------------------------- 2 | -- LuaJIT MIPSEL disassembler wrapper module. 3 | -- 4 | -- Copyright (C) 2005-2017 Mike Pall. All rights reserved. 5 | -- Released under the MIT license. See Copyright Notice in luajit.h 6 | ---------------------------------------------------------------------------- 7 | -- This module just exports the little-endian functions from the 8 | -- MIPS disassembler module. All the interesting stuff is there. 
9 | ------------------------------------------------------------------------------ 10 | 11 | local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips") 12 | return { 13 | create = dis_mips.create_el, 14 | disass = dis_mips.disass_el, 15 | regname = dis_mips.regname 16 | } 17 | 18 | -------------------------------------------------------------------------------- /luarocks/cwrap/scm-1/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | ["cwrap-scm-1.rockspec"] = "fc5824e8ba4f6941fb7ba38e380e0f8a", 3 | doc = { 4 | ["argumenttypes.md"] = "5f4c35e28344bbf06f12e1f30a12b1f3", 5 | ["example.md"] = "3f3f877170e51d4cc9e92f11ec22622b", 6 | ["highlevelinterface.md"] = "169073c68147d06933aa94cac5ed45a8", 7 | ["index.md"] = "21bb64501d654e62329857833f70a70b", 8 | ["usertypes.md"] = "3e296ea35cb2bb3e7329bd9d9ad4808c" 9 | }, 10 | lua = { 11 | cwrap = { 12 | ["README.md"] = "9dd5a8e37de73993163e2cb39ba8d98f", 13 | ["cinterface.lua"] = "e184a2bc4b505081103fe3bfcea22f16", 14 | ["init.lua"] = "c46189fb86fb1fddeac0a51a5b274577", 15 | ["types.lua"] = "89f89242a0148d2310234e9d22ba0456" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /bin/luarocks-admin.bat: -------------------------------------------------------------------------------- 1 | ::rem:: --[[ 2 | @setlocal& set PATH=C:/torch/bin;%PATH% & set luafile="%~f0" & if exist "%~f0.bat" set luafile="%~f0.bat" 3 | @C:/torch/bin/luajit.exe %luafile% %*& exit /b ]] 4 | 5 | package.path = [[C:/torch/lua/?.lua;C:/torch/lua/?/init.lua;]]..package.path 6 | 7 | -- this should be loaded first. 
8 | local cfg = require("luarocks.cfg") 9 | 10 | local loader = require("luarocks.loader") 11 | local command_line = require("luarocks.command_line") 12 | 13 | program_description = "LuaRocks repository administration interface" 14 | 15 | commands = { 16 | help = "luarocks.help", 17 | make_manifest = "luarocks.make_manifest", 18 | add = "luarocks.add", 19 | remove = "luarocks.admin_remove", 20 | refresh_cache = "luarocks.refresh_cache", 21 | } 22 | 23 | command_line.run_command(...) 24 | -------------------------------------------------------------------------------- /lua/nn/CSubTable.lua: -------------------------------------------------------------------------------- 1 | 2 | local CSubTable, parent = torch.class('nn.CSubTable', 'nn.Module') 3 | 4 | function CSubTable:__init() 5 | parent.__init(self) 6 | self.gradInput = {} 7 | end 8 | 9 | function CSubTable:updateOutput(input) 10 | self.output:resizeAs(input[1]):copy(input[1]) 11 | self.output:add(-1,input[2]) 12 | return self.output 13 | end 14 | 15 | function CSubTable:updateGradInput(input, gradOutput) 16 | self.gradInput[1] = self.gradInput[1] or input[1].new() 17 | self.gradInput[2] = self.gradInput[2] or input[1].new() 18 | self.gradInput[1]:resizeAs(input[1]):copy(gradOutput) 19 | self.gradInput[2]:resizeAs(input[2]):copy(gradOutput):mul(-1) 20 | 21 | for i=#input+1, #self.gradInput do 22 | self.gradInput[i] = nil 23 | end 24 | 25 | return self.gradInput 26 | end 27 | -------------------------------------------------------------------------------- /include/TH/generic/THTensorCopy.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_GENERIC_FILE 2 | #define TH_GENERIC_FILE "generic/THTensorCopy.h" 3 | #else 4 | 5 | /* Support for copy between different Tensor types */ 6 | 7 | TH_API void THTensor_(copy)(THTensor *tensor, THTensor *src); 8 | TH_API void THTensor_(copyByte)(THTensor *tensor, struct THByteTensor *src); 9 | TH_API void THTensor_(copyChar)(THTensor 
*tensor, struct THCharTensor *src); 10 | TH_API void THTensor_(copyShort)(THTensor *tensor, struct THShortTensor *src); 11 | TH_API void THTensor_(copyInt)(THTensor *tensor, struct THIntTensor *src); 12 | TH_API void THTensor_(copyLong)(THTensor *tensor, struct THLongTensor *src); 13 | TH_API void THTensor_(copyFloat)(THTensor *tensor, struct THFloatTensor *src); 14 | TH_API void THTensor_(copyDouble)(THTensor *tensor, struct THDoubleTensor *src); 15 | 16 | #endif 17 | -------------------------------------------------------------------------------- /lua/nnx/SpatialMaxSampling.lua: -------------------------------------------------------------------------------- 1 | local SpatialMaxSampling, parent = torch.class('nn.SpatialMaxSampling', 'nn.Module') 2 | 3 | function SpatialMaxSampling:__init(...) 4 | parent.__init(self) 5 | xlua.unpack_class( 6 | self, {...}, 'nn.SpatialMaxSampling', 7 | 'resample an image using max selection', 8 | {arg='owidth', type='number', help='output width'}, 9 | {arg='oheight', type='number', help='output height'} 10 | ) 11 | self.indices = torch.Tensor() 12 | end 13 | 14 | function SpatialMaxSampling:updateOutput(input) 15 | input.nn.SpatialMaxSampling_updateOutput(self, input) 16 | return self.output 17 | end 18 | 19 | function SpatialMaxSampling:updateGradInput(input, gradOutput) 20 | input.nn.SpatialMaxSampling_updateGradInput(self, input, gradOutput) 21 | return self.gradInput 22 | end 23 | -------------------------------------------------------------------------------- /lua/nn/Identity.lua: -------------------------------------------------------------------------------- 1 | local Identity, _ = torch.class('nn.Identity', 'nn.Module') 2 | 3 | function Identity:updateOutput(input) 4 | self.output = input 5 | return self.output 6 | end 7 | 8 | 9 | function Identity:updateGradInput(input, gradOutput) 10 | self.gradInput = gradOutput 11 | return self.gradInput 12 | end 13 | 14 | function Identity:clearState() 15 | -- don't call set because it 
might reset referenced tensors 16 | local function clear(f) 17 | if self[f] then 18 | if torch.isTensor(self[f]) then 19 | self[f] = self[f].new() 20 | elseif type(self[f]) == 'table' then 21 | self[f] = {} 22 | else 23 | self[f] = nil 24 | end 25 | end 26 | end 27 | clear('output') 28 | clear('gradInput') 29 | return self 30 | end 31 | -------------------------------------------------------------------------------- /include/TH/generic/THTensorCopy.c: -------------------------------------------------------------------------------- 1 | #ifndef TH_GENERIC_FILE 2 | #define TH_GENERIC_FILE "generic/THTensorCopy.c" 3 | #else 4 | 5 | void THTensor_(copy)(THTensor *tensor, THTensor *src) 6 | { 7 | TH_TENSOR_APPLY2(real, tensor, real, src, *tensor_data = (real)(*src_data);) 8 | } 9 | 10 | #define IMPLEMENT_THTensor_COPY(TYPENAMESRC, TYPE_SRC) \ 11 | void THTensor_(copy##TYPENAMESRC)(THTensor *tensor, TH##TYPENAMESRC##Tensor *src) \ 12 | { \ 13 | TH_TENSOR_APPLY2(real, tensor, TYPE_SRC, src, *tensor_data = (real)(*src_data);) \ 14 | } 15 | 16 | IMPLEMENT_THTensor_COPY(Byte, unsigned char) 17 | IMPLEMENT_THTensor_COPY(Char, char) 18 | IMPLEMENT_THTensor_COPY(Short, short) 19 | IMPLEMENT_THTensor_COPY(Int, int) 20 | IMPLEMENT_THTensor_COPY(Long, long) 21 | IMPLEMENT_THTensor_COPY(Float, float) 22 | IMPLEMENT_THTensor_COPY(Double, double) 23 | 24 | #endif 25 | -------------------------------------------------------------------------------- /lua/nn/L1Cost.lua: -------------------------------------------------------------------------------- 1 | local THNN = require 'nn.THNN' 2 | local L1Cost, parent = torch.class('nn.L1Cost','nn.Criterion') 3 | 4 | function L1Cost:__init() 5 | parent.__init(self) 6 | end 7 | 8 | function L1Cost:updateOutput(input) 9 | self.output_tensor = self.output_tensor or input.new(1) 10 | input.THNN.L1Cost_updateOutput( 11 | input:cdata(), 12 | self.output_tensor:cdata() 13 | ) 14 | self.output = self.output_tensor[1] 15 | return self.output 16 | end 17 | 
18 | function L1Cost:updateGradInput(input) 19 | input.THNN.L1Cost_updateGradInput( 20 | input:cdata(), 21 | THNN.NULL, 22 | self.gradInput:cdata() 23 | ) 24 | return self.gradInput 25 | end 26 | 27 | function L1Cost:clearState() 28 | if self.output_tensor then self.output_tensor:set() end 29 | return parent.clearState(self) 30 | end 31 | -------------------------------------------------------------------------------- /lua/torch/TestSuite.lua: -------------------------------------------------------------------------------- 1 | function torch.TestSuite() 2 | local obj = { 3 | __tests = {}, 4 | __isTestSuite = true 5 | } 6 | 7 | local metatable = {} 8 | 9 | function metatable:__index(key) 10 | return self.__tests[key] 11 | end 12 | 13 | function metatable:__newindex(key, value) 14 | if self.__tests[key] ~= nil then 15 | error("Test " .. tostring(key) .. " is already defined.") 16 | end 17 | if type(value) ~= "function" then 18 | if type(value) == "table" then 19 | error("Nested tables of tests are not supported") 20 | else 21 | error("Only functions are supported as members of a TestSuite") 22 | end 23 | end 24 | self.__tests[key] = value 25 | end 26 | 27 | setmetatable(obj, metatable) 28 | 29 | return obj 30 | end 31 | -------------------------------------------------------------------------------- /lua/nn/SoftMarginCriterion.lua: -------------------------------------------------------------------------------- 1 | local SoftMarginCriterion, parent = torch.class('nn.SoftMarginCriterion', 'nn.Criterion') 2 | 3 | function SoftMarginCriterion:__init() 4 | parent.__init(self) 5 | self.sizeAverage = true 6 | end 7 | 8 | function SoftMarginCriterion:updateOutput(input, target) 9 | self.output_tensor = self.output_tensor or input.new(1) 10 | input.THNN.SoftMarginCriterion_updateOutput( 11 | input:cdata(), target:cdata(), 12 | self.output_tensor:cdata(), 13 | self.sizeAverage) 14 | self.output = self.output_tensor[1] 15 | return self.output 16 | end 17 | 18 | function 
SoftMarginCriterion:updateGradInput(input, target) 19 | input.THNN.SoftMarginCriterion_updateGradInput( 20 | input:cdata(), target:cdata(), 21 | self.gradInput:cdata(), 22 | self.sizeAverage) 23 | return self.gradInput 24 | end 25 | -------------------------------------------------------------------------------- /luarocks/moses/1.6.1-1/moses-1.6.1-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "moses" 2 | version = "1.6.1-1" 3 | source = { 4 | url = "https://github.com/Yonaba/Moses/archive/Moses-1.6.1-1.tar.gz", 5 | dir = "Moses-Moses-1.6.1-1" 6 | } 7 | description = { 8 | summary = "Utility-belt library for functional programming in Lua", 9 | detailed = [[ 10 | A utility-belt library for functional programming, which complements the built-in 11 | Lua table library, making easier operations on arrays, lists, collections. 12 | ]], 13 | homepage = "http://yonaba.github.com/Moses/", 14 | license = "MIT " 15 | } 16 | dependencies = { 17 | "lua >= 5.1" 18 | } 19 | build = { 20 | type = "builtin", 21 | modules = { 22 | ["moses"] = "moses.lua", 23 | ["moses_min"] = "moses_min.lua", 24 | }, 25 | copy_directories = {"doc","spec"} 26 | } -------------------------------------------------------------------------------- /lua/cwrap/README.md: -------------------------------------------------------------------------------- 1 | # CWrap package # 2 | 3 | The __cwrap__ package helps you to automate the generation of Lua/C wrappers 4 | around existing C functions, such that these functions would be callable 5 | from Lua. This package is used by the __torch__ package, but does not depend on 6 | anything, and could be used by anyone using Lua. 
7 | The documentation is organized as follows : 8 | 9 | * [Example Use Case](doc/example.md) 10 | * [High Level Interface](doc/highlevelinterface.md) 11 | * [Argument Types](doc/argumenttypes.md) 12 | * [User Types](doc/usertypes.md) 13 | 14 | __DISCLAIMER__ Before going any further, we assume the reader has a good 15 | knowledge of how to interface C functions with Lua. A good start would be 16 | the [Lua reference manual](http://www.lua.org/manual/5.1), or the book 17 | [Programming in Lua](http://www.inf.puc-rio.br/~roberto/pil2). 18 | -------------------------------------------------------------------------------- /lua/nn/CDivTable.lua: -------------------------------------------------------------------------------- 1 | 2 | local CDivTable, parent = torch.class('nn.CDivTable', 'nn.Module') 3 | 4 | function CDivTable:__init() 5 | parent.__init(self) 6 | self.gradInput = {} 7 | end 8 | 9 | function CDivTable:updateOutput(input) 10 | self.output:resizeAs(input[1]):copy(input[1]) 11 | self.output:cdiv(input[2]) 12 | return self.output 13 | end 14 | 15 | function CDivTable:updateGradInput(input, gradOutput) 16 | self.gradInput[1] = self.gradInput[1] or input[1].new() 17 | self.gradInput[2] = self.gradInput[2] or input[1].new() 18 | self.gradInput[1]:resizeAs(input[1]):copy(gradOutput):cdiv(input[2]) 19 | self.gradInput[2]:resizeAs(input[2]):zero():addcdiv(-1,self.gradInput[1],input[2]):cmul(input[1]) 20 | 21 | for i=#input+1, #self.gradInput do 22 | self.gradInput[i] = nil 23 | end 24 | 25 | return self.gradInput 26 | end 27 | -------------------------------------------------------------------------------- /lua/nn/ReLU6.lua: -------------------------------------------------------------------------------- 1 | local ReLU6, parent = torch.class('nn.ReLU6', 'nn.Module') 2 | 3 | function ReLU6:__init(inplace) 4 | parent.__init(self) 5 | 6 | if inplace == nil then 7 | self.inplace = false 8 | else 9 | self.inplace = inplace 10 | end 11 | 12 | if (inplace and type(inplace) 
~= 'boolean') then 13 | error('in-place flag must be boolean') 14 | end 15 | end 16 | 17 | function ReLU6:updateOutput(input) 18 | input.THNN.HardTanh_updateOutput( 19 | input:cdata(), 20 | self.output:cdata(), 21 | 0, 6, self.inplace) 22 | return self.output 23 | end 24 | 25 | function ReLU6:updateGradInput(input, gradOutput) 26 | input.THNN.HardTanh_updateGradInput( 27 | input:cdata(), 28 | gradOutput:cdata(), 29 | self.gradInput:cdata(), 30 | 0, 6, self.inplace) 31 | return self.gradInput 32 | end 33 | -------------------------------------------------------------------------------- /luarocks/cwrap/scm-1/doc/index.md: -------------------------------------------------------------------------------- 1 | # CWrap package # 2 | 3 | The __cwrap__ package helps you to automate the generation of Lua/C wrappers 4 | around existing C functions, such that these functions would be callable 5 | from Lua. This package is used by the __torch__ package, but does not depend on 6 | anything, and could be used by anyone using Lua. 7 | The documentation is organized as follows : 8 | 9 | * [Example Use Case](example.md) 10 | * [High Level Interface](highlevelinterface.md) 11 | * [Argument Types](argumenttypes.md) 12 | * [User Types](usertypes.md) 13 | 14 | __DISCLAIMER__ Before going any further, we assume the reader has a good 15 | knowledge of how to interface C functions with Lua. A good start would be 16 | the [Lua reference manual](http://www.lua.org/manual/5.1), or the book 17 | [Programming in Lua](http://www.inf.puc-rio.br/~roberto/pil2). 
18 | 19 | 20 | -------------------------------------------------------------------------------- /luarocks/nnx/0.1-1/nnx-0.1-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "nnx" 2 | version = "0.1-1" 3 | 4 | source = { 5 | url = "git://github.com/clementfarabet/lua---nnx", 6 | tag = "master" 7 | } 8 | 9 | description = { 10 | summary = "A completely unstable and experimental package that extends Torch's builtin nn library", 11 | detailed = [[ 12 | This is an experimental package that extends nn. You've be warned! 13 | ]], 14 | homepage = "https://github.com/clementfarabet/lua---nnx", 15 | license = "BSD" 16 | } 17 | 18 | dependencies = { 19 | "torch >= 7.0", 20 | "xlua >= 1.0" 21 | } 22 | 23 | build = { 24 | type = "command", 25 | build_command = [[ 26 | cmake -E make_directory build && cd build && cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="$(LUA_BINDIR)/.." -DCMAKE_INSTALL_PREFIX="$(PREFIX)" && $(MAKE) 27 | ]], 28 | install_command = "cd build && $(MAKE) install" 29 | } 30 | -------------------------------------------------------------------------------- /lua/nn/Transpose.lua: -------------------------------------------------------------------------------- 1 | local Transpose, parent = torch.class('nn.Transpose', 'nn.Module') 2 | 3 | -- transpose dimensions: 4 | -- n = nn.Transpose({1,4},{1,3}) 5 | -- will transpose dims 1 and 4, then 1 and 3... 6 | 7 | function Transpose:__init(...) 
8 | parent.__init(self) 9 | self.permutations = {...} 10 | end 11 | 12 | function Transpose:updateOutput(input) 13 | for _,perm in ipairs(self.permutations) do 14 | input = input:transpose(perm[1],perm[2]) 15 | end 16 | self.output:resizeAs(input):copy(input) 17 | return self.output 18 | end 19 | 20 | function Transpose:updateGradInput(input, gradOutput) 21 | for i = #self.permutations,1,-1 do 22 | local perm = self.permutations[i] 23 | gradOutput = gradOutput:transpose(perm[1],perm[2]) 24 | end 25 | self.gradInput:resizeAs(gradOutput):copy(gradOutput) 26 | return self.gradInput 27 | end 28 | 29 | -------------------------------------------------------------------------------- /lua/nn/GradientReversal.lua: -------------------------------------------------------------------------------- 1 | local GradientReversal, parent = torch.class('nn.GradientReversal', 'nn.Module') 2 | 3 | GradientReversal.__version = 2 4 | 5 | function GradientReversal:__init(lambda) 6 | lambda = lambda or 1 7 | parent.__init(self) 8 | self.lambda = lambda 9 | end 10 | 11 | function GradientReversal:setLambda(lambda) 12 | self.lambda = lambda 13 | end 14 | 15 | function GradientReversal:updateOutput(input) 16 | self.output:set(input) 17 | return self.output 18 | end 19 | 20 | function GradientReversal:updateGradInput(input, gradOutput) 21 | self.gradInput:resizeAs(gradOutput) 22 | self.gradInput:copy(gradOutput) 23 | self.gradInput:mul(-self.lambda) 24 | return self.gradInput 25 | end 26 | 27 | function GradientReversal:read(file, version) 28 | parent.read(self, file) 29 | if version < 2 then 30 | self.lambda = 1 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /luarocks/gnuplot/scm-1/doc/plot3dpoints.md: -------------------------------------------------------------------------------- 1 | 2 | ## Plotting 3D Points ## 3 | 4 | Arbitrary 3D point constellations can be plotted using an API similar to the 5 | `scatter3` function in Matalb. 
6 | 7 | 8 | ### gnuplot.scatter3(x, y, z) ### 9 | Plot `(x_i, y_i, z_i)` triplets in 3D. 10 | 11 | ```lua 12 | z = torch.linspace(-2 * math.pi, 2 * math.pi) 13 | x = z:clone():cos() 14 | y = z:clone():sin() 15 | gnuplot.scatter3(x, y, z) 16 | ``` 17 | ![](scatter3.png) 18 | 19 | It is also possible to specify a header, as well as multiple scatter plot sets 20 | on the same axis. 21 | 22 | ```lua 23 | z1 = torch.linspace(-2 * math.pi, 2 * math.pi) 24 | x = z1:clone():cos() 25 | y = z1:clone():sin() 26 | z2 = z1:clone():add(math.pi) 27 | gnuplot.scatter3({'pntsA', x, y, z1}, {'pntsB', x, y, z2}) 28 | ``` 29 | ![](scatter3_helix.png) 30 | 31 | -------------------------------------------------------------------------------- /luarocks/sundown/scm-1/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | bin = { 3 | mdcat = "db12ea53bfdc735610230849097cf874" 4 | }, 5 | doc = { 6 | ["LICENSE.md"] = "8abc83c1178e6f61ee7f6e459d0c175c", 7 | ["README.md"] = "1cec45c1c487092878f64fc5640d58de" 8 | }, 9 | lib = { 10 | ["libsundown.dll"] = "c682ffaba899cfd8f488d0f689d4f6eb" 11 | }, 12 | lua = { 13 | sundown = { 14 | ["ascii.lua"] = "bf08a04f855b4f556bdaf00ea2775583", 15 | ["env.lua"] = "5b35ab3ee8cc43974867c3f316d45d3c", 16 | ["html.lua"] = "b0322a7fb8fe418cc8428b93f4c271ad", 17 | ["htmlcdefs.lua"] = "e5ae09bf725d1b5f1dacad2941fd54f9", 18 | ["init.lua"] = "bbebe11a1ab5ed51e040fe5b9953ae6f", 19 | ["sdcdefs.lua"] = "8d662f67899f1f0dcbfbe071ea4d92c9" 20 | } 21 | }, 22 | ["sundown-scm-1.rockspec"] = "3107405f4b306b0876a3d83e935c70df" 23 | } 24 | -------------------------------------------------------------------------------- /luarocks/image/1.1.alpha-0/image-1.1.alpha-0.rockspec: -------------------------------------------------------------------------------- 1 | package = "image" 2 | version = "1.1.alpha-0" 3 | 4 | source = { 5 | url = "git://github.com/torch/image", 6 | tag = "master" 7 | } 8 | 9 | description = { 10 
| summary = "An image library for Torch", 11 | detailed = [[ 12 | This package provides routines to load/save and manipulate images 13 | using Torch's Tensor data structure. 14 | ]], 15 | homepage = "https://github.com/torch/image", 16 | license = "BSD" 17 | } 18 | 19 | dependencies = { 20 | "torch >= 7.0", 21 | "sys >= 1.0", 22 | "xlua >= 1.0", 23 | "dok" 24 | } 25 | 26 | build = { 27 | type = "command", 28 | build_command = [[ 29 | cmake -E make_directory build && cd build && cmake .. -DLUALIB=$(LUALIB) -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="$(LUA_BINDIR)/.." -DCMAKE_INSTALL_PREFIX="$(PREFIX)" && $(MAKE) 30 | ]], 31 | install_command = "cd build && $(MAKE) install" 32 | } 33 | -------------------------------------------------------------------------------- /luarocks/moses/1.6.1-1/spec/chaining_spec.lua: -------------------------------------------------------------------------------- 1 | require 'luacov' 2 | local _ = require 'moses' 3 | 4 | context('Chaining specs', function() 5 | 6 | context('chain', function() 7 | 8 | test('Chains a value',function() 9 | local v = _.chain({1,2,3,4}) 10 | :filter(function(i,k) return k%2~=0 end) 11 | :max() 12 | :value() 13 | assert_equal(v, 3) 14 | end) 15 | 16 | test('_(value) is the same as _.chain(value)', function() 17 | local v = _({1,2,3,4}) 18 | :filter(function(i,k) return k%2~=0 end) 19 | :max() 20 | :value() 21 | assert_equal(v, 3) 22 | end) 23 | 24 | end) 25 | 26 | context('value', function() 27 | 28 | test('Unwraps a chained object',function() 29 | local t = {1,2,3} 30 | assert_equal(_.chain(t):value(), t) 31 | assert_equal(_(t):value(), t) 32 | end) 33 | 34 | end) 35 | 36 | end) -------------------------------------------------------------------------------- /luarocks/luafilesystem/1.6.3-2/luafilesystem-1.6.3-2.rockspec: -------------------------------------------------------------------------------- 1 | package = "LuaFileSystem" 2 | version = "1.6.3-2" 3 | source = { 4 | url = 
"git://github.com/keplerproject/luafilesystem", 5 | tag = "v_1_6_3" 6 | } 7 | description = { 8 | summary = "File System Library for the Lua Programming Language", 9 | detailed = [[ 10 | LuaFileSystem is a Lua library developed to complement the set of 11 | functions related to file systems offered by the standard Lua 12 | distribution. LuaFileSystem offers a portable way to access the 13 | underlying directory structure and file attributes. 14 | ]], 15 | homepage = "http://keplerproject.github.io/luafilesystem", 16 | license = "MIT/X11" 17 | } 18 | dependencies = { 19 | "lua >= 5.1" 20 | } 21 | build = { 22 | type = "builtin", 23 | modules = { 24 | lfs = "src/lfs.c" 25 | }, 26 | copy_directories = { 27 | "doc", "tests" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /include/THNN/THNN.h: -------------------------------------------------------------------------------- 1 | #ifndef THNN_H 2 | #define THNN_H 3 | 4 | #include 5 | #include 6 | #ifdef _OPENMP 7 | #include 8 | #endif 9 | 10 | #define THNN_(NAME) TH_CONCAT_3(THNN_, Real, NAME) 11 | 12 | #define THIndexTensor THLongTensor 13 | #define THIndexTensor_(NAME) THLongTensor_ ## NAME 14 | 15 | #define THIntegerTensor THIntTensor 16 | #define THIntegerTensor_(NAME) THIntTensor_ ## NAME 17 | 18 | typedef long THIndex_t; 19 | typedef int THInteger_t; 20 | typedef void THNNState; 21 | 22 | #define THNN_resizeAs_indices(I1, I2) \ 23 | THLongStorage *size2 = THIndexTensor_(newSizeOf)(I2); \ 24 | if (!THTensor_(isSize)(I1, size2)) \ 25 | { \ 26 | THTensor_(resize)(I1, size2, NULL); \ 27 | } \ 28 | THLongStorage_free(size2); 29 | 30 | #include "generic/THNN.h" 31 | #include 32 | 33 | #endif 34 | -------------------------------------------------------------------------------- /include/TH/generic/THStorageCopy.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_GENERIC_FILE 2 | #define TH_GENERIC_FILE "generic/THStorageCopy.h" 
3 | #else 4 | 5 | /* Support for copy between different Storage types */ 6 | 7 | TH_API void THStorage_(rawCopy)(THStorage *storage, real *src); 8 | TH_API void THStorage_(copy)(THStorage *storage, THStorage *src); 9 | TH_API void THStorage_(copyByte)(THStorage *storage, struct THByteStorage *src); 10 | TH_API void THStorage_(copyChar)(THStorage *storage, struct THCharStorage *src); 11 | TH_API void THStorage_(copyShort)(THStorage *storage, struct THShortStorage *src); 12 | TH_API void THStorage_(copyInt)(THStorage *storage, struct THIntStorage *src); 13 | TH_API void THStorage_(copyLong)(THStorage *storage, struct THLongStorage *src); 14 | TH_API void THStorage_(copyFloat)(THStorage *storage, struct THFloatStorage *src); 15 | TH_API void THStorage_(copyDouble)(THStorage *storage, struct THDoubleStorage *src); 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /luarocks/fun/scm-1/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | ["fun-scm-1.rockspec"] = "b8fd66b052741f9bae72de1d6ed071b4", 3 | lua = { 4 | ["fun.lua"] = "8dfb9cd9fa556c3cf13acbc07f4a2581" 5 | }, 6 | tests = { 7 | [".gitignore"] = "84e1176faf63075a15f0ecdfdfb306f8", 8 | ["basic.lua"] = "bb871f1004eaec02915776c9e4aed2d9", 9 | ["compositions.lua"] = "50ed9dfa92ea3da494c03765d3c90f7d", 10 | ["filtering.lua"] = "03c52b71ff2da7d1a6f5ba497434a3b2", 11 | ["generators.lua"] = "f312b4d3bfa0fb2eec51a695129f6cc5", 12 | ["indexing.lua"] = "bea44a3f1d90b7c9f6b0ce9d7be5bf34", 13 | ["operators.lua"] = "618e01482453c0d0d3a99feb54c2bf67", 14 | ["reducing.lua"] = "d2ce99c0ba6154197984de86031c65f1", 15 | runtest = "514ac267a46b03f950133101fa77dc16", 16 | ["slicing.lua"] = "5f41049eaeab14445de49d7180232de8", 17 | ["transformations.lua"] = "16a739b6cef00a380e8c5b68528bc9f9" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- 
/lua/luarocks/fetch/git_file.lua: -------------------------------------------------------------------------------- 1 | 2 | --- Fetch back-end for retrieving sources from local Git repositories. 3 | --module("luarocks.fetch.git_file", package.seeall) 4 | local git_file = {} 5 | 6 | local git = require("luarocks.fetch.git") 7 | 8 | --- Fetch sources for building a rock from a local Git repository. 9 | -- @param rockspec table: The rockspec table 10 | -- @param extract boolean: Unused in this module (required for API purposes.) 11 | -- @param dest_dir string or nil: If set, will extract to the given directory. 12 | -- @return (string, string) or (nil, string): The absolute pathname of 13 | -- the fetched source tarball and the temporary directory created to 14 | -- store it; or nil and an error message. 15 | function git_file.get_sources(rockspec, extract, dest_dir) 16 | rockspec.source.url = rockspec.source.url:gsub("^git.file://", "") 17 | return git.get_sources(rockspec, extract, dest_dir) 18 | end 19 | 20 | return git_file 21 | -------------------------------------------------------------------------------- /lua/nn/SoftMin.lua: -------------------------------------------------------------------------------- 1 | local SoftMin, parent = torch.class('nn.SoftMin', 'nn.Module') 2 | 3 | function SoftMin:updateOutput(input) 4 | self.mininput = self.mininput or input.new() 5 | self.mininput:resizeAs(input):copy(input):mul(-1) 6 | input.THNN.SoftMax_updateOutput( 7 | self.mininput:cdata(), 8 | self.output:cdata() 9 | ) 10 | return self.output 11 | end 12 | 13 | function SoftMin:updateGradInput(input, gradOutput) 14 | self.mininput = self.mininput or input.new() 15 | self.mininput:resizeAs(input):copy(input):mul(-1) 16 | 17 | input.THNN.SoftMax_updateGradInput( 18 | self.mininput:cdata(), 19 | gradOutput:cdata(), 20 | self.gradInput:cdata(), 21 | self.output:cdata() 22 | ) 23 | 24 | self.gradInput:mul(-1) 25 | return self.gradInput 26 | end 27 | 28 | function 
SoftMin:clearState() 29 | if self.mininput then self.mininput:set() end 30 | return parent.clearState(self) 31 | end 32 | -------------------------------------------------------------------------------- /lua/nn/MarginCriterion.lua: -------------------------------------------------------------------------------- 1 | local MarginCriterion, parent = torch.class('nn.MarginCriterion', 'nn.Criterion') 2 | 3 | function MarginCriterion:__init(margin) 4 | parent.__init(self) 5 | self.sizeAverage = true 6 | self.margin = margin or 1 7 | end 8 | 9 | function MarginCriterion:updateOutput(input, target) 10 | self.output_tensor = self.output_tensor or input.new(1) 11 | input.THNN.MarginCriterion_updateOutput( 12 | input:cdata(), 13 | target:cdata(), 14 | self.output_tensor:cdata(), 15 | self.sizeAverage, 16 | self.margin 17 | ) 18 | self.output = self.output_tensor[1] 19 | return self.output 20 | end 21 | 22 | function MarginCriterion:updateGradInput(input, target) 23 | input.THNN.MarginCriterion_updateGradInput( 24 | input:cdata(), 25 | target:cdata(), 26 | self.gradInput:cdata(), 27 | self.sizeAverage, 28 | self.margin 29 | ) 30 | return self.gradInput 31 | end 32 | -------------------------------------------------------------------------------- /lua/nn/AbsCriterion.lua: -------------------------------------------------------------------------------- 1 | local AbsCriterion, parent = torch.class('nn.AbsCriterion', 'nn.Criterion') 2 | 3 | function AbsCriterion:__init(sizeAverage) 4 | parent.__init(self) 5 | if sizeAverage ~= nil then 6 | self.sizeAverage = sizeAverage 7 | else 8 | self.sizeAverage = true 9 | end 10 | end 11 | 12 | function AbsCriterion:updateOutput(input, target) 13 | self.output_tensor = self.output_tensor or input.new(1) 14 | input.THNN.AbsCriterion_updateOutput( 15 | input:cdata(), 16 | target:cdata(), 17 | self.output_tensor:cdata(), 18 | self.sizeAverage 19 | ) 20 | self.output = self.output_tensor[1] 21 | return self.output 22 | end 23 | 24 | function 
AbsCriterion:updateGradInput(input, target) 25 | input.THNN.AbsCriterion_updateGradInput( 26 | input:cdata(), 27 | target:cdata(), 28 | self.gradInput:cdata(), 29 | self.sizeAverage 30 | ) 31 | return self.gradInput 32 | end 33 | -------------------------------------------------------------------------------- /lua/nn/MSECriterion.lua: -------------------------------------------------------------------------------- 1 | local MSECriterion, parent = torch.class('nn.MSECriterion', 'nn.Criterion') 2 | 3 | function MSECriterion:__init(sizeAverage) 4 | parent.__init(self) 5 | if sizeAverage ~= nil then 6 | self.sizeAverage = sizeAverage 7 | else 8 | self.sizeAverage = true 9 | end 10 | end 11 | 12 | function MSECriterion:updateOutput(input, target) 13 | self.output_tensor = self.output_tensor or input.new(1) 14 | input.THNN.MSECriterion_updateOutput( 15 | input:cdata(), 16 | target:cdata(), 17 | self.output_tensor:cdata(), 18 | self.sizeAverage 19 | ) 20 | self.output = self.output_tensor[1] 21 | return self.output 22 | end 23 | 24 | function MSECriterion:updateGradInput(input, target) 25 | input.THNN.MSECriterion_updateGradInput( 26 | input:cdata(), 27 | target:cdata(), 28 | self.gradInput:cdata(), 29 | self.sizeAverage 30 | ) 31 | return self.gradInput 32 | end 33 | -------------------------------------------------------------------------------- /lua/torch/doc/pipefile.md: -------------------------------------------------------------------------------- 1 | 2 | # PipeFile # 3 | 4 | Parent classes: [DiskFile](diskfile.md) 5 | 6 | A `PipeFile` is a particular `File` which is able to perform basic read/write operations 7 | on a command pipe. It implements all methods described in [DiskFile](diskfile.md) and [File](file.md). 8 | 9 | The file might be open in read or write mode, depending on the parameter 10 | `mode` (which can take the value `"r"` or `"w"`) 11 | given to the [torch.PipeFile(fileName, mode)](#torch.PipeFile). Read-write mode is not allowed. 
12 | 13 | 14 | ### torch.PipeFile(command, [mode], [quiet]) ### 15 | 16 | _Constructor_ which execute `command` by opening a pipe in read or write 17 | `mode`. Valid `mode` are `"r"` (read) or `"w"` (write). Default is read 18 | mode. 19 | 20 | If (and only if) `quiet` is `true`, no error will be raised in case of 21 | problem opening the file: instead `nil` will be returned. 22 | 23 | -------------------------------------------------------------------------------- /luarocks/torch/scm-1/torch-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "torch" 2 | version = "scm-1" 3 | 4 | source = { 5 | url = "git://github.com/torch/torch7.git", 6 | } 7 | 8 | description = { 9 | summary = "Torch7", 10 | detailed = [[ 11 | ]], 12 | homepage = "https://github.com/torch/torch7", 13 | license = "BSD" 14 | } 15 | 16 | dependencies = { 17 | "lua >= 5.1", 18 | "paths >= 1.0", 19 | "cwrap >= 1.0" 20 | } 21 | 22 | build = { 23 | type = "command", 24 | build_command = [[ 25 | cmake -E make_directory build && cd build && cmake .. -DCMAKE_BUILD_TYPE=Release -DLUA=$(LUA) -DLUALIB=$(LUALIB) -DLUA_BINDIR="$(LUA_BINDIR)" -DLUA_INCDIR="$(LUA_INCDIR)" -DLUA_LIBDIR="$(LUA_LIBDIR)" -DLUADIR="$(LUADIR)" -DLIBDIR="$(LIBDIR)" -DCMAKE_INSTALL_PREFIX="$(PREFIX)" -DBLAS_LIBRARIES=C:/torch/lapack/lib/libblas.lib -DBLAS_INFO=generic -DLAPACK_LIBRARIES=C:/torch/lapack/lib/liblapack.lib -DLAPACK_FOUND=TRUE && $(MAKE) 26 | ]], 27 | install_command = "cd build && $(MAKE) install" 28 | } 29 | -------------------------------------------------------------------------------- /luarocks/torch/scm-1/doc/pipefile.md: -------------------------------------------------------------------------------- 1 | 2 | # PipeFile # 3 | 4 | Parent classes: [DiskFile](diskfile.md) 5 | 6 | A `PipeFile` is a particular `File` which is able to perform basic read/write operations 7 | on a command pipe. 
It implements all methods described in [DiskFile](diskfile.md) and [File](file.md). 8 | 9 | The file might be open in read or write mode, depending on the parameter 10 | `mode` (which can take the value `"r"` or `"w"`) 11 | given to the [torch.PipeFile(fileName, mode)](#torch.PipeFile). Read-write mode is not allowed. 12 | 13 | 14 | ### torch.PipeFile(command, [mode], [quiet]) ### 15 | 16 | _Constructor_ which execute `command` by opening a pipe in read or write 17 | `mode`. Valid `mode` are `"r"` (read) or `"w"` (write). Default is read 18 | mode. 19 | 20 | If (and only if) `quiet` is `true`, no error will be raised in case of 21 | problem opening the file: instead `nil` will be returned. 22 | 23 | -------------------------------------------------------------------------------- /lua/nn/Select.lua: -------------------------------------------------------------------------------- 1 | local Select, parent = torch.class('nn.Select', 'nn.Module') 2 | 3 | function Select:__init(dimension,index) 4 | parent.__init(self) 5 | self.dimension = dimension 6 | self.index = index 7 | end 8 | 9 | function Select:updateOutput(input) 10 | local dim = self.dimension < 0 and input:dim() + self.dimension + 1 or self.dimension 11 | local index = self.index < 0 and input:size(dim) + self.index + 1 or self.index 12 | local output = input:select(dim, index); 13 | self.output:resizeAs(output) 14 | return self.output:copy(output) 15 | end 16 | 17 | function Select:updateGradInput(input, gradOutput) 18 | local dim = self.dimension < 0 and input:dim() + self.dimension + 1 or self.dimension 19 | local index = self.index < 0 and input:size(dim) + self.index + 1 or self.index 20 | self.gradInput:resizeAs(input) 21 | self.gradInput:zero() 22 | self.gradInput:select(dim,index):copy(gradOutput) 23 | return self.gradInput 24 | end 25 | -------------------------------------------------------------------------------- /lua/sys/colors.lua: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 2 | -- colors, can be used to print things in color 3 | -------------------------------------------------------------------------------- 4 | local colors = { 5 | none = '\27[0m', 6 | black = '\27[0;30m', 7 | red = '\27[0;31m', 8 | green = '\27[0;32m', 9 | yellow = '\27[0;33m', 10 | blue = '\27[0;34m', 11 | magenta = '\27[0;35m', 12 | cyan = '\27[0;36m', 13 | white = '\27[0;37m', 14 | Black = '\27[1;30m', 15 | Red = '\27[1;31m', 16 | Green = '\27[1;32m', 17 | Yellow = '\27[1;33m', 18 | Blue = '\27[1;34m', 19 | Magenta = '\27[1;35m', 20 | Cyan = '\27[1;36m', 21 | White = '\27[1;37m', 22 | _black = '\27[40m', 23 | _red = '\27[41m', 24 | _green = '\27[42m', 25 | _yellow = '\27[43m', 26 | _blue = '\27[44m', 27 | _magenta = '\27[45m', 28 | _cyan = '\27[46m', 29 | _white = '\27[47m' 30 | } 31 | 32 | return colors 33 | -------------------------------------------------------------------------------- /include/TH/generic/THBlas.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_GENERIC_FILE 2 | #define TH_GENERIC_FILE "generic/THBlas.h" 3 | #else 4 | 5 | /* Level 1 */ 6 | TH_API void THBlas_(swap)(long n, real *x, long incx, real *y, long incy); 7 | TH_API void THBlas_(scal)(long n, real a, real *x, long incx); 8 | TH_API void THBlas_(copy)(long n, real *x, long incx, real *y, long incy); 9 | TH_API void THBlas_(axpy)(long n, real a, real *x, long incx, real *y, long incy); 10 | TH_API real THBlas_(dot)(long n, real *x, long incx, real *y, long incy); 11 | 12 | /* Level 2 */ 13 | TH_API void THBlas_(gemv)(char trans, long m, long n, real alpha, real *a, long lda, real *x, long incx, real beta, real *y, long incy); 14 | TH_API void THBlas_(ger)(long m, long n, real alpha, real *x, long incx, real *y, long incy, real *a, long lda); 15 | 16 | /* Level 3 */ 17 | TH_API 
void THBlas_(gemm)(char transa, char transb, long m, long n, long k, real alpha, real *a, long lda, real *b, long ldb, real beta, real *c, long ldc); 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /lua/nn/CAddTable.lua: -------------------------------------------------------------------------------- 1 | local CAddTable, parent = torch.class('nn.CAddTable', 'nn.Module') 2 | 3 | function CAddTable:__init(ip) 4 | parent.__init(self) 5 | self.inplace = ip 6 | self.gradInput = {} 7 | end 8 | 9 | function CAddTable:updateOutput(input) 10 | if self.inplace then 11 | self.output:set(input[1]) 12 | else 13 | self.output:resizeAs(input[1]):copy(input[1]) 14 | end 15 | for i=2,#input do 16 | self.output:add(input[i]) 17 | end 18 | return self.output 19 | end 20 | 21 | function CAddTable:updateGradInput(input, gradOutput) 22 | for i=1,#input do 23 | self.gradInput[i] = self.gradInput[i] or input[1].new() 24 | if self.inplace then 25 | self.gradInput[i]:set(gradOutput) 26 | else 27 | self.gradInput[i]:resizeAs(input[i]):copy(gradOutput) 28 | end 29 | end 30 | 31 | for i=#input+1, #self.gradInput do 32 | self.gradInput[i] = nil 33 | end 34 | 35 | return self.gradInput 36 | end 37 | -------------------------------------------------------------------------------- /lua/nn/Index.lua: -------------------------------------------------------------------------------- 1 | local Index, parent = torch.class('nn.Index', 'nn.Module') 2 | 3 | function Index:__init(dimension) 4 | parent.__init(self) 5 | self.dimension = dimension 6 | self.gradInput = {self.gradInput, self.gradInput.new()} 7 | end 8 | 9 | function Index:updateOutput(input) 10 | local t = input[1] 11 | local index = input[2] 12 | self.output:index(t, self.dimension, index) 13 | return self.output 14 | end 15 | 16 | function Index:updateGradInput(input, gradOutput) 17 | local t = input[1] 18 | local index = input[2] 19 | 20 | self.gradInput[2]:resize(index:size()):zero() 21 
| local gradInput = self.gradInput[1] -- no gradient for the index variable 22 | gradInput:resizeAs(t):zero() 23 | gradInput:indexAdd(self.dimension, index, gradOutput) 24 | return self.gradInput 25 | end 26 | 27 | function Index:clearState() 28 | self.gradInput[1]:set() 29 | self.gradInput[2]:set() 30 | self.output:set() 31 | return self 32 | end 33 | -------------------------------------------------------------------------------- /lua/nn/SmoothL1Criterion.lua: -------------------------------------------------------------------------------- 1 | local SmoothL1Criterion, parent = torch.class('nn.SmoothL1Criterion', 'nn.Criterion') 2 | 3 | function SmoothL1Criterion:__init(sizeAverage) 4 | parent.__init(self) 5 | if sizeAverage ~= nil then 6 | self.sizeAverage = sizeAverage 7 | else 8 | self.sizeAverage = true 9 | end 10 | end 11 | 12 | function SmoothL1Criterion:updateOutput(input, target) 13 | self.output_tensor = self.output_tensor or input.new(1) 14 | input.THNN.SmoothL1Criterion_updateOutput( 15 | input:cdata(), 16 | target:cdata(), 17 | self.output_tensor:cdata(), 18 | self.sizeAverage 19 | ) 20 | self.output = self.output_tensor[1] 21 | return self.output 22 | end 23 | 24 | function SmoothL1Criterion:updateGradInput(input, target) 25 | input.THNN.SmoothL1Criterion_updateGradInput( 26 | input:cdata(), 27 | target:cdata(), 28 | self.gradInput:cdata(), 29 | self.sizeAverage 30 | ) 31 | return self.gradInput 32 | end 33 | -------------------------------------------------------------------------------- /share/cmake/torch/TorchWrap.cmake: -------------------------------------------------------------------------------- 1 | MACRO(ADD_TORCH_WRAP target luafile) 2 | INCLUDE_DIRECTORIES("${CMAKE_CURRENT_BINARY_DIR}") 3 | GET_FILENAME_COMPONENT(_file_ "${luafile}" NAME_WE) 4 | SET(cfile "${_file_}.c") 5 | IF (DEFINED CWRAP_CUSTOM_LUA) 6 | ADD_CUSTOM_COMMAND( 7 | OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" 8 | COMMAND ${CWRAP_CUSTOM_LUA} ARGS 
"${CMAKE_CURRENT_SOURCE_DIR}/${luafile}" "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" 9 | WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" 10 | DEPENDS "${luafile}") 11 | ELSE (DEFINED CWRAP_CUSTOM_LUA) 12 | ADD_CUSTOM_COMMAND( 13 | OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" 14 | COMMAND C:/torch/bin/luajit ARGS "${CMAKE_CURRENT_SOURCE_DIR}/${luafile}" "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" 15 | WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" 16 | DEPENDS "${luafile}") 17 | ENDIF (DEFINED CWRAP_CUSTOM_LUA) 18 | ADD_CUSTOM_TARGET(${target} DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/${cfile}") 19 | ENDMACRO(ADD_TORCH_WRAP) 20 | -------------------------------------------------------------------------------- /share/cmake/torch/TorchExports-release.cmake: -------------------------------------------------------------------------------- 1 | #---------------------------------------------------------------- 2 | # Generated CMake target import file for configuration "Release". 3 | #---------------------------------------------------------------- 4 | 5 | # Commands may need to know the format version. 6 | set(CMAKE_IMPORT_FILE_VERSION 1) 7 | 8 | # Import target "TH" for configuration "Release" 9 | set_property(TARGET TH APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) 10 | set_target_properties(TH PROPERTIES 11 | IMPORTED_IMPLIB_RELEASE "${_IMPORT_PREFIX}/lib/TH.lib" 12 | IMPORTED_LINK_INTERFACE_LIBRARIES_RELEASE "C:/torch/lapack/lib/libblas.lib;C:/torch/lapack/lib/liblapack.lib" 13 | IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/bin/TH.dll" 14 | ) 15 | 16 | list(APPEND _IMPORT_CHECK_TARGETS TH ) 17 | list(APPEND _IMPORT_CHECK_FILES_FOR_TH "${_IMPORT_PREFIX}/lib/TH.lib" "${_IMPORT_PREFIX}/bin/TH.dll" ) 18 | 19 | # Commands beyond this point should not need to know the version. 
20 | set(CMAKE_IMPORT_FILE_VERSION) 21 | -------------------------------------------------------------------------------- /lua/nn/Mul.lua: -------------------------------------------------------------------------------- 1 | local Mul, parent = torch.class('nn.Mul', 'nn.Module') 2 | 3 | function Mul:__init() 4 | parent.__init(self) 5 | 6 | self.weight = torch.Tensor(1) 7 | self.gradWeight = torch.Tensor(1) 8 | 9 | self:reset() 10 | end 11 | 12 | 13 | function Mul:reset(stdv) 14 | if stdv then 15 | stdv = stdv * math.sqrt(3) 16 | else 17 | stdv = 1./math.sqrt(self.weight:size(1)) 18 | end 19 | 20 | self.weight:uniform(-stdv, stdv); 21 | end 22 | 23 | function Mul:updateOutput(input) 24 | self.output:resizeAs(input):copy(input); 25 | self.output:mul(self.weight[1]); 26 | return self.output 27 | end 28 | 29 | function Mul:updateGradInput(input, gradOutput) 30 | self.gradInput:resizeAs(input):zero() 31 | self.gradInput:add(self.weight[1], gradOutput) 32 | return self.gradInput 33 | end 34 | 35 | function Mul:accGradParameters(input, gradOutput, scale) 36 | scale = scale or 1 37 | self.gradWeight[1] = self.gradWeight[1] + scale*input:dot(gradOutput); 38 | end 39 | -------------------------------------------------------------------------------- /luarocks/moses/1.6.1-1/rock_manifest: -------------------------------------------------------------------------------- 1 | rock_manifest = { 2 | doc = { 3 | ["index.html"] = "11eb76eeb84ed3b799ebeb0aa51ddf9b", 4 | ["ldoc.css"] = "e2b04e90ff648f8e00d9d4d458de59d9", 5 | topics = { 6 | ["tutorial.md.html"] = "e3c3236df73e64ab0b2a929fa8761a16" 7 | }, 8 | ["tutorial.md"] = "fbc9b413a3ca79ea49e2190f6234dee9" 9 | }, 10 | lua = { 11 | ["moses.lua"] = "2d537b86ac089768d80057a73af3dc74", 12 | ["moses_min.lua"] = "cf1e0fc6f35a0406c790ceac0324afe4" 13 | }, 14 | ["moses-1.6.1-1.rockspec"] = "9492f6296e104b33340e7a928bc0e812", 15 | spec = { 16 | ["array_spec.lua"] = "2f451e49719ed065242c5046762a4cc1", 17 | ["chaining_spec.lua"] = 
"abc7203317f91405a5bf9f6e54784890", 18 | ["func_spec.lua"] = "fefc9e569d5252087cb25b795c10a190", 19 | ["import_spec.lua"] = "1445d05f804266df1281eabf4009f66e", 20 | ["object_spec.lua"] = "a0d917077ba4c5b783bc837ff3ae1140", 21 | ["table_spec.lua"] = "79b1f3bc2b7e26348f3cd0826f1d3469" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /lua/nnx/SaturatedLU.lua: -------------------------------------------------------------------------------- 1 | local SaturatedLU, parent = torch.class('nn.SaturatedLU','nn.Module') 2 | 3 | function SaturatedLU:__init(th,v,th2,v2) 4 | parent.__init(self) 5 | self.threshold = th or -1.0 6 | self.val = v or -1.0 7 | self.threshold2 = th2 or 1.0 8 | self.val2 = v2 or 1.0 9 | if (th and type(th) ~= 'number') or (v and type(v) ~= 'number') 10 | or (th2 and type(th2) ~= 'number') or (v2 and type(v2) ~= 'number') then 11 | error('nn.SaturatedLU(lower-bound, value, upper-bound, value2)') 12 | end 13 | end 14 | 15 | function SaturatedLU:updateOutput(input) 16 | self.output = input:clone() 17 | self.output[self.output:lt(self.threshold)] = self.val 18 | self.output[self.output:gt(self.threshold2)] = self.val2 19 | return self.output 20 | end 21 | 22 | function SaturatedLU:updateGradInput(input, gradOutput) 23 | self.gradInput = gradOutput:clone() 24 | self.gradInput[input:lt(self.threshold)] = 0 25 | self.gradInput[input:gt(self.threshold2)] = 0 26 | return self.gradInput 27 | end -------------------------------------------------------------------------------- /lua/nn/SpatialAdaptiveAveragePooling.lua: -------------------------------------------------------------------------------- 1 | local SpatialAdaptiveAveragePooling, parent = torch.class('nn.SpatialAdaptiveAveragePooling', 'nn.Module') 2 | 3 | function SpatialAdaptiveAveragePooling:__init(W, H) 4 | parent.__init(self) 5 | 6 | self.W = W 7 | self.H = H 8 | end 9 | 10 | function SpatialAdaptiveAveragePooling:updateOutput(input) 11 | 
input.THNN.SpatialAdaptiveAveragePooling_updateOutput( 12 | input:cdata(), 13 | self.output:cdata(), 14 | self.W, self.H 15 | ) 16 | return self.output 17 | end 18 | 19 | function SpatialAdaptiveAveragePooling:updateGradInput(input, gradOutput) 20 | input.THNN.SpatialAdaptiveAveragePooling_updateGradInput( 21 | input:cdata(), 22 | gradOutput:cdata(), 23 | self.gradInput:cdata() 24 | ) 25 | return self.gradInput 26 | end 27 | 28 | -- for backward compat 29 | function SpatialAdaptiveAveragePooling:empty() 30 | self:clearState() 31 | end 32 | 33 | function SpatialAdaptiveAveragePooling:clearState() 34 | return parent.clearState(self) 35 | end 36 | -------------------------------------------------------------------------------- /include/TH/THTensor.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_TENSOR_INC 2 | #define TH_TENSOR_INC 3 | 4 | #include "THStorage.h" 5 | #include "THTensorApply.h" 6 | 7 | #define THTensor TH_CONCAT_3(TH,Real,Tensor) 8 | #define THTensor_(NAME) TH_CONCAT_4(TH,Real,Tensor_,NAME) 9 | 10 | #define TH_DESC_BUFF_LEN 64 11 | typedef struct { 12 | char str[TH_DESC_BUFF_LEN]; 13 | } THDescBuff; 14 | 15 | /* basics */ 16 | #include "generic/THTensor.h" 17 | #include "THGenerateAllTypes.h" 18 | 19 | #include "generic/THTensorCopy.h" 20 | #include "THGenerateAllTypes.h" 21 | 22 | #include "THTensorMacros.h" 23 | 24 | /* random numbers */ 25 | #include "THRandom.h" 26 | #include "generic/THTensorRandom.h" 27 | #include "THGenerateAllTypes.h" 28 | 29 | /* maths */ 30 | #include "generic/THTensorMath.h" 31 | #include "THGenerateAllTypes.h" 32 | 33 | /* convolutions */ 34 | #include "generic/THTensorConv.h" 35 | #include "THGenerateAllTypes.h" 36 | 37 | /* lapack support */ 38 | #include "generic/THTensorLapack.h" 39 | #include "THGenerateFloatTypes.h" 40 | 41 | #endif 42 | -------------------------------------------------------------------------------- /luarocks/fun/scm-1/fun-scm-1.rockspec: 
-------------------------------------------------------------------------------- 1 | package = "fun" 2 | version = "scm-1" 3 | 4 | source = { 5 | url = "git://github.com/luafun/luafun.git", 6 | } 7 | 8 | description = { 9 | summary = "High-performance functional programming library for Lua", 10 | homepage = "https://luafun.github.io/", 11 | license = "MIT/X11", 12 | maintainer = "Roman Tsisyk ", 13 | detailed = [[ 14 | Lua Fun is a high-performance functional programming library for Lua 15 | designed with LuaJIT's trace compiler in mind. 16 | 17 | Lua Fun provides a set of more than 50 programming primitives typically 18 | found in languages like Standard ML, Haskell, Erlang, JavaScript, Python and 19 | even Lisp. High-order functions such as map, filter, reduce, zip, etc., 20 | make it easy to write simple and efficient functional code. 21 | ]] 22 | } 23 | 24 | dependencies = { 25 | "lua" 26 | } 27 | 28 | build = { 29 | type = "builtin", 30 | modules = { 31 | fun = "fun.lua", 32 | }, 33 | copy_directories = { "tests" }, 34 | } 35 | -------------------------------------------------------------------------------- /include/TH/THLapack.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_LAPACK_INC 2 | #define TH_LAPACK_INC 3 | 4 | #include "THGeneral.h" 5 | 6 | #define THLapack_(NAME) TH_CONCAT_4(TH,Real,Lapack_,NAME) 7 | 8 | #define THLapackCheck(fmt, func, info , ...) \ 9 | if (info < 0) { \ 10 | THError("Lapack Error in %s : Illegal Argument %d", func, -info); \ 11 | } else if(info > 0) { \ 12 | THError(fmt, func, info, ##__VA_ARGS__); \ 13 | } \ 14 | 15 | #define THLapackCheckWithCleanup(fmt, cleanup, func, info , ...) 
\ 16 | if (info < 0) { \ 17 | cleanup \ 18 | THError("Lapack Error in %s : Illegal Argument %d", func, -info); \ 19 | } else if(info > 0) { \ 20 | cleanup \ 21 | THError(fmt, func, info, ##__VA_ARGS__); \ 22 | } 23 | 24 | #include "generic/THLapack.h" 25 | #include "THGenerateAllTypes.h" 26 | 27 | #endif 28 | -------------------------------------------------------------------------------- /lua/nnx/TreeNLLCriterion.lua: -------------------------------------------------------------------------------- 1 | ------------------------------------------------------------------------ 2 | --[[ TreeNLLCriterion ]]-- 3 | -- Negative Log Likelihood for SoftMaxTrees. 4 | -- Used for maximizing the likelihood of SoftMaxTree outputs. 5 | -- SoftMaxTree outputs a column tensor representing the log likelihood 6 | -- of each target in the batch. Thus SoftMaxTree requires the targets. 7 | -- So this Criterion only computes the negative of those outputs, as 8 | -- well as its corresponding gradients. 9 | ------------------------------------------------------------------------ 10 | local TreeNLLCriterion, parent = torch.class("nn.TreeNLLCriterion", "nn.Criterion") 11 | 12 | function TreeNLLCriterion:__init() 13 | self._module = nn.Mean() 14 | parent.__init(self) 15 | self._output_grad = torch.Tensor{-1} 16 | end 17 | 18 | function TreeNLLCriterion:updateOutput(input, target) 19 | return -self._module:forward(input)[1] 20 | end 21 | 22 | function TreeNLLCriterion:updateGradInput(input, target) 23 | return self._module:backward(input, self._output_grad) 24 | end 25 | -------------------------------------------------------------------------------- /lua/image/win.ui: -------------------------------------------------------------------------------- 1 | 2 | 3 | Display 4 | 5 | 6 | 7 | 0 8 | 0 9 | 640 10 | 480 11 | 12 | 13 | 14 | Form 15 | 16 | 17 | 18 | 0 19 | 20 | 21 | 22 | 23 | 24 | 10 25 | 10 26 | 27 | 28 | 29 | QFrame::NoFrame 30 | 31 | 32 | QFrame::Raised 33 | 34 | 35 | 36 | 37 | 38 | 39 | 
40 | 41 | -------------------------------------------------------------------------------- /luarocks/moses/1.6.1-1/spec/import_spec.lua: -------------------------------------------------------------------------------- 1 | require 'luacov' 2 | local _ = require 'moses' 3 | 4 | context('Import specs', function() 5 | 6 | test('imports all library function to a given context', function() 7 | local funcs = _.functions() 8 | local context = _.import({}) 9 | assert_true(_.all(funcs, function(k, n) return _.has(context, n) end)) 10 | end) 11 | 12 | test('passing "noConflict" will preserve already existing keys', function() 13 | local funcs = _.functions() 14 | local context = _.import({each = 1, all = 2}, true) 15 | assert_true(_.all(funcs, function(k, n) return _.has(context, n) end)) 16 | assert_equal(context.each, 1) 17 | assert_equal(context.all, 2) 18 | end) 19 | 20 | test('The context will default to the global _G if not supplied', function() 21 | local oldG = _.clone(_G,true) 22 | assert_not_equal(_G, oldG) 23 | _.import() 24 | local funcs = _.functions() 25 | _.each(funcs, function(__, fname) 26 | assert_not_nil(_G[fname]) 27 | assert_true(type(_G[fname]) == 'function') 28 | end) 29 | _G = oldG 30 | end) 31 | 32 | end) -------------------------------------------------------------------------------- /include/TH/generic/THStorageCopy.c: -------------------------------------------------------------------------------- 1 | #ifndef TH_GENERIC_FILE 2 | #define TH_GENERIC_FILE "generic/THStorageCopy.c" 3 | #else 4 | 5 | void THStorage_(rawCopy)(THStorage *storage, real *src) 6 | { 7 | long i; 8 | for(i = 0; i < storage->size; i++) 9 | storage->data[i] = src[i]; 10 | } 11 | 12 | void THStorage_(copy)(THStorage *storage, THStorage *src) 13 | { 14 | THArgCheck(storage->size == src->size, 2, "size mismatch"); 15 | THStorage_(rawCopy)(storage, src->data); 16 | } 17 | 18 | 19 | #define IMPLEMENT_THStorage_COPY(TYPENAMESRC) \ 20 | void THStorage_(copy##TYPENAMESRC)(THStorage 
*storage, TH##TYPENAMESRC##Storage *src) \ 21 | { \ 22 | long i; \ 23 | THArgCheck(storage->size == src->size, 2, "size mismatch"); \ 24 | for(i = 0; i < storage->size; i++) \ 25 | storage->data[i] = (real)src->data[i]; \ 26 | } 27 | 28 | IMPLEMENT_THStorage_COPY(Byte) 29 | IMPLEMENT_THStorage_COPY(Char) 30 | IMPLEMENT_THStorage_COPY(Short) 31 | IMPLEMENT_THStorage_COPY(Int) 32 | IMPLEMENT_THStorage_COPY(Long) 33 | IMPLEMENT_THStorage_COPY(Float) 34 | IMPLEMENT_THStorage_COPY(Double) 35 | 36 | #endif 37 | -------------------------------------------------------------------------------- /lua/luarocks/fetch/hg_http.lua: -------------------------------------------------------------------------------- 1 | 2 | --- Fetch back-end for retrieving sources from hg repositories 3 | -- that use http:// transport. For example, for fetching a repository 4 | -- that requires the following command line: 5 | -- `hg clone http://example.com/foo` 6 | -- you can use this in the rockspec: 7 | -- source = { url = "hg+http://example.com/foo" } 8 | local hg_http = {} 9 | 10 | local hg = require("luarocks.fetch.hg") 11 | 12 | --- Download sources for building a rock, using hg over http. 13 | -- @param rockspec table: The rockspec table 14 | -- @param extract boolean: Unused in this module (required for API purposes.) 15 | -- @param dest_dir string or nil: If set, will extract to the given directory. 16 | -- @return (string, string) or (nil, string): The absolute pathname of 17 | -- the fetched source tarball and the temporary directory created to 18 | -- store it; or nil and an error message. 
-- SoftPlus transfer function: a smooth approximation of ReLU.
-- f(x) = 1/beta * log(1 + exp(beta * x)); above `threshold` the THNN
-- backend is given leeway to treat the function as linear.
local SoftPlus, parent = torch.class('nn.SoftPlus', 'nn.Module')

function SoftPlus:__init(beta)
   parent.__init(self)
   self.beta = beta or 1 -- Beta controls sharpness of transfer function
   self.threshold = 20 -- Avoid floating point issues with exp(x), x>20
end

function SoftPlus:updateOutput(input)
   -- f(x) = 1/beta * log(1 + exp(beta * x))
   input.THNN.SoftPlus_updateOutput(
      input:cdata(),
      self.output:cdata(),
      self.beta,
      self.threshold
   )
   return self.output
end

function SoftPlus:updateGradInput(input, gradOutput)
   -- d/dx[log(1+exp(k*x))/k] = exp(kx) / (exp(kx) + 1)
   -- SINCE
   -- y = (1/k)*log(1+exp(k*x)) --> x = (1/k)*log(exp(k*y)-1)
   -- THEREFORE:
   -- d/dx(f(x)) = (exp(k*y) - 1) / exp(k*y)
   -- (the saved self.output is passed so the backend can use y directly)
   input.THNN.SoftPlus_updateGradInput(
      input:cdata(),
      gradOutput:cdata(),
      self.gradInput:cdata(),
      self.output:cdata(),
      self.beta,
      self.threshold
   )
   return self.gradInput
end
14 | self.inplace = false 15 | end 16 | end 17 | 18 | function LeakyReLU:updateOutput(input) 19 | input.THNN.LeakyReLU_updateOutput( 20 | input:cdata(), 21 | self.output:cdata(), 22 | self.negval, 23 | self.inplace 24 | ) 25 | return self.output 26 | end 27 | 28 | function LeakyReLU:updateGradInput(input, gradOutput) 29 | input.THNN.LeakyReLU_updateGradInput( 30 | input:cdata(), 31 | gradOutput:cdata(), 32 | self.gradInput:cdata(), 33 | self.negval, 34 | self.inplace 35 | ) 36 | return self.gradInput 37 | end 38 | 39 | function LeakyReLU:__tostring__() 40 | return torch.type(self) .. string.format('(%g)', self.negval) 41 | end 42 | -------------------------------------------------------------------------------- /lua/nnx/LA.lua: -------------------------------------------------------------------------------- 1 | require 'nn' 2 | 3 | 4 | --Based on: http://arxiv.org/pdf/1412.6830v1.pdf 5 | --If input dimension is larger than 1, a reshape is needed after usage. 6 | --Usage: 7 | ------------------------------------ 8 | -- model:add(LA(4, 3 * 32 * 32)) 9 | -- model:add(nn.Reshape(3,32,32)) 10 | ------------------------------------ 11 | 12 | 13 | function LA(s, inputSize) 14 | local module = nn.Sequential() 15 | local maxmodules = {} 16 | for i = 1,s do 17 | maxmodules[i] = nn.Sequential() 18 | maxmodules[i]:add(nn.MulConstant(-1.0)) 19 | maxmodules[i]:add(nn.Add(inputSize,true)) 20 | maxmodules[i]:add(nn.ReLU()) 21 | maxmodules[i]:add(nn.CMul(inputSize)) 22 | end 23 | maxmodules[s+1] = nn.Sequential() 24 | maxmodules[s+1]:add(nn.ReLU()) 25 | 26 | local catmodule = nn.ConcatTable() 27 | print('number of modules is: '.. 
#maxmodules) 28 | for i=1,#maxmodules do 29 | catmodule:add(maxmodules[i]) 30 | end 31 | 32 | module:add(catmodule) 33 | 34 | 35 | module:add(nn.JoinTable(1)) 36 | module:add(nn.Reshape(s + 1,inputSize)) 37 | 38 | module:add(nn.Sum(1)) 39 | 40 | 41 | return module 42 | end 43 | 44 | -------------------------------------------------------------------------------- /lua/luarocks/build/command.lua: -------------------------------------------------------------------------------- 1 | 2 | --- Build back-end for raw listing of commands in rockspec files. 3 | --module("luarocks.build.command", package.seeall) 4 | local command = {} 5 | 6 | local fs = require("luarocks.fs") 7 | local util = require("luarocks.util") 8 | 9 | --- Driver function for the "command" build back-end. 10 | -- @param rockspec table: the loaded rockspec. 11 | -- @return boolean or (nil, string): true if no errors ocurred, 12 | -- nil and an error message otherwise. 13 | function command.run(rockspec) 14 | assert(type(rockspec) == "table") 15 | 16 | local build = rockspec.build 17 | 18 | util.variable_substitutions(build, rockspec.variables) 19 | 20 | if build.build_command then 21 | util.printout(build.build_command) 22 | if not fs.execute(build.build_command) then 23 | return nil, "Failed building." 24 | end 25 | end 26 | if build.install_command then 27 | util.printout(build.install_command) 28 | if not fs.execute(build.install_command) then 29 | return nil, "Failed installing." 
-- HardTanh: element-wise clamp of the input to [min_value, max_value]
-- (defaults [-1, 1]); optionally operates in place on the input tensor.
local HardTanh, parent = torch.class('nn.HardTanh', 'nn.Module')

function HardTanh:__init(min_value, max_value, inplace)
   parent.__init(self)
   self.min_val = min_value or -1
   self.max_val = max_value or 1
   self.inplace = inplace or false
   -- reject accidental non-boolean third arguments (e.g. a number)
   if (inplace and type(inplace) ~= 'boolean') then
      error('in-place flag must be boolean')
   end
   assert(self.max_val>self.min_val, 'max_value must be larger than min_value')
end

function HardTanh:updateOutput(input)
   -- Re-apply defaults here; presumably for models serialized before
   -- min_val/max_val existed as fields -- TODO confirm.
   self.min_val = self.min_val or -1
   self.max_val = self.max_val or 1
   input.THNN.HardTanh_updateOutput(
      input:cdata(),
      self.output:cdata(),
      self.min_val,
      self.max_val,
      self.inplace or false
   )
   return self.output
end

function HardTanh:updateGradInput(input, gradOutput)
   -- Gradient computation is delegated to the THNN backend.
   input.THNN.HardTanh_updateGradInput(
      input:cdata(),
      gradOutput:cdata(),
      self.gradInput:cdata(),
      self.min_val,
      self.max_val,
      self.inplace or false
   )
   return self.gradInput
end
self:add(nn.MulConstant(kW*kH)) 21 | if pnorm == 2 then 22 | self:add(nn.Sqrt()) 23 | else 24 | self:add(nn.Power(1/pnorm)) 25 | end 26 | end 27 | 28 | -- the module is a Sequential: by default, it'll try to learn the parameters 29 | -- of the sub sampler: we avoid that by redefining its methods. 30 | function SpatialLPPooling:reset() 31 | end 32 | 33 | function SpatialLPPooling:accGradParameters() 34 | end 35 | 36 | function SpatialLPPooling:accUpdateGradParameters() 37 | end 38 | 39 | function SpatialLPPooling:zeroGradParameters() 40 | end 41 | 42 | function SpatialLPPooling:updateParameters() 43 | end 44 | -------------------------------------------------------------------------------- /lua/pl/url.lua: -------------------------------------------------------------------------------- 1 | --- Python-style URL quoting library. 2 | -- 3 | -- @module pl.url 4 | 5 | local url = {} 6 | 7 | local function quote_char(c) 8 | return string.format("%%%02X", string.byte(c)) 9 | end 10 | 11 | --- Quote the url, replacing special characters using the '%xx' escape. 12 | -- @string s the string 13 | -- @bool quote_plus Also escape slashes and replace spaces by plus signs. 14 | function url.quote(s, quote_plus) 15 | if type(s) ~= "string" then 16 | return s 17 | end 18 | 19 | s = s:gsub("\n", "\r\n") 20 | s = s:gsub("([^A-Za-z0-9 %-_%./])", quote_char) 21 | if quote_plus then 22 | s = s:gsub(" ", "+") 23 | s = s:gsub("/", quote_char) 24 | else 25 | s = s:gsub(" ", "%%20") 26 | end 27 | 28 | return s 29 | end 30 | 31 | local function unquote_char(h) 32 | return string.char(tonumber(h, 16)) 33 | end 34 | 35 | --- Unquote the url, replacing '%xx' escapes and plus signs. 
----------------------------------------------------------------------------
-- LuaJIT profiler zones.
--
-- Copyright (C) 2005-2017 Mike Pall. All rights reserved.
-- Released under the MIT license. See Copyright Notice in luajit.h
----------------------------------------------------------------------------
--
-- Simple hierarchical zone model: the module value itself is a stack of
-- zone names. Calling it with a name pushes a zone, calling it with no
-- argument pops one, and :get() peeks at the innermost zone.
--
-- Example usage:
--
--   local zone = require("jit.zone")
--   zone("AI")
--   ...
--   zone("A*")
--   ...
--   print(zone:get()) --> "A*"
--   ...
--   zone()
--   ...
--   print(zone:get()) --> "AI"
--   ...
--   zone()
--
----------------------------------------------------------------------------

local remove = table.remove

local stack = {}

-- Drop every zone from the stack.
function stack.flush(t)
  local n = #t
  while n > 0 do
    t[n] = nil
    n = n - 1
  end
end

-- Peek at the innermost (most recently pushed) zone, or nil when empty.
function stack.get(t)
  return t[#t]
end

return setmetatable(stack, {
  __call = function(t, zone)
    if not zone then
      -- Pop; popping an empty stack is a usage error.
      return (assert(remove(t), "empty zone stack"))
    end
    t[#t+1] = zone
  end
})
It is also possible to specify `x` and 6 | `y` locations corresponding to each point in `z` . If a terminal with 7 | interactive capabilities is being used by `Gnuplot` backend (like `x11` or 8 | `wxt` or `qt`), then rotating, zooming is also possible. 9 | 10 | 11 | ### gnuplot.splot(z) ### 12 | 13 | Plot surface ` z ` in 3D. 14 | ```lua 15 | x = torch.linspace(-1,1) 16 | xx = torch.Tensor(x:size(1),x:size(1)):zero():addr(1,x,x) 17 | xx = xx*math.pi*6 18 | gnuplot.splot(torch.sin(xx)) 19 | ``` 20 | ![](plot_splot.png) 21 | 22 | It is also possible to specify the `x` and `y` locations of each 23 | point in `z` by `gnuplot.splot(x,y,z)`. In this `x` and `y` has 24 | to be the same shape as `z`. 25 | 26 | One can also display multiple surfaces at a time. 27 | 28 | ```lua 29 | x = torch.linspace(-1,1) 30 | xx = torch.Tensor(x:size(1),x:size(1)):zero():addr(1,x,x) 31 | xx = xx*math.pi*2 32 | gnuplot.splot({torch.sin(xx)},{torch.sin(xx)+2}) 33 | ``` 34 | ![](plot_splot2.png) 35 | -------------------------------------------------------------------------------- /luarocks/luaffi/scm-1/doc/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to luaffifb 2 | We want to make contributing to this project as easy and transparent as 3 | possible. 4 | 5 | ## Our Development Process 6 | ... (in particular how this is synced with internal changes to the project) 7 | 8 | ## Pull Requests 9 | We actively welcome your pull requests. 10 | 1. Fork the repo and create your branch from `master`. 11 | 2. If you've added code that should be tested, add tests 12 | 3. If you haven't already, complete the Contributor License Agreement ("CLA"). 13 | 14 | ## Contributor License Agreement ("CLA") 15 | In order to accept your pull request, we need you to submit a CLA. You only need 16 | to do this once to work on any of Facebook's open source projects. 
-- Kullback-Leibler divergence criterion. The input is expected to contain
-- log-probabilities; the target contains probabilities of the same shape.
local DistKLDivCriterion, parent = torch.class('nn.DistKLDivCriterion', 'nn.Criterion')

function DistKLDivCriterion:__init()
   parent.__init(self)
   -- average the loss over all elements by default
   self.sizeAverage = true
end

function DistKLDivCriterion:updateOutput(input, target)
   -- isSameSizeAs checks dimensionality and every size at once, replacing
   -- the previous allocation of two temporary LongTensors per forward call.
   assert(input:isSameSizeAs(target),
          'input and target should have the same size')
   self.output_tensor = self.output_tensor or input.new(1)
   input.THNN.DistKLDivCriterion_updateOutput(
      input:cdata(),
      target:cdata(),
      self.output_tensor:cdata(),
      self.sizeAverage
   )
   self.output = self.output_tensor[1]
   return self.output
end

function DistKLDivCriterion:updateGradInput(input, target)
   assert(input:isSameSizeAs(target),
          'input and target should have the same size')
   input.THNN.DistKLDivCriterion_updateGradInput(
      input:cdata(),
      target:cdata(),
      self.gradInput:cdata(),
      self.sizeAverage
   )
   return self.gradInput
end
-------------------------------------------------------------------------------- 1 | 2 | --- Module implementing the luarocks-admin "refresh_cache" command. 3 | --module("luarocks.refresh_cache", package.seeall) 4 | local refresh_cache = {} 5 | package.loaded["luarocks.refresh_cache"] = refresh_cache 6 | 7 | local util = require("luarocks.util") 8 | local cfg = require("luarocks.cfg") 9 | local cache = require("luarocks.cache") 10 | 11 | refresh_cache.help_summary = "Refresh local cache of a remote rocks server." 12 | refresh_cache.help_arguments = "[--from=]" 13 | refresh_cache.help = [[ 14 | The flag --from indicates which server to use. 15 | If not given, the default server set in the upload_server variable 16 | from the configuration file is used instead. 17 | ]] 18 | 19 | function refresh_cache.run(...) 20 | local flags = util.parse_flags(...) 21 | local server, upload_server = cache.get_upload_server(flags["server"]) 22 | if not server then return nil, upload_server end 23 | local download_url = cache.get_server_urls(server, upload_server) 24 | 25 | local ok, err = cache.refresh_local_cache(server, download_url, cfg.upload_user, cfg.upload_password) 26 | if not ok then 27 | return nil, err 28 | else 29 | return true 30 | end 31 | end 32 | 33 | 34 | return refresh_cache 35 | -------------------------------------------------------------------------------- /bin/luarocks.bat: -------------------------------------------------------------------------------- 1 | ::rem:: --[[ 2 | @setlocal& set PATH=C:/torch/bin;%PATH% & set luafile="%~f0" & if exist "%~f0.bat" set luafile="%~f0.bat" 3 | @C:/torch/bin/luajit.exe %luafile% %*& exit /b ]] 4 | 5 | package.path = [[C:/torch/lua/?.lua;C:/torch/lua/?/init.lua;]]..package.path 6 | 7 | -- this should be loaded first. 
8 | local cfg = require("luarocks.cfg") 9 | 10 | local loader = require("luarocks.loader") 11 | local command_line = require("luarocks.command_line") 12 | 13 | program_description = "LuaRocks main command-line interface" 14 | 15 | commands = { 16 | help = "luarocks.help", 17 | pack = "luarocks.pack", 18 | unpack = "luarocks.unpack", 19 | build = "luarocks.build", 20 | install = "luarocks.install", 21 | search = "luarocks.search", 22 | list = "luarocks.list", 23 | remove = "luarocks.remove", 24 | make = "luarocks.make", 25 | download = "luarocks.download", 26 | path = "luarocks.path_cmd", 27 | show = "luarocks.show", 28 | new_version = "luarocks.new_version", 29 | lint = "luarocks.lint", 30 | write_rockspec = "luarocks.write_rockspec", 31 | purge = "luarocks.purge", 32 | doc = "luarocks.doc", 33 | upload = "luarocks.upload", 34 | config = "luarocks.config_cmd", 35 | } 36 | 37 | command_line.run_command(...) 38 | -------------------------------------------------------------------------------- /lua/nn/ELU.lua: -------------------------------------------------------------------------------- 1 | local ELU, parent = torch.class('nn.ELU', 'nn.Module') 2 | 3 | --[[ 4 | Djork-Arné Clevert, Thomas Unterthiner, Sepp Hochreiter 5 | Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) 6 | http://arxiv.org/pdf/1511.07289.pdf 7 | --]] 8 | 9 | function ELU:__init(alpha, inplace) 10 | parent.__init(self) 11 | self.alpha = alpha or 1 12 | assert(type(self.alpha) == 'number') 13 | self.inplace = inplace or false 14 | assert(type(self.inplace) == 'boolean') 15 | end 16 | 17 | function ELU:updateOutput(input) 18 | local inplace = self.inplace or false 19 | 20 | input.THNN.ELU_updateOutput( 21 | input:cdata(), 22 | self.output:cdata(), 23 | self.alpha, 24 | inplace 25 | ) 26 | return self.output 27 | end 28 | 29 | function ELU:updateGradInput(input, gradOutput) 30 | local inplace = self.inplace or false 31 | 32 | input.THNN.ELU_updateGradInput( 33 | input:cdata(), 
-- LuaJIT FFI declarations mirroring sundown's HTML renderer header.
-- NOTE: the cdef block below is a runtime string parsed by the FFI and
-- must match the C header exactly, so comments are only added outside it.
local ffi = require 'ffi'

ffi.cdef[[

struct sd_html_renderopt {
  struct {
    int header_count;
    int current_level;
    int level_offset;
  } toc_data;

  unsigned int flags;

  /* extra callbacks */
  void (*link_attributes)(struct sd_buf *ob, const struct sd_buf *url, void *self);
};

typedef enum {
  HTML_SKIP_HTML = (1 << 0),
  HTML_SKIP_STYLE = (1 << 1),
  HTML_SKIP_IMAGES = (1 << 2),
  HTML_SKIP_LINKS = (1 << 3),
  HTML_EXPAND_TABS = (1 << 4),
  HTML_SAFELINK = (1 << 5),
  HTML_TOC = (1 << 6),
  HTML_HARD_WRAP = (1 << 7),
  HTML_USE_XHTML = (1 << 8),
  HTML_ESCAPE = (1 << 9),
} sd_html_render_mode;

typedef enum {
  HTML_TAG_NONE = 0,
  HTML_TAG_OPEN,
  HTML_TAG_CLOSE,
} sd_html_tag;

int sd_html_is_tag(const uint8_t *tag_data, size_t tag_size, const char *tagname);

void sd_html_renderer(struct sd_callbacks *callbacks, struct sd_html_renderopt *options_ptr, unsigned int render_flags);

void sd_html_toc_renderer(struct sd_callbacks *callbacks, struct sd_html_renderopt *options_ptr);

void sd_html_smartypants(struct sd_buf *ob, const uint8_t *text, size_t size);

]]
5 | url = "git://github.com/torch/gnuplot.git", 6 | } 7 | 8 | description = { 9 | summary = "Torch interface to Gnuplot", 10 | detailed = [[ 11 | ]], 12 | homepage = "https://github.com/torch/gnuplot", 13 | license = "BSD" 14 | } 15 | 16 | dependencies = { 17 | "lua >= 5.1", 18 | "torch >= 7.0", 19 | "paths >= 1.0", 20 | } 21 | 22 | build = { 23 | type = "builtin", 24 | modules = { 25 | ["gnuplot.init"] = "init.lua", 26 | ["gnuplot.gnuplot"] = "gnuplot.lua", 27 | ["gnuplot.hist"] = "hist.lua" 28 | }, 29 | 30 | install = { 31 | lua = { 32 | -- this is ugly (but this is luarocks) 33 | ["gnuplot.README"] = "README.md", 34 | ["gnuplot.doc.plot_filled"] = "doc/plot_filled.png", 35 | ["gnuplot.doc.plot_hist"] = "doc/plot_hist.png", 36 | ["gnuplot.doc.plot_imagesc"] = "doc/plot_imagesc.png", 37 | ["gnuplot.doc.plot_sincos"] = "doc/plot_sincos.png", 38 | ["gnuplot.doc.plot_splot"] = "doc/plot_splot.png", 39 | ["gnuplot.doc.plot_splot2"] = "doc/plot_splot2.png", 40 | ["gnuplot.doc.plot_x"] = "doc/plot_x.png", 41 | ["gnuplot.doc.plot_xyf"] = "doc/plot_xyf.png" 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /include/lualib.h: -------------------------------------------------------------------------------- 1 | /* 2 | ** Standard library header. 3 | ** Copyright (C) 2005-2017 Mike Pall. 
See Copyright Notice in luajit.h 4 | */ 5 | 6 | #ifndef _LUALIB_H 7 | #define _LUALIB_H 8 | 9 | #include "lua.h" 10 | 11 | #define LUA_FILEHANDLE "FILE*" 12 | 13 | #define LUA_COLIBNAME "coroutine" 14 | #define LUA_MATHLIBNAME "math" 15 | #define LUA_STRLIBNAME "string" 16 | #define LUA_TABLIBNAME "table" 17 | #define LUA_IOLIBNAME "io" 18 | #define LUA_OSLIBNAME "os" 19 | #define LUA_LOADLIBNAME "package" 20 | #define LUA_DBLIBNAME "debug" 21 | #define LUA_BITLIBNAME "bit" 22 | #define LUA_JITLIBNAME "jit" 23 | #define LUA_FFILIBNAME "ffi" 24 | 25 | LUALIB_API int luaopen_base(lua_State *L); 26 | LUALIB_API int luaopen_math(lua_State *L); 27 | LUALIB_API int luaopen_string(lua_State *L); 28 | LUALIB_API int luaopen_table(lua_State *L); 29 | LUALIB_API int luaopen_io(lua_State *L); 30 | LUALIB_API int luaopen_os(lua_State *L); 31 | LUALIB_API int luaopen_package(lua_State *L); 32 | LUALIB_API int luaopen_debug(lua_State *L); 33 | LUALIB_API int luaopen_bit(lua_State *L); 34 | LUALIB_API int luaopen_jit(lua_State *L); 35 | LUALIB_API int luaopen_ffi(lua_State *L); 36 | 37 | LUALIB_API void luaL_openlibs(lua_State *L); 38 | 39 | #ifndef lua_assert 40 | #define lua_assert(x) ((void)0) 41 | #endif 42 | 43 | #endif 44 | -------------------------------------------------------------------------------- /lua/luarocks/fetch/git_http.lua: -------------------------------------------------------------------------------- 1 | 2 | --- Fetch back-end for retrieving sources from Git repositories 3 | -- that use http:// transport. For example, for fetching a repository 4 | -- that requires the following command line: 5 | -- `git clone http://example.com/foo.git` 6 | -- you can use this in the rockspec: 7 | -- source = { url = "git+http://example.com/foo.git" } 8 | -- Prefer using the normal git:// fetch mode as it is more widely 9 | -- available in older versions of LuaRocks. 
10 | --module("luarocks.fetch.git_http", package.seeall) 11 | local git_http = {} 12 | 13 | local git = require("luarocks.fetch.git") 14 | 15 | --- Fetch sources for building a rock from a local Git repository. 16 | -- @param rockspec table: The rockspec table 17 | -- @param extract boolean: Unused in this module (required for API purposes.) 18 | -- @param dest_dir string or nil: If set, will extract to the given directory. 19 | -- @return (string, string) or (nil, string): The absolute pathname of 20 | -- the fetched source tarball and the temporary directory created to 21 | -- store it; or nil and an error message. 22 | function git_http.get_sources(rockspec, extract, dest_dir) 23 | rockspec.source.url = rockspec.source.url:gsub("^git.", "") 24 | return git.get_sources(rockspec, extract, dest_dir, "--") 25 | end 26 | 27 | return git_http 28 | -------------------------------------------------------------------------------- /lua/nn/MulConstant.lua: -------------------------------------------------------------------------------- 1 | local MulConstant, parent = torch.class('nn.MulConstant', 'nn.Module') 2 | 3 | function MulConstant:__init(constant_scalar,ip) 4 | parent.__init(self) 5 | assert(type(constant_scalar) == 'number', 'input is not scalar!') 6 | self.constant_scalar = constant_scalar 7 | 8 | -- default for inplace is false 9 | self.inplace = ip or false 10 | if (ip and type(ip) ~= 'boolean') then 11 | error('in-place flag must be boolean') 12 | end 13 | end 14 | 15 | function MulConstant:updateOutput(input) 16 | if self.inplace then 17 | input:mul(self.constant_scalar) 18 | self.output:set(input) 19 | else 20 | self.output:resizeAs(input) 21 | self.output:copy(input) 22 | self.output:mul(self.constant_scalar) 23 | end 24 | return self.output 25 | end 26 | 27 | function MulConstant:updateGradInput(input, gradOutput) 28 | if self.gradInput then 29 | if self.inplace then 30 | gradOutput:mul(self.constant_scalar) 31 | self.gradInput:set(gradOutput) 32 | -- 
-- LuaRocks rockspec for sundown: LuaJIT FFI bindings to the Sundown
-- Markdown library. The C sources are compiled into the `libsundown`
-- module by LuaRocks' builtin build backend.
package = "sundown"
version = "scm-1"

source = {
   url = "git://github.com/torch/sundown-ffi.git"
}

description = {
   summary = "A FFI interface to the Markdown implementation of the Sundown library",
   detailed = [[
   ]],
   homepage = "https://github.com/torch/sundown-ffi",
   license = "BSD"
}

dependencies = {
   "lua >= 5.1",
}

build = {
   type = "builtin",
   -- mdcat.lua is installed as the `mdcat` command-line script
   install = {
      bin = {
         mdcat = "mdcat.lua"
      }
   },
   modules = {
      ["sundown.env"] = "env.lua",
      ["sundown.init"] = "init.lua",
      ["sundown.sdcdefs"] = "sdcdefs.lua",
      ["sundown.htmlcdefs"] = "htmlcdefs.lua",
      ["sundown.html"] = "html.lua",
      ["sundown.ascii"] = "ascii.lua",
      -- shared library built from the vendored Sundown C sources
      libsundown = {
         sources = {
            "src/autolink.c",
            "src/buffer.c",
            "src/markdown.c",
            "src/stack.c",
            "html/houdini_href_e.c",
            "html/houdini_html_e.c",
            "html/html.c",
            "html/html_smartypants.c"
         },
         incdirs = {
            "src/",
            "html/"
         }
      }
   }
}
6 | 7 | function L1Penalty:__init(l1weight, sizeAverage, provideOutput) 8 | parent.__init(self) 9 | self.l1weight = l1weight 10 | self.sizeAverage = sizeAverage or false 11 | if provideOutput == nil then 12 | self.provideOutput = true 13 | else 14 | self.provideOutput = provideOutput 15 | end 16 | end 17 | 18 | function L1Penalty:updateOutput(input) 19 | local m = self.l1weight 20 | if self.sizeAverage == true then 21 | m = m/input:nElement() 22 | end 23 | local loss = m*input:norm(1) 24 | self.loss = loss 25 | self.output = input 26 | return self.output 27 | end 28 | 29 | function L1Penalty:updateGradInput(input, gradOutput) 30 | local m = self.l1weight 31 | if self.sizeAverage == true then 32 | m = m/input:nElement() 33 | end 34 | 35 | self.gradInput:resizeAs(input):copy(input):sign():mul(m) 36 | 37 | if self.provideOutput == true then 38 | self.gradInput:add(gradOutput) 39 | end 40 | 41 | return self.gradInput 42 | end 43 | -------------------------------------------------------------------------------- /lua/nn/Copy.lua: -------------------------------------------------------------------------------- 1 | local Copy, parent = torch.class('nn.Copy', 'nn.Module') 2 | 3 | function Copy:__init(intype, outtype, forceCopy, dontCast) 4 | intype = intype or torch.Tensor.__typename 5 | outtype = outtype or torch.Tensor.__typename 6 | 7 | self.dontCast = dontCast 8 | 9 | parent.__init(self) 10 | self.gradInput = torch.getmetatable(intype).new() 11 | self.output = torch.getmetatable(outtype).new() 12 | 13 | if (not forceCopy) and intype == outtype then 14 | 15 | self.updateOutput = function(self, input) 16 | self.output:set(input) 17 | return input 18 | end 19 | 20 | self.updateGradInput = function(self, input, gradOutput) 21 | self.gradInput:set(gradOutput) 22 | return gradOutput 23 | end 24 | end 25 | end 26 | 27 | function Copy:updateOutput(input) 28 | self.output:resize(input:size()):copy(input) 29 | return self.output 30 | end 31 | 32 | function 
local L1HingeEmbeddingCriterion, parent = torch.class('nn.L1HingeEmbeddingCriterion', 'nn.Criterion')

-- Hinge loss on the L1 distance between a pair of embeddings:
-- for y == 1 the loss is ||x1 - x2||_1, for y == -1 it is
-- max(0, margin - ||x1 - x2||_1).
function L1HingeEmbeddingCriterion:__init(margin)
   parent.__init(self)
   self.margin = margin or 1
   self.gradInput = {torch.Tensor(), torch.Tensor()}
end

function L1HingeEmbeddingCriterion:updateOutput(input, y)
   local dist = input[1]:dist(input[2], 1)
   if y == -1 then
      dist = math.max(0, self.margin - dist)
   end
   self.output = dist
   return self.output
end

-- Sign with random tie-breaking at zero (returns -1 or 1 with equal
-- probability when t == 0), used as the element-wise L1 subgradient.
local function mathsign(t)
   if t > 0 then return 1 end
   if t < 0 then return -1 end
   return 2 * torch.random(2) - 3
end

function L1HingeEmbeddingCriterion:updateGradInput(input, y)
   local grad1, grad2 = self.gradInput[1], self.gradInput[2]
   grad2:resizeAs(input[2])
   grad1:resizeAs(input[1]):copy(input[1]):add(-1, input[2])
   local dist = grad1:norm(1)
   grad1:apply(mathsign) -- element-wise L1 (sub)gradient
   if y == -1 then -- dissimilar pair: apply the hinge
      if dist > self.margin then
         grad1:zero() -- outside the margin: no gradient flows
      else
         grad1:mul(-1)
      end
   end
   grad2:zero():add(-1, grad1)
   return self.gradInput
end
TemporalMaxPooling, parent = torch.class('nn.TemporalMaxPooling', 'nn.Module') 2 | 3 | function TemporalMaxPooling:__init(kW, dW) 4 | parent.__init(self) 5 | 6 | dW = dW or kW 7 | 8 | self.kW = kW 9 | self.dW = dW 10 | end 11 | 12 | function TemporalMaxPooling:updateOutput(input) 13 | self.indices = self.indices or torch.LongTensor() 14 | if torch.typename(input):find('torch%.Cuda.*Tensor') then 15 | self.indices = torch.CudaLongTensor and self.indices:cudaLong() or self.indices 16 | else 17 | self.indices = self.indices:long() 18 | end 19 | input.THNN.TemporalMaxPooling_updateOutput( 20 | input:cdata(), self.output:cdata(), 21 | self.indices:cdata(), self.kW, self.dW 22 | ) 23 | return self.output 24 | end 25 | 26 | function TemporalMaxPooling:updateGradInput(input, gradOutput) 27 | if self.gradInput then 28 | input.THNN.TemporalMaxPooling_updateGradInput( 29 | input:cdata(), gradOutput:cdata(), 30 | self.gradInput:cdata(), self.indices:cdata(), 31 | self.kW, self.dW 32 | ) 33 | return self.gradInput 34 | end 35 | end 36 | 37 | function TemporalMaxPooling:empty() 38 | self:clearState() 39 | end 40 | 41 | function TemporalMaxPooling:clearState() 42 | if self.indices then self.indices:set() end 43 | return parent.clearState(self) 44 | end 45 | -------------------------------------------------------------------------------- /lua/nn/MultiCriterion.lua: -------------------------------------------------------------------------------- 1 | local MultiCriterion, parent = torch.class('nn.MultiCriterion', 'nn.Criterion') 2 | 3 | function MultiCriterion:__init() 4 | parent.__init(self) 5 | self.criterions = {} 6 | self.weights = torch.DoubleStorage() 7 | end 8 | 9 | function MultiCriterion:add(criterion, weight) 10 | assert(criterion, 'no criterion provided') 11 | weight = weight or 1 12 | table.insert(self.criterions, criterion) 13 | self.weights:resize(#self.criterions, true) 14 | self.weights[#self.criterions] = weight 15 | return self 16 | end 17 | 18 | function 
MultiCriterion:updateOutput(input, target) 19 | self.output = 0 20 | for i=1,#self.criterions do 21 | self.output = self.output + self.weights[i]*self.criterions[i]:updateOutput(input, target) 22 | end 23 | return self.output 24 | end 25 | 26 | function MultiCriterion:updateGradInput(input, target) 27 | self.gradInput = nn.utils.recursiveResizeAs(self.gradInput, input) 28 | nn.utils.recursiveFill(self.gradInput, 0) 29 | for i=1,#self.criterions do 30 | nn.utils.recursiveAdd(self.gradInput, self.weights[i], self.criterions[i]:updateGradInput(input, target)) 31 | end 32 | return self.gradInput 33 | end 34 | 35 | function MultiCriterion:type(type) 36 | for i,criterion in ipairs(self.criterions) do 37 | criterion:type(type) 38 | end 39 | return parent.type(self, type) 40 | end 41 | -------------------------------------------------------------------------------- /lua/nn/NarrowTable.lua: -------------------------------------------------------------------------------- 1 | local NarrowTable, parent = torch.class('nn.NarrowTable', 'nn.Module') 2 | 3 | function NarrowTable:__init(offset, length) 4 | parent.__init(self) 5 | self.offset = offset 6 | self.length = length or 1 7 | if not offset then 8 | error('nn.NarrowTable(offset, length)') 9 | end 10 | 11 | self.output = {} 12 | self.gradInput = {} 13 | end 14 | 15 | function NarrowTable:updateOutput(input) 16 | for k,v in ipairs(self.output) do self.output[k] = nil end 17 | for i=1,self.length do 18 | self.output[i] = input[self.offset+i-1] 19 | end 20 | return self.output 21 | end 22 | 23 | function NarrowTable:updateGradInput(input, gradOutput) 24 | for i=1,#gradOutput do 25 | self.gradInput[self.offset+i-1] = gradOutput[i] 26 | end 27 | for i=1,#input do 28 | if (i < self.offset) or (i >= self.offset + self.length) then 29 | self.gradInput[i] = nn.utils.recursiveResizeAs(self.gradInput[i], input[i]) 30 | nn.utils.recursiveFill(self.gradInput[i], 0) 31 | end 32 | end 33 | for i=#input+1,#self.gradInput do self.gradInput[i] 
= nil end 34 | return self.gradInput 35 | end 36 | 37 | function NarrowTable:type(type, tensorCache) 38 | self.output = {} 39 | self.gradInput = {} 40 | return parent.type(self, type, tensorCache) 41 | end 42 | 43 | NarrowTable.clearState = nn.Identity.clearState 44 | -------------------------------------------------------------------------------- /lua/nn/SplitTable.lua: -------------------------------------------------------------------------------- 1 | local SplitTable, parent = torch.class('nn.SplitTable', 'nn.Module') 2 | 3 | function SplitTable:__init(dimension, nInputDims) 4 | parent.__init(self) 5 | self.dimension = dimension 6 | self.nInputDims = nInputDims 7 | end 8 | 9 | function SplitTable:_getPositiveDimension(input) 10 | local dimension = self.dimension 11 | if dimension < 0 then 12 | dimension = input:dim() + dimension + 1 13 | elseif self.nInputDims and input:dim()==(self.nInputDims+1) then 14 | dimension = dimension + 1 15 | end 16 | return dimension 17 | end 18 | 19 | function SplitTable:updateOutput(input) 20 | local dimension = self:_getPositiveDimension(input) 21 | local slices = input:size(dimension) 22 | 23 | local currentOutput= {} 24 | for i=1,slices do 25 | currentOutput[#currentOutput+1] = input:select(dimension,i) 26 | end 27 | self.output = currentOutput 28 | return self.output 29 | end 30 | 31 | function SplitTable:updateGradInput(input, gradOutput) 32 | local dimension = self:_getPositiveDimension(input) 33 | local slices = input:size(dimension) 34 | if self.gradInput then 35 | self.gradInput:resizeAs(input) 36 | 37 | for i=1,slices do 38 | local currentGradInput = gradOutput[i]; 39 | self.gradInput:select(dimension,i):copy(currentGradInput) 40 | end 41 | end 42 | return self.gradInput 43 | end 44 | -------------------------------------------------------------------------------- /lua/nn/SpatialAdaptiveMaxPooling.lua: -------------------------------------------------------------------------------- 1 | local SpatialAdaptiveMaxPooling, 
parent = torch.class('nn.SpatialAdaptiveMaxPooling', 'nn.Module') 2 | 3 | function SpatialAdaptiveMaxPooling:__init(W, H) 4 | parent.__init(self) 5 | 6 | self.W = W 7 | self.H = H 8 | end 9 | 10 | function SpatialAdaptiveMaxPooling:updateOutput(input) 11 | self.indices = self.indices or torch.LongTensor() 12 | if torch.typename(input):find('torch%.Cuda.*Tensor') then 13 | self.indices = torch.CudaLongTensor and self.indices:cudaLong() or self.indices 14 | else 15 | self.indices = self.indices:long() 16 | end 17 | input.THNN.SpatialAdaptiveMaxPooling_updateOutput( 18 | input:cdata(), 19 | self.output:cdata(), 20 | self.indices:cdata(), 21 | self.W, self.H 22 | ) 23 | return self.output 24 | end 25 | 26 | function SpatialAdaptiveMaxPooling:updateGradInput(input, gradOutput) 27 | input.THNN.SpatialAdaptiveMaxPooling_updateGradInput( 28 | input:cdata(), 29 | gradOutput:cdata(), 30 | self.gradInput:cdata(), 31 | self.indices:cdata() 32 | ) 33 | return self.gradInput 34 | end 35 | 36 | -- for backward compat 37 | function SpatialAdaptiveMaxPooling:empty() 38 | self:clearState() 39 | end 40 | 41 | function SpatialAdaptiveMaxPooling:clearState() 42 | if self.indices then 43 | self.indices:set() 44 | end 45 | return parent.clearState(self) 46 | end 47 | -------------------------------------------------------------------------------- /lua/pl/MultiMap.lua: -------------------------------------------------------------------------------- 1 | --- MultiMap, a Map which has multiple values per key. 
2 | -- 3 | -- Dependencies: `pl.utils`, `pl.class`, `pl.List`, `pl.Map` 4 | -- @classmod pl.MultiMap 5 | 6 | local utils = require 'pl.utils' 7 | local class = require 'pl.class' 8 | local List = require 'pl.List' 9 | local Map = require 'pl.Map' 10 | 11 | -- MultiMap is a standard MT 12 | local MultiMap = utils.stdmt.MultiMap 13 | 14 | class(Map,nil,MultiMap) 15 | MultiMap._name = 'MultiMap' 16 | 17 | function MultiMap:_init (t) 18 | if not t then return end 19 | self:update(t) 20 | end 21 | 22 | --- update a MultiMap using a table. 23 | -- @param t either a Multimap or a map-like table. 24 | -- @return the map 25 | function MultiMap:update (t) 26 | utils.assert_arg(1,t,'table') 27 | if Map:class_of(t) then 28 | for k,v in pairs(t) do 29 | self[k] = List() 30 | self[k]:append(v) 31 | end 32 | else 33 | for k,v in pairs(t) do 34 | self[k] = List(v) 35 | end 36 | end 37 | end 38 | 39 | --- add a new value to a key. Setting a nil value removes the key. 40 | -- @param key the key 41 | -- @param val the value 42 | -- @return the map 43 | function MultiMap:set (key,val) 44 | if val == nil then 45 | self[key] = nil 46 | else 47 | if not self[key] then 48 | self[key] = List() 49 | end 50 | self[key]:append(val) 51 | end 52 | end 53 | 54 | return MultiMap 55 | -------------------------------------------------------------------------------- /lua/nn/RReLU.lua: -------------------------------------------------------------------------------- 1 | local ffi = require 'ffi' 2 | local RReLU, parent = torch.class('nn.RReLU', 'nn.Module') 3 | 4 | function RReLU:__init(l, u, ip) 5 | parent.__init(self) 6 | self.lower = l or 1/8 7 | self.upper = u or 1/3 8 | assert(self.lower <= self.upper and self.lower >= 0 and self.upper >= 0) 9 | self.noise = torch.Tensor() 10 | self.train = true 11 | self.inplace = ip or false 12 | end 13 | 14 | function RReLU:updateOutput(input) 15 | local gen = ffi.typeof('THGenerator**')(torch._gen)[0] 16 | input.THNN.RReLU_updateOutput( 17 | input:cdata(), 18 
| self.output:cdata(), 19 | self.noise:cdata(), 20 | self.lower, 21 | self.upper, 22 | self.train, 23 | self.inplace, 24 | gen 25 | ) 26 | return self.output 27 | end 28 | 29 | function RReLU:updateGradInput(input, gradOutput) 30 | input.THNN.RReLU_updateGradInput( 31 | input:cdata(), 32 | gradOutput:cdata(), 33 | self.gradInput:cdata(), 34 | self.noise:cdata(), 35 | self.lower, 36 | self.upper, 37 | self.train, 38 | self.inplace 39 | ) 40 | return self.gradInput 41 | end 42 | 43 | function RReLU:__tostring__() 44 | return string.format('%s (l:%f, u:%f)', torch.type(self), self.lower, self.upper) 45 | end 46 | 47 | function RReLU:clearState() 48 | if self.noise then self.noise:set() end 49 | return parent.clearState(self) 50 | end 51 | -------------------------------------------------------------------------------- /lua/nn/MultiLabelMarginCriterion.lua: -------------------------------------------------------------------------------- 1 | local MultiLabelMarginCriterion, parent = torch.class('nn.MultiLabelMarginCriterion', 'nn.Criterion') 2 | 3 | function MultiLabelMarginCriterion:__init() 4 | parent.__init(self) 5 | self.sizeAverage = true 6 | self.isTarget = torch.Tensor() 7 | end 8 | 9 | function MultiLabelMarginCriterion:updateOutput(input, target) 10 | if torch.typename(input):find('torch%.Cuda.*Tensor') then 11 | target = torch.CudaLongTensor and target:cudaLong() or target 12 | else 13 | target = target:long() 14 | end 15 | self.output_tensor = self.output_tensor or input.new(1) 16 | input.THNN.MultiLabelMarginCriterion_updateOutput( 17 | input:cdata(), 18 | target:cdata(), 19 | self.output_tensor:cdata(), 20 | self.isTarget:cdata(), 21 | self.sizeAverage 22 | ) 23 | self.output = self.output_tensor[1] 24 | return self.output 25 | end 26 | 27 | function MultiLabelMarginCriterion:updateGradInput(input, target) 28 | if torch.typename(input):find('torch%.Cuda.*Tensor') then 29 | target = torch.CudaLongTensor and target:cudaLong() or target 30 | else 31 | target = 
target:long() 32 | end 33 | input.THNN.MultiLabelMarginCriterion_updateGradInput( 34 | input:cdata(), 35 | target:cdata(), 36 | self.gradInput:cdata(), 37 | self.isTarget:cdata(), 38 | self.sizeAverage 39 | ) 40 | return self.gradInput 41 | end 42 | -------------------------------------------------------------------------------- /share/cmake/torch/TorchConfig.cmake: -------------------------------------------------------------------------------- 1 | # This (ugly) setup assumes: 2 | # CMAKE_PREFIX_PATH = LUA_BINDIR 3 | # CMAKE_INSTALL_PREFIX = PREFIX 4 | 5 | # Define Torch basic subpaths 6 | SET(Torch_INSTALL_PREFIX "C:/torch") 7 | 8 | SET(Torch_INSTALL_BIN_SUBDIR "bin") 9 | SET(Torch_INSTALL_MAN_SUBDIR "share/man") 10 | SET(Torch_INSTALL_LIB_SUBDIR "lib") 11 | SET(Torch_INSTALL_SHARE_SUBDIR "share") 12 | SET(Torch_INSTALL_INCLUDE_SUBDIR "include") 13 | SET(Torch_INSTALL_CMAKE_SUBDIR "share/cmake/torch") 14 | SET(Torch_INSTALL_LUA_PATH_SUBDIR "luarocks/torch/scm-1/lua") 15 | SET(Torch_INSTALL_LUA_CPATH_SUBDIR "luarocks/torch/scm-1/lib") 16 | SET(Torch_INSTALL_CMAKE_RIDBUS "../..") 17 | 18 | FILE(RELATIVE_PATH Torch_INSTALL_LUA_PATH_SUBDIR "${Torch_INSTALL_PREFIX}" "${CMAKE_INSTALL_PREFIX}/lua") 19 | FILE(RELATIVE_PATH Torch_INSTALL_LUA_CPATH_SUBDIR "${Torch_INSTALL_PREFIX}" "${CMAKE_INSTALL_PREFIX}/lib") 20 | 21 | SET(CMAKE_MODULE_PATH "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_CMAKE_SUBDIR}" "${CMAKE_MODULE_PATH}") 22 | SET(CMAKE_INSTALL_PREFIX "${Torch_INSTALL_PREFIX}") # override 23 | 24 | INCLUDE(TorchPathsInit) 25 | INCLUDE(TorchPackage) 26 | INCLUDE(TorchWrap) 27 | 28 | # Define Torch basic targets 29 | INCLUDE(TorchExports) 30 | 31 | INCLUDE_DIRECTORIES("${Torch_INSTALL_INCLUDE}") 32 | INCLUDE_DIRECTORIES("${Torch_INSTALL_INCLUDE}/TH") 33 | LINK_DIRECTORIES("${Torch_INSTALL_LIB}") 34 | 35 | MESSAGE(STATUS "Found Torch7 in ${Torch_INSTALL_PREFIX}") 36 | -------------------------------------------------------------------------------- /lua/strict.lua: 
-------------------------------------------------------------------------------- 1 | 2 | local IGNORED_WRITES = {} 3 | local IGNORED_READS = { 4 | qt=true, 5 | _PROMPT=true, 6 | _PROMPT2=true, 7 | writeObjects=true, 8 | arg=true, 9 | } 10 | 11 | -- Raises an error when an undeclared variable is read. 12 | local function guardGlobals() 13 | assert(getmetatable(_G) == nil, "another global metatable exists") 14 | 15 | -- The detecting of undeclared vars is discussed on: 16 | -- http://www.lua.org/pil/14.2.html 17 | -- http://lua-users.org/wiki/DetectingUndefinedVariables 18 | setmetatable(_G, { 19 | __newindex = function (table, key, value) 20 | if not IGNORED_WRITES[key] then 21 | local info = debug.getinfo(2, "Sl") 22 | io.stderr:write(string.format( 23 | "strict: %s:%s: write to undeclared variable: %s\n", 24 | tostring(info.short_src), tostring(info.currentline), key)) 25 | end 26 | rawset(table, key, value) 27 | end, 28 | __index = function (table, key) 29 | if IGNORED_READS[key] then 30 | return 31 | end 32 | error("attempt to read undeclared variable "..key, 2) 33 | end, 34 | }) 35 | 36 | local origRequire = require 37 | function require(modname) 38 | IGNORED_WRITES[modname] = true 39 | return origRequire(modname) 40 | end 41 | end 42 | 43 | guardGlobals() 44 | -------------------------------------------------------------------------------- /luarocks/image/1.1.alpha-0/doc/index.md: -------------------------------------------------------------------------------- 1 | # image Package Reference Manual # 2 | 3 | __image__ is the [Torch7 distribution](http://torch.ch/) package for processing 4 | images. 
It contains a wide variety of functions divided into the following categories: 5 | 6 | * [Saving and loading](saveload.md) images as JPEG, PNG, PPM and PGM; 7 | * [Simple transformations](simpletransform.md) like translation, scaling and rotation; 8 | * [Parameterized transformations](paramtransform.md) like convolutions and warping; 9 | * [Simple Drawing Routines](doc/drawing.md) like drawing text or a rectangle on an image; 10 | * [Graphical user interfaces](gui.md) like display and window; 11 | * [Color Space Conversions](colorspace.md) from and to RGB, YUV, Lab, and HSL; 12 | * [Tensor Constructors](tensorconstruct.md) for creating Lenna, Fabio and Gaussian and Laplacian kernels; 13 | 14 | Note that unless speficied otherwise, this package deals with images of size 15 | `nChannel x height x width`. 16 | 17 | ## Install 18 | 19 | The easiest way to install this package it by following the [intructions](http://torch.ch/docs/getting-started.html) 20 | to install [Torch7](www.torch.ch), which includes __image__. 
21 | Otherwise, to update or manually re-install it: 22 | 23 | ```bash 24 | $ luarocks install image 25 | ``` 26 | 27 | ## Usage 28 | 29 | ```lua 30 | > require 'image' 31 | > l = image.lena() 32 | > image.display(l) 33 | > f = image.fabio() 34 | > image.display(f) 35 | ``` 36 | -------------------------------------------------------------------------------- /lua/nn/WeightedMSECriterion.lua: -------------------------------------------------------------------------------- 1 | local WeightedMSECriterion, parent = torch.class('nn.WeightedMSECriterion','nn.MSECriterion') 2 | 3 | function WeightedMSECriterion:__init(w) 4 | parent.__init(self) 5 | self.weight = w:clone() 6 | end 7 | 8 | function WeightedMSECriterion:updateOutput(input,target) 9 | self.buffer = self.buffer or input.new() 10 | self.buffer:resizeAs(input):copy(target) 11 | if input:dim() - 1 == self.weight:dim() then 12 | for i=1,input:size(1) do 13 | self.buffer[i]:cmul(self.weight) 14 | end 15 | else 16 | self.buffer:cmul(self.weight) 17 | end 18 | self.output_tensor = self.output_tensor or input.new(1) 19 | input.THNN.MSECriterion_updateOutput( 20 | input:cdata(), 21 | self.buffer:cdata(), 22 | self.output_tensor:cdata(), 23 | self.sizeAverage 24 | ) 25 | self.output = self.output_tensor[1] 26 | return self.output 27 | end 28 | 29 | function WeightedMSECriterion:updateGradInput(input, target) 30 | self.buffer:resizeAs(input):copy(target) 31 | if input:dim() - 1 == self.weight:dim() then 32 | for i=1,input:size(1) do 33 | self.buffer[i]:cmul(self.weight) 34 | end 35 | else 36 | self.buffer:cmul(self.weight) 37 | end 38 | input.THNN.MSECriterion_updateGradInput( 39 | input:cdata(), 40 | self.buffer:cdata(), 41 | self.gradInput:cdata(), 42 | self.sizeAverage 43 | ) 44 | return self.gradInput 45 | end 46 | -------------------------------------------------------------------------------- /luarocks/xlua/1.1-0/doc/README.md: -------------------------------------------------------------------------------- 1 | # A 
set of useful extensions to Lua 2 | 3 | [![Build Status](https://travis-ci.org/torch/xlua.svg)](https://travis-ci.org/torch/xlua) 4 | 5 | ## Dependencies: 6 | Torch7 (www.torch.ch) 7 | 8 | ## Install: 9 | ``` 10 | $ torch-rocks install xlua 11 | ``` 12 | 13 | ## Use 14 | ``` 15 | $ torch -lxlua 16 | xLua > a = 5 17 | xLua > b = 'test' 18 | xLua > xlua.who() 19 | 20 | Global Libs: 21 | {[1] = string, 22 | [2] = package, 23 | [3] = os, 24 | [4] = io, 25 | [5] = xlua, 26 | [6] = sys, 27 | [7] = math, 28 | [8] = debug, 29 | [9] = table, 30 | [10] = coroutine} 31 | 32 | Global Vars: 33 | {[a] = 5, 34 | [b] = test} 35 | 36 | xLua > xlua.clearall() -- also calls the garbage collector ! 37 | xLua > xlua.who() 38 | 39 | Global Libs: 40 | {[1] = string, 41 | [2] = package, 42 | [3] = os, 43 | [4] = io, 44 | [5] = xlua, 45 | [6] = sys, 46 | [7] = math, 47 | [8] = debug, 48 | [9] = table, 49 | [10] = coroutine} 50 | 51 | Global Vars: 52 | {} 53 | 54 | xLua > print(xlua) 55 | {[clear] = function: 0x10020cd10, 56 | [clearall] = function: 0x10020ca70, 57 | [_PACKAGE] = , 58 | [progress] = function: 0x10020cda0, 59 | [print] = function: 0x10020c9d0, 60 | [_NAME] = xlua, 61 | [who] = function: 0x10020cd50, 62 | [_M] = table: 0x10020c990, 63 | [lua_print] = function: 0x100201900} 64 | 65 | xLua > test = {a = 14, b = "test"} 66 | xLua > =test 67 | {[a] = 14, 68 | [b] = test} 69 | xLua > 70 | ``` 71 | -------------------------------------------------------------------------------- /luarocks/inspect/3.1.0-1/doc/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## v3.1.0 2 | 3 | * Fixes bug: all control codes are escaped correctly (instead of only the named ones such as \n). 4 | Example: \1 becomes \\1 (or \\001 when followed by a digit) 5 | * Fixes bug when using the `process` option in recursive tables 6 | * Overriding global `tostring` with inspect no longer results in an error. 
7 | * Simplifies id generation, using less tables and metatables. 8 | 9 | ## v3.0.3 10 | * Fixes a bug which sometimes displayed struct-like parts of tables as sequence-like due 11 | to the way rawlen/the # operator are implemented. 12 | 13 | ## v3.0.2 14 | * Fixes a bug when a table was garbage-collected while inspect was trying to render it 15 | 16 | ## v3.0.1 17 | * Fixes a bug when dealing with tables which have a __len metamethod in Lua >= 5.2 18 | 19 | ## v3.0.0 20 | 21 | The basic functionality remains as before, but there's one backwards-incompatible change if you used `options.filter`. 22 | 23 | * **Removed** `options.filter` 24 | * **Added** `options.process`, which can be used to do the same as `options.filter`, and more. 25 | * **Added** two new constants, `inspect.METATABLE` and `inspect.KEY` 26 | * **Added** `options.indent` & `options.newline`. 27 | 28 | 29 | ## v2.0.0 30 | 31 | * Ability to deal with LuaJit's custom types 32 | * License change from BSD to MIT 33 | * Moved second parameter (depth) to options (options.depth) 34 | * Added a new parameter, options.filter. 
35 | * Reimplemented some parts of the system without object orientation 36 | -------------------------------------------------------------------------------- /lua/nn/Squeeze.lua: -------------------------------------------------------------------------------- 1 | local Squeeze, parent = torch.class('nn.Squeeze', 'nn.Module') 2 | 3 | function Squeeze:__init(dim, numInputDims) 4 | parent.__init(self) 5 | self.dim = dim 6 | self:setNumInputDims(numInputDims) 7 | end 8 | 9 | function Squeeze:setNumInputDims(numInputDims) 10 | self.numInputDims = numInputDims 11 | return self 12 | end 13 | 14 | function Squeeze:updateOutput(input) 15 | assert(input and torch.isTensor(input), 'Squeeze only works on tensors') 16 | local dim = self.dim 17 | local addone = false 18 | if self.numInputDims and input:dim()==(self.numInputDims+1) then 19 | if dim then 20 | dim = dim + 1 21 | elseif input:size(1) == 1 then 22 | addone = true -- in case of minibatch of size 1. 23 | end 24 | end 25 | self.output:set(dim and input:squeeze(dim) or input:squeeze()) 26 | if addone then 27 | local s = self.output:size():totable{} 28 | table.insert(s, 1, 1) 29 | self.output:set(self.output:view(torch.LongStorage(s))) 30 | end 31 | return self.output 32 | end 33 | 34 | function Squeeze:updateGradInput(input, gradOutput) 35 | assert(input and torch.isTensor(input), 'Squeeze only works on tensors') 36 | assert(gradOutput and torch.isTensor(gradOutput), 'Squeeze only works on tensors') 37 | assert(input:nElement() == gradOutput:nElement()) 38 | self.gradInput:set(gradOutput:view(input:size())) 39 | return self.gradInput 40 | end 41 | -------------------------------------------------------------------------------- /luarocks/strict/0-0/doc/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Want to contribute? Great! First, read this page (including the small print at the end). 
2 | 3 | ### Before you contribute 4 | Before we can use your code, you must sign the 5 | [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual?csw=1) 6 | (CLA), which you can do online. The CLA is necessary mainly because you own the 7 | copyright to your changes, even after your contribution becomes part of our 8 | codebase, so we need your permission to use and distribute your code. We also 9 | need to be sure of various other things—for instance that you'll tell us if you 10 | know that your code infringes on other people's patents. You don't have to sign 11 | the CLA until after you've submitted your code for review and a member has 12 | approved it, but you must do it before we can put your code into our codebase. 13 | Before you start working on a larger contribution, you should get in touch with 14 | us first through the issue tracker with your idea so that we can help out and 15 | possibly guide you. Coordinating up front makes it much easier to avoid 16 | frustration later on. 17 | 18 | ### Code reviews 19 | All submissions, including submissions by project members, require review. We 20 | use Github pull requests for this purpose. 21 | 22 | ### The small print 23 | Contributions made by corporations are covered by a different agreement than 24 | the one above, the Software Grant and Corporate Contributor License Agreement. 
25 | -------------------------------------------------------------------------------- /lua/nn/HingeEmbeddingCriterion.lua: -------------------------------------------------------------------------------- 1 | local HingeEmbeddingCriterion, parent = torch.class('nn.HingeEmbeddingCriterion', 'nn.Criterion') 2 | 3 | function HingeEmbeddingCriterion:__init(margin) 4 | parent.__init(self) 5 | self.margin = margin or 1 6 | self.sizeAverage = true 7 | end 8 | 9 | function HingeEmbeddingCriterion:updateOutput(input,y) 10 | self.buffer = self.buffer or input.new() 11 | if not torch.isTensor(y) then 12 | self.ty = self.ty or input.new():resize(1) 13 | self.ty[1]=y 14 | y=self.ty 15 | end 16 | 17 | self.buffer:resizeAs(input):copy(input) 18 | self.buffer[torch.eq(y, -1)] = 0 19 | self.output = self.buffer:sum() 20 | 21 | self.buffer:fill(self.margin):add(-1, input) 22 | self.buffer:cmax(0) 23 | self.buffer[torch.eq(y, 1)] = 0 24 | self.output = self.output + self.buffer:sum() 25 | 26 | if (self.sizeAverage == nil or self.sizeAverage == true) then 27 | self.output = self.output / input:nElement() 28 | end 29 | 30 | return self.output 31 | end 32 | 33 | function HingeEmbeddingCriterion:updateGradInput(input, y) 34 | if not torch.isTensor(y) then self.ty[1]=y; y=self.ty end 35 | self.gradInput:resizeAs(input):copy(y) 36 | self.gradInput[torch.cmul(torch.eq(y, -1), torch.gt(input, self.margin))] = 0 37 | 38 | if (self.sizeAverage == nil or self.sizeAverage == true) then 39 | self.gradInput:mul(1 / input:nElement()) 40 | end 41 | 42 | return self.gradInput 43 | end 44 | -------------------------------------------------------------------------------- /include/TH/THAllocator.h: -------------------------------------------------------------------------------- 1 | #ifndef TH_ALLOCATOR_INC 2 | #define TH_ALLOCATOR_INC 3 | 4 | #include "THGeneral.h" 5 | 6 | #define TH_ALLOCATOR_MAPPED_SHARED 1 7 | #define TH_ALLOCATOR_MAPPED_SHAREDMEM 2 8 | #define TH_ALLOCATOR_MAPPED_EXCLUSIVE 4 9 | 
#define TH_ALLOCATOR_MAPPED_NOCREATE 8 10 | #define TH_ALLOCATOR_MAPPED_KEEPFD 16 11 | #define TH_ALLOCATOR_MAPPED_FROMFD 32 12 | #define TH_ALLOCATOR_MAPPED_UNLINK 64 13 | 14 | /* Custom allocator 15 | */ 16 | typedef struct THAllocator { 17 | void* (*malloc)(void*, long); 18 | void* (*realloc)(void*, void*, long); 19 | void (*free)(void*, void*); 20 | } THAllocator; 21 | 22 | /* default malloc/free allocator. malloc and realloc raise an error (using 23 | * THError) on allocation failure. 24 | */ 25 | extern THAllocator THDefaultAllocator; 26 | 27 | /* file map allocator 28 | */ 29 | typedef struct THMapAllocatorContext_ THMapAllocatorContext; 30 | TH_API THMapAllocatorContext *THMapAllocatorContext_new(const char *filename, int flags); 31 | TH_API THMapAllocatorContext *THMapAllocatorContext_newWithFd(const char *filename, 32 | int fd, int flags); 33 | TH_API char * THMapAllocatorContext_filename(THMapAllocatorContext *ctx); 34 | TH_API int THMapAllocatorContext_fd(THMapAllocatorContext *ctx); 35 | TH_API long THMapAllocatorContext_size(THMapAllocatorContext *ctx); 36 | TH_API void THMapAllocatorContext_free(THMapAllocatorContext *ctx); 37 | 38 | extern THAllocator THMapAllocator; 39 | extern THAllocator THRefcountedMapAllocator; 40 | 41 | #endif 42 | -------------------------------------------------------------------------------- /lua/nn/SpatialBatchNormalization.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | This file implements Batch Normalization as described in the paper: 3 | "Batch Normalization: Accelerating Deep Network Training 4 | by Reducing Internal Covariate Shift" 5 | by Sergey Ioffe, Christian Szegedy 6 | 7 | This implementation is useful for inputs coming from convolution layers. 
--[[
   This file implements Batch Normalization as described in the paper:
   "Batch Normalization: Accelerating Deep Network Training
                         by Reducing Internal Covariate Shift"
   by Sergey Ioffe, Christian Szegedy

   This implementation is useful for inputs coming from convolution layers.
   For non-convolutional layers, see BatchNormalization.lua

   The operation implemented is:
   y =     ( x - mean(x) )
        -------------------- * gamma + beta
        standard-deviation(x)
   where gamma and beta are learnable parameters.

   The learning of gamma and beta is optional.

   Usage:
   with    learnable parameters: nn.SpatialBatchNormalization(N [,eps] [,momentum])
                                 where N = dimensionality of input
   without learnable parameters: nn.SpatialBatchNormalization(N [,eps] [,momentum], false)

   eps is a small value added to the variance to avoid divide-by-zero.
       Defaults to 1e-5

   At training time, this layer keeps a running estimate of its computed mean and std.
   The running sum is kept with a default momentum of 0.1 (unless overridden)
   At test time, this running mean/std is used to normalize.
]]--
-- All actual computation lives in the parent class; this subclass only pins
-- the expected input rank and the serialization version.
local BN, parent = torch.class('nn.SpatialBatchNormalization', 'nn.BatchNormalization')

-- Serialization format version; presumably consulted by read/write
-- compatibility code in the parent class -- TODO confirm.
BN.__version = 2

-- expected dimension of input (4D, consistent with the "inputs coming from
-- convolution layers" note above)
BN.nDim = 4
local ParallelCriterion, parent = torch.class('nn.ParallelCriterion', 'nn.Criterion')

-- Applies one criterion per entry of a table input and sums the weighted
-- losses. When repeatTarget is true, the same target is fed to every
-- criterion; otherwise criterion i receives target[i].
function ParallelCriterion:__init(repeatTarget)
   parent.__init(self)
   self.criterions = {}
   self.weights = {}
   self.gradInput = {}
   self.repeatTarget = repeatTarget
end

-- Registers a criterion with an optional weight (default 1).
-- Returns self so calls can be chained.
function ParallelCriterion:add(criterion, weight)
   assert(criterion, 'no criterion provided')
   local n = #self.criterions
   self.criterions[n + 1] = criterion
   self.weights[n + 1] = weight or 1
   return self
end

-- Weighted sum of the member criterions' losses.
function ParallelCriterion:updateOutput(input, target)
   self.output = 0
   for k = 1, #self.criterions do
      local tk = self.repeatTarget and target or target[k]
      local loss = self.criterions[k]:updateOutput(input[k], tk)
      self.output = self.output + self.weights[k] * loss
   end
   return self.output
end

-- Accumulates each member's weighted gradient into a zeroed table of
-- tensors shaped like input.
function ParallelCriterion:updateGradInput(input, target)
   self.gradInput = nn.utils.recursiveResizeAs(self.gradInput, input)
   nn.utils.recursiveFill(self.gradInput, 0)
   for k = 1, #self.criterions do
      local tk = self.repeatTarget and target or target[k]
      local grad = self.criterions[k]:updateGradInput(input[k], tk)
      nn.utils.recursiveAdd(self.gradInput[k], self.weights[k], grad)
   end
   return self.gradInput
end

function ParallelCriterion:type(type, tensorCache)
   -- Drop the cached gradient table so it is rebuilt in the new tensor type.
   self.gradInput = {}
   return parent.type(self, type, tensorCache)
end
local SpatialMatching, parent = torch.class('nn.SpatialMatching', 'nn.Module')

-- Patch matching between two feature maps over a maxh x maxw search window.
-- If full_output is false, the output is computed only for elements of the
-- first input whose every candidate match exists in the second input; in
-- that case pixel (1,1) of the first input is taken to correspond to pixel
-- (maxh/2, maxw/2) of the second one.
function SpatialMatching:__init(maxh, maxw, full_output)
   parent.__init(self)
   self.maxh = maxh or 11
   self.maxw = maxw or 11
   -- nil defaults to false; an explicit true/false is kept as-is
   self.full_output = full_output or false
   self.gradInput1 = torch.Tensor()
   self.gradInput2 = torch.Tensor()
end

-- input is a table {a, b}, each KxHxW; when full_output is false, a is
-- KxH1xW1 with H1 <= H-maxh+1 and W1 <= W-maxw+1.
function SpatialMatching:updateOutput(input)
   local a, b = input[1], input[2]
   self.output:resize(a:size(2), a:size(3), self.maxh, self.maxw)
   a.nn.SpatialMatching_updateOutput(self, a, b)
   return self.output
end

function SpatialMatching:updateGradInput(input, gradOutput)
   local a, b = input[1], input[2]
   self.gradInput1:resize(a:size()):zero()
   self.gradInput2:resize(b:size()):zero()
   a.nn.SpatialMatching_updateGradInput(self, a, b, gradOutput)
   self.gradInput = {self.gradInput1, self.gradInput2}
   return self.gradInput
end
local SpatialMaxUnpooling, parent = torch.class('nn.SpatialMaxUnpooling', 'nn.Module')

-- Reverses nn.SpatialMaxPooling: values are written back to the positions
-- recorded in the paired pooling module's `indices`, the rest is zeroed.
-- The module is tied to a live pooling instance; indices and output size
-- are re-read from it on every forward/backward call.
function SpatialMaxUnpooling:__init(poolingModule)
   parent.__init(self)
   -- Generalized: torch.isTypeOf accepts nn.SpatialMaxPooling and any
   -- subclass of it, whereas the previous raw torch.type() comparison
   -- rejected derived pooling modules. Backward compatible for all
   -- previously accepted arguments.
   assert(torch.isTypeOf(poolingModule, 'nn.SpatialMaxPooling'),
          'Argument must be a nn.SpatialMaxPooling module')
   assert(poolingModule.kH == poolingModule.dH and poolingModule.kW == poolingModule.dW,
          "The size of pooling module's kernel must be equal to its stride")
   self.pooling = poolingModule
end

-- Pulls the current indices and input extent from the paired pooling
-- module, so unpooling always tracks its latest state.
function SpatialMaxUnpooling:setParams()
   self.indices = self.pooling.indices
   self.oheight = self.pooling.iheight
   self.owidth = self.pooling.iwidth
end

function SpatialMaxUnpooling:updateOutput(input)
   self:setParams()
   input.THNN.SpatialMaxUnpooling_updateOutput(
      input:cdata(),
      self.output:cdata(),
      self.indices:cdata(),
      self.owidth, self.oheight
   )
   return self.output
end

function SpatialMaxUnpooling:updateGradInput(input, gradOutput)
   self:setParams()
   input.THNN.SpatialMaxUnpooling_updateGradInput(
      input:cdata(),
      gradOutput:cdata(),
      self.gradInput:cdata(),
      self.indices:cdata(),
      self.owidth, self.oheight
   )
   return self.gradInput
end

-- Kept for backward compatibility; delegates to clearState().
function SpatialMaxUnpooling:empty()
   self:clearState()
end

function SpatialMaxUnpooling:__tostring__()
   return 'nn.SpatialMaxUnpooling associated to '..tostring(self.pooling)
end
-------------------------------------------------------------------------------- 1 | Copyright 2014, Google Inc. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above 10 | copyright notice, this list of conditions and the following disclaimer 11 | in the documentation and/or other materials provided with the 12 | distribution. 13 | * Neither the name of Google Inc. nor the names of its 14 | contributors may be used to endorse or promote products derived from 15 | this software without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | --------------------------------------------------------------------------------