├── example
├── rcnn
│ ├── __init__.py
│ ├── helper
│ │ ├── __init__.py
│ │ ├── dataset
│ │ │ └── __init__.py
│ │ └── processing
│ │ │ ├── __init__.py
│ │ │ └── bbox_process.py
│ ├── rcnn
│ │ ├── __init__.py
│ │ └── rpn
│ │ │ └── __init__.py
│ ├── test
│ │ ├── __init__.py
│ │ └── test_data_iter.py
│ ├── tools
│ │ └── __init__.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── combine_model.py
│ │ └── save_model.py
│ └── test.py
├── speech-demo
│ ├── io_func
│ │ ├── __init__.py
│ │ ├── feat_readers
│ │ │ ├── __init__.py
│ │ │ └── reader_bvec.py
│ │ └── info.py
│ ├── tests
│ │ └── test_nothing.py
│ └── python_wrap
│ │ ├── example_usage
│ │ │ ├── data.scp
│ │ │ ├── data.txt
│ │ │ ├── data.ark
│ │ │ └── README.txt
│ │ └── Makefile
├── neural-style
│ ├── .gitignore
│ ├── find_mxnet.py
│ ├── download.sh
│ ├── end_to_end
│ │ └── README.md
│ └── README.md
├── dqn
│ ├── README.md
│ └── game.py
├── nce-loss
│ ├── get_text8.sh
│ └── README.md
├── python-howto
│ ├── README.md
│ └── multiple_outputs.py
├── model-parallel-lstm
│ ├── README.md
│ └── get_ptb_data.sh
├── image-classification
│ ├── find_mxnet.py
│ └── symbol_mlp.R
├── torch
│ └── torch_function.py
├── fcn-xs
│ └── run_fcnxs.sh
├── numpy-ops
│ └── README.md
├── bi-lstm-sort
│ └── README.md
├── autoencoder
│ ├── data.py
│ └── mnist_sae.py
├── svm_mnist
│ └── README.md
├── rnn
│ └── get_ptb_data.sh
├── multi-task
│ └── README.md
├── memcost
│ └── Makefile
├── bayesian-methods
│ └── README.md
└── cpp
│ └── image-classification
│ └── Makefile
├── docs
├── requirements.txt
├── .dockerignore
├── .gitignore
├── _static
│ ├── mxnet-theme
│ │ ├── theme.conf
│ │ └── footer.html
│ ├── selectlang.js
│ └── js
│ │ └── auto_module_index.js
├── zh
│ ├── index.md
│ └── api
│ │ └── python
│ │ └── index.md
├── tutorials
│ ├── unsupervised_learning
│ │ ├── gan.md
│ │ └── auto_encoders.md
│ ├── general_ml
│ │ └── recommendation_systems.md
│ └── nlp
│ │ ├── rnn.md
│ │ └── nce_loss.md
├── api
│ ├── c++
│ │ └── index.md
│ ├── julia
│ │ └── index.md
│ ├── r
│ │ ├── Makefile
│ │ └── index.md
│ ├── scala
│ │ └── index.md
│ └── python
│ │ └── index.md
├── how_to
│ ├── develop_and_hack.md
│ ├── pretrained.md
│ └── index.md
├── build-preview.sh
├── index.md
├── community
│ └── index.md
└── architecture
│ └── read_code.md
├── tests
├── cpp
│ ├── .gitignore
│ └── unittest.mk
├── .gitignore
├── nightly
│ ├── .gitignore
│ ├── download.sh
│ └── README.md
├── travis
│ ├── travis_after_failure.sh
│ ├── r_vignettes.R
│ ├── setup.sh
│ └── is_core_changed.sh
├── python
│ ├── train
│ │ └── common.py
│ ├── unittest
│ │ └── common.py
│ ├── README.md
│ └── gpu
│ │ └── test_rtc.py
└── jenkins
│ └── format
├── amalgamation
├── .gitignore
└── jni
│ └── org
│ └── dmlc
│ └── mxnet
│ └── MxnetException.java
├── tools
├── bandwidth
│ └── .gitignore
├── caffe_converter
│ ├── caffe_parse
│ │ ├── __init__.py
│ │ └── parse_from_protobuf.py
│ ├── make_win32.bat
│ └── Makefile
├── accnn
│ └── config.json
└── kill-mxnet.py
├── python
├── .gitignore
├── mxnet
│ ├── _ndarray_internal.py
│ ├── _symbol_internal.py
│ └── module
│ │ └── __init__.py
├── README.md
└── setup.py
├── readthedocs.yml
├── R-package
├── tests
│ ├── testthat.R
│ └── testthat
│ │ └── test_symbol.R
├── .gitignore
├── src
│ ├── Makevars
│ └── Makevars.win
├── .Rbuildignore
├── R
│ ├── kvstore.R
│ └── util.R
├── demo
│ ├── basic_random.R
│ ├── basic_bench.R
│ ├── basic_kvstore.R
│ ├── 00Index
│ ├── basic_ndarray.R
│ ├── basic_symbol.R
│ └── basic_executor.R
├── man
│ ├── ctx.Rd
│ ├── mx.io.extract.Rd
│ ├── outputs.Rd
│ ├── arguments.Rd
│ ├── is.mx.symbol.Rd
│ ├── is.mx.dataiter.Rd
│ ├── mx.metric.custom.Rd
│ ├── is.mx.context.Rd
│ ├── mx.kv.create.Rd
│ ├── mx.nd.cos.Rd
│ ├── mx.nd.exp.Rd
│ ├── mx.nd.log.Rd
│ ├── mx.nd.sin.Rd
│ ├── mx.nd.sqrt.Rd
│ ├── mx.gpu.Rd
│ ├── mx.nd.rsqrt.Rd
│ ├── mx.symbol.load.json.Rd
│ ├── mx.nd.square.Rd
│ ├── mx.callback.log.train.metric.Rd
│ ├── mx.nd.ceil.Rd
│ ├── mx.nd.sign.Rd
│ ├── dim.MXNDArray.Rd
│ ├── mx.nd.abs.Rd
│ ├── mx.nd.floor.Rd
│ ├── mx.nd.round.Rd
│ ├── print.MXNDArray.Rd
│ ├── length.MXNDArray.Rd
│ ├── as.array.MXNDArray.Rd
│ ├── mx.apply.Rd
│ ├── as.matrix.MXNDArray.Rd
│ ├── mx.cpu.Rd
│ ├── mx.exec.backward.Rd
│ ├── mx.metric.accuracy.Rd
│ ├── mx.simple.bind.Rd
│ ├── Ops.MXNDArray.Rd
│ ├── mx.exec.forward.Rd
│ ├── mx.metric.mae.Rd
│ ├── mx.symbol.Variable.Rd
│ ├── mx.opt.create.Rd
│ ├── mx.metric.rmse.Rd
│ ├── mx.init.normal.Rd
│ ├── mx.nd.dot.Rd
│ ├── mx.symbol.Group.Rd
│ ├── is.mx.ndarray.Rd
│ ├── mx.metric.rmsle.Rd
│ ├── mx.model.load.Rd
│ ├── mx.symbol.infer.shape.Rd
│ ├── mx.init.uniform.Rd
│ ├── mx.nd.copyto.Rd
│ ├── mx.callback.save.checkpoint.Rd
│ ├── mx.nd.max.Rd
│ ├── mx.nd.min.Rd
│ ├── mx.nd.sum.Rd
│ ├── mx.symbol.Flatten.Rd
│ ├── mx.ctx.default.Rd
│ ├── mx.nd.norm.Rd
│ ├── mxnet.export.Rd
│ ├── mx.nd.clip.Rd
│ ├── mx.symbol.cos.Rd
│ ├── mx.symbol.exp.Rd
│ ├── mx.symbol.log.Rd
│ ├── mx.symbol.sin.Rd
│ ├── mx.opt.sgd.Rd
│ ├── mx.symbol.sqrt.Rd
│ ├── mx.symbol.rsqrt.Rd
│ ├── mx.exec.update.arg.arrays.Rd
│ ├── mx.exec.update.aux.arrays.Rd
│ ├── mx.exec.update.grad.arrays.Rd
│ ├── mx.symbol.abs.Rd
│ ├── mx.symbol.ceil.Rd
│ ├── mx.symbol.sign.Rd
│ ├── mx.symbol.square.Rd
│ ├── mx.symbol.floor.Rd
│ ├── mx.symbol.load.Rd
│ ├── mx.symbol.round.Rd
│ ├── mx.model.save.Rd
│ ├── mx.nd.load.Rd
│ ├── mx.init.internal.default.Rd
│ ├── mx.symbol.BlockGrad.Rd
│ ├── mx.symbol.save.Rd
│ ├── mx.nd.argmax.channel.Rd
│ ├── mx.nd.save.Rd
│ ├── mx.opt.get.updater.Rd
│ ├── mx.symbol.ElementWiseSum.Rd
│ ├── mx.io.arrayiter.Rd
│ ├── mx.symbol.Dropout.Rd
│ ├── mx.symbol.Cast.Rd
│ ├── mxnet.Rd
│ ├── mx.init.create.Rd
│ ├── mx.symbol.SliceChannel.Rd
│ ├── mx.nd.ones.Rd
│ ├── mx.symbol.Reshape.Rd
│ ├── mx.nd.zeros.Rd
│ ├── mx.symbol.SwapAxis.Rd
│ ├── mx.init.Xavier.Rd
│ ├── mx.symbol.Embedding.Rd
│ ├── mx.nd.choose.element.0index.Rd
│ ├── mx.gru.forward.Rd
│ ├── mx.rnn.forward.Rd
│ ├── mx.lstm.forward.Rd
│ ├── mx.symbol.Concat.Rd
│ ├── mx.io.CSVIter.Rd
│ ├── mx.nd.array.Rd
│ ├── mx.symbol.BatchNorm.Rd
│ ├── mx.rnorm.Rd
│ ├── mx.symbol.LinearRegressionOutput.Rd
│ ├── mx.symbol.MAERegressionOutput.Rd
│ ├── mx.nd.fill.element.0index.Rd
│ ├── mx.runif.Rd
│ ├── mx.symbol.FullyConnected.Rd
│ ├── mx.symbol.Pooling.Rd
│ ├── mx.symbol.Activation.Rd
│ ├── mx.symbol.IdentityAttachKLSparseReg.Rd
│ ├── mx.symbol.LRN.Rd
│ ├── mx.symbol.LeakyReLU.Rd
│ ├── mx.opt.adadelta.Rd
│ └── mx.symbol.LogisticRegressionOutput.Rd
└── DESCRIPTION
├── plugin
├── opencv
│ ├── __init__.py
│ ├── opencv.mk
│ └── cv_api.h
├── sframe
│ └── plugin.mk
├── warpctc
│ ├── warpctc.mk
│ ├── warpctc.cu
│ └── warpctc.cc
├── torch
│ ├── torch.mk
│ ├── torch_criterion.cu
│ ├── torch_module.cu
│ ├── torch_criterion.cc
│ └── torch_module.cc
└── caffe
│ ├── caffe_stream.cc
│ ├── caffe.mk
│ ├── caffe_stream.h
│ ├── caffe_common.cc
│ ├── caffe_op.cu
│ └── caffe_loss.cu
├── src
├── operator
│ ├── smooth_l1_unary.cu
│ ├── smooth_l1_unary.cc
│ ├── broadcast_mask_op.cc
│ ├── broadcast_mask_op.cu
│ ├── loss_binary_op.cc
│ ├── loss_binary_op.cu
│ ├── elementwise_binary_op.cu
│ ├── elementwise_binary_op.cc
│ ├── elementwise_binary_scalar_op.cc
│ ├── elementwise_binary_scalar_op.cu
│ ├── sample_op.cu
│ ├── matrix_op.cu
│ ├── elementwise_binary_broadcast_op.cc
│ ├── elementwise_binary_broadcast_op.cu
│ ├── broadcast_reduce_op.cu
│ ├── elementwise_unary_op.cc
│ ├── elementwise_unary_op.cu
│ ├── crop.cu
│ ├── native_op.cu
│ ├── leaky_relu.cu
│ ├── make_loss.cu
│ ├── l2_normalization.cu
│ ├── sample_op.cc
│ ├── slice_channel.cu
│ ├── identity_attach_KL_sparse_reg.cu
│ ├── block_grad.cu
│ ├── concat.cu
│ ├── cudnn_batch_norm.cu
│ ├── dropout.cu
│ ├── embedding.cu
│ ├── reshape.cu
│ ├── broadcast_reduce_op.cc
│ ├── fully_connected.cu
│ ├── elementwise_sum.cu
│ ├── swapaxis.cu
│ ├── softmax_output.cu
│ ├── sequence_last.cu
│ ├── sequence_reverse.cu
│ ├── matrix_op.cc
│ ├── cast.cu
│ ├── batch_norm.cu
│ ├── softmax_activation.cu
│ ├── operator.cc
│ ├── native_op.cc
│ ├── slice_channel.cc
│ ├── leaky_relu.cc
│ ├── deconvolution.cu
│ ├── lrn.cu
│ ├── rnn.cu
│ ├── l2_normalization.cc
│ ├── make_loss.cc
│ ├── identity_attach_KL_sparse_reg.cc
│ └── regression_output.cu
├── c_api
│ └── c_api_error.cc
├── io
│ └── io.cc
├── optimizer
│ ├── sgd.cu
│ ├── optimizer.cc
│ └── sgd.cc
└── ndarray
│ └── ndarray_function.cc
├── .gitmodules
├── scala-package
├── core
│ ├── scripts
│ │ ├── get_mnist_data.sh
│ │ └── get_cifar_data.sh
│ └── src
│ │ └── test
│ │ ├── resources
│ │ └── log4j.properties
│ │ └── scala
│ │ └── ml
│ │ └── dmlc
│ │ └── mxnet
│ │ ├── ShapeSuite.scala
│ │ ├── CheckUtils.scala
│ │ └── AttrScopeSuite.scala
├── examples
│ ├── src
│ │ └── main
│ │ │ └── resources
│ │ │ └── log4j.properties
│ └── scripts
│ │ ├── neuralstyle_end2end
│ │ │ ├── run_test_end2end.sh
│ │ │ └── run_train_end2end.sh
│ │ ├── run_multitask.sh
│ │ ├── run_neuralstyle.sh
│ │ ├── run_visualization.sh
│ │ ├── rnn
│ │ │ ├── run_train_charrnn.sh
│ │ │ └── run_test_charrnn.sh
│ │ └── run_cnntextclassification.sh
├── spark
│ └── src
│ │ └── main
│ │ └── scala
│ │ └── ml
│ │ └── dmlc
│ │ └── mxnet
│ │ └── spark
│ │ ├── io
│ │ └── LongLivingDataBatch.scala
│ │ ├── MXNDArray.scala
│ │ └── utils
│ │ └── RepIterator.scala
├── init
│ └── src
│ │ └── main
│ │ └── scala
│ │ └── ml
│ │ └── dmlc
│ │ └── mxnet
│ │ └── init
│ │ └── LibInfo.scala
└── assembly
│ ├── linux-x86_64-cpu
│ └── src
│ │ └── main
│ │ └── assembly
│ │ └── assembly.xml
│ ├── linux-x86_64-gpu
│ └── src
│ │ └── main
│ │ └── assembly
│ │ └── assembly.xml
│ └── osx-x86_64-cpu
│ └── src
│ └── main
│ └── assembly
│ └── assembly.xml
├── docker
├── cpu
│ └── Dockerfile
└── cuda
│ └── Dockerfile
├── LICENSE
└── matlab
├── +mxnet
└── private
│ └── callmxnet.m
└── tests
└── prepare_data.m
/example/rcnn/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | breathe
2 |
--------------------------------------------------------------------------------
/example/rcnn/helper/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/example/rcnn/rcnn/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/example/rcnn/rcnn/rpn/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/example/rcnn/test/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/example/rcnn/tools/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/example/rcnn/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/cpp/.gitignore:
--------------------------------------------------------------------------------
1 | unittest
2 |
--------------------------------------------------------------------------------
/amalgamation/.gitignore:
--------------------------------------------------------------------------------
1 | *-all.cc
2 |
--------------------------------------------------------------------------------
/tools/bandwidth/.gitignore:
--------------------------------------------------------------------------------
1 | ResNet
2 |
--------------------------------------------------------------------------------
/example/rcnn/helper/dataset/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/example/rcnn/helper/processing/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/example/speech-demo/io_func/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/.gitignore:
--------------------------------------------------------------------------------
1 | *_test
2 | *_unittest
3 |
--------------------------------------------------------------------------------
/tests/nightly/.gitignore:
--------------------------------------------------------------------------------
1 | data/
2 | results/
3 |
--------------------------------------------------------------------------------
/tools/caffe_converter/caffe_parse/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/.dockerignore:
--------------------------------------------------------------------------------
1 | Dockerfile
2 | _build
3 |
4 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | _build/*
2 | *.pyc
3 | doxygen
4 |
--------------------------------------------------------------------------------
/example/speech-demo/io_func/feat_readers/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/python/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | *.egg-info
3 | build
4 |
--------------------------------------------------------------------------------
/example/neural-style/.gitignore:
--------------------------------------------------------------------------------
1 | input
2 | output
3 | model
4 |
--------------------------------------------------------------------------------
/docs/_static/mxnet-theme/theme.conf:
--------------------------------------------------------------------------------
1 | [theme]
2 | inherit = basic
3 |
--------------------------------------------------------------------------------
/example/speech-demo/tests/test_nothing.py:
--------------------------------------------------------------------------------
1 | def test_nothing():
2 | pass
--------------------------------------------------------------------------------
/example/speech-demo/python_wrap/example_usage/data.scp:
--------------------------------------------------------------------------------
1 | test_feat data.ark:10
2 |
--------------------------------------------------------------------------------
/readthedocs.yml:
--------------------------------------------------------------------------------
1 | formats:
2 | - none
3 | requirements_file: docs/requirements.txt
4 |
--------------------------------------------------------------------------------
/R-package/tests/testthat.R:
--------------------------------------------------------------------------------
1 | library(testthat)
2 | library(mxnet)
3 |
4 | test_check("mxnet")
5 |
--------------------------------------------------------------------------------
/example/dqn/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hschen0712/mxnet/master/example/dqn/README.md
--------------------------------------------------------------------------------
/python/mxnet/_ndarray_internal.py:
--------------------------------------------------------------------------------
1 | """NDArray namespace used to register internal functions"""
2 |
--------------------------------------------------------------------------------
/python/mxnet/_symbol_internal.py:
--------------------------------------------------------------------------------
1 | """Symbol namespace used to register internal functions"""
2 |
--------------------------------------------------------------------------------
/R-package/.gitignore:
--------------------------------------------------------------------------------
1 | .Rhistory
2 | R-package.Rproj
3 | *.Rproj
4 | *.o
5 | *.so
6 | *.html
7 | inst/doc
8 |
--------------------------------------------------------------------------------
/R-package/src/Makevars:
--------------------------------------------------------------------------------
1 |
2 | PKG_CPPFLAGS = -I../inst/include
3 | PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS)
4 |
--------------------------------------------------------------------------------
/R-package/.Rbuildignore:
--------------------------------------------------------------------------------
1 | \.o$
2 | src/*.so$
3 | \.dll$
4 | ^.*\.Rproj$
5 | ^\.Rproj\.user$
6 |
7 | README.md
8 |
--------------------------------------------------------------------------------
/example/speech-demo/python_wrap/example_usage/data.txt:
--------------------------------------------------------------------------------
1 | test_feat [
2 | 1.2345 6.789
3 | -9.876 0.0001 ]
4 |
--------------------------------------------------------------------------------
/tools/caffe_converter/make_win32.bat:
--------------------------------------------------------------------------------
1 | @protoc --python_out=./ ./caffe_parse/caffe.proto
2 | @echo done.
3 | @pause
4 |
--------------------------------------------------------------------------------
/example/nce-loss/get_text8.sh:
--------------------------------------------------------------------------------
1 | mkdir -p ./data/
2 | cd ./data/
3 | wget http://mattmahoney.net/dc/text8.zip
4 | unzip text8.zip
5 |
--------------------------------------------------------------------------------
/tests/travis/travis_after_failure.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${TASK} == "r_test" ]; then
4 | cat mxnet/mxnet.Rcheck/*.log
5 | fi
6 |
--------------------------------------------------------------------------------
/R-package/src/Makevars.win:
--------------------------------------------------------------------------------
1 |
2 | PKG_CPPFLAGS = -I../inst/include
3 | PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) -L../inst/libs/x64/ -llibmxnet
4 |
--------------------------------------------------------------------------------
/docs/zh/index.md:
--------------------------------------------------------------------------------
1 | # MXNet Documentation in Chinese
2 |
3 | We hope to translate the MXNet documentation into Chinese. It is a lot of work, and we hope everyone can contribute together.
4 |
5 | - [Introduction to MXNet](./overview.md)
6 | - [Memory Optimization in MXNet](./note_memory.md)
7 |
--------------------------------------------------------------------------------
/plugin/opencv/__init__.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | # pylint: disable=wildcard-import
3 |
4 | """Opencv plugin for mxnet"""
5 | from .opencv import *
6 |
7 |
--------------------------------------------------------------------------------
/example/speech-demo/python_wrap/example_usage/data.ark:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hschen0712/mxnet/master/example/speech-demo/python_wrap/example_usage/data.ark
--------------------------------------------------------------------------------
/docs/_static/mxnet-theme/footer.html:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
--------------------------------------------------------------------------------
/src/operator/smooth_l1_unary.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 |  * \file smooth_l1_unary.cu
4 | * \brief Smooth L1 loss
5 | */
6 | #include "./smooth_l1_unary-inl.h"
7 |
--------------------------------------------------------------------------------
/tests/travis/r_vignettes.R:
--------------------------------------------------------------------------------
1 | fnames <- list.files("R-package/vignettes/", pattern="*.Rmd")
2 | sapply(fnames, function(x){
3 | knitr::purl(paste0("R-package/vignettes/", x))
4 | })
--------------------------------------------------------------------------------
/src/operator/smooth_l1_unary.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file smooth_l1_unary.cc
4 | * \brief Smooth L1 loss
5 | */
6 | #include "./smooth_l1_unary-inl.h"
7 |
--------------------------------------------------------------------------------
/example/python-howto/README.md:
--------------------------------------------------------------------------------
1 | Python Howto Examples
2 | =====================
3 | * [Configuring Net to get Multiple Outputs](multiple_outputs.py)
4 | * [Configuring Image Record Iterator](data_iter.py)
5 |
--------------------------------------------------------------------------------
/example/speech-demo/io_func/info.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | _mydir = os.path.dirname(__file__) or '.'
4 |
5 | ROOT = os.path.abspath(os.path.join(_mydir, "../.."))
6 | CONFIGS = os.path.join(ROOT, "configs")
7 |
--------------------------------------------------------------------------------
/src/operator/broadcast_mask_op.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file broadcast_mask_op.cc
4 | * \brief
5 | * \author Bing Xu
6 | */
7 | #include "./broadcast_mask_op-inl.h"
8 |
9 |
--------------------------------------------------------------------------------
/src/operator/broadcast_mask_op.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file broadcast_mask_op.cu
4 | * \brief
5 | * \author Bing Xu
6 | */
7 | #include "./broadcast_mask_op-inl.h"
8 |
9 |
--------------------------------------------------------------------------------
/src/operator/loss_binary_op.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file loss_binary_op.cc
4 | * \brief loss function that takes a data and label
5 | */
6 | #include "./loss_binary_op-inl.h"
7 |
--------------------------------------------------------------------------------
/src/operator/loss_binary_op.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file loss_binary_op.cu
4 | * \brief loss function that takes a data and label
5 | */
6 | #include "./loss_binary_op-inl.h"
7 |
--------------------------------------------------------------------------------
/src/operator/elementwise_binary_op.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file elementwise_binary_op.cu
4 | * \brief elementwise binary operator
5 | */
6 | #include "./elementwise_binary_op-inl.h"
7 |
--------------------------------------------------------------------------------
/src/operator/elementwise_binary_op.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file elementwise_binary_op.cc
4 | * \brief elementwise binary operator
5 | */
6 | #include "./elementwise_binary_op-inl.h"
7 |
8 |
--------------------------------------------------------------------------------
/tests/python/train/common.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
3 | sys.path.append(os.path.join(curr_path, '../common/'))
4 |
5 | import models
6 | import get_data
7 |
--------------------------------------------------------------------------------
/tests/python/unittest/common.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
3 | sys.path.append(os.path.join(curr_path, '../common/'))
4 |
5 | import models
6 | import get_data
7 |
--------------------------------------------------------------------------------
/src/operator/elementwise_binary_scalar_op.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file elementwise_binary_scalar_op.cc
4 | * \brief elementwise binary operator
5 | */
6 | #include "./elementwise_binary_scalar_op-inl.h"
7 |
--------------------------------------------------------------------------------
/src/operator/elementwise_binary_scalar_op.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file elementwise_binary_scalar_op.cu
4 | * \brief elementwise binary operator
5 | */
6 | #include "./elementwise_binary_scalar_op-inl.h"
7 |
--------------------------------------------------------------------------------
/example/model-parallel-lstm/README.md:
--------------------------------------------------------------------------------
1 | Model Parallel LSTM
2 | ===================
3 | This is an example showing how to do model parallel LSTM in MXNet.
4 | Most of the code is duplicated from the rnn example, and should eventually be merged.
5 |
--------------------------------------------------------------------------------
/plugin/opencv/opencv.mk:
--------------------------------------------------------------------------------
1 | OPENCV_SRC = $(wildcard plugin/opencv/*.cc)
2 | PLUGIN_OBJ += $(patsubst %.cc, build/%.o, $(OPENCV_SRC))
3 | OPENCV_CUSRC = $(wildcard plugin/opencv/*.cu)
4 | PLUGIN_CUOBJ += $(patsubst %.cu, build/%_gpu.o, $(OPENCV_CUSRC))
5 |
--------------------------------------------------------------------------------
/src/operator/sample_op.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2016 by Contributors
3 | * \file sample_op.cu
4 | * \brief GPU Implementation of sample op
5 | */
6 | // this will be invoked by nvcc and compile GPU version
7 | #include "./sample_op-inl.h"
8 |
--------------------------------------------------------------------------------
/src/operator/matrix_op.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file matrix_op.cu
4 | * \brief GPU Implementation of matrix operations
5 | */
7 | // this will be invoked by nvcc and compile GPU version
7 | #include "./matrix_op-inl.h"
8 |
--------------------------------------------------------------------------------
/amalgamation/jni/org/dmlc/mxnet/MxnetException.java:
--------------------------------------------------------------------------------
1 | package org.dmlc.mxnet;
2 |
3 | public class MxnetException extends Exception {
4 | public MxnetException(){}
5 | public MxnetException(String txt) {
6 | super(txt);
7 | }
8 | }
9 |
10 |
--------------------------------------------------------------------------------
/src/operator/elementwise_binary_broadcast_op.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file elementwise_binary_broadcast_op.cc
4 | * \brief elementwise binary broadcast operator
5 | */
6 | #include "./elementwise_binary_broadcast_op-inl.h"
7 |
--------------------------------------------------------------------------------
/src/operator/elementwise_binary_broadcast_op.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file elementwise_binary_broadcast_op.cu
4 | * \brief elementwise binary broadcast operator
5 | */
6 | #include "./elementwise_binary_broadcast_op-inl.h"
7 |
--------------------------------------------------------------------------------
/example/neural-style/find_mxnet.py:
--------------------------------------------------------------------------------
1 | try:
2 | import mxnet as mx
3 | except ImportError:
4 | import os, sys
5 | curr_path = os.path.abspath(os.path.dirname(__file__))
6 | sys.path.append(os.path.join(curr_path, "../../python"))
7 | import mxnet as mx
8 |
--------------------------------------------------------------------------------
/example/image-classification/find_mxnet.py:
--------------------------------------------------------------------------------
1 | try:
2 | import mxnet as mx
3 | except ImportError:
4 | import os, sys
5 | curr_path = os.path.abspath(os.path.dirname(__file__))
6 | sys.path.append(os.path.join(curr_path, "../../python"))
7 | import mxnet as mx
8 |
--------------------------------------------------------------------------------
/src/operator/broadcast_reduce_op.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file broadcast_reduce_op.cu
4 | * \brief GPU Implementation of broadcast reduce op
5 | */
6 | // this will be invoked by nvcc and compile GPU version
7 | #include "./broadcast_reduce_op-inl.h"
8 |
--------------------------------------------------------------------------------
/src/operator/elementwise_unary_op.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file elementwise_unary_op.cc
4 | * \brief CPU Implementation of unary function.
5 | */
6 | // this will be invoked by gcc and compile CPU version
7 | #include "./elementwise_unary_op-inl.h"
8 |
--------------------------------------------------------------------------------
/src/operator/elementwise_unary_op.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 |  * \file elementwise_unary_op.cu
4 |  * \brief GPU Implementation of unary function.
5 |  */
6 | // this will be invoked by nvcc and compile GPU version
7 | #include "./elementwise_unary_op-inl.h"
8 |
--------------------------------------------------------------------------------
/docs/tutorials/unsupervised_learning/gan.md:
--------------------------------------------------------------------------------
1 | # Generative Adversarial Network
2 | You can get the source code for the MXNet GAN example [here](https://github.com/dmlc/mxnet/tree/master/example/gan).
3 |
4 | # Recommended Next Steps
5 | * [MXNet tutorials index](http://mxnet.io/tutorials/index.html)
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "mshadow"]
2 | path = mshadow
3 | url = https://github.com/dmlc/mshadow.git
4 | [submodule "dmlc-core"]
5 | path = dmlc-core
6 | url = https://github.com/dmlc/dmlc-core.git
7 | [submodule "ps-lite"]
8 | path = ps-lite
9 | url = https://github.com/dmlc/ps-lite
10 |
--------------------------------------------------------------------------------
/R-package/R/kvstore.R:
--------------------------------------------------------------------------------
1 | is.MXKVStore <- function(x) {
2 | inherits(x, "Rcpp_MXKVStore")
3 | }
4 |
5 | #' Create a mxnet KVStore.
6 | #'
7 | #' @param type string(default="local") The type of kvstore.
8 | #' @return The kvstore.
9 | #'
10 | #' @name mx.kv.create
11 | #' @export
12 | NULL
13 |
--------------------------------------------------------------------------------
/R-package/demo/basic_random.R:
--------------------------------------------------------------------------------
1 | require(mxnet)
2 |
3 | mx.set.seed(10)
4 |
5 | print(mx.runif(c(2,2), -10, 10))
6 |
7 | # Test initialization module for neural nets.
8 | uinit = mx.init.uniform(0.1)
9 | print(uinit("fc1_weight", c(2, 2), mx.cpu()))
10 | print(uinit("fc1_gamma", c(2, 2), mx.cpu()))
11 |
--------------------------------------------------------------------------------
/docs/tutorials/unsupervised_learning/auto_encoders.md:
--------------------------------------------------------------------------------
1 | # Auto Encoders
2 | You can get the source code for the MXNet autoencoder example [here](https://github.com/dmlc/mxnet/tree/master/example/autoencoder).
3 |
4 | # Recommended Next Steps
5 | * [MXNet tutorials index](http://mxnet.io/tutorials/index.html)
--------------------------------------------------------------------------------
/plugin/sframe/plugin.mk:
--------------------------------------------------------------------------------
1 | SFRMAE_SRC = plugin/sframe/iter_sframe.cc
2 | PLUGIN_OBJ += build/plugin/sframe/iter_sframe.o
3 | CFLAGS += -I$(SFRAME_PATH)/oss_src/unity/lib/
4 | CFLAGS += -I$(SFRAME_PATH)/oss_src/
5 | LDFLAGS += -L$(SFRAME_PATH)/release/oss_src/unity/python/sframe/
6 | LDFLAGS += -lunity_shared
7 | LDFLAGS += -lboost_system
8 |
--------------------------------------------------------------------------------
/R-package/demo/basic_bench.R:
--------------------------------------------------------------------------------
1 | require(mxnet)
2 | require(methods)
3 |
4 |
5 | shape = c(1, 1)
6 | lr = 0.01
7 | x = mx.nd.ones(shape)
8 | y = mx.nd.zeros(shape)
9 | print(x)
10 | n = 1000
11 |
12 |
13 | tic = proc.time()
14 | for (i in 1 : n) {
15 | y = y + x *lr
16 | }
17 | toc = proc.time() - tic
18 | as.array(y)
19 | print(toc)
20 |
--------------------------------------------------------------------------------
/R-package/man/ctx.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{ctx}
4 | \alias{ctx}
5 | \title{Get the context of mx.ndarray}
6 | \usage{
7 | ctx(nd)
8 | }
9 | \arguments{
10 | \item{nd}{The mx.ndarray}
11 | }
12 | \description{
13 | Get the context of mx.ndarray
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/plugin/warpctc/warpctc.mk:
--------------------------------------------------------------------------------
1 | CFLAGS += -I$(WARPCTC_PATH)/include
2 | LDFLAGS += -L$(WARPCTC_PATH)/build -lwarpctc
3 |
4 | WARPCTC_SRC = $(wildcard plugin/warpctc/*.cc)
5 | PLUGIN_OBJ += $(patsubst %.cc, build/%.o, $(WARPCTC_SRC))
6 | WARPCTC_CUSRC = $(wildcard plugin/warpctc/*.cu)
7 | PLUGIN_CUOBJ += $(patsubst %.cu, build/%_gpu.o, $(WARPCTC_CUSRC))
8 |
--------------------------------------------------------------------------------
/scala-package/core/scripts/get_mnist_data.sh:
--------------------------------------------------------------------------------
1 | data_path="./data"
2 | if [ ! -d "$data_path" ]; then
3 | mkdir -p "$data_path"
4 | fi
5 |
6 | mnist_data_path="./data/mnist.zip"
7 | if [ ! -f "$mnist_data_path" ]; then
8 | wget http://data.dmlc.ml/mxnet/data/mnist.zip -P $data_path
9 | cd $data_path
10 | unzip -u mnist.zip
11 | fi
12 |
--------------------------------------------------------------------------------
/R-package/man/mx.io.extract.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/io.R
3 | \name{mx.io.extract}
4 | \alias{mx.io.extract}
5 | \title{Extract a certain field from DataIter.}
6 | \usage{
7 | mx.io.extract(iter, field)
8 | }
9 | \description{
10 | Extract a certain field from DataIter.
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/docs/tutorials/general_ml/recommendation_systems.md:
--------------------------------------------------------------------------------
1 | # Recommendation Systems
2 |
3 | You can get the source code for an example recommendation system built with MXNet [here](https://github.com/dmlc/mxnet-notebooks/tree/master/python/recommendation_systems).
4 |
5 | # Recommended Next Steps
6 | * [MXNet tutorials index](http://mxnet.io/tutorials/index.html)
--------------------------------------------------------------------------------
/scala-package/core/scripts/get_cifar_data.sh:
--------------------------------------------------------------------------------
1 | data_path="./data"
2 | if [ ! -d "$data_path" ]; then
3 | mkdir -p "$data_path"
4 | fi
5 |
6 | cifar_data_path="./data/cifar10.zip"
7 | if [ ! -f "$cifar_data_path" ]; then
8 | wget http://data.dmlc.ml/mxnet/data/cifar10.zip -P $data_path
9 | cd $data_path
10 | unzip -u cifar10.zip
11 | fi
12 |
--------------------------------------------------------------------------------
/R-package/man/outputs.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/symbol.R
3 | \name{outputs}
4 | \alias{outputs}
5 | \title{Get the outputs of a symbol.}
6 | \usage{
7 | outputs(x)
8 | }
9 | \arguments{
10 | \item{x}{The input symbol}
11 | }
12 | \description{
13 | Get the outputs of a symbol.
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/arguments.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/symbol.R
3 | \name{arguments}
4 | \alias{arguments}
5 | \title{Get the arguments of symbol.}
6 | \usage{
7 | arguments(x)
8 | }
9 | \arguments{
10 | \item{x}{The input symbol}
11 | }
12 | \description{
13 | Get the arguments of symbol.
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/docs/api/c++/index.md:
--------------------------------------------------------------------------------
1 | # MXNet - C++ API
2 |
3 | Please refer to the links below for the namespaces, classes, and code files of the MXNet C++ package.
4 |
5 | * [Namespaces](http://mxnet.io/doxygen/namespaces.html)
6 | * [Classes](http://mxnet.io/doxygen/annotated.html)
7 | * [Code Files](http://mxnet.io/doxygen/files.html)
8 | * [MXNet CPP Package](https://github.com/dmlc/MXNet.cpp)
9 |
--------------------------------------------------------------------------------
/R-package/man/is.mx.symbol.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/symbol.R
3 | \name{is.mx.symbol}
4 | \alias{is.mx.symbol}
5 | \title{Judge if an object is mx.symbol}
6 | \usage{
7 | is.mx.symbol(x)
8 | }
9 | \value{
10 | Logical indicator
11 | }
12 | \description{
13 | Judge if an object is mx.symbol
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/docs/how_to/develop_and_hack.md:
--------------------------------------------------------------------------------
1 | # Develop and Hack MXNet
2 | - [Create new operators](new_op.md)
3 | - [Use Torch from MXNet](torch.md)
4 | - [Set environment variables of MXNet](env_var.md)
5 |
6 | # Other Resources
7 | - [MXNet System Architecture Overview](http://mxnet.io/architecture/overview.html)
8 | - [Contributor Guidelines](http://mxnet.io/community/contribute.html)
--------------------------------------------------------------------------------
/R-package/demo/basic_kvstore.R:
--------------------------------------------------------------------------------
1 | require(mxnet)
2 |
3 | kv = mx.kv.create()
4 |
5 | dlist = lapply(1:3, function(i) {
6 | x = as.array(c(i, i+1))
7 | mat = mx.nd.array(x, mx.cpu(i))
8 | list(x=mat)
9 | })
10 | kv$init(c(0), dlist[[1]])
11 | kv$push(c(0), dlist, 0)
12 | kv$pull(c(0), dlist, 0)
13 |
14 | print(as.array(dlist[[1]][[1]]))
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/R-package/man/is.mx.dataiter.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/io.R
3 | \name{is.mx.dataiter}
4 | \alias{is.mx.dataiter}
5 | \title{Judge if an object is mx.dataiter}
6 | \usage{
7 | is.mx.dataiter(x)
8 | }
9 | \value{
10 | Logical indicator
11 | }
12 | \description{
13 | Judge if an object is mx.dataiter
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.metric.custom.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/metric.R
3 | \name{mx.metric.custom}
4 | \alias{mx.metric.custom}
5 | \title{Helper function to create a customized metric}
6 | \usage{
7 | mx.metric.custom(name, feval)
8 | }
9 | \description{
10 | Helper function to create a customized metric
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/R-package/man/is.mx.context.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/context.R
3 | \name{is.mx.context}
4 | \alias{is.mx.context}
5 | \title{Check if the type is mxnet context.}
6 | \usage{
7 | is.mx.context(x)
8 | }
9 | \value{
10 | Logical indicator
11 | }
12 | \description{
13 | Check if the type is mxnet context.
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/example/rcnn/test.py:
--------------------------------------------------------------------------------
1 | import mxnet as mx
2 |
3 | from tools.test_rcnn import test_rcnn
4 | from tools.test_rcnn import parse_args
5 |
6 | if __name__ == '__main__':
7 | args = parse_args()
8 | ctx = mx.gpu(args.gpu_id)
9 | test_rcnn(args.image_set, args.year, args.root_path, args.devkit_path, args.prefix, args.epoch, ctx, args.vis,
10 | args.has_rpn)
11 |
--------------------------------------------------------------------------------
/src/operator/crop.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 |  * \file crop.cu
4 | * \brief
5 | * \author Wei Wu
6 | */
7 |
8 | #include "./crop-inl.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator* CreateOp(CropParam param) {
14 | return new CropOp(param);
15 | }
16 |
17 | } // namespace op
18 | } // namespace mxnet
19 |
--------------------------------------------------------------------------------
/docs/build-preview.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Script to build the HTML docs and serve them.
4 | # Run within docker container for best results.
5 |
6 | echo "Building MXNet documentation..."
7 | make clean
8 | make html
9 | echo "Done building MXNet documentation..."
10 |
11 | echo "Serving MXNet docs on port 8008..."
12 | cd _build/html
13 | python -m SimpleHTTPServer 8008
14 |
15 |
--------------------------------------------------------------------------------
/R-package/man/mx.kv.create.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/kvstore.R
3 | \name{mx.kv.create}
4 | \alias{mx.kv.create}
5 | \title{Create a mxnet KVStore.}
6 | \arguments{
7 | \item{type}{string(default="local") The type of kvstore.}
8 | }
9 | \value{
10 | The kvstore.
11 | }
12 | \description{
13 | Create a mxnet KVStore.
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.cos.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.cos}
4 | \alias{mx.nd.cos}
5 | \title{Take cos of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take cos of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.exp.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.exp}
4 | \alias{mx.nd.exp}
5 | \title{Take exp of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take exp of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.log.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.log}
4 | \alias{mx.nd.log}
5 | \title{Take log of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take log of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.sin.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.sin}
4 | \alias{mx.nd.sin}
5 | \title{Take sin of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take sin of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/example/speech-demo/python_wrap/Makefile:
--------------------------------------------------------------------------------
1 |
2 |
3 | all:
4 |
5 | include ../kaldi.mk
6 |
7 | OBJFILES = ctypes.o
8 |
9 | LIBNAME = kaldi-python-wrap
10 |
11 | ADDLIBS = ../util/kaldi-util.a ../matrix/kaldi-matrix.a ../base/kaldi-base.a ../hmm/kaldi-hmm.a ../cudamatrix/kaldi-cudamatrix.a ../nnet/kaldi-nnet.a ../thread/kaldi-thread.a
12 |
13 | include ../makefiles/default_rules.mk
14 |
--------------------------------------------------------------------------------
/scala-package/core/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # for development debugging
2 | log4j.rootLogger = debug, stdout
3 |
4 | log4j.appender.stdout = org.apache.log4j.ConsoleAppender
5 | log4j.appender.stdout.Target = System.out
6 | log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
7 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} [%t] [%c] [%p] - %m%n
8 |
--------------------------------------------------------------------------------
/src/operator/native_op.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file native_op.cu
4 | * \brief
5 | * \author Junyuan Xie
6 | */
7 | #include "./native_op-inl.h"
8 | namespace mxnet {
9 | namespace op {
10 | template<>
11 | Operator* CreateOp(NativeOpParam param) {
12 | return new NativeOp(param);
13 | }
14 | } // namespace op
15 | } // namespace mxnet
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.sqrt.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.sqrt}
4 | \alias{mx.nd.sqrt}
5 | \title{Take sqrt of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take sqrt of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/scala-package/examples/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # for development debugging
2 | log4j.rootLogger = info, stdout
3 |
4 | log4j.appender.stdout = org.apache.log4j.ConsoleAppender
5 | log4j.appender.stdout.Target = System.out
6 | log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
7 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} [%t] [%c] [%p] - %m%n
8 |
--------------------------------------------------------------------------------
/R-package/man/mx.gpu.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/context.R
3 | \name{mx.gpu}
4 | \alias{mx.gpu}
5 | \title{Create a mxnet GPU context.}
6 | \arguments{
7 | \item{dev.id}{optional, default=0
8 | The GPU device ID, starts from 0.}
9 | }
10 | \value{
11 | The GPU context.
12 | }
13 | \description{
14 | Create a mxnet GPU context.
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.rsqrt.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.rsqrt}
4 | \alias{mx.nd.rsqrt}
5 | \title{Take rsqrt of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take rsqrt of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.load.json.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/symbol.R
3 | \name{mx.symbol.load.json}
4 | \alias{mx.symbol.load.json}
5 | \title{Load an mx.symbol object from a json string}
6 | \arguments{
7 | \item{str}{the json str represent a mx.symbol}
8 | }
9 | \description{
10 | Load an mx.symbol object from a json string
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/R-package/demo/00Index:
--------------------------------------------------------------------------------
1 | basic_bench Basic benchmark
2 | basic_executor Basic executor operations
3 | basic_kvstore Basic kvstore operations
4 | basic_model Basic model operations
5 | basic_ndarray Basic ndarray operations
6 | basic_random Basic random number generators
7 | basic_symbol Basic symbol operations
8 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.square.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.square}
4 | \alias{mx.nd.square}
5 | \title{Take square of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take square of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.callback.log.train.metric.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/callback.R
3 | \name{mx.callback.log.train.metric}
4 | \alias{mx.callback.log.train.metric}
5 | \title{Log training metric each period}
6 | \usage{
7 | mx.callback.log.train.metric(period, logger = NULL)
8 | }
9 | \description{
10 | Log training metric each period
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.ceil.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.ceil}
4 | \alias{mx.nd.ceil}
5 | \title{Take ceil value of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take ceil value of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.sign.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.sign}
4 | \alias{mx.nd.sign}
5 | \title{Take sign value of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take sign value of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/example/torch/torch_function.py:
--------------------------------------------------------------------------------
1 | import mxnet as mx
2 | x = mx.th.randn(2, 2, ctx=mx.cpu(0))
3 | print(x.asnumpy())
4 | y = mx.th.abs(x)
5 | print(y.asnumpy())
6 |
7 | x = mx.th.randn(2, 2, ctx=mx.cpu(0))
8 | print(x.asnumpy())
9 | mx.th.abs(x, x)  # in-place
10 | print(x.asnumpy())
11 |
12 | x = mx.th.ones(2, 2, ctx=mx.cpu(0))
13 | y = mx.th.ones(2, 2, ctx=mx.cpu(0)) * 2
14 | print(mx.th.cdiv(x, y).asnumpy())
15 |
--------------------------------------------------------------------------------
/R-package/man/dim.MXNDArray.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{dim.MXNDArray}
4 | \alias{dim.MXNDArray}
5 | \title{Dimension operator overload of mx.ndarray}
6 | \usage{
7 | \method{dim}{MXNDArray}(nd)
8 | }
9 | \arguments{
10 | \item{nd}{The mx.ndarray}
11 | }
12 | \description{
13 | Dimension operator overload of mx.ndarray
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.abs.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.abs}
4 | \alias{mx.nd.abs}
5 | \title{Take absolute value of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take absolute value of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.floor.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.floor}
4 | \alias{mx.nd.floor}
5 | \title{Take floor value of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take floor value of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.round.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.round}
4 | \alias{mx.nd.round}
5 | \title{Take round value of the src}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take round value of the src
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/print.MXNDArray.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{print.MXNDArray}
4 | \alias{print.MXNDArray}
5 | \title{print operator overload of mx.ndarray}
6 | \usage{
7 | \method{print}{MXNDArray}(nd)
8 | }
9 | \arguments{
10 | \item{nd}{The mx.ndarray}
11 | }
12 | \description{
13 | print operator overload of mx.ndarray
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/python/README.md:
--------------------------------------------------------------------------------
1 | MXNet Python Package
2 | ====================
3 | MXNet is a deep learning framework designed for both *efficiency* and *flexibility*.
4 | It allows you to mix the flavours of deep learning programs together to maximize efficiency and productivity.
5 |
6 |
7 | Installation
8 | ------------
9 | To install, see the [Build Instructions](http://mxnet.readthedocs.org/en/latest/build.html)
10 |
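For a first taste of the API, here is a minimal sketch mixing the symbolic and imperative (NDArray) styles; the layer sizes and array shapes are arbitrary placeholders:

```python
import mxnet as mx

# Symbolic style: declare a small multi-layer perceptron as a computation graph.
data = mx.symbol.Variable('data')
fc1  = mx.symbol.FullyConnected(data=data, num_hidden=64, name='fc1')
act1 = mx.symbol.Activation(data=fc1, act_type='relu', name='relu1')
fc2  = mx.symbol.FullyConnected(data=act1, num_hidden=10, name='fc2')
net  = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')

# Imperative style: NDArray computations execute immediately and mix freely
# with the symbolic graph above.
a = mx.nd.ones((2, 3), ctx=mx.cpu())
b = a * 2 + 1
print(b.asnumpy())
```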
--------------------------------------------------------------------------------
/src/operator/leaky_relu.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file leaky_relu.cu
4 | * \brief
5 | * \author Bing Xu
6 | */
7 |
8 | #include "./leaky_relu-inl.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator *CreateOp<gpu>(LeakyReLUParam param) {
14 | return new LeakyReLUOp<gpu>(param);
15 | }
16 |
17 | } // namespace op
18 | } // namespace mxnet
19 |
20 |
--------------------------------------------------------------------------------
/src/operator/make_loss.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file make_loss.cu
4 | * \brief special layer for propagating loss
5 | */
6 | #include "./make_loss-inl.h"
7 |
8 | namespace mxnet {
9 | namespace op {
10 | template<>
11 | Operator *CreateOp<gpu>(MakeLossParam param) {
12 | return new MakeLossOp<gpu>(param);
13 | }
14 |
15 | } // namespace op
16 | } // namespace mxnet
17 |
18 |
--------------------------------------------------------------------------------
/tools/caffe_converter/Makefile:
--------------------------------------------------------------------------------
1 | # find protoc
2 | ifndef PROTOC
3 | DEPS_PROTOC=../../deps/bin/protoc
4 | ifneq ("$(wildcard $(DEPS_PROTOC))","")
5 | PROTOC = $(DEPS_PROTOC)
6 | else
7 | PROTOC = protoc
8 | endif
9 | endif
10 |
11 | all: caffe_parse/caffe_pb2.py
12 |
13 | clean:
14 | rm caffe_parse/caffe_pb2.py*
15 |
16 | caffe_parse/caffe_pb2.py:
17 | $(PROTOC) --python_out=./ ./caffe_parse/caffe.proto
18 |
--------------------------------------------------------------------------------
/R-package/man/length.MXNDArray.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{length.MXNDArray}
4 | \alias{length.MXNDArray}
5 | \title{Length operator overload of mx.ndarray}
6 | \usage{
7 | \method{length}{MXNDArray}(nd)
8 | }
9 | \arguments{
10 | \item{nd}{The mx.ndarray}
11 | }
12 | \description{
13 | Length operator overload of mx.ndarray
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/python/mxnet/module/__init__.py:
--------------------------------------------------------------------------------
1 | """A module is like a FeedForward model. but we would like to make it
2 | easier to be composed. So it is more like the Torch modules.
3 | """
4 |
5 | from .base_module import BaseModule
6 | from .module import Module
7 | from .bucketing_module import BucketingModule
8 | from .sequential_module import SequentialModule
9 |
10 | from .python_module import PythonModule, PythonLossModule
11 |
--------------------------------------------------------------------------------
/src/operator/l2_normalization.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file l2_normalization.cu
4 | * \brief l2 normalization operator
5 | */
6 | #include "./l2_normalization-inl.h"
7 | namespace mxnet {
8 | namespace op {
9 | template<>
10 | Operator* CreateOp<gpu>(L2NormalizationParam param) {
11 | return new L2NormalizationOp<gpu>(param);
12 | }
13 | } // namespace op
14 | } // namespace mxnet
15 |
--------------------------------------------------------------------------------
/R-package/man/as.array.MXNDArray.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{as.array.MXNDArray}
4 | \alias{as.array.MXNDArray}
5 | \title{as.array operator overload of mx.ndarray}
6 | \usage{
7 | \method{as.array}{MXNDArray}(nd)
8 | }
9 | \arguments{
10 | \item{nd}{The mx.ndarray}
11 | }
12 | \description{
13 | as.array operator overload of mx.ndarray
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.apply.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/symbol.R
3 | \name{mx.apply}
4 | \alias{mx.apply}
5 | \title{Apply symbol to the inputs.}
6 | \usage{
7 | mx.apply(x, ...)
8 | }
9 | \arguments{
10 | \item{x}{The symbol to be applied}
11 |
12 | \item{kwargs}{The keyword arguments to the symbol}
13 | }
14 | \description{
15 | Apply symbol to the inputs.
16 | }
17 |
18 |
--------------------------------------------------------------------------------
/example/speech-demo/python_wrap/example_usage/README.txt:
--------------------------------------------------------------------------------
1 | # If not already done, make sure kaldi/src is compiled as shared libraries
2 | cd kaldi/src
3 | ./configure --shared
4 | make depend
5 | make
6 |
7 | # Copy python_wrap/ to kaldi/src and compile it
8 | cd python_wrap/
9 | make
10 |
11 | cd example_usage/
12 | # Add kaldi/src/lib to LD_LIBRARY_PATH
13 | export LD_LIBRARY_PATH=../../lib:$LD_LIBRARY_PATH
14 | python example.py
--------------------------------------------------------------------------------
/src/operator/sample_op.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2016 by Contributors
3 | * \file sample_op.cc
4 | * \brief CPU Implementation of sample op
5 | */
6 | // this will be invoked by cc
7 | #include "./sample_op-inl.h"
8 |
9 | namespace mxnet {
10 | namespace op {
11 |
12 | DMLC_REGISTER_PARAMETER(SampleUniformParam);
13 | DMLC_REGISTER_PARAMETER(SampleNormalParam);
14 |
15 | } // namespace op
16 | } // namespace mxnet
17 |
--------------------------------------------------------------------------------
/src/operator/slice_channel.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file slice_channel.cu
4 | * \brief
5 | * \author Bing Xu
6 | */
7 |
8 | #include "./slice_channel-inl.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator* CreateOp<gpu>(SliceChannelParam param) {
14 | return new SliceChannelOp<gpu>(param);
15 | }
16 |
17 | } // namespace op
18 | } // namespace mxnet
19 |
20 |
--------------------------------------------------------------------------------
/R-package/man/as.matrix.MXNDArray.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{as.matrix.MXNDArray}
4 | \alias{as.matrix.MXNDArray}
5 | \title{as.matrix operator overload of mx.ndarray}
6 | \usage{
7 | \method{as.matrix}{MXNDArray}(nd)
8 | }
9 | \arguments{
10 | \item{nd}{The mx.ndarray}
11 | }
12 | \description{
13 | as.matrix operator overload of mx.ndarray
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/example/fcn-xs/run_fcnxs.sh:
--------------------------------------------------------------------------------
1 | # train fcn-32s model
2 | python -u fcn_xs.py --model=fcn32s --prefix=VGG_FC_ILSVRC_16_layers \
3 | --epoch=74 --init-type=vgg16
4 |
5 | ## train fcn-16s model
6 | #python -u fcn_xs.py --model=fcn16s --prefix=FCN32s_VGG16 \
7 | #--epoch=31 --init-type=fcnxs
8 |
9 | # train fcn-8s model
10 | #python -u fcn_xs.py --model=fcn8s --prefix=FCN16s_VGG16 \
11 | #--epoch=27 --init-type=fcnxs
12 |
--------------------------------------------------------------------------------
/example/numpy-ops/README.md:
--------------------------------------------------------------------------------
1 | # Training MNIST With NumpyOp
2 |
3 | Uses the same setup as example/mnist/mlp.py, except that the loss symbol is
4 | custom defined with NumpyOp. mxnet.operator.NumpyOp helps move the computation
5 | in a symbol's forward/backward operations to the Python frontend. This is meant
6 | for fast implementation/experimentation of non-performance-critical symbols.
7 | If such a symbol becomes a bottleneck, please consider writing a C++/CUDA version.
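For reference, a softmax loss written against this interface looks roughly like the sketch below (paraphrased, not copied verbatim from numpy_softmax.py in this folder):

```python
import numpy as np
import mxnet as mx

class NumpySoftmax(mx.operator.NumpyOp):
    def __init__(self):
        # need_top_grad=False because this acts as a loss layer.
        super(NumpySoftmax, self).__init__(need_top_grad=False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        data_shape = in_shape[0]
        label_shape = (in_shape[0][0],)
        output_shape = in_shape[0]
        return [data_shape, label_shape], [output_shape]

    def forward(self, in_data, out_data):
        x, y = in_data[0], out_data[0]
        y[:] = np.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
        y /= y.sum(axis=1).reshape((x.shape[0], 1))

    def backward(self, out_grad, in_data, out_data, in_grad):
        label, y, dx = in_data[1], out_data[0], in_grad[0]
        dx[:] = y
        dx[np.arange(label.shape[0]), label.astype(int)] -= 1.0

# Used in place of mx.symbol.SoftmaxOutput, e.g.:
#   softmax = NumpySoftmax()(data=fc3, name='softmax')
```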
--------------------------------------------------------------------------------
/example/bi-lstm-sort/README.md:
--------------------------------------------------------------------------------
1 | This is an example of using a bidirectional LSTM to sort an array.
2 |
3 | Firstly, generate data by:
4 |
5 | cd data
6 | python gen_data.py
7 |
8 | Then, train the model by:
9 |
10 | python lstm_sort.py
11 |
12 | Finally, test the model:
13 |
14 | python infer_sort.py 234 189 785 763 231
15 |
16 | and it will output the sorted sequence:
17 |
18 | 189
19 | 231
20 | 234
21 | 763
22 | 785
23 |
24 |
25 |
--------------------------------------------------------------------------------
/plugin/torch/torch.mk:
--------------------------------------------------------------------------------
1 | CFLAGS += -I$(TORCH_PATH)/install/include -I$(TORCH_PATH)/install/include/TH -I$(TORCH_PATH)/install/include/THC/ -DMXNET_USE_TORCH=1
2 | LDFLAGS += -L$(TORCH_PATH)/install/lib -lluajit -lluaT -lTH -lTHC
3 |
4 | TORCH_SRC = $(wildcard plugin/torch/*.cc)
5 | PLUGIN_OBJ += $(patsubst %.cc, build/%.o, $(TORCH_SRC))
6 | TORCH_CUSRC = $(wildcard plugin/torch/*.cu)
7 | PLUGIN_CUOBJ += $(patsubst %.cu, build/%_gpu.o, $(TORCH_CUSRC))
8 |
--------------------------------------------------------------------------------
/R-package/man/mx.cpu.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/context.R
3 | \name{mx.cpu}
4 | \alias{mx.cpu}
5 | \title{Create a mxnet CPU context.}
6 | \arguments{
7 | \item{dev.id}{optional, default=0
8 | The device ID; this is meaningless for CPU and is included only for interface compatibility.}
9 | }
10 | \value{
11 | The CPU context.
12 | }
13 | \description{
14 | Create a mxnet CPU context.
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/example/neural-style/download.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mkdir -p model
4 | cd model
5 | wget https://github.com/dmlc/web-data/raw/master/mxnet/neural-style/model/vgg19.params
6 | cd ..
7 |
8 | mkdir -p input
9 | cd input
10 | wget https://github.com/dmlc/web-data/raw/master/mxnet/neural-style/input/IMG_4343.jpg
11 | wget https://github.com/dmlc/web-data/raw/master/mxnet/neural-style/input/starry_night.jpg
12 | cd ..
13 |
14 | mkdir -p output
15 |
--------------------------------------------------------------------------------
/R-package/man/mx.exec.backward.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/executor.R
3 | \name{mx.exec.backward}
4 | \alias{mx.exec.backward}
5 | \title{Perform a backward pass on the executors
6 | This function will MUTATE the state of exec}
7 | \usage{
8 | mx.exec.backward(exec, ...)
9 | }
10 | \description{
11 | Perform a backward pass on the executors
12 | This function will MUTATE the state of exec
13 | }
14 |
15 |
--------------------------------------------------------------------------------
/R-package/man/mx.metric.accuracy.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/metric.R
3 | \docType{data}
4 | \name{mx.metric.accuracy}
5 | \alias{mx.metric.accuracy}
6 | \title{Accuracy metric for classification}
7 | \format{An object of class \code{mx.metric} of length 3.}
8 | \usage{
9 | mx.metric.accuracy
10 | }
11 | \description{
12 | Accuracy metric for classification
13 | }
14 | \keyword{datasets}
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.simple.bind.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/executor.R
3 | \name{mx.simple.bind}
4 | \alias{mx.simple.bind}
5 | \title{Simple bind the symbol to executor,
6 | with information from input shapes.}
7 | \usage{
8 | mx.simple.bind(symbol, ctx, grad.req = "null", ...)
9 | }
10 | \description{
11 | Simple bind the symbol to executor,
12 | with information from input shapes.
13 | }
14 |
15 |
--------------------------------------------------------------------------------
/docs/how_to/pretrained.md:
--------------------------------------------------------------------------------
1 | Pre-trained Model Gallery
2 | ========================
3 | This document lists the pre-trained models available in MXNet
4 |
5 | * [89.9% Top-5 Validation Accuracy for ImageNet 1,000 Classes Challenge](https://github.com/dmlc/mxnet-model-gallery/blob/master/imagenet-1k-inception-bn.md)
6 | * [37.2% Top-1 Training Accuracy for Full ImageNet 21,841 Classes](https://github.com/dmlc/mxnet-model-gallery/blob/master/imagenet-21k-inception.md)
7 |
8 |
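Loading a downloaded model from Python is an ordinary checkpoint load; a sketch (the prefix and epoch must match the files you actually fetched, e.g. `Inception-BN-symbol.json` plus `Inception-BN-0126.params` would correspond to prefix `'Inception-BN'`, epoch 126):

```python
import mxnet as mx

prefix, epoch = 'Inception-BN', 126   # match your downloaded checkpoint files
model = mx.model.FeedForward.load(prefix, epoch, ctx=mx.cpu())

# The network definition and weights are now available for prediction
# or fine-tuning.
print(model.symbol.list_outputs())
print(len(model.arg_params), 'parameter arrays loaded')
```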
--------------------------------------------------------------------------------
/R-package/man/Ops.MXNDArray.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{Ops.MXNDArray}
4 | \alias{Ops.MXNDArray}
5 | \title{Binary operator overloading of mx.ndarray}
6 | \usage{
7 | \method{Ops}{MXNDArray}(e1, e2)
8 | }
9 | \arguments{
10 | \item{e1}{The first operand}
11 |
12 | \item{e2}{The second operand}
13 | }
14 | \description{
15 | Binary operator overloading of mx.ndarray
16 | }
17 |
18 |
--------------------------------------------------------------------------------
/R-package/man/mx.exec.forward.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/executor.R
3 | \name{mx.exec.forward}
4 | \alias{mx.exec.forward}
5 | \title{Perform a forward pass on the executors
6 | This function will MUTATE the state of exec}
7 | \usage{
8 | mx.exec.forward(exec, is.train = TRUE)
9 | }
10 | \description{
11 | Perform a forward pass on the executors
12 | This function will MUTATE the state of exec
13 | }
14 |
15 |
--------------------------------------------------------------------------------
/R-package/man/mx.metric.mae.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/metric.R
3 | \docType{data}
4 | \name{mx.metric.mae}
5 | \alias{mx.metric.mae}
6 | \title{MAE (Mean Absolute Error) metric for regression}
7 | \format{An object of class \code{mx.metric} of length 3.}
8 | \usage{
9 | mx.metric.mae
10 | }
11 | \description{
12 | MAE (Mean Absolute Error) metric for regression
13 | }
14 | \keyword{datasets}
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.Variable.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/symbol.R
3 | \name{mx.symbol.Variable}
4 | \alias{mx.symbol.Variable}
5 | \title{Create a symbolic variable with specified name.}
6 | \arguments{
7 | \item{name}{string
8 | The name of the result symbol.}
9 | }
10 | \value{
11 | The result symbol
12 | }
13 | \description{
14 | Create a symbolic variable with specified name.
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/tools/accnn/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "conv_params": {
3 | "conv1_1": 5,
4 | "conv1_2": 32,
5 | "conv2_1": 64,
6 | "conv2_2": 64,
7 | "conv3_1": 96,
8 | "conv3_2": 160,
9 | "conv3_3": 192,
10 | "conv4_1": 256,
11 | "conv4_2": 256,
12 | "conv4_3": 320,
13 | "conv5_1": 384,
14 | "conv5_2": 384,
15 | "conv5_3": 384
16 | },
17 | "fc_params": {
18 | "fc6": 2048,
19 | "fc7": 2048
20 | }
21 | }
--------------------------------------------------------------------------------
/scala-package/core/src/test/scala/ml/dmlc/mxnet/ShapeSuite.scala:
--------------------------------------------------------------------------------
1 | package ml.dmlc.mxnet
2 |
3 | import org.scalatest.{BeforeAndAfterAll, FunSuite}
4 |
5 | class ShapeSuite extends FunSuite with BeforeAndAfterAll {
6 | test("to string") {
7 | val s = Shape(1, 2, 3)
8 | assert(s.toString === "(1,2,3)")
9 | }
10 |
11 | test("equals") {
12 | assert(Shape(1, 2, 3) === Shape(1, 2, 3))
13 | assert(Shape(1, 2) != Shape(1, 2, 3))
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.opt.create.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/optimizer.R
3 | \name{mx.opt.create}
4 | \alias{mx.opt.create}
5 | \title{Create an optimizer by name and parameters}
6 | \usage{
7 | mx.opt.create(name, ...)
8 | }
9 | \arguments{
10 | \item{name}{The name of the optimizer}
11 |
12 | \item{...}{Additional arguments}
13 | }
14 | \description{
15 | Create an optimizer by name and parameters
16 | }
17 |
18 |
--------------------------------------------------------------------------------
/tests/jenkins/format:
--------------------------------------------------------------------------------
1 | # match line starting with 'error ', case-insensitive
2 | error /(?i)^error /
3 |
4 | # list of warnings here...
5 | warning /[Ww]arning/
6 | warning /WARNING/
7 |
8 | # create a quick access link to lines in the report containing 'INFO'
9 | info /INFO/
10 |
11 | # each line containing 'BUILD' represents the start of a section for grouping errors and warnings found after the line.
12 | # also creates a quick access link.
13 | start /BUILD/
14 |
--------------------------------------------------------------------------------
/R-package/man/mx.metric.rmse.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/metric.R
3 | \docType{data}
4 | \name{mx.metric.rmse}
5 | \alias{mx.metric.rmse}
6 | \title{RMSE (Root Mean Squared Error) metric for regression}
7 | \format{An object of class \code{mx.metric} of length 3.}
8 | \usage{
9 | mx.metric.rmse
10 | }
11 | \description{
12 | RMSE (Root Mean Squared Error) metric for regression
13 | }
14 | \keyword{datasets}
15 |
16 |
--------------------------------------------------------------------------------
/plugin/warpctc/warpctc.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file warpctc.cu
4 | * \brief warpctc op
5 | * \author Liang Xiang
6 | */
7 | #include "./warpctc-inl.h"
8 | #include <stdio.h>
9 | #include "../../src/operator/mshadow_op.h"
10 |
11 | namespace mxnet {
12 | namespace op {
13 | template<>
14 | Operator *CreateOp<gpu>(WarpCTCParam param) {
15 | return new WarpCTCOp<gpu>(param);
16 | }
17 |
18 | } // namespace op
19 | } // namespace mxnet
20 |
--------------------------------------------------------------------------------
/plugin/torch/torch_criterion.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file torch_criterion.cu
4 | * \brief torch criterion op
5 | * \author Bing Xu
6 | */
7 | #include "./torch_criterion-inl.h"
8 | #include "../../src/operator/mshadow_op.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator *CreateOp<gpu>(TorchCriterionParam param) {
14 | return new TorchCriterionOp<gpu>(param);
15 | }
16 |
17 | } // namespace op
18 | } // namespace mxnet
19 |
--------------------------------------------------------------------------------
/src/operator/identity_attach_KL_sparse_reg.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file identity_attach_KL_sparse_reg.cu
4 | * \brief
5 | */
6 | #include "./identity_attach_KL_sparse_reg-inl.h"
7 |
8 | namespace mxnet {
9 | namespace op {
10 | template<>
11 | Operator *CreateOp<gpu>(IdentityAttachKLSparseRegParam param) {
12 | return new IdentityAttachKLSparseRegOp<gpu>(param);
13 | }
14 |
15 | } // namespace op
16 | } // namespace mxnet
17 |
--------------------------------------------------------------------------------
/tests/cpp/unittest.mk:
--------------------------------------------------------------------------------
1 | TEST_SRC = $(wildcard tests/cpp/*_test.cc)
2 | TEST = $(patsubst tests/cpp/%_test.cc, tests/cpp/%_test, $(TEST_SRC))
3 |
4 | GTEST_LIB=$(GTEST_PATH)/lib/
5 | GTEST_INC=$(GTEST_PATH)/include/
6 |
7 | tests/cpp/% : tests/cpp/%.cc lib/libmxnet.a
8 | $(CXX) -std=c++0x $(CFLAGS) -MM -MT tests/cpp/$* $< >tests/cpp/$*.d
9 | $(CXX) -std=c++0x $(CFLAGS) -I$(GTEST_INC) -o $@ $(filter %.cc %.a, $^) $(LDFLAGS) -L$(GTEST_LIB) -lgtest
10 |
11 | -include tests/cpp/*.d
12 |
--------------------------------------------------------------------------------
/R-package/man/mx.init.normal.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/initializer.R
3 | \name{mx.init.normal}
4 | \alias{mx.init.normal}
5 | \title{Create an initializer that initializes the weight with normal(0, sd)}
6 | \usage{
7 | mx.init.normal(sd)
8 | }
9 | \arguments{
10 | \item{sd}{The standard deviation of normal distribution}
11 | }
12 | \description{
13 | Create an initializer that initializes the weight with normal(0, sd)
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.dot.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.dot}
4 | \alias{mx.nd.dot}
5 | \title{Calculate 2D matrix multiplication}
6 | \arguments{
7 | \item{lhs}{NDArray
8 | Left operand to the function.}
9 |
10 | \item{rhs}{NDArray
11 | Right operand to the function.}
12 | }
13 | \value{
14 | out The result mx.ndarray
15 | }
16 | \description{
17 | Calculate 2D matrix multiplication
18 | }
19 |
20 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.Group.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/symbol.R
3 | \name{mx.symbol.Group}
4 | \alias{mx.symbol.Group}
5 | \title{Create a symbol that groups symbols together.}
6 | \usage{
7 | mx.symbol.Group(...)
8 | }
9 | \arguments{
10 | \item{kwarg}{Variable number of symbols or a list of symbols.}
11 | }
12 | \value{
13 | The result symbol
14 | }
15 | \description{
16 | Create a symbol that groups symbols together.
17 | }
18 |
19 |
--------------------------------------------------------------------------------
/R-package/demo/basic_ndarray.R:
--------------------------------------------------------------------------------
1 | require(mxnet)
2 |
3 |
4 | x = 1:3
5 | mat = mx.nd.array(x)
6 |
7 |
8 | mat = mat + 1.0
9 | mat = mat + mat
10 | mat = mat - 5
11 | mat = 10 / mat
12 | mat = 7 * mat
13 | mat = 1 - mat + (2 * mat)/(mat + 0.5)
14 | as.array(mat)
15 |
16 | x = as.array(matrix(1:4, 2, 2))
17 |
18 | mx.ctx.default(mx.cpu(1))
19 | print(mx.ctx.default())
20 | print(is.mx.context(mx.cpu()))
21 | mat = mx.nd.array(x)
22 | mat = (mat * 3 + 5) / 10
23 | as.array(mat)
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/R-package/man/is.mx.ndarray.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{is.mx.ndarray}
4 | \alias{is.mx.ndarray}
5 | \title{Check if src.array is mx.ndarray}
6 | \usage{
7 | is.mx.ndarray(src.array)
8 | }
9 | \value{
10 | Logical indicator
11 | }
12 | \description{
13 | Check if src.array is mx.ndarray
14 | }
15 | \examples{
16 | mat = mx.nd.array(1:10)
17 | is.mx.ndarray(mat)
18 | mat2 = 1:10
19 | is.mx.ndarray(mat2)
20 |
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.metric.rmsle.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/metric.R
3 | \docType{data}
4 | \name{mx.metric.rmsle}
5 | \alias{mx.metric.rmsle}
6 | \title{RMSLE (Root Mean Squared Logarithmic Error) metric for regression}
7 | \format{An object of class \code{mx.metric} of length 3.}
8 | \usage{
9 | mx.metric.rmsle
10 | }
11 | \description{
12 | RMSLE (Root Mean Squared Logarithmic Error) metric for regression
13 | }
14 | \keyword{datasets}
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.model.load.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/model.R
3 | \name{mx.model.load}
4 | \alias{mx.model.load}
5 | \title{Load model checkpoint from file.}
6 | \usage{
7 | mx.model.load(prefix, iteration)
8 | }
9 | \arguments{
10 | \item{prefix}{string prefix of the model name}
11 |
12 | \item{iteration}{integer Iteration number of model we would like to load.}
13 | }
14 | \description{
15 | Load model checkpoint from file.
16 | }
17 |
18 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.infer.shape.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/symbol.R
3 | \name{mx.symbol.infer.shape}
4 | \alias{mx.symbol.infer.shape}
5 | \title{Infer the shape of arguments, outputs, and auxiliary states.}
6 | \usage{
7 | mx.symbol.infer.shape(symbol, ...)
8 | }
9 | \arguments{
10 | \item{symbol}{The \code{mx.symbol} object}
11 | }
12 | \description{
13 | Infer the shape of arguments, outputs, and auxiliary states.
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.init.uniform.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/initializer.R
3 | \name{mx.init.uniform}
4 | \alias{mx.init.uniform}
5 | \title{Create an initializer that initializes the weight with uniform [-scale, scale]}
6 | \usage{
7 | mx.init.uniform(scale)
8 | }
9 | \arguments{
10 | \item{scale}{The scale of uniform distribution}
11 | }
12 | \description{
13 | Create an initializer that initializes the weight with uniform [-scale, scale]
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.copyto.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{mx.nd.copyto}
4 | \alias{mx.nd.copyto}
5 | \title{Generate an mx.ndarray object on ctx, with data copied from src}
6 | \usage{
7 | mx.nd.copyto(src, ctx)
8 | }
9 | \arguments{
10 | \item{src}{The source mx.ndarray object.}
11 |
12 | \item{ctx}{The target context.}
13 | }
14 | \description{
15 | Generate an mx.ndarray object on ctx, with data copied from src
16 | }
17 |
18 |
--------------------------------------------------------------------------------
/docker/cpu/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 | MAINTAINER Mu Li
3 |
4 | # install the core library
5 | RUN apt-get update && apt-get install -y build-essential git libopenblas-dev libopencv-dev
6 | RUN git clone --recursive https://github.com/dmlc/mxnet/ && cd mxnet && \
7 | cp make/config.mk . && \
8 | echo "USE_BLAS=openblas" >>config.mk && \
9 | make -j$(nproc)
10 |
11 | # python package
12 | RUN apt-get install -y python-numpy wget unzip
13 | ENV PYTHONPATH /mxnet/python
14 |
--------------------------------------------------------------------------------
/plugin/torch/torch_module.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file torch_module.cu
4 | * \brief torch module op
5 | * \author Bing Xu
6 | */
7 | #include "./torch_module-inl.h"
8 | #include "../../src/operator/mshadow_op.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator *CreateOp<gpu>(TorchModuleParam param, TorchState* torchState) {
14 | return new TorchModuleOp<gpu>(param, torchState);
15 | }
16 |
17 | } // namespace op
18 | } // namespace mxnet
19 |
--------------------------------------------------------------------------------
/src/operator/block_grad.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file block_grad.cu
4 | * \brief
5 | * \author Bing Xu
6 | */
7 | #include "./block_grad-inl.h"
8 |
9 | namespace mxnet {
10 | namespace op {
11 | template<>
12 | Operator *CreateOp<gpu>(int dtype) {
13 | Operator *op = NULL;
14 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
15 | op = new BlockGradientOp<gpu, DType>();
16 | });
17 | return op;
18 | }
19 |
20 | } // namespace op
21 | } // namespace mxnet
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.callback.save.checkpoint.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/callback.R
3 | \name{mx.callback.save.checkpoint}
4 | \alias{mx.callback.save.checkpoint}
5 | \title{Save checkpoint to files each period iteration.}
6 | \usage{
7 | mx.callback.save.checkpoint(prefix, period = 1)
8 | }
9 | \arguments{
10 | \item{prefix}{The prefix of the model checkpoint.}
11 | }
12 | \description{
13 | Save checkpoint to files each period iteration.
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.max.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.max}
4 | \alias{mx.nd.max}
5 | \title{Take max of the src. The result will be an ndarray of shape (1,) on the same device.}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take max of the src. The result will be an ndarray of shape (1,) on the same device.
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.min.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.min}
4 | \alias{mx.nd.min}
5 | \title{Take min of the src. The result will be an ndarray of shape (1,) on the same device.}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take min of the src. The result will be an ndarray of shape (1,) on the same device.
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.sum.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.sum}
4 | \alias{mx.nd.sum}
5 | \title{Take sum of the src. The result will be an ndarray of shape (1,) on the same device.}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take sum of the src. The result will be an ndarray of shape (1,) on the same device.
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.Flatten.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.Flatten}
4 | \alias{mx.symbol.Flatten}
5 | \title{Flatten input}
6 | \usage{
7 | mx.symbol.Flatten(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to flatten.}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Flatten input
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/plugin/caffe/caffe_stream.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2016 by Contributors
3 | * \file caffe_stream.cc
4 | * \brief define stream opertors >> and <<
5 | * \author Haoran Wang
6 | */
7 | #include "caffe_stream.h"
8 |
9 | namespace dmlc {
10 | namespace parameter {
11 | std::istringstream &operator>>(std::istringstream &is, ::caffe::LayerParameter &para_) {
12 | return is;
13 | }
14 | std::ostream &operator<<(std::ostream &os, ::caffe::LayerParameter &para_) {
15 | return os;
16 | }
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/src/operator/concat.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file concat.cu
4 | * \brief
5 | * \author Bing Xu
6 | */
7 |
8 | #include "./concat-inl.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator* CreateOp<gpu>(ConcatParam param, int dtype) {
14 | Operator *op = NULL;
15 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
16 | op = new ConcatOp<gpu, DType>(param);
17 | });
18 | return op;
19 | }
20 |
21 | } // namespace op
22 | } // namespace mxnet
23 |
24 |
--------------------------------------------------------------------------------
/src/operator/cudnn_batch_norm.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file cudnn_batch_norm.cu
4 | * \brief
5 | * \author Junyuan Xie
6 | */
7 |
8 | #include "./cudnn_batch_norm-inl.h"
9 | #include <vector>
10 |
11 | namespace mxnet {
12 | namespace op {
13 | #if CUDNN_MAJOR == 4
14 | template<>
15 | Operator *CreateOp_CuDNNv4<gpu>(BatchNormParam param) {
16 | return new CuDNNBatchNormOp(param);
17 | }
18 | #endif // CUDNN_MAJOR == 4
19 | } // namespace op
20 | } // namespace mxnet
21 |
22 |
--------------------------------------------------------------------------------
/tests/python/README.md:
--------------------------------------------------------------------------------
1 | Python Test Case
2 | ================
3 | This folder contains test cases for mxnet in python.
4 |
5 | * [common](common) contains common utils for all test modules.
6 | - From subfolders, import with ```from ..common import get_data```
7 | * [unittest](unittest) contains unit test components for each module.
8 | - These are basic tests that must pass for every commit.
9 | * [train](train) contains tests that run real network training.
10 | - These tests can be time consuming.
11 |
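For illustration, a test file under `unittest/` is typically just plain functions that the test runner picks up; a minimal hypothetical example (`test_example.py` is not an existing file here):

```python
import numpy as np
import mxnet as mx

def test_ndarray_elementwise_add():
    a = mx.nd.ones((2, 3))
    b = a + 2
    assert np.allclose(b.asnumpy(), 3 * np.ones((2, 3)))

if __name__ == '__main__':
    test_ndarray_elementwise_add()
```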
--------------------------------------------------------------------------------
/R-package/man/mx.ctx.default.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/context.R
3 | \name{mx.ctx.default}
4 | \alias{mx.ctx.default}
5 | \title{Set/Get default context for array creation.}
6 | \usage{
7 | mx.ctx.default(new = NULL)
8 | }
9 | \arguments{
10 | \item{new}{optional, takes \code{mx.cpu()} or \code{mx.gpu(id)}, the new default ctx.}
11 | }
12 | \value{
13 | The default context.
14 | }
15 | \description{
16 | Set/Get default context for array creation.
17 | }
18 |
19 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.norm.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.norm}
4 | \alias{mx.nd.norm}
5 | \title{Take L2 norm of the src. The result will be an ndarray of shape (1,) on the same device.}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take L2 norm of the src. The result will be an ndarray of shape (1,) on the same device.
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/R-package/man/mxnet.export.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/util.R
3 | \name{mxnet.export}
4 | \alias{mxnet.export}
5 | \title{Internal function to generate mxnet_generated.R
6 | Users do not need to call this function.}
7 | \usage{
8 | mxnet.export(path)
9 | }
10 | \arguments{
11 | \item{path}{The path to the root of the package.}
12 | }
13 | \description{
14 | Internal function to generate mxnet_generated.R
15 | Users do not need to call this function.
16 | }
17 |
18 |
--------------------------------------------------------------------------------
/src/operator/dropout.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file dropout.cu
4 | * \brief
5 | * \author Bing Xu
6 | */
7 |
8 | #include "./dropout-inl.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator *CreateOp<gpu>(DropoutParam param, int dtype) {
14 | Operator *op = NULL;
15 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
16 | op = new DropoutOp<gpu, DType>(param);
17 | });
18 | return op;
19 | }
20 | } // namespace op
21 | } // namespace mxnet
22 |
23 |
24 |
--------------------------------------------------------------------------------
/src/operator/embedding.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file embedding.cu
4 | * \brief
5 | * \author Bing Xu
6 | */
7 |
8 | #include "./embedding-inl.h"
9 | namespace mxnet {
10 | namespace op {
11 | template<>
12 | Operator* CreateOp<gpu>(EmbeddingParam param, int dtype) {
13 | Operator *op = NULL;
14 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
15 | op = new EmbeddingOp<gpu, DType>(param);
16 | });
17 | return op;
18 | }
19 | } // namespace op
20 | } // namespace mxnet
21 |
22 |
--------------------------------------------------------------------------------
/src/operator/reshape.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file reshape.cu
4 | * \brief
5 | * \author Bing Xu
6 | */
7 |
8 | #include "./reshape-inl.h"
9 |
10 |
11 | namespace mxnet {
12 | namespace op {
13 | template<>
14 | Operator *CreateOp<gpu>(ReshapeParam param, int dtype) {
15 | Operator *op = NULL;
16 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
17 | op = new ReshapeOp<gpu, DType>(param);
18 | });
19 | return op;
20 | }
21 |
22 | } // namespace op
23 | } // namespace mxnet
24 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.clip.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.clip}
4 | \alias{mx.nd.clip}
5 | \title{Clip ndarray elements to range (a_min, a_max)}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input}
9 |
10 | \item{a.min}{real_t
11 | Minimum value}
12 |
13 | \item{a.max}{real_t
14 | Maximum value}
15 | }
16 | \value{
17 | out The result mx.ndarray
18 | }
19 | \description{
20 | Clip ndarray elements to range (a_min, a_max)
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.cos.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.cos}
4 | \alias{mx.symbol.cos}
5 | \title{Take cos of the src}
6 | \usage{
7 | mx.symbol.cos(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take cos of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.exp.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.exp}
4 | \alias{mx.symbol.exp}
5 | \title{Take exp of the src}
6 | \usage{
7 | mx.symbol.exp(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take exp of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.log.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.log}
4 | \alias{mx.symbol.log}
5 | \title{Take log of the src}
6 | \usage{
7 | mx.symbol.log(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take log of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.sin.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.sin}
4 | \alias{mx.symbol.sin}
5 | \title{Take sin of the src}
6 | \usage{
7 | mx.symbol.sin(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take sin of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.opt.sgd.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/optimizer.R
3 | \name{mx.opt.sgd}
4 | \alias{mx.opt.sgd}
5 | \title{Create an SGD optimizer with respective parameters.
6 | Perform SGD with momentum update}
7 | \usage{
8 | mx.opt.sgd(learning.rate, momentum = 0, wd = 0, rescale.grad = 1,
9 | clip_gradient = NULL, lr_scheduler = NULL)
10 | }
11 | \description{
12 | Create an SGD optimizer with respective parameters.
13 | Perform SGD with momentum update
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.sqrt.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.sqrt}
4 | \alias{mx.symbol.sqrt}
5 | \title{Take sqrt of the src}
6 | \usage{
7 | mx.symbol.sqrt(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take sqrt of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/src/operator/broadcast_reduce_op.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file broadcast_reduce_op.cc
4 | * \brief CPU Implementation of broadcast reduce op
5 | */
6 | // this will be invoked by gcc and compile CPU version
7 | #include "./broadcast_reduce_op-inl.h"
8 | namespace mxnet {
9 | namespace op {
10 |
11 | DMLC_REGISTER_PARAMETER(ReduceAxisParam);
12 | DMLC_REGISTER_PARAMETER(BroadcastAxisParam);
13 | DMLC_REGISTER_PARAMETER(BroadcastToParam);
14 |
15 | } // namespace op
16 | } // namespace mxnet
17 |
--------------------------------------------------------------------------------
/src/operator/fully_connected.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file fully_connected.cu
4 | * \brief fully connect operator
5 | */
6 | #include "./fully_connected-inl.h"
7 | namespace mxnet {
8 | namespace op {
9 | template<>
10 | Operator* CreateOp<gpu>(FullyConnectedParam param, int dtype) {
11 | Operator *op = NULL;
12 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
13 | op = new FullyConnectedOp<gpu, DType>(param);
14 | })
15 | return op;
16 | }
17 | } // namespace op
18 | } // namespace mxnet
19 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.rsqrt.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.rsqrt}
4 | \alias{mx.symbol.rsqrt}
5 | \title{Take rsqrt of the src}
6 | \usage{
7 | mx.symbol.rsqrt(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take rsqrt of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/src/operator/elementwise_sum.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file elementwise_sum.cu
4 | * \brief elementwise sum operator
5 | */
6 | #include "./elementwise_sum-inl.h"
7 | namespace mxnet {
8 | namespace op {
9 | template<>
10 | Operator* CreateOp<gpu>(ElementWiseSumParam param, int dtype) {
11 | Operator *op = NULL;
12 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
13 | op = new ElementWiseSumOp<gpu, DType>(param);
14 | });
15 | return op;
16 | }
17 | } // namespace op
18 | } // namespace mxnet
19 |
--------------------------------------------------------------------------------
/src/operator/swapaxis.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file swapaxis.cu
4 | * \brief
5 | * \author Ming Zhang
6 | */
7 |
8 | #include "./swapaxis-inl.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 |
13 | template<>
14 | Operator *CreateOp<gpu>(SwapAxisParam param, int dtype) {
15 | Operator *op = NULL;
16 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
17 | op = new SwapAxisOp<gpu, DType>(param);
18 | });
19 | return op;
20 | }
21 |
22 | } // namespace op
23 | } // namespace mxnet
24 |
25 |
--------------------------------------------------------------------------------
/R-package/man/mx.exec.update.arg.arrays.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/executor.R
3 | \name{mx.exec.update.arg.arrays}
4 | \alias{mx.exec.update.arg.arrays}
5 | \title{Update the executors with new arrays
6 | This function will MUTATE the state of exec}
7 | \usage{
8 | mx.exec.update.arg.arrays(exec, arg.arrays, match.name = FALSE,
9 | skip.null = FALSE)
10 | }
11 | \description{
12 | Update the executors with new arrays
13 | This function will MUTATE the state of exec
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.exec.update.aux.arrays.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/executor.R
3 | \name{mx.exec.update.aux.arrays}
4 | \alias{mx.exec.update.aux.arrays}
5 | \title{Update the executors with new arrays
6 | This function will MUTATE the state of exec}
7 | \usage{
8 | mx.exec.update.aux.arrays(exec, arg.arrays, match.name = FALSE,
9 | skip.null = FALSE)
10 | }
11 | \description{
12 | Update the executors with new arrays
13 | This function will MUTATE the state of exec
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.exec.update.grad.arrays.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/executor.R
3 | \name{mx.exec.update.grad.arrays}
4 | \alias{mx.exec.update.grad.arrays}
5 | \title{Update the executors with new arrays
6 | This function will MUTATE the state of exec}
7 | \usage{
8 | mx.exec.update.grad.arrays(exec, arg.arrays, match.name = FALSE,
9 | skip.null = FALSE)
10 | }
11 | \description{
12 | Update the executors with new arrays
13 | This function will MUTATE the state of exec
14 | }
15 |
16 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.abs.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.abs}
4 | \alias{mx.symbol.abs}
5 | \title{Take absolute value of the src}
6 | \usage{
7 | mx.symbol.abs(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take absolute value of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.ceil.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.ceil}
4 | \alias{mx.symbol.ceil}
5 | \title{Take ceil value of the src}
6 | \usage{
7 | mx.symbol.ceil(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take ceil value of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.sign.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.sign}
4 | \alias{mx.symbol.sign}
5 | \title{Take sign value of the src}
6 | \usage{
7 | mx.symbol.sign(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take sign value of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.square.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.square}
4 | \alias{mx.symbol.square}
5 | \title{Take square of the src}
6 | \usage{
7 | mx.symbol.square(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take square of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.floor.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.floor}
4 | \alias{mx.symbol.floor}
5 | \title{Take floor value of the src}
6 | \usage{
7 | mx.symbol.floor(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take floor value of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.load.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/symbol.R
3 | \name{mx.symbol.load}
4 | \alias{mx.symbol.load}
5 | \title{Load an mx.symbol object}
6 | \usage{
7 | mx.symbol.load(filename)
8 | }
9 | \arguments{
10 | \item{filename}{the filename (including the path)}
11 | }
12 | \description{
13 | Load an mx.symbol object
14 | }
15 | \examples{
16 | data = mx.symbol.Variable('data')
17 | mx.symbol.save(data, 'temp.symbol')
18 | data2 = mx.symbol.load('temp.symbol')
19 |
20 | }
21 |
22 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.round.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.round}
4 | \alias{mx.symbol.round}
5 | \title{Take round value of the src}
6 | \usage{
7 | mx.symbol.round(...)
8 | }
9 | \arguments{
10 | \item{src}{Symbol
11 | Source symbolic input to the function}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Take round value of the src
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/src/operator/softmax_output.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file softmax_output.cu
4 | * \brief
5 | * \author Bing Xu
6 | */
7 |
8 | #include "./softmax_output-inl.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator *CreateOp<gpu>(SoftmaxOutputParam param, int dtype) {
14 | Operator *op = NULL;
15 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
16 | op = new SoftmaxOutputOp<gpu, DType>(param);
17 | })
18 | return op;
19 | }
20 |
21 | } // namespace op
22 | } // namespace mxnet
23 |
24 |
--------------------------------------------------------------------------------
/example/rcnn/helper/processing/bbox_process.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def unique_boxes(boxes, scale=1.0):
5 | """ return indices of unique boxes """
6 | v = np.array([1, 1e3, 1e6, 1e9])
7 | hashes = np.round(boxes * scale).dot(v)
8 | _, index = np.unique(hashes, return_index=True)
9 | return np.sort(index)
10 |
11 |
12 | def filter_small_boxes(boxes, min_size):
13 | w = boxes[:, 2] - boxes[:, 0]
14 | h = boxes[:, 3] - boxes[:, 1]
15 | keep = np.where((w >= min_size) & (h >= min_size))[0]
16 | return keep
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.model.save.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/model.R
3 | \name{mx.model.save}
4 | \alias{mx.model.save}
5 | \title{Save model checkpoint into file.}
6 | \usage{
7 | mx.model.save(model, prefix, iteration)
8 | }
9 | \arguments{
10 | \item{model}{The feedforward model to be saved.}
11 |
12 | \item{prefix}{string prefix of the model name}
13 |
14 | \item{iteration}{integer Iteration number of the model we would like to save.}
15 | }
16 | \description{
17 | Save model checkpoint into file.
18 | }
19 |
20 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.load.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{mx.nd.load}
4 | \alias{mx.nd.load}
5 | \title{Load an mx.nd.array object on disk}
6 | \usage{
7 | mx.nd.load(filename)
8 | }
9 | \arguments{
10 | \item{filename}{the filename (including the path)}
11 | }
12 | \description{
13 | Load an mx.nd.array object on disk
14 | }
15 | \examples{
16 | mat = mx.nd.array(1:3)
17 | mx.nd.save(mat, 'temp.mat')
18 | mat2 = mx.nd.load('temp.mat')
19 | as.array(mat)
20 | as.array(mat2)
21 |
22 | }
23 |
24 |
--------------------------------------------------------------------------------
/example/autoencoder/data.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | from sklearn.datasets import fetch_mldata
4 |
5 | def get_mnist():
6 | np.random.seed(1234) # set seed for deterministic ordering
7 | data_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
8 | data_path = os.path.join(data_path, '../../data')
9 | mnist = fetch_mldata('MNIST original', data_home=data_path)
10 | p = np.random.permutation(mnist.data.shape[0])
11 | X = mnist.data[p].astype(np.float32)*0.02
12 | Y = mnist.target[p]
13 | return X, Y
14 |
--------------------------------------------------------------------------------
/src/operator/sequence_last.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file sequence_last.cu
4 | * \brief
5 | * \author Sebastian Bodenstein
6 | */
7 |
8 | #include "./sequence_last-inl.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template <> Operator *CreateOp<gpu>(SequenceLastParam param, int dtype) {
13 | Operator *op = NULL;
14 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType,
15 | { op = new SequenceLastOp<gpu, DType>(param); })
16 | return op;
17 | }
18 |
19 | } // namespace op
20 | } // namespace mxnet
21 |
--------------------------------------------------------------------------------
/src/operator/sequence_reverse.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file sequence_reverse.cu
4 | * \brief
5 | * \author Sebastian Bodenstein
6 | */
7 |
8 | #include "./sequence_reverse-inl.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template <> Operator *CreateOp<gpu>(SequenceReverseParam param, int dtype) {
13 | Operator *op = NULL;
14 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
15 | op = new SequenceReverseOp<gpu, DType>(param);
16 | })
17 | return op;
18 | }
19 |
20 | } // namespace op
21 | } // namespace mxnet
22 |
--------------------------------------------------------------------------------
/R-package/man/mx.init.internal.default.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/initializer.R
3 | \name{mx.init.internal.default}
4 | \alias{mx.init.internal.default}
5 | \title{Internal default value initialization scheme.}
6 | \usage{
7 | mx.init.internal.default(name, shape, ctx, allow.unknown = FALSE)
8 | }
9 | \arguments{
10 | \item{name}{the name of the variable.}
11 |
12 | \item{shape}{the shape of the array to be generated.}
13 | }
14 | \description{
15 | Internal default value initialization scheme.
16 | }
17 |
18 |
--------------------------------------------------------------------------------
/src/c_api/c_api_error.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file c_api_error.cc
4 | * \brief C error handling
5 | */
6 | #include "./c_api_error.h"
7 | #include "../common/thread_local.h"
8 |
9 | struct ErrorEntry {
10 | std::string last_error;
11 | };
12 |
13 | typedef mxnet::common::ThreadLocalStore<ErrorEntry> MXAPIErrorStore;
14 |
15 | const char *MXGetLastError() {
16 | return MXAPIErrorStore::Get()->last_error.c_str();
17 | }
18 |
19 | void MXAPISetLastError(const char* msg) {
20 | MXAPIErrorStore::Get()->last_error = msg;
21 | }
22 |
--------------------------------------------------------------------------------
/plugin/caffe/caffe.mk:
--------------------------------------------------------------------------------
1 | CFLAGS += -I$(CAFFE_PATH)/include -I$(CAFFE_PATH)/build/src
2 | LDFLAGS += -lprotobuf -lboost_system -lboost_thread -lboost_filesystem -lgflags -lglog -L$(CAFFE_PATH)/build/lib -lcaffe
3 |
4 | ifeq ($(USE_CUDNN), 1)
5 | CFLAGS += -DUSE_CUDNN=1
6 | endif
7 |
8 | ifeq ($(USE_CUDA), 0)
9 | CFLAGS += -DCPU_ONLY=1
10 | endif
11 |
12 | CAFFE_SRC = $(wildcard plugin/caffe/*.cc)
13 | PLUGIN_OBJ += $(patsubst %.cc, build/%.o, $(CAFFE_SRC))
14 | CAFFE_CUSRC = $(wildcard plugin/caffe/*.cu)
15 | PLUGIN_CUOBJ += $(patsubst %.cu, build/%_gpu.o, $(CAFFE_CUSRC))
16 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | Contents
2 | --------
3 | These are used to generate the index used in search.
4 |
5 | - [Python Documents](api/python/index.md)
6 | - [R Documents](api/r/index.md)
7 | - [Julia Documents](api/julia/index.md)
8 | - [C++ Documents](api/c++/index.md)
9 | - [Scala Documents](api/scala/index.md)
10 | - [Howto Documents](how_to/index.md)
11 | - [Get Started Documents](get_started/index.md)
12 | - [System Documents](architecture/index.md)
13 | - [Tutorials](tutorials/index.md)
14 |
15 | # Chinese translation of docs
16 | - [Chinese translation of docs](index_zh.md)
17 |
--------------------------------------------------------------------------------
/scala-package/core/src/test/scala/ml/dmlc/mxnet/CheckUtils.scala:
--------------------------------------------------------------------------------
1 | package ml.dmlc.mxnet
2 |
3 | object CheckUtils {
4 | def reldiff(a: NDArray, b: NDArray): Float = {
5 | val diff = NDArray.sum(NDArray.abs(a - b)).toScalar
6 | val norm = NDArray.sum(NDArray.abs(a)).toScalar
7 | diff / norm
8 | }
9 |
10 | def reldiff(a: Array[Float], b: Array[Float]): Float = {
11 | val diff =
12 | (a zip b).map { case (aElem, bElem) => Math.abs(aElem - bElem) }.sum
13 | val norm: Float = a.reduce(Math.abs(_) + Math.abs(_))
14 | diff / norm
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.BlockGrad.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.BlockGrad}
4 | \alias{mx.symbol.BlockGrad}
5 | \title{Get output from a symbol and pass 0 gradient back}
6 | \usage{
7 | mx.symbol.BlockGrad(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data.}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Get output from a symbol and pass 0 gradient back
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/scala-package/spark/src/main/scala/ml/dmlc/mxnet/spark/io/LongLivingDataBatch.scala:
--------------------------------------------------------------------------------
1 | package ml.dmlc.mxnet.spark.io
2 |
3 | import ml.dmlc.mxnet.{NDArray, DataBatch}
4 |
5 | /**
6 | * Dispose only when 'disposeForce' called
7 | * @author Yizhi Liu
8 | */
9 | class LongLivingDataBatch(
10 | override val data: IndexedSeq[NDArray],
11 | override val label: IndexedSeq[NDArray],
12 | override val index: IndexedSeq[Long],
13 | override val pad: Int) extends DataBatch(data, label, index, pad) {
14 | override def dispose(): Unit = {}
15 | def disposeForce(): Unit = super.dispose()
16 | }
17 |
--------------------------------------------------------------------------------
/R-package/demo/basic_symbol.R:
--------------------------------------------------------------------------------
1 | require(mxnet)
2 |
3 | data = mx.symbol.Variable('data')
4 | net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
5 | net1 = mx.symbol.FullyConnected(data=net1, name='fc2', num_hidden=100)
6 |
7 | all.equal(arguments(net1), c('data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias'))
8 |
9 | net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
10 | net2 = mx.symbol.Activation(data=net2, act_type='relu')
11 | net2 = mx.symbol.FullyConnected(data=net2, name='fc4', num_hidden=20)
12 |
13 | composed = mx.apply(net2, fc3_data=net1, name='composed')
14 |
--------------------------------------------------------------------------------
/scala-package/examples/scripts/neuralstyle_end2end/run_test_end2end.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd)
4 | CLASS_PATH=$MXNET_ROOT/scala-package/assembly/linux-x86_64-gpu/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
5 |
6 | INPUT_IMG=$1
7 | MODEL_DIR=$2
8 | OUTPUT_DIR=$3
9 | GPU=0
10 |
11 | java -Xmx1024m -cp $CLASS_PATH \
12 | ml.dmlc.mxnet.examples.neuralstyle.end2end.BoostInference \
13 | --model-path $MODEL_DIR \
14 | --input-image $INPUT_IMG \
15 | --output-path $OUTPUT_DIR \
16 | --gpu $GPU
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.save.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/symbol.R
3 | \name{mx.symbol.save}
4 | \alias{mx.symbol.save}
5 | \title{Save an mx.symbol object}
6 | \usage{
7 | mx.symbol.save(symbol, filename)
8 | }
9 | \arguments{
10 | \item{symbol}{the \code{mx.symbol} object}
11 |
12 | \item{filename}{the filename (including the path)}
13 | }
14 | \description{
15 | Save an mx.symbol object
16 | }
17 | \examples{
18 | data = mx.symbol.Variable('data')
19 | mx.symbol.save(data, 'temp.symbol')
20 | data2 = mx.symbol.load('temp.symbol')
21 |
22 | }
23 |
24 |
--------------------------------------------------------------------------------
/src/operator/matrix_op.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file matrix_op.cc
4 | * \brief CPU Implementation of matrix operations
5 | */
6 | // this will be invoked by gcc and compile CPU version
7 | #include "./matrix_op-inl.h"
8 |
9 | namespace mxnet {
10 | namespace op {
11 | DMLC_REGISTER_PARAMETER(TransposeParam);
12 | DMLC_REGISTER_PARAMETER(ExpandDimParam);
13 | DMLC_REGISTER_PARAMETER(SimpleCropParam);
14 | DMLC_REGISTER_PARAMETER(SimpleCropAssignScalarParam);
15 | DMLC_REGISTER_PARAMETER(SliceParam);
16 | DMLC_REGISTER_PARAMETER(FlipParam);
17 | } // op
18 | } // mxnet
19 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.argmax.channel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.argmax.channel}
4 | \alias{mx.nd.argmax.channel}
5 | \title{Take argmax indices of each channel of the src. The result will be an ndarray of shape (num_channel,) on the same device.}
6 | \arguments{
7 | \item{src}{NDArray
8 | Source input to the function}
9 | }
10 | \value{
11 | out The result mx.ndarray
12 | }
13 | \description{
14 | Take argmax indices of each channel of the src. The result will be an ndarray of shape (num_channel,) on the same device.
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/tests/nightly/download.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | dmlc_download() {
4 | url=http://data.dmlc.ml/mxnet/datasets/
5 | dir=$1
6 | file=$2
7 | if [ ! -e data/${dir}/$file ]; then
8 | wget ${url}/${dir}/${file} -P data/${dir}/ || exit -1
9 | else
10 | echo "data/${dir}/$file already exists"
11 | fi
12 | }
13 |
14 | dmlc_download mnist t10k-images-idx3-ubyte
15 | dmlc_download mnist t10k-labels-idx1-ubyte
16 | dmlc_download mnist train-images-idx3-ubyte
17 | dmlc_download mnist train-labels-idx1-ubyte
18 |
19 | dmlc_download cifar10 train.rec
20 | dmlc_download cifar10 test.rec
21 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.save.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{mx.nd.save}
4 | \alias{mx.nd.save}
5 | \title{Save an mx.nd.array object}
6 | \usage{
7 | mx.nd.save(ndarray, filename)
8 | }
9 | \arguments{
10 | \item{ndarray}{the \code{mx.nd.array} object}
11 |
12 | \item{filename}{the filename (including the path)}
13 | }
14 | \description{
15 | Save an mx.nd.array object
16 | }
17 | \examples{
18 | mat = mx.nd.array(1:3)
19 | mx.nd.save(mat, 'temp.mat')
20 | mat2 = mx.nd.load('temp.mat')
21 | as.array(mat)
22 | as.array(mat2[[1]])
23 |
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/plugin/caffe/caffe_stream.h:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2016 by Contributors
3 | * \file caffe_stream.h
4 | * \brief define stream operators >> and <<
5 | * \author Haoran Wang
6 | */
7 | #ifndef PLUGIN_CAFFE_CAFFE_STREAM_H_
8 | #define PLUGIN_CAFFE_CAFFE_STREAM_H_
9 |
10 | #include
11 | #include
12 | namespace dmlc {
13 | namespace parameter {
14 | std::istringstream &operator>>(std::istringstream &is, ::caffe::LayerParameter &para_);
15 | std::ostream &operator<<(std::ostream &os, ::caffe::LayerParameter &para_);
16 | }
17 | }
18 |
19 | #endif // PLUGIN_CAFFE_CAFFE_STREAM_H_
20 |
--------------------------------------------------------------------------------
/tests/python/gpu/test_rtc.py:
--------------------------------------------------------------------------------
1 | # pylint: skip-file
2 | import mxnet as mx
3 | import numpy as np
4 | from numpy.testing import assert_allclose
5 |
6 | if __name__ == '__main__':
7 | x = mx.nd.zeros((10,), ctx=mx.gpu(0))
8 | x[:] = 1
9 | y = mx.nd.zeros((10,), ctx=mx.gpu(0))
10 | y[:] = 2
11 | rtc = mx.rtc('abc', [('x', x)], [('y', y)], """
12 | __shared__ float s_rec[10];
13 | s_rec[threadIdx.x] = x[threadIdx.x];
14 | y[threadIdx.x] = expf(s_rec[threadIdx.x]*5.0);""")
15 | rtc.push([x], [y], (1, 1, 1), (10,1,1))
16 | assert_allclose(y.asnumpy(), np.exp(x.asnumpy()*5.0))
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2015-2016 by Contributors
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/R-package/man/mx.opt.get.updater.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/optimizer.R
3 | \name{mx.opt.get.updater}
4 | \alias{mx.opt.get.updater}
5 | \title{Get an updater closure that can take list of weight and gradient
6 | and return updated list of weight.}
7 | \usage{
8 | mx.opt.get.updater(optimizer, weights)
9 | }
10 | \arguments{
11 | \item{optimizer}{The optimizer}
12 |
13 | \item{weights}{The weights to be optimized}
14 | }
15 | \description{
16 | Get an updater closure that can take list of weight and gradient
17 | and return updated list of weight.
18 | }
19 |
20 |
--------------------------------------------------------------------------------
/scala-package/examples/scripts/run_multitask.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | MXNET_ROOT=$(cd "$(dirname $0)/../../.."; pwd)
4 | CLASS_PATH=$MXNET_ROOT/scala-package/assembly/linux-x86_64-gpu/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
5 |
6 | # which gpu card to use, -1 means cpu
7 | GPU=$1
8 |
9 | # the mnist data path
10 | # you can get the mnist data using the script core/scripts/get_mnist_data.sh
11 | DATA_PATH=$2
12 |
13 | java -Xmx4G -cp $CLASS_PATH \
14 | ml.dmlc.mxnet.examples.multitask.ExampleMultiTask \
15 | --data-path $DATA_PATH \
16 | --gpu $GPU \
17 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.ElementWiseSum.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.ElementWiseSum}
4 | \alias{mx.symbol.ElementWiseSum}
5 | \title{Perform an elementwise sum over all the inputs.}
6 | \usage{
7 | mx.symbol.ElementWiseSum(...)
8 | }
9 | \arguments{
10 | \item{num.args}{int, required
11 | Number of inputs to be summed.}
12 |
13 | \item{name}{string, optional
14 | Name of the resulting symbol.}
15 | }
16 | \value{
17 | out The result mx.symbol
18 | }
19 | \description{
20 | Perform an elementwise sum over all the inputs.
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.io.arrayiter.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/io.R
3 | \name{mx.io.arrayiter}
4 | \alias{mx.io.arrayiter}
5 | \title{Create MXDataIter compatible iterator from R's array}
6 | \usage{
7 | mx.io.arrayiter(data, label, batch.size = 128, shuffle = FALSE)
8 | }
9 | \arguments{
10 | \item{data}{The data array.}
11 |
12 | \item{label}{The label array.}
13 |
14 | \item{batch.size}{The batch size used to pack the array.}
15 |
16 | \item{shuffle}{Whether to shuffle the data}
17 | }
18 | \description{
19 | Create MXDataIter compatible iterator from R's array
20 | }
21 |
22 |
--------------------------------------------------------------------------------
/docs/api/julia/index.md:
--------------------------------------------------------------------------------
1 | MXNet - Julia API
2 | =================
3 | MXNet supports the Julia programming language. The MXNet Julia package brings flexible and efficient GPU
4 | computing and state-of-the-art deep learning to Julia.
5 |
6 | - It enables you to write seamless tensor/matrix computation with multiple GPUs in Julia.
7 | - It also enables you to construct and customize state-of-the-art deep learning models in Julia,
8 | and apply them to tasks such as image classification and data science challenges.
9 |
10 |
11 |
12 |
13 | Julia documents are available at [http://dmlc.ml/MXNet.jl/latest/](http://dmlc.ml/MXNet.jl/latest/).
14 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.Dropout.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.Dropout}
4 | \alias{mx.symbol.Dropout}
5 | \title{Apply dropout to input}
6 | \usage{
7 | mx.symbol.Dropout(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to dropout.}
12 |
13 | \item{p}{float, optional, default=0.5
14 | Fraction of the input that gets dropped out at training time}
15 |
16 | \item{name}{string, optional
17 | Name of the resulting symbol.}
18 | }
19 | \value{
20 | out The result mx.symbol
21 | }
22 | \description{
23 | Apply dropout to input
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/src/operator/cast.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file cast.cu
4 | * \brief
5 | * \author Junyuan Xie
6 | */
7 | #include
8 |
9 | #include "./cast-inl.h"
10 | #include "./mshadow_op.h"
11 |
12 | namespace mxnet {
13 | namespace op {
14 | template<>
15 | Operator *CreateOp<gpu>(CastParam param, std::vector<int> *in_type) {
16 | Operator *op = NULL;
17 | MSHADOW_TYPE_SWITCH((*in_type)[0], SrcDType, {
18 | MSHADOW_TYPE_SWITCH(param.dtype, DstDType, {
19 | op = new CastOp();
20 | })
21 | })
22 | return op;
23 | }
24 | } // namespace op
25 | } // namespace mxnet
26 |
27 |
--------------------------------------------------------------------------------
/src/io/io.cc:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2015 by Contributors
2 |
3 | #include
4 | #include
5 | #include "./image_augmenter.h"
6 | #include "./iter_normalize.h"
7 | #include "./iter_batchloader.h"
8 | #include "./iter_prefetcher.h"
9 |
10 | // Registers
11 | namespace dmlc {
12 | DMLC_REGISTRY_ENABLE(::mxnet::DataIteratorReg);
13 | } // namespace dmlc
14 |
15 | namespace mxnet {
16 | namespace io {
17 | // Register parameters in header files
18 | DMLC_REGISTER_PARAMETER(BatchParam);
19 | DMLC_REGISTER_PARAMETER(PrefetcherParam);
20 | DMLC_REGISTER_PARAMETER(ImageNormalizeParam);
21 | } // namespace io
22 | } // namespace mxnet
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.Cast.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.Cast}
4 | \alias{mx.symbol.Cast}
5 | \title{Cast array to a different data type.}
6 | \usage{
7 | mx.symbol.Cast(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to cast function.}
12 |
13 | \item{dtype}{{'float16', 'float32', 'float64', 'int32', 'uint8'}, required
14 | Target data type.}
15 |
16 | \item{name}{string, optional
17 | Name of the resulting symbol.}
18 | }
19 | \value{
20 | out The result mx.symbol
21 | }
22 | \description{
23 | Cast array to a different data type.
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/example/image-classification/symbol_mlp.R:
--------------------------------------------------------------------------------
1 | library(mxnet)
2 |
3 | get_symbol <- function(num_classes = 1000) {
4 | data <- mx.symbol.Variable('data')
5 | fc1 <- mx.symbol.FullyConnected(data = data, name = 'fc1', num_hidden = 128)
6 | act1 <- mx.symbol.Activation(data = fc1, name = 'relu1', act_type = "relu")
7 | fc2 <- mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
8 | act2 <- mx.symbol.Activation(data = fc2, name = 'relu2', act_type = "relu")
9 | fc3 <- mx.symbol.FullyConnected(data = act2, name = 'fc3', num_hidden = num_classes)
10 | mlp <- mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
11 | return(mlp)
12 | }
13 |
--------------------------------------------------------------------------------
/R-package/man/mxnet.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/zzz.R
3 | \docType{package}
4 | \name{mxnet}
5 | \alias{mxnet}
6 | \alias{mxnet-package}
7 | \title{MXNet: Flexible and Efficient GPU computing and Deep Learning.}
8 | \description{
9 | MXNet is a flexible and efficient GPU computing and deep learning framework.
10 | }
11 | \details{
12 | It enables you to write seamless tensor/matrix computation with multiple GPUs in R.
13 |
14 | It also enables you to construct and customize state-of-the-art deep learning models in R,
15 | and apply them to tasks such as image classification and data science challenges.
16 | }
17 |
18 |
--------------------------------------------------------------------------------
/example/svm_mnist/README.md:
--------------------------------------------------------------------------------
1 | # Use case with Support Vector Machine
2 |
3 | To verify that the implementation not only learns, but can actually outperform the softmax, as [this article](https://arxiv.org/pdf/1306.0239.pdf) suggests, I ran the svm_mnist.py script. It is based on the MNIST experiment described in the article and on [this tutorial](https://github.com/dmlc/mxnet-gtc-tutorial/blob/master/tutorial.ipynb).
4 |
5 |
6 | ## To run this you will need
7 |
8 | * [Numpy](http://www.scipy.org/scipylib/download.html)
9 | * [Sklearn](http://scikit-learn.org/stable/install.html)
10 |
11 | I recommend installing [matplotlib](http://matplotlib.org/users/installing.html) to visualize the examples.
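
Below is a minimal sketch of the idea behind the script: an MLP whose usual `SoftmaxOutput` head is replaced by `mx.symbol.SVMOutput`, so the last layer is trained with a hinge loss. The layer sizes and the `margin`/`regularization_coefficient` values are illustrative assumptions, not a copy of svm_mnist.py.

```python
import mxnet as mx

# A small MLP with an SVM (hinge-loss) output layer instead of softmax.
# Layer sizes and SVMOutput arguments are illustrative assumptions.
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=512)
act1 = mx.symbol.Activation(data=fc1, name='relu1', act_type='relu')
fc2 = mx.symbol.FullyConnected(data=act1, name='fc2', num_hidden=10)
# SVMOutput applies a hinge loss to the 10 class scores (L2-SVM by default).
mlp_svm = mx.symbol.SVMOutput(data=fc2, name='svm',
                              margin=1.0, regularization_coefficient=1.0)
```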
--------------------------------------------------------------------------------
/R-package/man/mx.init.create.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/initializer.R
3 | \name{mx.init.create}
4 | \alias{mx.init.create}
5 | \title{Create initialization of argument like arg.array}
6 | \usage{
7 | mx.init.create(initializer, shape.array, ctx, skip.unknown = TRUE)
8 | }
9 | \arguments{
10 | \item{initializer}{The initializer.}
11 |
12 | \item{shape.array}{named-list The shape of the weights}
13 |
14 | \item{ctx}{mx.context The context of the weights}
15 |
16 | \item{skip.unknown}{Whether skip the unknown weight types}
17 | }
18 | \description{
19 | Create initialization of argument like arg.array
20 | }
21 |
22 |
--------------------------------------------------------------------------------
/example/rnn/get_ptb_data.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | RNN_DIR=$(cd `dirname $0`; pwd)
4 | DATA_DIR="${RNN_DIR}/data/"
5 |
6 | if [[ ! -d "${DATA_DIR}" ]]; then
7 | echo "${DATA_DIR} doesn't exist, will create one";
8 | mkdir -p ${DATA_DIR}
9 | fi
10 |
11 | wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/ptb/ptb.train.txt;
12 | wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/ptb/ptb.valid.txt;
13 | wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/ptb/ptb.test.txt;
14 | wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/tinyshakespeare/input.txt;
15 |
--------------------------------------------------------------------------------
/scala-package/examples/scripts/run_neuralstyle.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | MXNET_ROOT=$(cd "$(dirname $0)/../../.."; pwd)
4 | CLASS_PATH=$MXNET_ROOT/scala-package/assembly/linux-x86_64-gpu/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
5 | INPUT_IMG=$1
6 | STYLE_IMG=$2
7 | MODEL_PATH=$MXNET_ROOT/example/neural-style/model/vgg19.params
8 | OUTPUT_DIR=$MXNET_ROOT/example/neural-style/output
9 |
10 | java -Xmx1024m -cp $CLASS_PATH \
11 | ml.dmlc.mxnet.examples.neuralstyle.NeuralStyle \
12 | --content-image $INPUT_IMG \
13 | --style-image $STYLE_IMG \
14 | --model-path $MODEL_PATH \
15 | --output-dir $OUTPUT_DIR
16 |
--------------------------------------------------------------------------------
/src/operator/batch_norm.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file batch_norm.cu
4 | * \brief
5 | * \author Bing Xu
6 | */
7 |
8 | #include "./batch_norm-inl.h"
9 | #include "./cudnn_batch_norm-inl.h"
10 |
11 | namespace mxnet {
12 | namespace op {
13 | template<>
14 | Operator *CreateOp<gpu>(BatchNormParam param, int dtype) {
15 | #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
16 | if (!param.use_global_stats) {
17 | return new CuDNNBatchNormOp(param);
18 | } else {
19 | return new BatchNormOp(param);
20 | }
21 | #else
22 | return new BatchNormOp(param);
23 | #endif
24 | }
25 |
26 | } // namespace op
27 | } // namespace mxnet
28 |
29 |
--------------------------------------------------------------------------------
/src/operator/softmax_activation.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file softmax_activation.cu
4 | * \brief
5 | * \author Junyuan Xie
6 | */
7 | #include "./softmax_activation-inl.h"
8 | #include "./mshadow_op.h"
9 | #if MXNET_USE_CUDNN == 1
10 | #include "./cudnn_softmax_activation-inl.h"
11 | #endif
12 |
13 | namespace mxnet {
14 | namespace op {
15 | template<>
16 | Operator *CreateOp<gpu>(SoftmaxActivationParam param) {
17 | #if MXNET_USE_CUDNN == 1
18 | return new CuDNNSoftmaxActivationOp(param);
19 | #else
20 | return new SoftmaxActivationOp<gpu>(param);
21 | #endif // MXNET_USE_CUDNN
22 | }
23 | } // namespace op
24 | } // namespace mxnet
25 |
26 |
--------------------------------------------------------------------------------
/docs/api/r/Makefile:
--------------------------------------------------------------------------------
1 | # This is the makefile for compiling Rmarkdown files into the md file with results.
2 | PKGROOT=../../R-package
3 |
4 | # ADD The Markdown to be built here, with suffix md
5 | classifyRealImageWithPretrainedModel.md:
6 | mnistCompetition.md:
7 | ndarrayAndSymbolTutorial.md:
8 | fiveMinutesNeuralNetwork.md:
9 |
10 | # General Rules for build rmarkdowns, need knitr
11 | %.md: $(PKGROOT)/vignettes/%.Rmd
12 | rm -rf "../../web-data/mxnet/knitr/$(basename $@)-"*;
13 | Rscript -e \
14 | "require(knitr);"\
15 | "knitr::opts_knit\$$set(root.dir=\".\");"\
16 | "knitr::opts_chunk\$$set(fig.path=\"../../web-data/mxnet/knitr/$(basename $@)-\");"\
17 | "knitr::knit(\"$+\")"
18 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.SliceChannel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.SliceChannel}
4 | \alias{mx.symbol.SliceChannel}
5 | \title{Slice input equally along specified axis}
6 | \usage{
7 | mx.symbol.SliceChannel(...)
8 | }
9 | \arguments{
10 | \item{num.outputs}{int, required
11 | Number of outputs to be sliced.}
12 |
13 | \item{axis}{int, optional, default='1'
14 | Dimension along which to slice.}
15 |
16 | \item{name}{string, optional
17 | Name of the resulting symbol.}
18 | }
19 | \value{
20 | out The result mx.symbol
21 | }
22 | \description{
23 | Slice input equally along specified axis
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/example/model-parallel-lstm/get_ptb_data.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | RNN_DIR=$(cd `dirname $0`; pwd)
4 | DATA_DIR="${RNN_DIR}/data/"
5 |
6 | if [[ ! -d "${DATA_DIR}" ]]; then
7 | echo "${DATA_DIR} doesn't exist, will create one";
8 | mkdir -p ${DATA_DIR}
9 | fi
10 |
11 | wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/ptb/ptb.train.txt;
12 | wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/ptb/ptb.valid.txt;
13 | wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/ptb/ptb.test.txt;
14 | wget -P ${DATA_DIR} https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/tinyshakespeare/input.txt;
15 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.ones.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{mx.nd.ones}
4 | \alias{mx.nd.ones}
5 | \title{Generate an mx.ndarray object with ones}
6 | \usage{
7 | mx.nd.ones(shape, ctx = NULL)
8 | }
9 | \arguments{
10 | \item{shape}{the dimension of the \code{mx.ndarray}}
11 |
12 | \item{ctx}{optional The context device of the array. mx.ctx.default() will be used in default.}
13 | }
14 | \description{
15 | Generate an mx.ndarray object with ones
16 | }
17 | \examples{
18 | mat = mx.nd.ones(10)
19 | as.array(mat)
20 | mat2 = mx.nd.ones(c(5,5))
21 | as.array(mat2)
22 | mat3 = mx.nd.ones(c(3,3,3))
23 | as.array(mat3)
24 |
25 | }
26 |
27 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.Reshape.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.Reshape}
4 | \alias{mx.symbol.Reshape}
5 | \title{Reshape input to target shape}
6 | \usage{
7 | mx.symbol.Reshape(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to reshape.}
12 |
13 | \item{target.shape}{Shape(tuple), required
14 | Target new shape. One and only one dim can be 0, in which case it will be inferred from the rest of dims}
15 |
16 | \item{name}{string, optional
17 | Name of the resulting symbol.}
18 | }
19 | \value{
20 | out The result mx.symbol
21 | }
22 | \description{
23 | Reshape input to target shape
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/docs/api/r/index.md:
--------------------------------------------------------------------------------
1 | MXNet - R API
2 | =============
3 | MXNet supports the R programming language. The MXNet R package brings flexible and efficient GPU
4 | computing and state-of-the-art deep learning to R.
5 |
6 | - It enables you to write seamless tensor/matrix computation with multiple GPUs in R.
7 | - It also enables you to construct and customize state-of-the-art deep learning models in R,
8 | and apply them to tasks such as image classification and data science challenges.
9 |
10 | We are working on the MXNet R API documentation. In the meantime, you can refer to the resources below to get started.
11 |
12 | Resources
13 | =========
14 | * [MXNet for R Tutorials](http://mxnet.io/tutorials/index.html#R-Tutorials)
--------------------------------------------------------------------------------
/scala-package/spark/src/main/scala/ml/dmlc/mxnet/spark/MXNDArray.scala:
--------------------------------------------------------------------------------
1 | package ml.dmlc.mxnet.spark
2 |
3 | import ml.dmlc.mxnet.NDArray
4 |
5 | /**
6 | * A wrapper for serialize & deserialize [[ml.dmlc.mxnet.NDArray]] in spark job
7 | * @author Yizhi Liu
8 | */
9 | class MXNDArray(@transient private var ndArray: NDArray) extends Serializable {
10 | require(ndArray != null)
11 | private val arrayBytes: Array[Byte] = ndArray.serialize()
12 |
13 | def get: NDArray = {
14 | if (ndArray == null) {
15 | ndArray = NDArray.deserialize(arrayBytes)
16 | }
17 | ndArray
18 | }
19 | }
20 |
21 | object MXNDArray {
22 | def apply(ndArray: NDArray): MXNDArray = new MXNDArray(ndArray)
23 | }
24 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.zeros.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{mx.nd.zeros}
4 | \alias{mx.nd.zeros}
5 | \title{Generate an mx.nd.array object with zeros}
6 | \usage{
7 | mx.nd.zeros(shape, ctx = NULL)
8 | }
9 | \arguments{
10 | \item{shape}{the dimension of the \code{mx.nd.array}}
11 |
12 | \item{ctx}{optional The context device of the array. mx.ctx.default() will be used in default.}
13 | }
14 | \description{
15 | Generate an mx.nd.array object with zeros
16 | }
17 | \examples{
18 | mat = mx.nd.zeros(10)
19 | as.array(mat)
20 | mat2 = mx.nd.zeros(c(5,5))
21 | as.array(mat2)
22 | mat3 = mx.nd.zeros(c(3,3,3))
23 | as.array(mat3)
24 |
25 | }
26 |
27 |
--------------------------------------------------------------------------------
/tests/nightly/README.md:
--------------------------------------------------------------------------------
1 | # Nightly build for mxnet
2 |
3 | This folder contains scripts to test some heavy jobs, often training with multiple
4 | GPUs, to ensure everything is right. Normally it runs every day.
5 | The current build server is equipped with Intel i7-4790 and 4 Nvidia GTX
6 | 970 Tis. The build status is available at [ci.dmlc.ml](http://ci.dmlc.ml). We
7 | thank [Dave Andersen](http://www.cs.cmu.edu/~dga) for providing the build machine.
8 |
9 | ## How to use
10 |
11 | ### Run locally
12 |
13 | Run `tests/nightly/test_all.sh 4` if there are 4 GPUs.
14 |
15 | ### Run on Jenkins
16 |
17 | First merge the code into the master branch, then go to
18 | http://ci.dmlc.ml/job/mxnet/ and click **Build Now**.
19 |
--------------------------------------------------------------------------------
/example/multi-task/README.md:
--------------------------------------------------------------------------------
1 | # Multi-task learning example
2 |
3 | This is a simple example to show how to use mxnet for multi-task learning. It uses MNIST as an example and mocks up the multi-label task.
4 |
5 | ## Usage
6 | First, you need to write a multi-task iterator on your own. The iterator needs to generate multiple labels according to your application, and the label names should be specified in the `provide_label` function; they need to be consistent with the names of the output layers.
7 |
8 | Then, if you want to show metrics of different tasks separately, you need to write your own metric class and specify the `num` parameter. In the `update` function of the metric, calculate the metrics separately for each task.
9 |
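
For concreteness, here is a minimal sketch of such a per-task metric; the class name, the task count, and the `num` constructor argument follow older mxnet versions and are illustrative assumptions rather than the code shipped in this example.

```python
import numpy as np
import mxnet as mx

class MultiAccuracy(mx.metric.EvalMetric):
    """Accuracy reported separately for each task (illustrative sketch)."""
    def __init__(self, num_tasks):
        # `num` turns sum_metric/num_inst into per-task lists in older mxnet versions
        super(MultiAccuracy, self).__init__('multi-accuracy', num=num_tasks)

    def update(self, labels, preds):
        # one (label, prediction) pair per task, in the order given by provide_label
        for i, (label, pred) in enumerate(zip(labels, preds)):
            pred_label = np.argmax(pred.asnumpy(), axis=1)
            label = label.asnumpy().astype('int32')
            self.sum_metric[i] += (pred_label == label).sum()
            self.num_inst[i] += label.shape[0]
```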
--------------------------------------------------------------------------------
/src/optimizer/sgd.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file sgd.cu
4 | * \brief sgd optimizer
5 | */
6 | #include "./sgd-inl.h"
7 |
8 | namespace mxnet {
9 | namespace opt {
10 |
11 | void call_sgd_mom_update_gpu(RunContext ctx, TBlob weight, const TBlob grad, TBlob mom,
12 | float lr, float wd, const SGDParam& param) {
13 | sgd_mom_update<gpu>(ctx, weight, grad, mom, lr, wd, param);
14 | }
15 | void call_sgd_update_gpu(RunContext ctx, TBlob weight, const TBlob grad,
16 | float lr, float wd, const SGDParam& param) {
17 | sgd_update<gpu>(ctx, weight, grad, lr, wd, param);
18 | }
19 |
20 | } // namespace opt
21 | } // namespace mxnet
22 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.SwapAxis.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.SwapAxis}
4 | \alias{mx.symbol.SwapAxis}
5 | \title{Apply swapaxis to input.}
6 | \usage{
7 | mx.symbol.SwapAxis(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to the SwapAxisOp.}
12 |
13 | \item{dim1}{int (non-negative), optional, default=0
14 | the first axis to be swapped.}
15 |
16 | \item{dim2}{int (non-negative), optional, default=0
17 | the second axis to be swapped.}
18 |
19 | \item{name}{string, optional
20 | Name of the resulting symbol.}
21 | }
22 | \value{
23 | out The result mx.symbol
24 | }
25 | \description{
26 | Apply swapaxis to input.
27 | }
28 |
29 |
--------------------------------------------------------------------------------
/docs/api/scala/index.md:
--------------------------------------------------------------------------------
1 | MXNet - Scala API
2 | =================
3 | MXNet supports the Scala programming language. The MXNet Scala package brings flexible and efficient GPU
4 | computing and state-of-the-art deep learning to Scala.
5 |
6 | - It enables you to write seamless tensor/matrix computation with multiple GPUs in Scala.
7 | - It also enables you to construct and customize state-of-the-art deep learning models in Scala,
8 | and apply them to tasks such as image classification and data science challenges.
9 |
10 | We are working on the MXNet Scala API documentation. Stay tuned! In the meantime, you can refer to the resources below to get started.
11 |
12 | Resources
13 | =========
14 | * [MXNet Tutorials](http://mxnet.io/tutorials/index.html)
--------------------------------------------------------------------------------
/R-package/man/mx.init.Xavier.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/initializer.R
3 | \name{mx.init.Xavier}
4 | \alias{mx.init.Xavier}
5 | \title{Xavier initializer}
6 | \usage{
7 | mx.init.Xavier(rnd_type = "uniform", factor_type = "avg", magnitude = 3)
8 | }
9 | \arguments{
10 | \item{rnd_type}{A string of \code{character} indicating the type of
11 | distribution from which the weights are initialized.}
12 |
13 | \item{factor_type}{A string of \code{character}.}
14 |
15 | \item{magnitude}{A \code{numeric} number indicating the scale of random
16 | number range.}
17 | }
18 | \description{
19 | Create an initializer which initializes weights with the Xavier or a
20 | similar initialization scheme.
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/docs/_static/selectlang.js:
--------------------------------------------------------------------------------
1 | function changeLanguage(langSelect, langSelectLabel, rootpath){
2 | langSelect.change(function() {
3 | var lang = langSelect.val();
4 | if(lang == 'zh'){
5 | location.href = rootpath + 'zh/index.html';
6 | } else {
7 | location.href = rootpath + 'index.html';
8 | }
9 | });
10 | }
11 |
12 | $(document).ready(function () {
13 | var langSelect = $("#lang-select");
14 | var langSelectLabel = $("#lang-select-label > span");
15 | var currHref = location.href;
16 |
17 | if(/\/zh\//.test(currHref)){
18 | langSelect.val("zh");
19 | } else {
20 | langSelect.val("en");
21 | }
22 | langSelectLabel.text($("option:selected").text());
23 |
24 | changeLanguage(langSelect, langSelectLabel, getRootPath());
25 | })
--------------------------------------------------------------------------------
/src/operator/operator.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file operator.cc
4 | * \brief operator module of mxnet
5 | */
6 | #include
7 | #include
8 | #include
9 |
10 | namespace dmlc {
11 | DMLC_REGISTRY_ENABLE(::mxnet::OperatorPropertyReg);
12 | } // namespace dmlc
13 |
14 | namespace mxnet {
15 | // implementation of all factory functions
16 | OperatorProperty *OperatorProperty::Create(const char* type_name) {
17 | auto *creator = dmlc::Registry<OperatorPropertyReg>::Find(type_name);
18 | if (creator == nullptr) {
19 | LOG(FATAL) << "Cannot find Operator " << type_name << " in registry";
20 | }
21 | return creator->body();
22 | }
23 | } // namespace mxnet
24 |
--------------------------------------------------------------------------------
/example/rcnn/test/test_data_iter.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from helper.dataset import pascal_voc
4 | from helper.processing import roidb
5 | from rcnn import data_iter
6 |
7 | # test flip
8 | devkit_path = os.path.join(os.path.expanduser('~'), 'Dataset', 'VOCdevkit')
9 | voc = pascal_voc.PascalVOC('trainval', '2007', devkit_path)
10 | gt_roidb = voc.gt_roidb()
11 | ss_roidb = voc.selective_search_roidb(gt_roidb)
12 | ss_roidb = voc.append_flipped_images(ss_roidb)
13 | roidb.prepare_roidb(voc, ss_roidb)
14 | means, stds = roidb.add_bbox_regression_targets(ss_roidb)
15 |
16 | roi_iter = data_iter.ROIIter(ss_roidb, shuffle=True)
17 |
18 | for j in range(0, 20):
19 | print j
20 | for databatch in roi_iter:
21 | i = 0
22 | roi_iter.reset()
23 |
--------------------------------------------------------------------------------
/example/neural-style/end_to_end/README.md:
--------------------------------------------------------------------------------
1 | # End to End Neural Art
2 |
3 | This is an implementation of the blog post: [http://dmlc.ml/mxnet/2016/06/20/end-to-end-neural-style.html](http://dmlc.ml/mxnet/2016/06/20/end-to-end-neural-style.html)
4 |
5 |
6 | We will release multi-GPU training code soon.
7 |
8 | ## How to use
9 |
10 |
11 | 1. First, use `download.sh` to download the pre-trained model and sample inputs
12 |
13 | 2. Then prepare the training dataset as described in the blog post
14 |
15 | 3. Modify [boost_train.py](boost_train.py)
16 |
17 | ## Pretrained Model
18 |
19 | Weight [https://github.com/dmlc/web-data/raw/master/mxnet/art/model.zip](https://github.com/dmlc/web-data/raw/master/mxnet/art/model.zip)
20 | Inference [boost_inference.py](boost_inference.py)
21 |
--------------------------------------------------------------------------------
/plugin/warpctc/warpctc.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file warpctc.cc
4 | * \brief warpctc op
5 | * \author Liang Xiang
6 | */
7 |
8 | #include "./warpctc-inl.h"
9 | #include "../../src/operator/mshadow_op.h"
10 |
11 | namespace mxnet {
12 | namespace op {
13 | template<>
14 | Operator *CreateOp<cpu>(WarpCTCParam param) {
15 | return new WarpCTCOp<cpu>(param);
16 | }
17 |
18 | Operator *WarpCTCProp::CreateOperator(Context ctx) const {
19 | DO_BIND_DISPATCH(CreateOp, param_);
20 | }
21 |
22 | DMLC_REGISTER_PARAMETER(WarpCTCParam);
23 |
24 | MXNET_REGISTER_OP_PROPERTY(WarpCTC, WarpCTCProp)
25 | .describe("warp ctc.")
26 | .add_arguments(WarpCTCParam::__FIELDS__());
27 |
28 | } // namespace op
29 | } // namespace mxnet
30 |
--------------------------------------------------------------------------------
/src/operator/native_op.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file native_op.cc
4 | * \brief
5 | * \author Junyuan Xie
6 | */
7 | #include "./native_op-inl.h"
8 |
9 | namespace mxnet {
10 | namespace op {
11 | template<>
12 | Operator *CreateOp<cpu>(NativeOpParam param) {
13 | return new NativeOp<cpu>(param);
14 | }
15 |
16 | Operator* NativeOpProp::CreateOperator(Context ctx) const {
17 | DO_BIND_DISPATCH(CreateOp, param_);
18 | }
19 |
20 | DMLC_REGISTER_PARAMETER(NativeOpParam);
21 |
22 | MXNET_REGISTER_OP_PROPERTY(_Native, NativeOpProp)
23 | .describe("Stub for implementing an operator implemented in native frontend language.")
24 | .add_arguments(NativeOpParam::__FIELDS__());
25 |
26 | } // namespace op
27 | } // namespace mxnet
28 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.Embedding.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.Embedding}
4 | \alias{mx.symbol.Embedding}
5 | \title{Get embedding for one-hot input}
6 | \usage{
7 | mx.symbol.Embedding(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to the EmbeddingOp.}
12 |
13 | \item{weight}{Symbol
14 | Embedding weight matrix.}
15 |
16 | \item{input.dim}{int, required
17 | input dim of one-hot encoding}
18 |
19 | \item{output.dim}{int, required
20 | output dim of embedding}
21 |
22 | \item{name}{string, optional
23 | Name of the resulting symbol.}
24 | }
25 | \value{
26 | out The result mx.symbol
27 | }
28 | \description{
29 | Get embedding for one-hot input
30 | }
31 |
32 |
--------------------------------------------------------------------------------
/docker/cuda/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:7.5-cudnn4-devel
2 | MAINTAINER Mu Li
3 |
4 | # install the core library
5 | RUN apt-get update && apt-get install -y build-essential git libopenblas-dev libopencv-dev
6 | RUN git clone --recursive https://github.com/dmlc/mxnet/ && cd mxnet && \
7 | cp make/config.mk . && \
8 | echo "USE_CUDA=1" >>config.mk && \
9 | echo "USE_CUDA_PATH=/usr/local/cuda" >>config.mk && \
10 | echo "USE_CUDNN=1" >>config.mk && \
11 | echo "USE_BLAS=openblas" >>config.mk && \
12 | make -j$(nproc) ADD_LDFLAGS=-L/usr/local/cuda/lib64/stubs
13 | ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:$LD_LIBRARY_PATH
14 |
15 | # python package
16 | RUN apt-get install -y python-numpy wget unzip
17 | ENV PYTHONPATH /mxnet/python
18 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.choose.element.0index.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.choose.element.0index}
4 | \alias{mx.nd.choose.element.0index}
5 | \title{Choose one element from each line (row for Python, column for R/Julia) in lhs according to the index indicated by rhs. This function assumes rhs uses 0-based indexing.}
6 | \arguments{
7 | \item{lhs}{NDArray
8 | Left operand to the function.}
9 |
10 | \item{rhs}{NDArray
11 | Right operand to the function.}
12 | }
13 | \value{
14 | out The result mx.ndarray
15 | }
16 | \description{
17 | Choose one element from each line (row for Python, column for R/Julia) in lhs according to the index indicated by rhs. This function assumes rhs uses 0-based indexing.
18 | }
19 |
20 |
--------------------------------------------------------------------------------
/plugin/caffe/caffe_common.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2016 by Contributors
3 | * \file caffe_common.h
4 | * \brief Common functions for caffeOp and caffeLoss symbols
5 | * \author Haoran Wang
6 | */
7 | #include
8 | #include
9 | #include"caffe_common.h"
10 |
11 | namespace mxnet {
12 | namespace op {
13 | namespace caffe {
14 |
15 | // Cpu implementation of set_mode
16 | template<>
17 | void CaffeMode::SetMode() {
18 | ::caffe::Caffe::set_mode(::caffe::Caffe::CPU);
19 | }
20 |
21 | // Gpu implementation of set_mode
22 | template<>
23 | void CaffeMode::SetMode() {
24 | ::caffe::Caffe::set_mode(::caffe::Caffe::GPU);
25 | }
26 |
27 | } // namespace caffe
28 | } // namespace op
29 | } // namespace mxnet
30 |
--------------------------------------------------------------------------------
/scala-package/examples/scripts/run_visualization.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | MXNET_ROOT=$(cd "$(dirname $0)/../../.."; pwd)
4 | CLASS_PATH=$MXNET_ROOT/scala-package/assembly/linux-x86_64-cpu/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
5 |
6 | # please install the Graphviz library
7 | # if you are using ubuntu, use the following command:
8 | # sudo apt-get install graphviz
9 |
10 | # path to save the generated visualization result
11 | OUT_DIR=$1
12 | # net to visualize, e.g. "LeNet", "AlexNet", "VGG", "GoogleNet", "Inception_BN", "Inception_V3", "ResNet_Small"
13 | NET=$2
14 |
15 | java -Xmx1024m -cp $CLASS_PATH \
16 | ml.dmlc.mxnet.examples.visualization.ExampleVis \
17 | --out-dir $OUT_DIR \
18 | --net $NET
19 |
--------------------------------------------------------------------------------
/scala-package/core/src/test/scala/ml/dmlc/mxnet/AttrScopeSuite.scala:
--------------------------------------------------------------------------------
1 | package ml.dmlc.mxnet
2 |
3 | import org.scalatest.{BeforeAndAfterAll, FunSuite}
4 |
5 | class AttrScopeSuite extends FunSuite with BeforeAndAfterAll {
6 | test("attr basic") {
7 | val (data, gdata) =
8 | AttrScope(Map("group" -> "4", "data" -> "great")).withScope {
9 | val data = Symbol.Variable("data", attr = Map("dtype" -> "data", "group" -> "1"))
10 | val gdata = Symbol.Variable("data2")
11 | (data, gdata)
12 | }
13 | assert(gdata.attr("group").get === "4")
14 | assert(data.attr("group").get === "1")
15 |
16 | val exceedScopeData = Symbol.Variable("data3")
17 | assert(exceedScopeData.attr("group") === None, "No group attr in global attr scope")
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/src/optimizer/optimizer.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file optimizer.cc
4 | * \brief optimizer module of mxnet
5 | * \author Junyuan Xie
6 | */
7 | #include
8 | #include
9 | #include
10 | #include
11 |
12 | namespace dmlc {
13 | DMLC_REGISTRY_ENABLE(::mxnet::OptimizerReg);
14 | } // namespace dmlc
15 |
16 | namespace mxnet {
17 | // implementation of all factory functions
18 | Optimizer *Optimizer::Create(const char* type_name) {
19 | auto *creator = dmlc::Registry<OptimizerReg>::Find(type_name);
20 | if (creator == nullptr) {
21 | LOG(FATAL) << "Cannot find Optimizer " << type_name << " in registry";
22 | }
23 | return creator->body();
24 | }
25 | } // namespace mxnet
26 |
--------------------------------------------------------------------------------
/R-package/man/mx.gru.forward.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/gru.R
3 | \name{mx.gru.forward}
4 | \alias{mx.gru.forward}
5 | \title{Using forward function to predict in gru inference model}
6 | \usage{
7 | mx.gru.forward(model, input.data, new.seq = FALSE)
8 | }
9 | \arguments{
10 | \item{model}{gru model
11 | A gru inference model}
12 |
13 | \item{input.data, }{array.matrix
14 | The input data for forward function}
15 |
16 | \item{new.seq}{boolean, default=FALSE
17 | Whether the input is the start of a new sequence}
18 | }
19 | \value{
20 | result A list(prob=prob, model=model) containing the result probability of each label and the model.
21 | }
22 | \description{
23 | Using forward function to predict in gru inference model
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/R-package/man/mx.rnn.forward.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/rnn.R
3 | \name{mx.rnn.forward}
4 | \alias{mx.rnn.forward}
5 | \title{Using forward function to predict in rnn inference model}
6 | \usage{
7 | mx.rnn.forward(model, input.data, new.seq = FALSE)
8 | }
9 | \arguments{
10 | \item{model}{rnn model
11 | A rnn inference model}
12 |
13 | \item{input.data, }{array.matrix
14 | The input data for forward function}
15 |
16 | \item{new.seq}{boolean, default=FALSE
17 | Whether the input is the start of a new sequence}
18 | }
19 | \value{
20 | result A list(prob=prob, model=model) containing the result probability of each label and the model.
21 | }
22 | \description{
23 | Using forward function to predict in rnn inference model
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/scala-package/examples/scripts/rnn/run_train_charrnn.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd)
4 | CLASS_PATH=$MXNET_ROOT/scala-package/assembly/linux-x86_64-gpu/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
5 |
6 | # which gpu card to use, -1 means cpu
7 | GPU=$1
8 | # you can get the training data file using the following command
9 | # wget http://data.dmlc.ml/mxnet/data/lab_data.zip
10 | # unzip -o lab_data.zip
11 | # for example ./datas/obama.txt
12 | DATA_PATH=$2
13 | # for example ./models
14 | SAVE_MODEL_PATH=$3
15 |
16 | java -Xmx4G -cp $CLASS_PATH \
17 | ml.dmlc.mxnet.examples.rnn.TrainCharRnn \
18 | --data-path $DATA_PATH \
19 | --save-model-path $SAVE_MODEL_PATH \
20 |          --gpu $GPU
21 |
--------------------------------------------------------------------------------
/R-package/man/mx.lstm.forward.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/lstm.R
3 | \name{mx.lstm.forward}
4 | \alias{mx.lstm.forward}
5 | \title{Using forward function to predict in lstm inference model}
6 | \usage{
7 | mx.lstm.forward(model, input.data, new.seq = FALSE)
8 | }
9 | \arguments{
10 | \item{model}{lstm model
11 | A Lstm inference model}
12 |
13 | \item{input.data, }{array.matrix
14 | The input data for forward function}
15 |
16 | \item{new.seq}{boolean, default=FALSE
17 | Whether the input is the start of a new sequence}
18 | }
19 | \value{
20 | result A list(prob=prob, model=model) containing the result probability of each label and the model.
21 | }
22 | \description{
23 | Using forward function to predict in lstm inference model
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/example/nce-loss/README.md:
--------------------------------------------------------------------------------
1 | # Examples of NCE Loss
2 |
3 | NCE loss is used to speed up multi-class classification when the number of classes is huge.
4 |
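For intuition, here is a small standalone sketch (plain NumPy, independent of the MXNet scripts in this folder) of why sampling helps: a full softmax scores every word in the vocabulary, while an NCE-style update (strictly speaking, the simplified negative-sampling form) only scores the true word plus a few sampled noise words.

```
import numpy as np

vocab_size, embed_size, num_noise = 50000, 100, 5
rng = np.random.RandomState(0)

hidden = rng.randn(embed_size)                   # hidden vector at one position
out_weights = rng.randn(vocab_size, embed_size)  # output word embeddings

# Full softmax: touches every row of out_weights, O(vocab_size * embed_size).
logits = out_weights.dot(hidden)
softmax = np.exp(logits - logits.max())
softmax /= softmax.sum()

# Sampled update: score only the true word and a handful of noise words,
# O((1 + num_noise) * embed_size), and train a binary classifier on them.
true_word = 123
candidates = np.concatenate([[true_word], rng.randint(vocab_size, size=num_noise)])
targets = np.concatenate([[1.0], np.zeros(num_noise)])
scores = out_weights[candidates].dot(hidden)
probs = 1.0 / (1.0 + np.exp(-scores))            # "is this the true next word?"
loss = -(targets * np.log(probs) + (1 - targets) * np.log(1 - probs)).sum()
```
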
5 | ## Toy example
6 |
7 | * toy_softmax.py: a multi-class example using a softmax output
8 | * toy_nce.py: a multi-class example using NCE loss
9 |
10 | ## Word2Vec
11 |
12 | * word2vec.py: a CBOW word2vec example using NCE loss
13 |
14 | You can run it by
15 |
16 | ```
17 | ./get_text8.sh
18 | python word2vec.py
19 |
20 | ```
21 |
22 | ## LSTM
23 |
24 | * lstm_word.py: an LSTM example using NCE loss
25 |
26 | You can run it by
27 |
28 | ```
29 | ./get_text8.sh
30 | python lstm_word.py
31 | ```
32 |
33 | ## References
34 |
35 | You can refer to [http://www.jianshu.com/p/e439b43ea464](http://www.jianshu.com/p/e439b43ea464) for more details. (In Chinese)
36 |
--------------------------------------------------------------------------------
/scala-package/examples/scripts/neuralstyle_end2end/run_train_end2end.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd)
4 | CLASS_PATH=$MXNET_ROOT/scala-package/assembly/linux-x86_64-gpu/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
5 |
6 | # more details please refer to
7 | # https://github.com/Ldpe2G/mxnet/blob/develop/example/neural-style/end_to_end/README.md
8 | TRAIN_DATA_PATH=$1
9 | STYLE_IMG=$2
10 | VGG_MODEL_PATH=$3
11 | SAVE_MODEL_DIR=$4
12 | GPU=0
13 |
14 | java -Xmx1024m -cp $CLASS_PATH \
15 | ml.dmlc.mxnet.examples.neuralstyle.end2end.BoostTrain \
16 | --data-path $TRAIN_DATA_PATH \
17 |          --vgg-model-path $VGG_MODEL_PATH \
18 |          --save-model-path $SAVE_MODEL_DIR \
19 | --style-image $STYLE_IMG \
20 | --gpu $GPU
--------------------------------------------------------------------------------
/src/operator/slice_channel.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file slice_channel.cc
4 | * \brief
5 | * \author Bing Xu
6 | */
7 |
8 | #include "./slice_channel-inl.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator* CreateOp<cpu>(SliceChannelParam param) {
14 |   return new SliceChannelOp<cpu>(param);
15 | }
16 |
17 | Operator* SliceChannelProp::CreateOperator(Context ctx) const {
18 | DO_BIND_DISPATCH(CreateOp, param_);
19 | }
20 |
21 | DMLC_REGISTER_PARAMETER(SliceChannelParam);
22 |
23 | MXNET_REGISTER_OP_PROPERTY(SliceChannel, SliceChannelProp)
24 | .describe("Slice input equally along specified axis")
25 | .set_return_type("Symbol[]")
26 | .add_arguments(SliceChannelParam::__FIELDS__());
27 |
28 | } // namespace op
29 | } // namespace mxnet
30 |
31 |
--------------------------------------------------------------------------------
/example/memcost/Makefile:
--------------------------------------------------------------------------------
1 |
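# Each target runs inception_memcost.py under different MXNET_EXEC_* settings so that
# the reported memory costs can be compared, e.g. `make no_optimization` vs `make with_both`.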
2 | .PHONY: no_optimization with_inplace with_sharing with_both forward_only
3 |
4 | no_optimization:
5 | @echo "Estimating the cost with no optimization..."
6 | @MXNET_EXEC_ENABLE_INPLACE=false MXNET_EXEC_MATCH_RANGE=0 python inception_memcost.py
7 |
8 | with_inplace:
9 | @echo "Estimating the cost with inplace optimization..."
10 | @MXNET_EXEC_ENABLE_INPLACE=true MXNET_EXEC_MATCH_RANGE=0 python inception_memcost.py
11 |
12 | with_sharing:
13 | @echo "Estimating the cost with memory sharing ..."
14 | @MXNET_EXEC_ENABLE_INPLACE=false python inception_memcost.py
15 |
16 | with_both:
17 | @echo "Estimating the cost with all optimizations ..."
18 | @python inception_memcost.py
19 |
20 | forward_only:
21 | @echo "Estimating the cost of forward only ..."
22 | @python inception_memcost.py 'null'
23 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.Concat.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/symbol.R
3 | \name{mx.symbol.Concat}
4 | \alias{mx.symbol.Concat}
5 | \title{Perform a feature concat on channel dim (dim 1) over all the inputs.}
6 | \usage{
7 | mx.symbol.Concat(data, num.args, dim = NULL, name = NULL)
8 | }
9 | \arguments{
10 | \item{data}{list, required
11 | List of tensors to concatenate}
12 |
13 | \item{num.args}{int, required
14 | Number of inputs to be concated.}
15 |
16 | \item{dim}{int, optional, default='1'
17 | the dimension to be concated.}
18 |
19 | \item{name}{string, optional
20 | Name of the resulting symbol.}
21 | }
22 | \value{
23 | out The result mx.symbol
24 | }
25 | \description{
26 | Perform a feature concat on channel dim (dim 1) over all the inputs.
27 | }
28 |
29 |
--------------------------------------------------------------------------------
/src/operator/leaky_relu.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file leaky_relu.cc
4 | * \brief
5 | * \author Bing Xu
6 | */
7 |
8 | #include "./leaky_relu-inl.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator *CreateOp<cpu>(LeakyReLUParam param) {
14 |   return new LeakyReLUOp<cpu>(param);
15 | }
16 |
17 | Operator *LeakyReLUProp::CreateOperator(Context ctx) const {
18 | DO_BIND_DISPATCH(CreateOp, param_);
19 | }
20 |
21 | DMLC_REGISTER_PARAMETER(LeakyReLUParam);
22 |
23 | MXNET_REGISTER_OP_PROPERTY(LeakyReLU, LeakyReLUProp)
24 | .describe("Apply activation function to input.")
25 | .add_argument("data", "Symbol", "Input data to activation function.")
26 | .add_arguments(LeakyReLUParam::__FIELDS__());
27 |
28 | } // namespace op
29 | } // namespace mxnet
30 |
31 |
--------------------------------------------------------------------------------
/R-package/man/mx.io.CSVIter.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.io.CSVIter}
4 | \alias{mx.io.CSVIter}
5 | \title{Create iterator for dataset in csv.}
6 | \usage{
7 | mx.io.CSVIter(...)
8 | }
9 | \arguments{
10 | \item{data.csv}{string, required
11 | Dataset Param: Data csv path.}
12 |
13 | \item{data.shape}{Shape(tuple), required
14 | Dataset Param: Shape of the data.}
15 |
16 | \item{label.csv}{string, optional, default='NULL'
17 | Dataset Param: Label csv path. If is NULL, all labels will be returned as 0}
18 |
19 | \item{label.shape}{Shape(tuple), optional, default=(1,)
20 | Dataset Param: Shape of the label.}
21 | }
22 | \value{
23 | iter The result mx.dataiter
24 | }
25 | \description{
26 | Create iterator for dataset in csv.
27 | }
28 |
29 |
--------------------------------------------------------------------------------
/plugin/caffe/caffe_op.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2016 by Contributors
3 |  * \file caffe_op.cu
4 | * \brief caffe operator
5 | * \author Haoran Wang
6 | */
7 | #include "./caffe_op-inl.h"
8 | namespace mxnet {
9 | namespace op {
10 |
11 | template<>
12 | Operator *CreateOp<gpu>(CaffeOpParam param, int dtype) {
13 | Operator *op = NULL;
14 | switch (dtype) {
15 | case mshadow::kFloat32:
16 |     op = new CaffeOp<gpu, float>(param);
17 | break;
18 | case mshadow::kFloat64:
19 |     op = new CaffeOp<gpu, double>(param);
20 | break;
21 | case mshadow::kFloat16:
22 | LOG(FATAL) << "float16 layer is not supported by caffe";
23 | break;
24 | default:
25 | LOG(FATAL) << "Unsupported type " << dtype;
26 | }
27 | return op;
28 | }
29 |
30 | } // namespace op
31 | } // namespace mxnet
32 |
--------------------------------------------------------------------------------
/src/operator/deconvolution.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file deconvolution.cu
4 | * \brief
5 | * \author Wei Wu
6 | */
7 |
8 | #include "./deconvolution-inl.h"
9 | #if MXNET_USE_CUDNN == 1
10 | #include "./cudnn_deconvolution-inl.h"
11 | #endif // MXNET_USE_CUDNN
12 |
13 | namespace mxnet {
14 | namespace op {
15 | template<>
16 | Operator* CreateOp<gpu>(DeconvolutionParam param, int dtype) {
17 | Operator *op = NULL;
18 | #if MXNET_USE_CUDNN == 1
19 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
20 |     op = new CuDNNDeconvolutionOp<DType>(param);
21 | });
22 | #else
23 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
24 |     op = new DeconvolutionOp<gpu, DType>(param);
25 | });
26 | #endif // MXNET_USE_CUDNN
27 | return op;
28 | }
29 |
30 | } // namespace op
31 | } // namespace mxnet
32 |
--------------------------------------------------------------------------------
/src/operator/lrn.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file lrn.cu
4 | * \brief
5 | * \author Bing Xu
6 | */
7 |
8 | #include "./lrn-inl.h"
9 | #if MXNET_USE_CUDNN == 1
10 | #include "./cudnn_lrn-inl.h"
11 | #endif
12 |
13 | namespace mxnet {
14 | namespace op {
15 | template<>
16 | Operator* CreateOp<gpu>(LRNParam param, int dtype) {
17 | #if MXNET_USE_CUDNN == 1
18 | return new CuDNNLocalResponseNormOp(param);
19 | #else
20 | #if CUDA_VERSION == 7000
21 |     LOG(FATAL) << "Due to old CUDA compiler bug, LRN is disabled. "
22 |                << "Please upgrade CUDA to 7.5+ or use CUDNN";
23 | return NULL;
24 | #else
25 |   return new LocalResponseNormOp<gpu>(param);
26 | #endif // CUDA_VERSION
27 | #endif // MXNET_USE_CUDNN
28 | }
29 |
30 | } // namespace op
31 | } // namespace mxnet
32 |
33 |
34 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.array.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ndarray.R
3 | \name{mx.nd.array}
4 | \alias{mx.nd.array}
5 | \title{Create a new \code{mx.ndarray} that copies the content from src on ctx.}
6 | \usage{
7 | mx.nd.array(src.array, ctx = NULL)
8 | }
9 | \arguments{
10 | \item{src.array}{Source array data of class \code{array}, \code{vector} or \code{matrix}.}
11 |
12 | \item{ctx}{optional The context device of the array. mx.ctx.default() will be used in default.}
13 | }
14 | \value{
15 | An \code{mx.ndarray}
16 |
17 | An Rcpp_MXNDArray object
18 | }
19 | \description{
20 | Create a new \code{mx.ndarray} that copies the content from src on ctx.
21 | }
22 | \examples{
23 | mat = mx.nd.array(x)
24 | mat = 1 - mat + (2 * mat)/(mat + 0.5)
25 | as.array(mat)
26 |
27 | }
28 |
29 |
--------------------------------------------------------------------------------
/example/rcnn/utils/combine_model.py:
--------------------------------------------------------------------------------
1 | from load_model import load_checkpoint
2 | from save_model import save_checkpoint
3 |
4 |
5 | def combine_model(prefix1, epoch1, prefix2, epoch2, prefix_out, epoch_out):
6 | args1, auxs1 = load_checkpoint(prefix1, epoch1)
7 | args2, auxs2 = load_checkpoint(prefix2, epoch2)
8 |     arg_names = list(args1.keys()) + list(args2.keys())
9 |     aux_names = list(auxs1.keys()) + list(auxs2.keys())
10 | args = dict()
11 | for arg in arg_names:
12 | if arg in args1:
13 | args[arg] = args1[arg]
14 | else:
15 | args[arg] = args2[arg]
16 | auxs = dict()
17 | for aux in aux_names:
18 | if aux in auxs1:
19 | auxs[aux] = auxs1[aux]
20 | else:
21 | auxs[aux] = auxs2[aux]
22 | save_checkpoint(prefix_out, epoch_out, args, auxs)
23 |
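# Example usage (hypothetical prefixes/epochs): merge RPN and Fast R-CNN weights into one checkpoint.
# combine_model('model/rpn', 8, 'model/rcnn', 8, 'model/final', 0)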
--------------------------------------------------------------------------------
/plugin/caffe/caffe_loss.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2016 by Contributors
3 |  * \file caffe_loss.cu
4 | * \brief caffe loss
5 | * \author Haoran Wang
6 | */
7 | #include "./caffe_loss-inl.h"
8 |
9 | namespace mxnet {
10 | namespace op {
11 | template<>
12 | Operator* CreateOp<gpu>(CaffeLossParam param, int dtype) {
13 | Operator *op = NULL;
14 | switch (dtype) {
15 | case mshadow::kFloat32:
16 |     op = new CaffeLoss<gpu, float>(param);
17 | break;
18 | case mshadow::kFloat64:
19 |     op = new CaffeLoss<gpu, double>(param);
20 | break;
21 | case mshadow::kFloat16:
22 | LOG(FATAL) << "float16 layer is not supported by caffe";
23 | break;
24 | default:
25 | LOG(FATAL) << "Unsupported type " << dtype;
26 | }
27 | return op;
28 | }
29 |
30 | } // namespace op
31 | } // namespace mxnet
32 |
--------------------------------------------------------------------------------
/src/operator/rnn.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file rnn.cu
4 | * \brief
5 | * \author Sebastian Bodenstein
6 | */
7 |
8 | #include "./rnn-inl.h"
9 | #include
10 | #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR == 5
11 | #include "./cudnn_rnn-inl.h"
12 | #endif // MXNET_USE_CUDNN && CUDNN_MAJOR
13 |
14 | namespace mxnet {
15 | namespace op {
16 | template<>
17 | Operator* CreateOp<gpu>(RNNParam param, int dtype) {
18 | Operator *op = NULL;
19 | #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR == 5
20 | MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
21 |     op = new CuDNNRNNOp<DType>(param);
22 | })
23 | #else
24 | LOG(FATAL) << "RNN is only available for cuDNN at the moment.";
25 | #endif // MXNET_USE_CUDNN && CUDNN_MAJOR
26 | return op;
27 | }
28 |
29 | } // namespace op
30 | } // namespace mxnet
31 |
--------------------------------------------------------------------------------
/tools/kill-mxnet.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os, sys
4 |
5 | if len(sys.argv) != 2:
6 |     print "usage: %s <host_file>" % sys.argv[0]
7 | sys.exit(1)
8 |
9 | host_file = sys.argv[1]
10 | prog_name = "train_imagenet"
11 |
12 | # Get host IPs
13 | with open(host_file, "r") as f:
14 | hosts = f.read().splitlines()
15 | ssh_cmd = (
16 | "ssh "
17 | "-o StrictHostKeyChecking=no "
18 | "-o UserKnownHostsFile=/dev/null "
19 | "-o LogLevel=quiet "
20 | )
21 | kill_cmd = (
22 | " "
23 | "ps aux |"
24 | "grep -v grep |"
25 | "grep 'python train_imagenet.py' |"
26 | "awk '{print \$2}'|"
27 | "xargs kill"
28 | )
29 | print kill_cmd
30 | for host in hosts:
31 | cmd = ssh_cmd + host +" \""+ kill_cmd+"\""
32 | print cmd
33 | os.system(cmd)
34 |
35 | print "Done killing"
36 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.BatchNorm.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.BatchNorm}
4 | \alias{mx.symbol.BatchNorm}
5 | \title{Apply batch normalization to input.}
6 | \usage{
7 | mx.symbol.BatchNorm(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to batch normalization}
12 |
13 | \item{eps}{float, optional, default=0.001
14 | Epsilon to prevent div 0}
15 |
16 | \item{momentum}{float, optional, default=0.9
17 | Momentum for moving average}
18 |
19 | \item{fix.gamma}{boolean, optional, default=True
20 | Fix gamma while training}
21 |
22 | \item{name}{string, optional
23 | Name of the resulting symbol.}
24 | }
25 | \value{
26 | out The result mx.symbol
27 | }
28 | \description{
29 | Apply batch normalization to input.
30 | }
31 |
32 |
--------------------------------------------------------------------------------
/example/python-howto/multiple_outputs.py:
--------------------------------------------------------------------------------
1 | """Create a Multiple output configuration.
2 |
3 | This example shows how to create a multiple output configuration.
4 | """
5 | import mxnet as mx
6 |
7 | net = mx.symbol.Variable('data')
8 | fc1 = mx.symbol.FullyConnected(data=net, name='fc1', num_hidden=128)
9 | net = mx.symbol.Activation(data=fc1, name='relu1', act_type="relu")
10 | net = mx.symbol.FullyConnected(data=net, name='fc2', num_hidden=64)
11 | out = mx.symbol.SoftmaxOutput(data=net, name='softmax')
12 | # group fc1 and out together
13 | group = mx.symbol.Group([fc1, out])
14 | print group.list_outputs()
15 |
16 | # You can go ahead and bind on the group
17 | # executor = group.simple_bind(data=data_shape)
18 | # executor.forward()
19 | # executor.outputs[0] will be value of fc1
20 | # executor.outputs[1] will be value of softmax
21 |
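# A concrete (hypothetical-shape) version of the sketch above:
# executor = group.simple_bind(ctx=mx.cpu(), data=(10, 784))
# executor.forward()
# print(executor.outputs[0].shape)  # fc1 output: (10, 128)
# print(executor.outputs[1].shape)  # softmax output: (10, 64)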
--------------------------------------------------------------------------------
/scala-package/examples/scripts/rnn/run_test_charrnn.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd)
4 | CLASS_PATH=$MXNET_ROOT/scala-package/assembly/linux-x86_64-gpu/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
5 |
6 | # you can get the training data file using the following command
7 | # wget http://data.dmlc.ml/mxnet/data/lab_data.zip
8 | # unzip -o lab_data.zip
9 | # for example ./datas/obama.txt
10 | DATA_PATH=$1
11 | # for example ./models/obama
12 | MODEL_PREFIX=$2
13 | # feel free to change the starter sentence
14 | STARTER_SENTENCE="The joke"
15 |
16 | java -Xmx4G -cp $CLASS_PATH \
17 | ml.dmlc.mxnet.examples.rnn.TestCharRnn \
18 | --data-path $DATA_PATH \
19 | --model-prefix $MODEL_PREFIX \
20 | --starter-sentence "$STARTER_SENTENCE"
21 |
--------------------------------------------------------------------------------
/example/bayesian-methods/README.md:
--------------------------------------------------------------------------------
1 | Bayesian Methods
2 | ================
3 |
4 | This folder contains examples related to Bayesian Methods.
5 |
6 | We currently have *Stochastic Gradient Langevin Dynamics (SGLD)* [(Welling and Teh, 2011)](http://www.icml-2011.org/papers/398_icmlpaper.pdf)
7 | and *Bayesian Dark Knowledge (BDK)* [(Balan, Rathod, Murphy and Welling, 2015)](http://papers.nips.cc/paper/5965-bayesian-dark-knowledge).
8 |
9 | **sgld.ipynb** shows how to use MXNet to repeat the toy experiment in the original SGLD paper.
10 |
11 | **bdk.ipynb** shows how to use MXNet to implement the DistilledSGLD algorithm in Bayesian Dark Knowledge.
12 |
13 | **bdk_demo.py** contains scripts (going beyond what the notebook covers) related to Bayesian Dark Knowledge. Use `python bdk_demo.py -d 1 -l 2 -t 50000` to run classification on MNIST.
--------------------------------------------------------------------------------
/R-package/man/mx.rnorm.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/random.R
3 | \name{mx.rnorm}
4 | \alias{mx.rnorm}
5 | \title{Generate normal distribution with mean and sd.}
6 | \usage{
7 | mx.rnorm(shape, mean = 0, sd = 1, ctx = NULL)
8 | }
9 | \arguments{
10 | \item{shape}{Dimension, The shape(dimension) of the result.}
11 |
12 | \item{mean}{numeric, The mean of distribution.}
13 |
14 | \item{sd}{numeric, The standard deviations.}
15 |
16 | \item{ctx, }{optional The context device of the array. mx.ctx.default() will be used in default.}
17 | }
18 | \description{
19 | Generate normal distribution with mean and sd.
20 | }
21 | \examples{
22 |
23 | mx.set.seed(0)
24 | as.array(mx.runif(2))
25 | # 0.5488135 0.5928446
26 | mx.set.seed(0)
27 | as.array(mx.rnorm(2))
28 | # 2.212206 1.163079
29 |
30 | }
31 |
32 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.LinearRegressionOutput.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.LinearRegressionOutput}
4 | \alias{mx.symbol.LinearRegressionOutput}
5 | \title{Use linear regression for final output, this is used on final output of a net.}
6 | \usage{
7 | mx.symbol.LinearRegressionOutput(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to function.}
12 |
13 | \item{label}{Symbol
14 | Input label to function.}
15 |
16 | \item{grad.scale}{float, optional, default=1
17 | Scale the gradient by a float factor}
18 |
19 | \item{name}{string, optional
20 | Name of the resulting symbol.}
21 | }
22 | \value{
23 | out The result mx.symbol
24 | }
25 | \description{
26 | Use linear regression for final output, this is used on final output of a net.
27 | }
28 |
29 |
--------------------------------------------------------------------------------
/example/rcnn/utils/save_model.py:
--------------------------------------------------------------------------------
1 | import mxnet as mx
2 |
3 |
4 | def save_checkpoint(prefix, epoch, arg_params, aux_params):
5 | """Checkpoint the model data into file.
6 | :param prefix: Prefix of model name.
7 | :param epoch: The epoch number of the model.
8 | :param arg_params: dict of str to NDArray
9 | Model parameter, dict of name to NDArray of net's weights.
10 | :param aux_params: dict of str to NDArray
11 | Model parameter, dict of name to NDArray of net's auxiliary states.
12 | :return: None
13 | prefix-epoch.params will be saved for parameters.
14 | """
15 | save_dict = {('arg:%s' % k) : v for k, v in arg_params.items()}
16 | save_dict.update({('aux:%s' % k) : v for k, v in aux_params.items()})
17 | param_name = '%s-%04d.params' % (prefix, epoch)
18 | mx.nd.save(param_name, save_dict)
19 |
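# Example (hypothetical names): save_checkpoint('model/rcnn', 10, arg_params, aux_params)
# writes 'model/rcnn-0010.params'.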
--------------------------------------------------------------------------------
/plugin/opencv/cv_api.h:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2016 by Contributors
3 | * \file cv_api.h
4 | * \brief C API for opencv
5 | * \author Junyuan Xie
6 | */
7 | #ifndef PLUGIN_OPENCV_CV_API_H_
8 | #define PLUGIN_OPENCV_CV_API_H_
9 |
10 | #include <mxnet/c_api.h>
11 |
12 | MXNET_DLL int MXCVImdecode(
13 | const unsigned char *img,
14 | const mx_uint len,
15 | const int flag,
16 | NDArrayHandle *out);
17 |
18 | MXNET_DLL int MXCVResize(
19 | NDArrayHandle src,
20 | const mx_uint w,
21 | const mx_uint h,
22 | const int interpolation,
23 | NDArrayHandle *out);
24 |
25 | MXNET_DLL int MXCVcopyMakeBorder(
26 | NDArrayHandle src,
27 | const int top,
28 | const int bot,
29 | const int left,
30 | const int right,
31 | const int type,
32 | const double value,
33 | NDArrayHandle *out);
34 |
35 | #endif // PLUGIN_OPENCV_CV_API_H_
36 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.MAERegressionOutput.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.MAERegressionOutput}
4 | \alias{mx.symbol.MAERegressionOutput}
5 | \title{Use mean absolute error regression for final output, this is used on final output of a net.}
6 | \usage{
7 | mx.symbol.MAERegressionOutput(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to function.}
12 |
13 | \item{label}{Symbol
14 | Input label to function.}
15 |
16 | \item{grad.scale}{float, optional, default=1
17 | Scale the gradient by a float factor}
18 |
19 | \item{name}{string, optional
20 | Name of the resulting symbol.}
21 | }
22 | \value{
23 | out The result mx.symbol
24 | }
25 | \description{
26 | Use mean absolute error regression for final output, this is used on final output of a net.
27 | }
28 |
29 |
--------------------------------------------------------------------------------
/docs/how_to/index.md:
--------------------------------------------------------------------------------
1 | # MXNet How To
2 |
3 | This page contains guidelines for using and developing MXNet.
4 |
5 | ## Use MXNet on Specific Tasks
6 | - [How to train with multiple CPU/GPUs with data parallelism](multi_devices.md)
7 | - [How to train with multiple GPUs in model parallelism - train LSTM](model_parallel_lstm.md)
8 | - [How to run MXNet on smart-devices/mobiles](smart_device.md)
9 | - [How To Setup MXNet on the AWS cloud using EC2 and S3](cloud.md)
10 | - [How to use pre-trained models](pretrained.md)
11 | - [How to use MXNet on variable input length/size (bucketing)](bucketing.md)
12 | - [How to improve MXNet performance](perf.md)
13 |
14 | ## Develop and Hack MXNet
15 | - [Create new operators](new_op.md)
16 | - [Use Torch from MXNet](torch.md)
17 | - [Set environment variables of MXNet](env_var.md)
18 |
19 | ## Frequently Asked Questions
20 | - [FAQ](faq.md)
21 |
--------------------------------------------------------------------------------
/scala-package/init/src/main/scala/ml/dmlc/mxnet/init/LibInfo.scala:
--------------------------------------------------------------------------------
1 | package ml.dmlc.mxnet.init
2 |
3 | import ml.dmlc.mxnet.init.Base._
4 |
5 | import scala.collection.mutable.ListBuffer
6 |
7 | class LibInfo {
8 | @native def mxSymbolListAtomicSymbolCreators(symbolList: ListBuffer[SymbolHandle]): Int
9 | @native def mxSymbolGetAtomicSymbolInfo(handle: SymbolHandle,
10 | name: RefString,
11 | desc: RefString,
12 | numArgs: RefInt,
13 | argNames: ListBuffer[String],
14 | argTypes: ListBuffer[String],
15 | argDescs: ListBuffer[String],
16 | keyVarNumArgs: RefString): Int
17 | }
18 |
--------------------------------------------------------------------------------
/example/neural-style/README.md:
--------------------------------------------------------------------------------
1 | # Neural art
2 |
3 | This is an implementation of the paper
4 | [A Neural Algorithm of Artistic Style](http://arxiv.org/abs/1508.06576) by Leon
5 | A. Gatys, Alexander S. Ecker, and Matthias Bethge.
6 |
7 | ## How to use
8 |
9 | First, use `download.sh` to download the pre-trained model and sample inputs.
10 |
11 | Then run `python run.py`; use `-h` to see more options.
12 |
13 | ## Sample results
14 |
15 |
16 |
17 | It takes about 30 seconds on a Titan X to generate the above 600x400 image.
18 |
19 | ## Note
20 |
21 | * The current implementation is based on the
22 |   [torch implementation](https://github.com/jcjohnson/neural-style), but we may
23 |   change it dramatically in the near future.
24 |
25 | * We will release a multi-GPU version soon.
26 |
--------------------------------------------------------------------------------
/R-package/man/mx.nd.fill.element.0index.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.nd.fill.element.0index}
4 | \alias{mx.nd.fill.element.0index}
5 | \title{Fill one element of each line (row for python, column for R/Julia) in lhs according to index indicated by rhs and values indicated by mhs. This function assumes rhs uses 0-based index.}
6 | \arguments{
7 | \item{lhs}{NDArray
8 | Left operand to the function.}
9 |
10 | \item{mhs}{NDArray
11 | Middle operand to the function.}
12 |
13 | \item{rhs}{NDArray
14 | Right operand to the function.}
15 | }
16 | \value{
17 | out The result mx.ndarray
18 | }
19 | \description{
20 | Fill one element of each line (row for python, column for R/Julia) in lhs according to index indicated by rhs and values indicated by mhs. This function assumes rhs uses 0-based index.
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/R-package/tests/testthat/test_symbol.R:
--------------------------------------------------------------------------------
1 | require(mxnet)
2 |
3 | context("symbol")
4 |
5 | test_that("basic symbol operation", {
6 | data = mx.symbol.Variable('data')
7 | net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
8 | net1 = mx.symbol.FullyConnected(data=net1, name='fc2', num_hidden=100)
9 |
10 | expect_equal(arguments(net1), c('data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias'))
11 |
12 | net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
13 | net2 = mx.symbol.Activation(data=net2, act_type='relu')
14 | net2 = mx.symbol.FullyConnected(data=net2, name='fc4', num_hidden=20)
15 |
16 | composed = mx.apply(net2, fc3_data=net1, name='composed')
17 |
18 | expect_equal(arguments(composed), c('data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias', 'fc3_weight', 'fc3_bias', 'fc4_weight', 'fc4_bias'))
19 | })
20 |
21 |
22 |
--------------------------------------------------------------------------------
/plugin/torch/torch_criterion.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 |  * \file torch_criterion.cc
4 |  * \brief torch criterion op
5 | * \author Junyuan Xie
6 | */
7 | #include "./torch_criterion-inl.h"
8 | #include "../../src/operator/mshadow_op.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator *CreateOp<cpu>(TorchCriterionParam param) {
14 |   return new TorchCriterionOp<cpu>(param);
15 | }
16 |
17 | // DO_BIND_DISPATCH comes from operator_common.h
18 | Operator *TorchCriterionProp::CreateOperator(Context ctx) const {
19 | DO_BIND_DISPATCH(CreateOp, param_);
20 | }
21 |
22 | DMLC_REGISTER_PARAMETER(TorchCriterionParam);
23 |
24 | MXNET_REGISTER_OP_PROPERTY(TorchCriterion, TorchCriterionProp)
25 | .describe("Criterions from torch.")
26 | .add_arguments(TorchCriterionParam::__FIELDS__());
27 |
28 | } // namespace op
29 | } // namespace mxnet
30 |
--------------------------------------------------------------------------------
/plugin/torch/torch_module.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 |  * \file torch_module.cc
4 |  * \brief torch module op
5 | * \author Bing Xu
6 | */
7 | #include "./torch_module-inl.h"
8 | #include "../../src/operator/mshadow_op.h"
9 |
10 | namespace mxnet {
11 | namespace op {
12 | template<>
13 | Operator *CreateOp<cpu>(TorchModuleParam param, TorchState* torchState) {
14 |   return new TorchModuleOp<cpu>(param, torchState);
15 | }
16 |
17 | // DO_BIND_DISPATCH comes from operator_common.h
18 | Operator *TorchModuleProp::CreateOperator(Context ctx) const {
19 | DO_BIND_DISPATCH(CreateOp, param_, torchState_);
20 | }
21 |
22 | DMLC_REGISTER_PARAMETER(TorchModuleParam);
23 |
24 | MXNET_REGISTER_OP_PROPERTY(TorchModule, TorchModuleProp)
25 | .describe("Modules from torch.")
26 | .add_arguments(TorchModuleParam::__FIELDS__());
27 |
28 | } // namespace op
29 | } // namespace mxnet
30 |
--------------------------------------------------------------------------------
/R-package/DESCRIPTION:
--------------------------------------------------------------------------------
1 | Package: mxnet
2 | Type: Package
3 | Title: MXNet
4 | Version: 0.7
5 | Date: 2015-12-23
6 | Author: Tianqi Chen, Qiang Kou, Tong He
7 | Maintainer: Qiang Kou
8 | Description: MXNet is a deep learning framework designed for both efficiency
9 | and flexibility. It allows you to mix the flavours of deep learning programs
10 | together to maximize the efficiency and your productivity.
11 | License: BSD
12 | URL: https://github.com/dmlc/mxnet/R-package
13 | BugReports: https://github.com/dmlc/mxnet/issues
14 | Imports:
15 | methods,
16 | Rcpp (>= 0.12.1),
17 | DiagrammeR (>= 0.8.1),
18 | data.table,
19 | jsonlite,
20 | magrittr,
21 | stringr
22 | Suggests:
23 | testthat,
24 | mlbench,
25 | knitr,
26 | rmarkdown,
27 | imager,
28 | roxygen2
29 | LinkingTo: Rcpp
30 | RoxygenNote: 5.0.1
31 | VignetteBuilder: knitr
32 |
--------------------------------------------------------------------------------
/R-package/man/mx.runif.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/random.R
3 | \name{mx.runif}
4 | \alias{mx.runif}
5 | \title{Generate uniform distribution in [low, high) with specified shape.}
6 | \usage{
7 | mx.runif(shape, min = 0, max = 1, ctx = NULL)
8 | }
9 | \arguments{
10 | \item{shape}{Dimension, The shape(dimension) of the result.}
11 |
12 | \item{min}{numeric, The lower bound of distribution.}
13 |
14 | \item{max}{numeric, The upper bound of distribution.}
15 |
16 | \item{ctx, }{optional The context device of the array. mx.ctx.default() will be used in default.}
17 | }
18 | \description{
19 | Generate uniform distribution in [low, high) with specified shape.
20 | }
21 | \examples{
22 |
23 | mx.set.seed(0)
24 | as.array(mx.runif(2))
25 | # 0.5488135 0.5928446
26 | mx.set.seed(0)
27 | as.array(mx.rnorm(2))
28 | # 2.212206 1.163079
29 |
30 | }
31 |
32 |
--------------------------------------------------------------------------------
/docs/community/index.md:
--------------------------------------------------------------------------------
1 | # MXNet Community
2 | ## Issue Tracker
3 | The project tracks bugs and new feature requests in the MXNet GitHub issue tracker - [mxnet/issues](https://github.com/dmlc/mxnet/issues)
4 | ## Contributors
5 | MXNet has been developed and used by a group of active community members. Everyone is more than welcome to contribute. It is a way to make the project better and more accessible to more users.
6 |
7 |
8 |
9 | Refer to the guide to [contributions](http://mxnet.io/community/contribute.html).
10 |
11 | ## Roadmap
12 |
13 | MXNet is evolving fast with community contributions. To better understand what is next in MXNet and what we are working on internally, refer to the [MXNet Roadmap](https://github.com/dmlc/mxnet/labels/Roadmap)
14 |
18 |
--------------------------------------------------------------------------------
/example/dqn/game.py:
--------------------------------------------------------------------------------
1 |
2 | DEFAULT_MAX_EPISODE_STEP = 1000000
3 |
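# Abstract interface for game environments used by the DQN example; concrete
# subclasses implement start/begin_episode/play and expose observations.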
4 | class Game(object):
5 | def __init__(self):
6 | self.total_reward = 0
7 | self.episode_reward = 0
8 | self.episode_step = 0
9 | self.max_episode_step = DEFAULT_MAX_EPISODE_STEP
10 |
11 | def start(self):
12 | raise NotImplementedError("Must Implement!")
13 |
14 | def begin_episode(self, max_episode_step):
15 | raise NotImplementedError("Must Implement!")
16 |
17 | @property
18 | def episode_terminate(self):
19 | raise NotImplementedError
20 |
21 | def get_observation(self):
22 | raise NotImplementedError
23 |
24 | @property
25 | def state_enabled(self):
26 | raise NotImplementedError
27 |
28 | def current_state(self):
29 | return self.replay_memory.latest_slice()
30 |
31 | def play(self, a):
32 | raise NotImplementedError
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.FullyConnected.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.FullyConnected}
4 | \alias{mx.symbol.FullyConnected}
5 | \title{Apply matrix multiplication to input then add a bias.}
6 | \usage{
7 | mx.symbol.FullyConnected(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to the FullyConnectedOp.}
12 |
13 | \item{weight}{Symbol
14 | Weight matrix.}
15 |
16 | \item{bias}{Symbol
17 | Bias parameter.}
18 |
19 | \item{num.hidden}{int, required
20 | Number of hidden nodes of the output.}
21 |
22 | \item{no.bias}{boolean, optional, default=False
23 | Whether to disable bias parameter.}
24 |
25 | \item{name}{string, optional
26 | Name of the resulting symbol.}
27 | }
28 | \value{
29 | out The result mx.symbol
30 | }
31 | \description{
32 | Apply matrix multiplication to input then add a bias.
33 | }
34 |
35 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.Pooling.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.Pooling}
4 | \alias{mx.symbol.Pooling}
5 | \title{Perform spatial pooling on inputs.}
6 | \usage{
7 | mx.symbol.Pooling(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to the pooling operator.}
12 |
13 | \item{kernel}{Shape(tuple), required
14 | pooling kernel size: (y, x)}
15 |
16 | \item{pool.type}{{'avg', 'max', 'sum'}, required
17 | Pooling type to be applied.}
18 |
19 | \item{stride}{Shape(tuple), optional, default=(1,1)
20 | stride: for pooling (y, x)}
21 |
22 | \item{pad}{Shape(tuple), optional, default=(0,0)
23 | pad for pooling: (y, x)}
24 |
25 | \item{name}{string, optional
26 | Name of the resulting symbol.}
27 | }
28 | \value{
29 | out The result mx.symbol
30 | }
31 | \description{
32 | Perform spatial pooling on inputs.
33 | }
34 |
35 |
--------------------------------------------------------------------------------
/docs/zh/api/python/index.md:
--------------------------------------------------------------------------------
1 | MXNet Python Package
2 | ====================
3 |
4 | This page contains all documents related to the Python package.
5 | To install the Python package, please check out the [Build and Installation Instruction](../../how_to/build.md).
6 |
7 | There are three kinds of documentation for mxnet.
8 |
9 | * [Tutorials](#tutorials) introduce a specific use case of mxnet.
10 | * [Code Examples](../../../example) example code.
11 | * [Python API Documents](#python-api-documents) documentation for specific modules, also containing reference documentation for all APIs.
12 |
13 | Tutorials
14 | ---------
15 | * [Python Overview Tutorial](tutorial.md)
16 | * [Symbolic Configuration and Execution in Pictures](symbol_in_pictures.md)
17 | * [How to Create New Operations (Layers)](../../how_to/new_op.md)
18 |
19 | Python API Documents
20 | --------------------
21 | * [High Level Model Training Related API](model.md)
22 | * [The Module API](module.md)
23 | * [NDArray API](ndarray.md)
24 | * [Symbolic API](symbol.md)
25 | * [KVStore API](kvstore.md)
26 | * [Data Loading API](io.md)
27 |
--------------------------------------------------------------------------------
/src/optimizer/sgd.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file sgd.cc
4 | * \brief sgd optimizer
5 | */
6 | #include
7 | #include "./sgd-inl.h"
8 |
9 |
10 | namespace mxnet {
11 | namespace opt {
12 |
13 | void call_sgd_mom_update_cpu(RunContext ctx, TBlob weight, const TBlob grad, TBlob mom,
14 | float lr, float wd, const SGDParam& param) {
15 | sgd_mom_update(ctx, weight, grad, mom, lr, wd, param);
16 | }
17 | void call_sgd_update_cpu(RunContext ctx, TBlob weight, const TBlob grad,
18 | float lr, float wd, const SGDParam& param) {
19 | sgd_update(ctx, weight, grad, lr, wd, param);
20 | }
21 |
22 | DMLC_REGISTER_PARAMETER(SGDParam);
23 |
24 | MXNET_REGISTER_OPTIMIZER(ccsgd, SGDOpt)
25 | .describe("Stochastic gradient descent optimizer implemented in C++.");
26 |
27 | } // namespace opt
28 | } // namespace mxnet
29 |
--------------------------------------------------------------------------------
/src/operator/l2_normalization.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file l2_normalization.cc
4 | * \brief l2 normalization operator
5 | */
6 | #include "./l2_normalization-inl.h"
7 | namespace mxnet {
8 | namespace op {
9 | template<>
10 | Operator* CreateOp<cpu>(L2NormalizationParam param) {
11 |   return new L2NormalizationOp<cpu>(param);
12 | }
13 |
14 | // DO_BIND_DISPATCH comes from static_operator_common.h
15 | Operator* L2NormalizationProp::CreateOperator(Context ctx) const {
16 | DO_BIND_DISPATCH(CreateOp, param_);
17 | }
18 |
19 | DMLC_REGISTER_PARAMETER(L2NormalizationParam);
20 |
21 | MXNET_REGISTER_OP_PROPERTY(L2Normalization, L2NormalizationProp)
22 | .describe("Set the l2 norm of each instance to a constant.")
23 | .add_argument("data", "Symbol", "Input data to the L2NormalizationOp.")
24 | .add_arguments(L2NormalizationParam::__FIELDS__());
25 | } // namespace op
26 | } // namespace mxnet
27 |
--------------------------------------------------------------------------------
/R-package/R/util.R:
--------------------------------------------------------------------------------
1 | # Internal function to check if name end with suffix
2 | mx.util.str.endswith <- function(name, suffix) {
3 | slen <- nchar(suffix)
4 | nlen <- nchar(name)
5 | if (slen > nlen) return (FALSE)
6 | nsuf <- substr(name, nlen - slen + 1, nlen)
7 | return (nsuf == suffix)
8 | }
9 |
10 | mx.util.str.startswith <- function(name, prefix) {
11 | slen <- nchar(prefix)
12 | nlen <- nchar(name)
13 | if (slen > nlen) return (FALSE)
14 | npre <- substr(name, 1, slen)
15 | return (npre == prefix)
16 | }
17 |
18 | # filter out null, keep the names
19 | mx.util.filter.null <- function(lst) {
20 | lst[!sapply(lst, is.null)]
21 | }
22 |
23 | #' Internal function to generate mxnet_generated.R
24 | #' Users do not need to call this function.
25 | #' @param path The path to the root of the package.
26 | #'
27 | #' @export
28 | mxnet.export <- function(path) {
29 | mxnet.internal.export(path.expand(path))
30 | }
31 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.Activation.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.Activation}
4 | \alias{mx.symbol.Activation}
5 | \title{Apply activation function to input. Softmax Activation is only available with CUDNN on GPU and will be computed at each location across channel if input is 4D.}
6 | \usage{
7 | mx.symbol.Activation(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to activation function.}
12 |
13 | \item{act.type}{{'relu', 'sigmoid', 'softrelu', 'tanh'}, required
14 | Activation function to be applied.}
15 |
16 | \item{name}{string, optional
17 | Name of the resulting symbol.}
18 | }
19 | \value{
20 | out The result mx.symbol
21 | }
22 | \description{
23 | Apply activation function to input. Softmax Activation is only available with CUDNN on GPU and will be computed at each location across channel if input is 4D.
24 | }
25 |
26 |
--------------------------------------------------------------------------------
/tools/caffe_converter/caffe_parse/parse_from_protobuf.py:
--------------------------------------------------------------------------------
1 | from google.protobuf import text_format
2 | import numpy as np
3 | import caffe_pb2
4 |
5 | def parse_caffemodel(filepath):
6 | '''
7 | parses the trained .caffemodel file
8 |
9 | filepath: /path/to/trained-model.caffemodel
10 |
11 | returns: layers
12 | '''
13 |     with open(filepath, 'rb') as f:
14 |         contents = f.read()
15 |
16 | netparam = caffe_pb2.NetParameter()
17 | netparam.ParseFromString(contents)
18 |
19 | layers = find_layers(netparam)
20 | return layers
21 |
22 | def find_layers(netparam):
23 | if len(netparam.layers) > 0:
24 | return netparam.layers
25 | elif len(netparam.layer) > 0:
26 | return netparam.layer
27 | else:
28 |         raise Exception("Couldn't find layers")
29 |
30 | def main():
31 | param_dict = parse_caffemodel('xxx.caffemodel')
32 |
33 | if __name__ == '__main__':
34 | main()
35 |
--------------------------------------------------------------------------------
/docs/_static/js/auto_module_index.js:
--------------------------------------------------------------------------------
1 | function auto_index(module) {
2 | $(document).ready(function () {
3 | // find all classes or functions
4 | var div_query = "div[class='section'][id='module-" + module + "']";
5 | var class_query = div_query + " dl[class='class'] > dt";
6 | var func_query = div_query + " dl[class='function'] > dt";
7 | var targets = $(class_query + ',' + func_query);
8 |
9 | var li_node = $("li a[href='#module-" + module + "']").parent();
10 |     var html = "<ul>";
11 |
12 | for (var i = 0; i < targets.length; ++i) {
13 | var id = $(targets[i]).attr('id');
14 | // remove 'mxnet.' prefix to make menus shorter
15 | var id_simple = id.replace(/^mxnet\./, '');
16 |       html += "<li><a href='#" + id;
17 |       html += "'>" + id_simple;
18 |       html += "</a></li>";
19 | }
20 |
21 |     html += "</ul>";
22 | li_node.append(html);
23 | });
24 | }
--------------------------------------------------------------------------------
/src/operator/make_loss.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file make_loss.cc
4 | * \brief special layer for propagating loss
5 | */
6 | #include "./make_loss-inl.h"
7 |
8 | namespace mxnet {
9 | namespace op {
10 | template<>
11 | Operator *CreateOp<cpu>(MakeLossParam param) {
12 |   return new MakeLossOp<cpu>(param);
13 | }
14 |
15 | Operator *MakeLossProp::CreateOperator(Context ctx) const {
16 | DO_BIND_DISPATCH(CreateOp, param_);
17 | }
18 |
19 | DMLC_REGISTER_PARAMETER(MakeLossParam);
20 |
21 | MXNET_REGISTER_OP_PROPERTY(MakeLoss, MakeLossProp)
22 | .describe("Get output from a symbol and pass 1 gradient back. "
23 | "This is used as a terminal loss if unary and binary operator "
24 | "are used to composite a loss with no declaration of backward "
25 | "dependency")
26 | .add_argument("data", "Symbol", "Input data.")
27 | .add_arguments(MakeLossParam::__FIELDS__());
28 |
29 | } // namespace op
30 | } // namespace mxnet
31 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.IdentityAttachKLSparseReg.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.IdentityAttachKLSparseReg}
4 | \alias{mx.symbol.IdentityAttachKLSparseReg}
5 | \title{Apply a sparse regularization to the output of a sigmoid activation function.}
6 | \usage{
7 | mx.symbol.IdentityAttachKLSparseReg(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data.}
12 |
13 | \item{sparseness.target}{float, optional, default=0.1
14 | The sparseness target}
15 |
16 | \item{penalty}{float, optional, default=0.001
17 | The tradeoff parameter for the sparseness penalty}
18 |
19 | \item{momentum}{float, optional, default=0.9
20 | The momentum for running average}
21 |
22 | \item{name}{string, optional
23 | Name of the resulting symbol.}
24 | }
25 | \value{
26 | out The result mx.symbol
27 | }
28 | \description{
29 | Apply a sparse regularization to the output of a sigmoid activation function.
30 | }
31 |
32 |
--------------------------------------------------------------------------------
/docs/architecture/read_code.md:
--------------------------------------------------------------------------------
1 | # Read MXNet Code
2 | - All the module interfaces are listed in [include](../../include); these
3 |   interfaces are heavily documented.
4 | - You can read the
5 |   [Doxygen Version](https://mxnet.readthedocs.org/en/latest/doxygen) of the
6 |   documentation.
7 | - Each module only depends on other modules through the header files in
8 |   [include](../../include).
9 | - The implementation of a module is in the [src](../../src) folder.
10 | - Each source file only sees the files within its own folder,
11 |   [src/common](../../src/common), and [include](../../include).
12 |
13 | Most modules are self-contained, with an interface dependency on the engine. So
14 | you are free to pick the one you are interested in and read that part.
15 |
16 | # Other resources
17 | * [Doxygen Version of C++ API](https://mxnet.readthedocs.org/en/latest/doxygen) gives a comprehensive document of C++ API.
18 |
19 | # Recommended Next Steps
20 |
21 | * [Develop and hack MXNet](http://mxnet.io/how_to/develop_and_hack.html)
--------------------------------------------------------------------------------
/src/operator/identity_attach_KL_sparse_reg.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file identity_attach_KL_sparse_reg.cc
4 | * \brief\
5 | */
6 | #include "./identity_attach_KL_sparse_reg-inl.h"
7 |
8 | namespace mxnet {
9 | namespace op {
10 | template<>
11 | Operator *CreateOp<cpu>(IdentityAttachKLSparseRegParam param) {
12 |   return new IdentityAttachKLSparseRegOp<cpu>(param);
13 | }
14 |
15 | Operator *IdentityAttachKLSparseRegProp::CreateOperator(Context ctx) const {
16 | DO_BIND_DISPATCH(CreateOp, param_);
17 | }
18 |
19 | DMLC_REGISTER_PARAMETER(IdentityAttachKLSparseRegParam);
20 |
21 | MXNET_REGISTER_OP_PROPERTY(IdentityAttachKLSparseReg, IdentityAttachKLSparseRegProp)
22 | .describe("Apply a sparse regularization to the output of a sigmoid activation function.")
23 | .add_argument("data", "Symbol", "Input data.")
24 | .add_arguments(IdentityAttachKLSparseRegParam::__FIELDS__());
25 |
26 |
27 | } // namespace op
28 | } // namespace mxnet
29 |
30 |
--------------------------------------------------------------------------------
/tests/travis/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if ! tests/travis/is_core_changed.sh
4 | then
5 | exit 0
6 | fi
7 |
8 | if [ ${TRAVIS_OS_NAME} == "osx" ]; then
9 | brew update
10 | brew tap homebrew/science
11 | brew install opencv
12 | brew install python3
13 | brew install fftw
14 | brew install libpng
15 | brew install ImageMagick
16 | if [ ${TASK} == "python_test" ]; then
17 | python -m pip install nose numpy --user `whoami`
18 | python3 -m pip install nose numpy --user `whoami`
19 | fi
20 | fi
21 |
22 | if [ ${TASK} == "lint" ]; then
23 | pip install cpplint 'pylint==1.4.4' 'astroid==1.3.6' --user `whoami`
24 | fi
25 |
26 | if [ ${TASK} == "julia" ]; then
27 | mkdir -p ~/julia
28 | curl -s -L --retry 7 "https://s3.amazonaws.com/julialang/bin/linux/x64/${JULIA_VER}/julia-${JULIA_VER}-latest-linux-x86_64.tar.gz" | tar -C ~/julia -x -z --strip-components=1 -f -
29 | export PATH="${PATH}:${HOME}/julia/bin"
30 | julia -e 'versioninfo()'
31 | fi
32 |
--------------------------------------------------------------------------------
/docs/tutorials/nlp/rnn.md:
--------------------------------------------------------------------------------
1 | # Recurrent Neural Networks
2 | You can get the source code for below example [here](https://github.com/dmlc/mxnet/tree/master/example/rnn)
3 |
4 | This folder contains RNN examples using the low-level symbol interface.
5 |
6 | ## Python
7 |
8 | - [lstm.py](lstm.py) Functions for building an LSTM network
9 | - [gru.py](gru.py) Functions for building a GRU Network
10 | - [lstm_bucketing.py](lstm_bucketing.py) PennTreeBank language model by using LSTM
11 | - [gru_bucketing.py](gru_bucketing.py) PennTreeBank language model by using GRU
12 | - [char-rnn.ipynb](char-rnn.ipynb) Notebook to demo how to train a character LSTM by using ```lstm.py```
13 |
14 |
15 | Performance Note:
16 | More ```MXNET_GPU_WORKER_NTHREADS``` may lead to better performance. For setting ```MXNET_GPU_WORKER_NTHREADS```, please refer to [Environment Variables](https://mxnet.readthedocs.org/en/latest/how_to/env_var.html).
17 |
18 | # Recommended Next Steps
19 | * [MXNet tutorials index](http://mxnet.io/tutorials/index.html)
--------------------------------------------------------------------------------
/matlab/+mxnet/private/callmxnet.m:
--------------------------------------------------------------------------------
1 | function callmxnet(func, varargin)
2 | %CALLMXNET call mxnet functions
3 |
4 | if ~libisloaded('libmxnet')
5 | cur_pwd = pwd;
6 | mxnet_root = [fileparts(mfilename('fullpath')), '/../../../'];
7 | cd(mxnet_root);
8 | mxnet_root = pwd;
9 | cd(cur_pwd);
10 | assert(exist([mxnet_root, '/lib/libmxnet.so'], 'file') == 2 || ...
11 | exist([mxnet_root, '/lib/libmxnet.dylib'], 'file') == 2 || ...
12 | exist([mxnet_root, '/lib/libmxnet.dll'], 'file') == 2, ...
13 | 'you need to build mxnet first');
14 | assert(exist([mxnet_root, '/include/mxnet/c_predict_api.h']) == 2, ...
15 | 'failed to find c_predict_api.h')
16 | addpath([mxnet_root, '/lib'])
17 | addpath([mxnet_root, '/include/mxnet'])
18 |
19 | [err, warn] = loadlibrary('libmxnet', 'c_predict_api.h');
20 | assert(isempty(err));
21 | if warn, warn, end
22 | end
23 |
24 | assert(ischar(func))
25 | ret = calllib('libmxnet', func, varargin{:});
26 | assert(ret == 0)
27 | end
28 |
--------------------------------------------------------------------------------
/python/setup.py:
--------------------------------------------------------------------------------
1 | # pylint: disable=invalid-name, exec-used
2 | """Setup mxnet package."""
3 | from __future__ import absolute_import
4 | import os
5 | from setuptools import setup
6 |
7 | # We can not import `mxnet.libinfo` in setup.py directly since mxnet/__init__.py
8 | # will be invoked, which introduces dependencies
9 | CURRENT_DIR = os.path.dirname(__file__)
10 | libinfo_py = os.path.join(CURRENT_DIR, 'mxnet/libinfo.py')
11 | libinfo = {'__file__': libinfo_py}
12 | exec(compile(open(libinfo_py, "rb").read(), libinfo_py, 'exec'), libinfo, libinfo)
13 |
14 | LIB_PATH = libinfo['find_lib_path']()
15 | __version__ = libinfo['__version__']
16 |
17 | setup(name='mxnet',
18 | version=__version__,
19 | description=open(os.path.join(CURRENT_DIR, 'README.md')).read(),
20 | install_requires=[
21 | 'numpy',
22 | ],
23 | zip_safe=False,
24 | packages=['mxnet', 'mxnet.module'],
25 | data_files=[('mxnet', [LIB_PATH[0]])],
26 | url='https://github.com/dmlc/mxnet')
27 |
--------------------------------------------------------------------------------
/scala-package/examples/scripts/run_cnntextclassification.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | MXNET_ROOT=$(cd "$(dirname $0)/../../.."; pwd)
4 | CLASS_PATH=$MXNET_ROOT/scala-package/assembly/linux-x86_64-gpu/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
5 |
6 | # which gpu card to use, -1 means cpu
7 | GPU=$1
8 | # the mr dataset path, you should put the pos and neg file in the same folder
9 | MR_DATASET_PATH=$2
10 | # the trained word2vec file path, binary or text format
11 | W2V_FILE_PATH=$3
12 | # whether the format of the word2vec file is binary, 1 means binary, 0 means text
13 | W2V_FORMAT_BIN=$4
14 | BATCH_SIZE=$5
15 | SAVE_MODEL_PATH=$6
16 |
17 | java -Xmx8G -cp $CLASS_PATH \
18 | ml.dmlc.mxnet.examples.cnnclassification.CNNTextClassification \
19 | --gpu $GPU \
20 | --mr-dataset-path $MR_DATASET_PATH \
21 | --w2v-file-path $W2V_FILE_PATH \
22 | --w2v-format-bin $W2V_FORMAT_BIN \
23 | --batch-size $BATCH_SIZE \
24 | --save-model-path $SAVE_MODEL_PATH
25 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.LRN.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.LRN}
4 | \alias{mx.symbol.LRN}
5 | \title{Apply local response normalization to the input.}
6 | \usage{
7 | mx.symbol.LRN(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to the normalization operator.}
12 |
13 | \item{alpha}{float, optional, default=0.0001
14 | value of the alpha variance scaling parameter in the normalization formula}
15 |
16 | \item{beta}{float, optional, default=0.75
17 | value of the beta power parameter in the normalization formula}
18 |
19 | \item{knorm}{float, optional, default=2
20 | value of the k parameter in normalization formula}
21 |
22 | \item{nsize}{int (non-negative), required
23 | normalization window width in elements.}
24 |
25 | \item{name}{string, optional
26 | Name of the resulting symbol.}
27 | }
28 | \value{
29 | out The result mx.symbol
30 | }
31 | \description{
32 | Apply local response normalization to the input.
33 | }
34 |
35 |
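For reference (added here as an aid, not text from the generated page; whether \(\alpha\) is additionally scaled by the window size is implementation dependent), these parameters describe the AlexNet-style local response normalization:

$$
b^{i}_{x,y} \;=\; a^{i}_{x,y} \Big/ \Bigl(k \;+\; \alpha \sum_{j=\max(0,\,i-n/2)}^{\min(N-1,\,i+n/2)} \bigl(a^{j}_{x,y}\bigr)^{2}\Bigr)^{\beta}
$$

where \(k\) is `knorm`, \(\alpha\) is `alpha`, \(\beta\) is `beta`, \(n\) is `nsize`, and \(N\) is the number of channels.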
--------------------------------------------------------------------------------
/docs/tutorials/nlp/nce_loss.md:
--------------------------------------------------------------------------------
1 | # NCE Loss
2 | You can get the source code for the examples below [here](https://github.com/dmlc/mxnet/tree/master/example/nce-loss).
3 |
4 |
5 |
6 |
7 | NCE loss is used to speed up multi-class classification when the number of classes is very large (see the sketch below).
8 |
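To build intuition, here is a tiny NumPy sketch of the idea (illustrative only; it is not the loss implemented in these examples): instead of normalizing over every class as softmax does, score the true class plus a few randomly sampled classes with a binary logistic loss.

```python
import numpy as np

def nce_style_loss(scores, target, num_negatives=5, rng=np.random):
    """Illustrative NCE-style objective: a binary logistic loss over the true
    class and a few sampled classes, instead of a softmax over all classes.
    (For simplicity the true class is not excluded from the negative sample.)"""
    num_classes = scores.shape[0]
    negatives = rng.choice(num_classes, size=num_negatives, replace=False)
    sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
    loss = -np.log(sigmoid(scores[target]))                 # true class scored as a positive
    loss -= np.log(1.0 - sigmoid(scores[negatives])).sum()  # sampled classes scored as negatives
    return loss

# Only 1 + 5 of the 100000 classes are touched per example.
print(nce_style_loss(np.random.randn(100000), target=42))
```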
9 | ## Toy example
10 |
11 | * toy_softmax.py: a multi-class example using a softmax output
12 | * toy_nce.py: a multi-class example using NCE loss
13 |
14 | ## Word2Vec
15 |
16 | * word2vec.py: a CBOW word2vec example using NCE loss
17 |
18 | You can run it with:
19 |
20 | ```
21 | ./get_text8.sh
22 | python word2vec.py
23 |
24 | ```
25 |
26 | ## LSTM
27 |
28 | * lstm_word.py: an LSTM example using NCE loss
29 |
30 | You can run it with:
31 |
32 | ```
33 | ./get_text8.sh
34 | python lstm_word.py
35 | ```
36 |
37 | ## References
38 |
39 | For more details, see [http://www.jianshu.com/p/e439b43ea464](http://www.jianshu.com/p/e439b43ea464) (in Chinese).
40 |
41 | # Recommended Next Steps
42 | * [MXNet tutorials index](http://mxnet.io/tutorials/index.html)
--------------------------------------------------------------------------------
/src/operator/regression_output.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file regression_output.cu
4 | * \brief regression output operator
5 | */
6 | #include "./regression_output-inl.h"
7 | #include "./mshadow_op.h"
8 |
9 | namespace mxnet {
10 | namespace op {
11 |
12 | template<>
13 | Operator *CreateRegressionOutputOp<gpu>(reg_enum::RegressionOutputType type,
14 | RegressionOutputParam param) {
15 | switch (type) {
16 | case reg_enum::kLinear:
17 | return new RegressionOutputOp<gpu, mshadow::op::identity, mshadow::op::minus>(param);
18 | case reg_enum::kLogistic:
19 | return new RegressionOutputOp<gpu, mshadow_op::sigmoid, mshadow::op::minus>(param);
20 | case reg_enum::kMAE:
21 | return new RegressionOutputOp<gpu, mshadow::op::identity, mshadow_op::minus_sign>(param);
22 | default:
23 | LOG(FATAL) << "unknown activation type " << type;
24 | }
25 | return NULL;
26 | }
27 | } // namespace op
28 | } // namespace mxnet
29 |
30 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.LeakyReLU.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.LeakyReLU}
4 | \alias{mx.symbol.LeakyReLU}
5 | \title{Apply activation function to input.}
6 | \usage{
7 | mx.symbol.LeakyReLU(...)
8 | }
9 | \arguments{
10 | \item{data}{Symbol
11 | Input data to activation function.}
12 |
13 | \item{act.type}{{'elu', 'leaky', 'prelu', 'rrelu'}, optional, default='leaky'
14 | Activation function to be applied.}
15 |
16 | \item{slope}{float, optional, default=0.25
17 | Init slope for the activation. (For leaky and elu only)}
18 |
19 | \item{lower.bound}{float, optional, default=0.125
20 | Lower bound of random slope. (For rrelu only)}
21 |
22 | \item{upper.bound}{float, optional, default=0.334
23 | Upper bound of random slope. (For rrelu only)}
24 |
25 | \item{name}{string, optional
26 | Name of the resulting symbol.}
27 | }
28 | \value{
29 | out The result mx.symbol
30 | }
31 | \description{
32 | Apply activation function to input.
33 | }
34 |
35 |
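For reference (an illustrative summary, not text from the generated page), with \(\alpha\) standing for `slope`:

$$
\mathrm{leaky}(x) = \begin{cases} x & x > 0 \\ \alpha x & x \le 0 \end{cases}
\qquad
\mathrm{elu}(x) = \begin{cases} x & x > 0 \\ \alpha\,(e^{x}-1) & x \le 0 \end{cases}
$$

For `rrelu`, \(\alpha\) is instead drawn uniformly from [`lower.bound`, `upper.bound`] during training; for `prelu`, \(\alpha\) is learned.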
--------------------------------------------------------------------------------
/scala-package/assembly/linux-x86_64-cpu/src/main/assembly/assembly.xml:
--------------------------------------------------------------------------------
1 | <assembly>
2 |   <id>full</id>
3 |   <formats>
4 |     <format>jar</format>
5 |   </formats>
6 |   <includeBaseDirectory>false</includeBaseDirectory>
7 |   <dependencySets>
8 |     <dependencySet>
9 |       <includes>
10 |         <include>*:*:jar</include>
11 |       </includes>
12 |       <outputDirectory>/</outputDirectory>
13 |       <useProjectArtifact>true</useProjectArtifact>
14 |       <unpack>true</unpack>
15 |       <scope>runtime</scope>
16 |     </dependencySet>
17 |     <dependencySet>
18 |       <outputDirectory>lib/native</outputDirectory>
19 |       <outputFileNameMapping>libmxnet-scala.so</outputFileNameMapping>
20 |       <useProjectArtifact>false</useProjectArtifact>
21 |       <useStrictFiltering>false</useStrictFiltering>
22 |       <unpack>false</unpack>
23 |       <includes>
24 |         <include>ml.dmlc.mxnet:libmxnet-scala-linux-x86_64-cpu:so</include>
25 |       </includes>
26 |     </dependencySet>
27 |   </dependencySets>
28 | </assembly>
29 |
--------------------------------------------------------------------------------
/scala-package/assembly/linux-x86_64-gpu/src/main/assembly/assembly.xml:
--------------------------------------------------------------------------------
1 | <assembly>
2 |   <id>full</id>
3 |   <formats>
4 |     <format>jar</format>
5 |   </formats>
6 |   <includeBaseDirectory>false</includeBaseDirectory>
7 |   <dependencySets>
8 |     <dependencySet>
9 |       <includes>
10 |         <include>*:*:jar</include>
11 |       </includes>
12 |       <outputDirectory>/</outputDirectory>
13 |       <useProjectArtifact>true</useProjectArtifact>
14 |       <unpack>true</unpack>
15 |       <scope>runtime</scope>
16 |     </dependencySet>
17 |     <dependencySet>
18 |       <outputDirectory>lib/native</outputDirectory>
19 |       <outputFileNameMapping>libmxnet-scala.so</outputFileNameMapping>
20 |       <useProjectArtifact>false</useProjectArtifact>
21 |       <useStrictFiltering>false</useStrictFiltering>
22 |       <unpack>false</unpack>
23 |       <includes>
24 |         <include>ml.dmlc.mxnet:libmxnet-scala-linux-x86_64-gpu:so</include>
25 |       </includes>
26 |     </dependencySet>
27 |   </dependencySets>
28 | </assembly>
29 |
--------------------------------------------------------------------------------
/R-package/man/mx.opt.adadelta.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/optimizer.R
3 | \name{mx.opt.adadelta}
4 | \alias{mx.opt.adadelta}
5 | \title{Create an AdaDelta optimizer with respective parameters.}
6 | \usage{
7 | mx.opt.adadelta(rho = 0.9, epsilon = 1e-05, wd = 0, rescale.grad = 1,
8 | clip_gradient = NULL)
9 | }
10 | \arguments{
11 | \item{rho}{float, default=0.90
12 | Decay rate for both squared gradients and delta x.}
13 |
14 | \item{epsilon}{float, default=1e-5
15 | The small constant for numerical stability, as described in the paper.}
16 |
17 | \item{wd}{float, default=0.0
18 | L2 regularization coefficient added to all the weights.}
19 |
20 | \item{rescale.grad}{float, default=1.0
21 | rescaling factor of gradient.}
22 |
23 | \item{clip_gradient}{float, optional
24 | clip gradient in range [-clip_gradient, clip_gradient].}
25 | }
26 | \description{
27 | AdaDelta optimizer as described in Zeiler, M. D. (2012).
28 | *ADADELTA: An adaptive learning rate method.*
29 | http://arxiv.org/abs/1212.5701
30 | }
31 |
32 |
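For reference, the update rule from Zeiler (2012) that these parameters drive is (summarized here as an aid; `rescale.grad`, `clip_gradient`, and `wd` preprocess the gradient \(g_t\) in the usual way before the update):

$$
\begin{aligned}
E[g^2]_t &= \rho\, E[g^2]_{t-1} + (1-\rho)\, g_t^2,\\
\Delta x_t &= -\,\frac{\sqrt{E[\Delta x^2]_{t-1} + \epsilon}}{\sqrt{E[g^2]_t + \epsilon}}\; g_t,\\
E[\Delta x^2]_t &= \rho\, E[\Delta x^2]_{t-1} + (1-\rho)\, \Delta x_t^2,\qquad
x_{t+1} = x_t + \Delta x_t,
\end{aligned}
$$

with \(\rho\) = `rho` and \(\epsilon\) = `epsilon`.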
--------------------------------------------------------------------------------
/scala-package/assembly/osx-x86_64-cpu/src/main/assembly/assembly.xml:
--------------------------------------------------------------------------------
1 | <assembly>
2 |   <id>full</id>
3 |   <formats>
4 |     <format>jar</format>
5 |   </formats>
6 |   <includeBaseDirectory>false</includeBaseDirectory>
7 |   <dependencySets>
8 |     <dependencySet>
9 |       <includes>
10 |         <include>*:*:jar</include>
11 |       </includes>
12 |       <outputDirectory>/</outputDirectory>
13 |       <useProjectArtifact>true</useProjectArtifact>
14 |       <unpack>true</unpack>
15 |       <scope>runtime</scope>
16 |     </dependencySet>
17 |     <dependencySet>
18 |       <outputDirectory>lib/native</outputDirectory>
19 |       <outputFileNameMapping>libmxnet-scala.jnilib</outputFileNameMapping>
20 |       <useProjectArtifact>false</useProjectArtifact>
21 |       <useStrictFiltering>false</useStrictFiltering>
22 |       <unpack>false</unpack>
23 |       <includes>
24 |         <include>ml.dmlc.mxnet:libmxnet-scala-osx-x86_64-cpu:jnilib</include>
25 |       </includes>
26 |     </dependencySet>
27 |   </dependencySets>
28 | </assembly>
29 |
--------------------------------------------------------------------------------
/scala-package/spark/src/main/scala/ml/dmlc/mxnet/spark/utils/RepIterator.scala:
--------------------------------------------------------------------------------
1 | package ml.dmlc.mxnet.spark.utils
2 |
3 | import scala.collection.Iterator
4 |
5 | /**
6 | * Repeatable Iterator useful in mapPartitions
7 | * @author Yuance.Li
8 | */
9 | class RepIterator[T](iteratorInternal: Iterator[T], repetition: Int = 1) extends Iterator[T] {
10 | assert(repetition > 0)
11 | var counter = repetition - 1
12 | var (currentIter, backupIter) = iteratorInternal.duplicate
13 |
14 | override def hasNext: Boolean = {
15 | currentIter.hasNext || counter > 0
16 | }
17 |
18 | override def next(): T = {
19 | assert(hasNext)
20 | if(currentIter.hasNext) {
21 | currentIter.next()
22 | } else if (counter > 0) {
23 | counter = counter - 1
24 | val iterTuple = backupIter.duplicate
25 | currentIter = iterTuple._1
26 | backupIter = iterTuple._2
27 | currentIter.next()
28 | } else {
29 | throw new NoSuchElementException("No element in this collection")
30 | }
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/example/cpp/image-classification/Makefile:
--------------------------------------------------------------------------------
1 | # Special thanks to https://github.com/pertusa for the Makefile
2 | CFLAGS=-std=c++11 -Wno-unknown-pragmas -Wall
3 |
4 | # Added for openblas
5 | # export OPENBLAS_ROOT=/usr/local/opt/openblas
6 |
7 | # CFLAGS+= -I${OPENBLAS_ROOT}/include
8 | # LDFLAGS=-L${OPENBLAS_ROOT}/lib -lopenblas
9 |
10 | # Added for opencv
11 | CFLAGS+= `pkg-config --cflags opencv`
12 | LDFLAGS+=`pkg-config --libs opencv`
13 |
14 | # Added for mxnet
15 | export MXNET_ROOT=`pwd`/../../../../mxnet
16 |
17 | CFLAGS+= -I$(MXNET_ROOT)/include
18 | LDFLAGS+=$(MXNET_ROOT)/lib/libmxnet.so
19 |
20 | image-classification-predict: image-classification-predict.o
21 | g++ -O3 -o image-classification-predict image-classification-predict.o $(LDFLAGS)
22 |
23 | image-classification-predict.o: image-classification-predict.cc
24 | g++ -O3 -c image-classification-predict.cc ${CFLAGS}
25 |
26 | clean:
27 | rm image-classification-predict
28 | rm -f *.d *.o
29 |
30 | lint:
31 | python ../../../dmlc-core/scripts/lint.py mxnet "cpp" ./
32 |
--------------------------------------------------------------------------------
/example/speech-demo/io_func/feat_readers/reader_bvec.py:
--------------------------------------------------------------------------------
1 | import struct
2 | import array
3 | import numpy
4 | from common import *
5 |
6 | class bvecReader(BaseReader):
7 |
8 | def __init__(self, featureFile, labelFile, byteOrder=None):
9 | BaseReader.__init__(self, featureFile, labelFile, byteOrder)
10 |
11 | def Read(self):
12 |
13 | with open(self.featureFile,"rb") as f:
14 |
15 | dt = numpy.dtype([('numSamples',(numpy.int32,1)),('dim',(numpy.int32,1))])
16 | header = numpy.fromfile(f,dt.newbyteorder('>'),count=1)
17 |
18 | numSamples = header[0]['numSamples']
19 | dim = header[0]['dim']
20 |
21 | print 'Num samples = {}'.format(numSamples)
22 | print 'dim = {}'.format(dim)
23 |
24 | dt = numpy.dtype([('sample',(numpy.float32,dim))])
25 | samples = numpy.fromfile(f,dt.newbyteorder('>'),count=numSamples)
26 |
27 | self._markDone()
28 |
29 | return samples[:]['sample'], ReadLabel(self.labelFile)
30 |
--------------------------------------------------------------------------------
/docs/api/python/index.md:
--------------------------------------------------------------------------------
1 | # MXNet - Python API
2 |
3 | ## Introduction
4 | MXNet supports the Python programming language. The MXNet Python package brings flexible and efficient GPU
5 | computing and state-of-the-art deep learning to Python.
6 |
7 | - It enables you to write seamless tensor/matrix computation with multiple GPUs in Python (a short example follows below).
8 | - It also enables you to construct and customize state-of-the-art deep learning models in Python,
9 | and apply them to tasks such as image classification and data science challenges.
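As a minimal sketch of what that looks like with the NDArray interface (using the CPU context so it runs on any build):

```python
import mxnet as mx

a = mx.nd.ones((2, 3), ctx=mx.cpu())  # a 2x3 NDArray of ones on the CPU
b = a * 2 + 1                         # element-wise arithmetic runs on the chosen device
print(b.asnumpy())                    # copy back to NumPy for inspection
```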
10 |
11 |
12 | ## Python API Reference
13 | * [Module API](module.md) a flexible high-level interface for training neural networks
14 | * [Model API](model.md) an alternate simple high-level interface for training neural networks
15 | * [Symbolic API](symbol.md) for operations on NDArrays to assemble neural networks from layers
16 | * [IO Data Loading API](io.md) for parsing and loading data
17 | * [NDArray API](ndarray.md) for vector/matrix/tensor operations
18 | * [KVStore API](kvstore.md) for multi-GPU and multi-host distributed training
19 |
--------------------------------------------------------------------------------
/src/ndarray/ndarray_function.cc:
--------------------------------------------------------------------------------
1 | /*!
2 | * Copyright (c) 2015 by Contributors
3 | * \file ndarray_function_cpu.cc
4 | * \brief CPU Implementation of ndarray function.
5 | */
6 |
7 | // this will be invoked by gcc and compile CPU version
8 | #include "./ndarray_function.h"
9 | #include "./ndarray_function-inl.h"
10 |
11 | namespace mxnet {
12 | namespace ndarray {
13 | template<>
14 | void Copy<cpu, cpu>(const TBlob &from, TBlob *to,
15 | Context from_ctx, Context to_ctx,
16 | RunContext ctx) {
17 | MSHADOW_TYPE_SWITCH(to->type_flag_, DType, {
18 | if (to->type_flag_ == from.type_flag_) {
19 | mshadow::Copy(to->FlatTo1D<cpu, DType>(),
20 | from.FlatTo1D<cpu, DType>());
21 | } else {
22 | MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
23 | to->FlatTo1D<cpu, DType>() =
24 | mshadow::expr::tcast<DType>(from.FlatTo1D<cpu, SrcDType>());
25 | })
26 | }
27 | })
28 | }
29 | } // namespace ndarray
30 | } // namespace mxnet
31 |
--------------------------------------------------------------------------------
/R-package/man/mx.symbol.LogisticRegressionOutput.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mxnet_generated.R
3 | \name{mx.symbol.LogisticRegressionOutput}
4 | \alias{mx.symbol.LogisticRegressionOutput}
5 | \title{Use logistic regression for the final output; this is applied to the final output of a net.
6 | Logistic regression is suitable for binary classification or probability prediction tasks.}
7 | \usage{
8 | mx.symbol.LogisticRegressionOutput(...)
9 | }
10 | \arguments{
11 | \item{data}{Symbol
12 | Input data to function.}
13 |
14 | \item{label}{Symbol
15 | Input label to function.}
16 |
17 | \item{grad.scale}{float, optional, default=1
18 | Scale the gradient by a float factor}
19 |
20 | \item{name}{string, optional
21 | Name of the resulting symbol.}
22 | }
23 | \value{
24 | out The result mx.symbol
25 | }
26 | \description{
27 | Use logistic regression for the final output; this is applied to the final output of a net.
28 | Logistic regression is suitable for binary classification or probability prediction tasks.
29 | }
30 |
31 |
--------------------------------------------------------------------------------
/matlab/tests/prepare_data.m:
--------------------------------------------------------------------------------
1 | %% download cifar10 dataset
2 | system('wget https://www.cs.toronto.edu/~kriz/cifar-10-matlab.tar.gz')
3 | system('tar -xzvf cifar-10-matlab.tar.gz')
4 | load cifar-10-batches-mat/test_batch.mat
5 |
6 | %% convert test dataset of cifar10, and save
7 | X = reshape(data', [32, 32, 3, 10000]);
8 | X = permute(X, [2 1 3 4]);
9 | Y = labels + 1;
10 |
11 |
12 | save cifar10-test X Y
13 | %% preview one picture
14 | imshow(imresize(X(:,:,:,2), [128, 128]))
15 |
16 | %%
17 |
18 | !wget http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
19 | !wget http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
20 | !gunzip t10k-images-idx3-ubyte.gz
21 | !gunzip t10k-labels-idx1-ubyte.gz
22 |
23 | %%
24 |
25 | fid = fopen('t10k-images-idx3-ubyte', 'r');
26 | d = fread(fid, inf, '*uint8');
27 | fclose(fid);
28 | X = reshape(d(17:end), [28 28 1 10000]);
29 | X = permute(X, [2 1 3 4]);
30 |
31 | fid = fopen('t10k-labels-idx1-ubyte', 'r');
32 | d = fread(fid, inf, '*uint8');
33 | fclose(fid);
34 | Y = d(9:end) + 1;
35 |
36 | save mnist-test X Y
37 |
--------------------------------------------------------------------------------
/R-package/demo/basic_executor.R:
--------------------------------------------------------------------------------
1 | require(mxnet)
2 | # TODO(KK, tong): decide between a setter/getter interface (which breaks immutability)
3 | # and the current set-and-move interface. We need to make a choice between:
4 | # exec_old = exec
5 | # exec$arg.arrays = some.array  # this also changes exec_old$arg.arrays, and the user won't be aware of it
6 | # v.s.
7 | # exec_old = exec
8 | # exec = mx.exec.set.arg.arrays(exec, some.array)
9 | # exec_old is moved; the user gets an error when using exec_old
10 |
11 | A = mx.symbol.Variable('A')
12 | B = mx.symbol.Variable('B')
13 | C = A + B
14 | a = mx.nd.zeros(c(2), mx.cpu())
15 | b = mx.nd.array(as.array(c(1, 2)), mx.cpu())
16 |
17 | exec = mxnet:::mx.symbol.bind(
18 | symbol=C,
19 | ctx=mx.cpu(),
20 | arg.arrays = list(A=a, B=b),
21 | aux.arrays = list(),
22 | grad.reqs = list("null", "null"))
23 |
24 | # calculate outputs
25 | mx.exec.forward(exec)
26 | out = as.array(exec$outputs[[1]])
27 | print(out)
28 |
29 | mx.exec.update.arg.arrays(exec, list(A=b, B=b))
30 | mx.exec.forward(exec)
31 |
32 | out = as.array(exec$outputs[[1]])
33 | print(out)
34 |
35 |
--------------------------------------------------------------------------------
/tests/travis/is_core_changed.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # this is a util script to test whether the "core" of
4 | # mxnet has changed. Please modify the regex patterns here
5 | # to ensure the components are covered if you add new "core"
6 | # components to mxnet
7 |
8 | # temporarily disable this b/c the OS X tests are failing mysteriously
9 | exit 0
10 |
11 | # DEBUG
12 | echo "Files changed in this PR includes:"
13 | echo "**********************************"
14 | git diff --name-only HEAD^
15 | echo "**********************************"
16 |
17 | # we ignore examples, and docs
18 | core_patterns=(
19 | '^dmlc-core'
20 | '^matlab'
21 | '^plugin'
22 | '^python'
23 | '^src'
24 | '^tools'
25 | '^R-package'
26 | '^amalgamation'
27 | '^include'
28 | '^mshadow'
29 | '^ps-lite'
30 | '^scala-package'
31 | '^tests'
32 | )
33 |
34 | for pat in ${core_patterns[@]}; do
35 | if git diff --name-only HEAD^ | grep "$pat"
36 | then
37 | exit
38 | fi
39 | done
40 |
41 | echo "I think we are good to skip this travis ci run now"
42 | exit 1 # means nothing has changed
43 |
--------------------------------------------------------------------------------
/example/autoencoder/mnist_sae.py:
--------------------------------------------------------------------------------
1 | # pylint: skip-file
2 | import mxnet as mx
3 | import numpy as np
4 | import logging
5 | import data
6 | from autoencoder import AutoEncoderModel
7 |
8 | if __name__ == '__main__':
9 | # set to INFO to see less information during training
10 | logging.basicConfig(level=logging.DEBUG)
11 | ae_model = AutoEncoderModel(mx.gpu(0), [784,500,500,2000,10], pt_dropout=0.2,
12 | internal_act='relu', output_act='relu')
13 |
14 | X, _ = data.get_mnist()
15 | train_X = X[:60000]
16 | val_X = X[60000:]
17 |
18 | ae_model.layerwise_pretrain(train_X, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
19 | lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
20 | ae_model.finetune(train_X, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
21 | lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
22 | ae_model.save('mnist_pt.arg')
23 | ae_model.load('mnist_pt.arg')
24 | print "Training error:", ae_model.eval(train_X)
25 | print "Validation error:", ae_model.eval(val_X)
26 |
--------------------------------------------------------------------------------