├── .gitignore ├── .gitmodules ├── 3rdparty ├── bin │ └── README.md ├── include │ └── README.md └── lib │ └── README.md ├── Dragon ├── CMakeLists.txt ├── include │ ├── core │ │ ├── common.h │ │ ├── context.h │ │ ├── context_cuda.h │ │ ├── graph.h │ │ ├── graph_gradient.h │ │ ├── mixedmem.h │ │ ├── operator.h │ │ ├── operator_gradient.h │ │ ├── operator_schema.h │ │ ├── registry.h │ │ ├── tensor.h │ │ ├── typeid.h │ │ ├── types.h │ │ └── workspace.h │ ├── operators │ │ ├── activation │ │ │ ├── dropout_op.h │ │ │ ├── elu_op.h │ │ │ ├── prelu_op.h │ │ │ ├── relu_op.h │ │ │ ├── selu_op.h │ │ │ ├── sigmoid_op.h │ │ │ ├── softmax_op.h │ │ │ └── tanh_op.h │ │ ├── arithmetic │ │ │ ├── add_op.h │ │ │ ├── bias_add_op.h │ │ │ ├── clip_op.h │ │ │ ├── div_op.h │ │ │ ├── dot_op.h │ │ │ ├── eltwise_op.h │ │ │ ├── exp_op.h │ │ │ ├── gram_matrix_op.h │ │ │ ├── inner_product_op.h │ │ │ ├── log_op.h │ │ │ ├── matmul_op.h │ │ │ ├── mul_op.h │ │ │ ├── pow_op.h │ │ │ ├── scale_op.h │ │ │ ├── square_op.h │ │ │ └── sub_op.h │ │ ├── cast │ │ │ └── float2half_op.h │ │ ├── control_flow │ │ │ ├── compare_op.h │ │ │ ├── copy_op.h │ │ │ └── scan_op.h │ │ ├── loss │ │ │ ├── l1_loss_op.h │ │ │ ├── l2_loss_op.h │ │ │ ├── sigmoid_cross_entropy_op.h │ │ │ ├── smooth_l1_loss_op.h │ │ │ ├── softmax_cross_entropy_op.h │ │ │ ├── sparse_softmax_cross_entropy_op.h │ │ │ └── sparse_softmax_focal_loss_op.h │ │ ├── misc │ │ │ ├── accuracy_op.h │ │ │ ├── gradient_op.h │ │ │ ├── image_data_op.h │ │ │ ├── initialize_op.h │ │ │ └── python_op.h │ │ ├── mpi │ │ │ ├── base_mpi_op.h │ │ │ ├── mpi_broadcast_op.h │ │ │ └── mpi_gather_op.h │ │ ├── ndarray │ │ │ ├── arange_op.h │ │ │ ├── argmax_op.h │ │ │ ├── argmin_op.h │ │ │ ├── concat_op.h │ │ │ ├── crop_op.h │ │ │ ├── expand_dims_op.h │ │ │ ├── flatten_op.h │ │ │ ├── gather_op.h │ │ │ ├── one_hot_op.h │ │ │ ├── pad_op.h │ │ │ ├── random_pick_op.h │ │ │ ├── reduce_op.h │ │ │ ├── repeat_op.h │ │ │ ├── reshape_op.h │ │ │ ├── shape_op.h │ │ │ ├── slice_op.h │ │ │ ├── stack_op.h │ │ │ ├── tile_op.h │ │ │ └── transpose_op.h │ │ ├── norm │ │ │ ├── batch_norm_op.h │ │ │ ├── batch_renorm_op.h │ │ │ ├── instance_norm_op.h │ │ │ └── l2_norm_op.h │ │ ├── recurrent │ │ │ └── lstm_unit_op.h │ │ ├── update │ │ │ ├── adam_update_op.h │ │ │ ├── collective_update_op.h │ │ │ ├── moving_average_op.h │ │ │ ├── nesterov_update_op.h │ │ │ ├── rmsprop_update_op.h │ │ │ ├── sgd_update_op.h │ │ │ └── update_op_base.h │ │ └── vision │ │ │ ├── bilinear_resize_op.h │ │ │ ├── conv_op.h │ │ │ ├── conv_op_base.h │ │ │ ├── conv_transpose_op.h │ │ │ ├── dense_concat_op.h │ │ │ ├── lrn_op.h │ │ │ ├── nn_resize_op.h │ │ │ ├── pooling_op.h │ │ │ ├── roi_align_op.h │ │ │ └── roi_pooling_op.h │ └── utils │ │ ├── caffemodel.h │ │ ├── cast.h │ │ ├── cuda_device.h │ │ ├── cudnn_device.h │ │ ├── filler.h │ │ ├── logging.h │ │ ├── math_functions.h │ │ ├── omp_alternative.h │ │ ├── op_kernel.h │ │ ├── proto_utils.h │ │ ├── sse_alternative.h │ │ ├── sse_device.h │ │ ├── string.h │ │ └── thread.h ├── modules │ └── python │ │ ├── CMakeLists.txt │ │ ├── dragon.cc │ │ ├── dragon.h │ │ └── py_mpi.h ├── python │ ├── dragon │ │ ├── __init__.py │ │ ├── config.py │ │ ├── core │ │ │ ├── __init__.py │ │ │ ├── gradient_maker.py │ │ │ ├── mpi.py │ │ │ ├── scope.py │ │ │ ├── tensor.py │ │ │ ├── utils.py │ │ │ └── workspace.py │ │ ├── docs │ │ │ ├── Makefile │ │ │ ├── _extensions │ │ │ │ └── mathmacro.py │ │ │ ├── _templates │ │ │ │ ├── layout.html │ │ │ │ ├── localtoc.html │ │ │ │ ├── navbar.html │ │ │ │ └── search.html │ │ │ ├── conf.py │ │ │ ├── 
contents │ │ │ │ ├── config.rst │ │ │ │ ├── core.rst │ │ │ │ ├── core │ │ │ │ │ ├── gradient_maker.rst │ │ │ │ │ ├── mpi.rst │ │ │ │ │ ├── scope.rst │ │ │ │ │ ├── tensor.rst │ │ │ │ │ └── workspace.rst │ │ │ │ ├── io.rst │ │ │ │ ├── io │ │ │ │ │ ├── blob_fetcher.rst │ │ │ │ │ ├── data_batch.rst │ │ │ │ │ ├── data_reader.rst │ │ │ │ │ └── data_transformer.rst │ │ │ │ ├── memonger.rst │ │ │ │ ├── operators.rst │ │ │ │ ├── operators │ │ │ │ │ ├── activation.rst │ │ │ │ │ ├── arithmetic.rst │ │ │ │ │ ├── cast.rst │ │ │ │ │ ├── contrib │ │ │ │ │ │ └── rcnn.rst │ │ │ │ │ ├── control_flow.rst │ │ │ │ │ ├── custom │ │ │ │ │ │ ├── data_process.rst │ │ │ │ │ │ ├── minibatch.rst │ │ │ │ │ │ └── vec_mult.rst │ │ │ │ │ ├── data.rst │ │ │ │ │ ├── initializer.rst │ │ │ │ │ ├── loss.rst │ │ │ │ │ ├── misc.rst │ │ │ │ │ ├── mpi.rst │ │ │ │ │ ├── ndarray.rst │ │ │ │ │ ├── norm.rst │ │ │ │ │ ├── recurrent.rst │ │ │ │ │ └── vision.rst │ │ │ │ ├── ops.rst │ │ │ │ ├── tools.rst │ │ │ │ ├── tools │ │ │ │ │ ├── db.rst │ │ │ │ │ ├── im2db.rst │ │ │ │ │ └── summary_writer.rst │ │ │ │ ├── updaters.rst │ │ │ │ ├── vm.rst │ │ │ │ └── vm │ │ │ │ │ ├── caffe.rst │ │ │ │ │ ├── caffe │ │ │ │ │ ├── layer.rst │ │ │ │ │ ├── misc.rst │ │ │ │ │ ├── net.rst │ │ │ │ │ └── solver.rst │ │ │ │ │ ├── theano.rst │ │ │ │ │ └── theano │ │ │ │ │ ├── compile.rst │ │ │ │ │ └── tensor.rst │ │ │ ├── index.rst │ │ │ ├── install.rst │ │ │ └── make.bat │ │ ├── import_c_apis.py │ │ ├── io │ │ │ ├── __init__.py │ │ │ ├── blob_fetcher.py │ │ │ ├── data_batch.py │ │ │ ├── data_reader.py │ │ │ ├── data_transformer.py │ │ │ └── utils.py │ │ ├── memonger.py │ │ ├── operators │ │ │ ├── __init__.py │ │ │ ├── activation.py │ │ │ ├── arithmetic.py │ │ │ ├── cast.py │ │ │ ├── contrib │ │ │ │ ├── __init__.py │ │ │ │ └── rcnn │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── ops.py │ │ │ ├── control_flow.py │ │ │ ├── custom │ │ │ │ ├── __init__.py │ │ │ │ ├── data_process.py │ │ │ │ ├── minibatch.py │ │ │ │ └── vec_mult.py │ │ │ ├── data.py │ │ │ ├── initializer.py │ │ │ ├── loss.py │ │ │ ├── misc.py │ │ │ ├── mpi.py │ │ │ ├── ndarray.py │ │ │ ├── norm.py │ │ │ ├── recurrent.py │ │ │ └── vision.py │ │ ├── ops.py │ │ ├── protos │ │ │ ├── __init__.py │ │ │ ├── dragon.proto │ │ │ └── dragon_pb2.py │ │ ├── tools │ │ │ ├── __init__.py │ │ │ ├── board │ │ │ │ ├── __init__.py │ │ │ │ └── app.py │ │ │ ├── db.py │ │ │ ├── im2db.py │ │ │ └── summary_writer.py │ │ ├── updaters.py │ │ └── vm │ │ │ ├── __init__.py │ │ │ ├── caffe │ │ │ ├── __init__.py │ │ │ ├── coord_map.py │ │ │ ├── layer.py │ │ │ ├── layers │ │ │ │ ├── __init__.py │ │ │ │ ├── common.py │ │ │ │ ├── data.py │ │ │ │ ├── loss.py │ │ │ │ ├── mpi.py │ │ │ │ ├── neuron.py │ │ │ │ └── vision.py │ │ │ ├── misc.py │ │ │ ├── model_libs.py │ │ │ ├── net.py │ │ │ ├── net_spec.py │ │ │ ├── proto │ │ │ │ ├── __init__.py │ │ │ │ ├── caffe.proto │ │ │ │ └── caffe_pb2.py │ │ │ ├── solver.py │ │ │ ├── timer.py │ │ │ └── utils.py │ │ │ ├── tensorflow │ │ │ ├── __init__.py │ │ │ ├── client │ │ │ │ ├── __init__.py │ │ │ │ ├── client_lib.py │ │ │ │ └── session.py │ │ │ ├── contrib │ │ │ │ ├── __init__.py │ │ │ │ ├── framework │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── ops │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── variables.py │ │ │ │ ├── layers │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── initializers.py │ │ │ │ │ └── layers.py │ │ │ │ ├── learn │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── datasets │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── base.py │ │ │ │ │ │ └── mnist.py │ │ │ │ └── slim │ │ │ │ │ └── __init__.py │ │ │ ├── examples │ │ │ │ 
├── __init__.py │ │ │ │ └── tutorials │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── mnist │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── input_data.py │ │ │ ├── framework │ │ │ │ ├── __init__.py │ │ │ │ ├── constant_op.py │ │ │ │ ├── dtypes.py │ │ │ │ ├── framework_lib.py │ │ │ │ ├── ops.py │ │ │ │ ├── random_seed.py │ │ │ │ └── tensor_shape.py │ │ │ ├── layers │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── convolutional.py │ │ │ │ ├── core.py │ │ │ │ ├── layers.py │ │ │ │ ├── normalization.py │ │ │ │ ├── pooling.py │ │ │ │ └── utils.py │ │ │ ├── ops │ │ │ │ ├── __init__.py │ │ │ │ ├── array_ops.py │ │ │ │ ├── control_flow_ops.py │ │ │ │ ├── dtypes.py │ │ │ │ ├── gradients_impl.py │ │ │ │ ├── init_ops.py │ │ │ │ ├── math_ops.py │ │ │ │ ├── nn.py │ │ │ │ ├── nn_impl.py │ │ │ │ ├── nn_ops.py │ │ │ │ ├── random_ops.py │ │ │ │ ├── standard_ops.py │ │ │ │ ├── var_scope.py │ │ │ │ └── variables.py │ │ │ ├── protobuf │ │ │ │ ├── __init__.py │ │ │ │ ├── config.proto │ │ │ │ ├── config_pb2.py │ │ │ │ ├── types.proto │ │ │ │ └── types_pb2.py │ │ │ ├── training │ │ │ │ ├── __init__.py │ │ │ │ ├── optimizer.py │ │ │ │ ├── saver.py │ │ │ │ ├── train.py │ │ │ │ └── training.py │ │ │ └── util │ │ │ │ ├── __init__.py │ │ │ │ ├── deprecation.py │ │ │ │ └── nest.py │ │ │ └── theano │ │ │ ├── __init__.py │ │ │ ├── compile │ │ │ ├── __init__.py │ │ │ ├── function.py │ │ │ ├── scan.py │ │ │ └── sharedvalue.py │ │ │ ├── configdefaults.py │ │ │ ├── gradient.py │ │ │ └── tensor │ │ │ ├── __init__.py │ │ │ ├── basic.py │ │ │ ├── extra_ops.py │ │ │ └── nnet.py │ ├── requirements.txt │ └── setup.py └── src │ ├── contrib │ └── rcnn │ │ ├── bbox_utils.cc │ │ ├── bbox_utils.cu │ │ ├── bbox_utils.h │ │ ├── proposal_op.cc │ │ └── proposal_op.h │ ├── core │ ├── context.cc │ ├── graph.cc │ ├── graph_gradient.cc │ ├── mixedmem.cc │ ├── operator.cc │ ├── operator_schema.cc │ └── workspace.cc │ ├── operators │ ├── activation │ │ ├── cudnn_elu_op.cc │ │ ├── cudnn_relu_op.cc │ │ ├── cudnn_sigmoid_op.cc │ │ ├── cudnn_softmax_op.cc │ │ ├── cudnn_tanh_op.cc │ │ ├── dropout_op.cc │ │ ├── elu_op.cc │ │ ├── prelu_op.cc │ │ ├── relu_op.cc │ │ ├── selu_op.cc │ │ ├── sigmoid_op.cc │ │ ├── softmax_op.cc │ │ └── tanh_op.cc │ ├── arithmetic │ │ ├── add_op.cc │ │ ├── bias_add_op.cc │ │ ├── clip_op.cc │ │ ├── div_op.cc │ │ ├── dot_op.cc │ │ ├── eltwise_op.cc │ │ ├── exp_op.cc │ │ ├── gram_matrix_op.cc │ │ ├── inner_product_op.cc │ │ ├── log_op.cc │ │ ├── matmul_op.cc │ │ ├── mul_op.cc │ │ ├── pow_op.cc │ │ ├── radd_op .cc │ │ ├── rdiv_op.cc │ │ ├── rmul_op.cc │ │ ├── rsub_op.cc │ │ ├── scale_op.cc │ │ ├── square_op.cc │ │ └── sub_op.cc │ ├── cast │ │ └── float2half.cc │ ├── control_flow │ │ ├── compare_op.cc │ │ ├── copy_op.cc │ │ └── scan_op.cc │ ├── loss │ │ ├── l1_loss_op.cc │ │ ├── l2_loss_op.cc │ │ ├── sigmoid_cross_entropy_op.cc │ │ ├── smooth_l1_loss_op.cc │ │ ├── softmax_cross_entropy_op.cc │ │ ├── sparse_softmax_cross_entropy_op.cc │ │ └── sparse_softmax_focal_loss_op.cc │ ├── misc │ │ ├── accuracy_op.cc │ │ ├── gradient_op.cc │ │ ├── image_data_op.cc │ │ ├── initialize_op.cc │ │ └── python_op.cc │ ├── mpi │ │ ├── mpi_broadcast_op.cc │ │ └── mpi_gather_op.cc │ ├── ndarray │ │ ├── arange_op.cc │ │ ├── argmax_op.cc │ │ ├── argmin_op.cc │ │ ├── concat_op.cc │ │ ├── crop_op.cc │ │ ├── expand_dims_op.cc │ │ ├── flatten_op.cc │ │ ├── gather_op.cc │ │ ├── one_hot_op.cc │ │ ├── pad_op.cc │ │ ├── random_pick_op.cc │ │ ├── reduce_op.cc │ │ ├── repeat_op.cc │ │ ├── reshape_op.cc │ │ ├── shape_op.cc │ │ ├── slice_op.cc │ │ ├── stack_op.cc │ │ ├── 
tile_op.cc │ │ └── transpose_op.cc │ ├── norm │ │ ├── batch_norm_op.cc │ │ ├── batch_renorm_op.cc │ │ ├── cudnn_batch_norm_op.cc │ │ ├── fused_batch_norm.cc │ │ ├── instance_norm_op.cc │ │ └── l2_norm_op.cc │ ├── recurrent │ │ └── lstm_unit_op.cc │ ├── update │ │ ├── adam_update_op.cc │ │ ├── collective_update_op.cc │ │ ├── moving_average_op.cc │ │ ├── nesterov_update_op.cc │ │ ├── rmsprop_update_op.cc │ │ ├── sgd_update_op.cc │ │ └── update_op_base.cc │ └── vision │ │ ├── bilinear_resize_op.cc │ │ ├── conv2d_op.cc │ │ ├── conv2d_transpose_op.cc │ │ ├── conv_op_base.cc │ │ ├── cudnn_conv2d_op.cc │ │ ├── cudnn_conv2d_transpose_op.cc │ │ ├── cudnn_lrn_op.cc │ │ ├── cudnn_pooling2d_op.cc │ │ ├── dense_concat_op.cc │ │ ├── lrn_op.cc │ │ ├── nn_resize_op.cc │ │ ├── pooling2d_op.cc │ │ ├── roi_align_op.cc │ │ └── roi_pooling_op.cc │ ├── protos │ ├── caffemodel.proto │ └── dragon.proto │ └── utils │ ├── cudnn_device.cc │ ├── logging.cc │ ├── math_functions.cc │ ├── math_functions.cu │ ├── op_kernel.cc │ ├── op_kernel.cu │ └── sse_alternative.cc ├── LICENSE ├── README.md └── examples ├── GA3C ├── Config.py ├── Environment.py ├── GA3C.py ├── GameManager.py ├── NetworkVP.py ├── ProcessAgent.py ├── ProcessStats.py ├── README.md ├── Server.py ├── ThreadDynamicAdjustment.py ├── ThreadPredictor.py └── ThreadTrainer.py ├── README.md ├── Seg-FCN ├── README.md ├── colors │ └── pascal_voc.act ├── data │ ├── demo │ │ └── 001763.jpg │ └── seg11valid.txt ├── infer.py ├── score.py ├── surgery.py ├── transplants │ └── VGG16 │ │ ├── net.prototxt │ │ ├── new_net.prototxt │ │ └── solve.py ├── voc-fcn16s │ ├── caffemodel-url │ ├── net.py │ ├── solve.py │ ├── solver.prototxt │ ├── test.py │ ├── train.prototxt │ └── val.prototxt ├── voc-fcn32s │ ├── caffemodel-url │ ├── net.py │ ├── solve.py │ ├── solver.prototxt │ ├── test.py │ ├── train.prototxt │ └── val.prototxt ├── voc-fcn8s-atonce │ ├── caffemodel-url │ ├── net.py │ ├── solve.py │ ├── solver.prototxt │ ├── test.py │ ├── train.prototxt │ └── val.prototxt ├── voc-fcn8s │ ├── caffemodel-url │ ├── deploy.prototxt │ ├── net.py │ ├── solve.py │ ├── solver.prototxt │ ├── test.py │ ├── train.prototxt │ └── val.prototxt └── voc_layers.py └── cifar10 ├── README.md ├── cifar10_full_deploy.prototxt ├── cifar10_full_solver.prototxt ├── cifar10_full_train_test.prototxt ├── cifar10_quick_deploy.prototxt ├── cifar10_quick_solver.prototxt ├── cifar10_quick_train_test.prototxt ├── data ├── README.md └── demo │ └── cat.jpg ├── gen_lmdb.py ├── infer.py ├── solve_full.py └── solve_quick.py /.gitignore: -------------------------------------------------------------------------------- 1 | ## General 2 | 3 | # Compiled Object files 4 | *.slo 5 | *.lo 6 | *.o 7 | *.cuo 8 | 9 | # Compiled Dynamic libraries 10 | # *.so 11 | *.dylib 12 | 13 | # Compiled Static libraries 14 | *.lai 15 | *.la 16 | #*.a 17 | 18 | # Compiled python 19 | *.pyc 20 | 21 | # Compiled MATLAB 22 | *.mex* 23 | 24 | # IPython notebook checkpoints 25 | .ipynb_checkpoints 26 | 27 | # Editor temporaries 28 | *.swp 29 | *~ 30 | 31 | # Sublime Text settings 32 | *.sublime-workspace 33 | *.sublime-project 34 | 35 | # Eclipse Project settings 36 | *.*project 37 | .settings 38 | 39 | # QtCreator files 40 | *.user 41 | 42 | # PyCharm files 43 | .idea 44 | 45 | # OSX dir files 46 | .DS_Store -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "DragonBoard"] 2 | path = DragonBoard 3 | url = 
git://github.com/neopenx/DragonBoard.git 4 | -------------------------------------------------------------------------------- /3rdparty/bin/README.md: -------------------------------------------------------------------------------- 1 | This directory holds (*after you download them*): 2 | - msmpi.dll / mpiexec.exe / smpd.exe (For ``mpi``, Windows Only) 3 | - cudnn64_*.dll (For ``cudnn``, Windows Only) 4 | - libopenblas.dll / libquadmath-0.dll / libgfortran-3.dll / libgcc_s_seh-1.dll (For ``cblas``, Windows Only) 5 | -------------------------------------------------------------------------------- /3rdparty/include/README.md: -------------------------------------------------------------------------------- 1 | This directory holds (*after you download them*): 2 | - mpi/*.h (For ``mpi``, Windows/Linux) 3 | - google/protobuf/*.h (For ``google protobuf``, Windows Only) 4 | - cudnn.h (For ``cudnn``, Windows Only) 5 | - cblas.h and relevant header files (For ``cblas``, Windows/Linux) 6 | - getopt.h and unistd.h (For ``platform-relevant`` header files, Windows Only) 7 | -------------------------------------------------------------------------------- /3rdparty/lib/README.md: -------------------------------------------------------------------------------- 1 | This directory holds (*after you download them*): 2 | - msmpi.lib/libmpi.so (For ``mpi``, Windows/Linux) 3 | - libprotobuf.lib (For ``google protobuf``, Windows Only) 4 | - cudnn.lib (For ``cudnn``, Windows Only) 5 | - libopenblas.lib (For ``cblas``, Windows Only) 6 | - python27.lib/python35.lib/python36.lib (For ``python27/35/36``, Windows Only) 7 | -------------------------------------------------------------------------------- /Dragon/include/core/common.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_CORE_COMMON_H_ 8 | #define DRAGON_CORE_COMMON_H_ 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include "core/types.h" 24 | #include "protos/dragon.pb.h" 25 | #include "utils/logging.h" 26 | 27 | namespace dragon { 28 | 29 | using std::string; 30 | using std::queue; 31 | using std::stack; 32 | using std::vector; 33 | using std::pair; 34 | using std::set; 35 | using std::map; 36 | using std::mutex; 37 | using std::unique_ptr; 38 | using std::shared_ptr; 39 | 40 | template 41 | using Map = std::unordered_map; 42 | 43 | template 44 | using Set = std::unordered_set; 45 | 46 | #define CONCATENATE_IMPL(s1, s2) s1##s2 47 | #define CONCATENATE(s1, s2) CONCATENATE_IMPL(s1,s2) 48 | #define ANONYMOUS_VARIABLE(str) CONCATENATE(str, __LINE__) 49 | #define NOT_IMPLEMENTED LOG(FATAL) << "This module has not been implemented yet." 
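// NOTE: CONCATENATE routes through CONCATENATE_IMPL so that __LINE__ is
// expanded *before* token pasting; ANONYMOUS_VARIABLE(str) therefore yields an
// identifier unique to its source line. A minimal usage sketch, assuming a
// hypothetical `Registerer` type that is not part of this header:
//
//   static Registerer ANONYMOUS_VARIABLE(g_reg)(/* args */);
//   // written on line 42, this expands to: static Registerer g_reg42(/* args */);
//
// which lets registration macros be stamped out repeatedly in one translation
// unit without name collisions.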
50 | 51 | } // namespace dragon 52 | 53 | #endif // DRAGON_CORE_COMMON_H_ -------------------------------------------------------------------------------- /Dragon/include/core/graph_gradient.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_CORE_GRAPH_GRADIENT_H_ 8 | #define DRAGON_CORE_GRAPH_GRADIENT_H_ 9 | 10 | #include "core/common.h" 11 | 12 | namespace dragon { 13 | 14 | typedef pair > > CheckTuple; 15 | 16 | class GraphGradientMaker { 17 | public: 18 | GraphGradientMaker(const GraphDef& forward_def, 19 | const vector& targets) 20 | : cur_op_idx_(0), 21 | forward_def_(forward_def) { 22 | for (auto& target : targets) targets_set_.insert(target); 23 | } 24 | 25 | GraphDef Make(); 26 | 27 | inline void SetTerms(const Map& terms) { terms_ = terms; } 28 | inline void SetOperatorPrefix(const string& prefix) { op_prefix_ = prefix; } 29 | inline void SetOperatorSuffix(const string& suffix) { op_suffix_ = suffix; } 30 | inline void AddExternalGrad(const string& name) { external_grads_.insert(name); } 31 | 32 | private: 33 | CheckTuple CheckMissingGrad(OperatorDef* forward_op); 34 | string GetOperatorName(); 35 | 36 | GraphDef forward_def_, new_def_; 37 | Map terms_, inputs_to_grads_; 38 | Set targets_set_, blacklist_set_, external_grads_; 39 | string op_prefix_, op_suffix_; 40 | int cur_op_idx_; 41 | }; 42 | 43 | } // namespace dragon 44 | 45 | #endif -------------------------------------------------------------------------------- /Dragon/include/core/mixedmem.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_CORE_MIXEDMEM_H_ 8 | #define DRAGON_CORE_MIXEDMEM_H_ 9 | 10 | #include "typeid.h" 11 | #include "context.h" 12 | #include "context_cuda.h" 13 | 14 | namespace dragon { 15 | 16 | class MixedMemory { 17 | public: 18 | enum State { UNINITIALIZED, STATE_AT_CPU, STATE_AT_CUDA, SWITCHED, SYNCED }; 19 | MixedMemory() 20 | : state_(UNINITIALIZED), 21 | cpu_ptr_(nullptr), cuda_ptr_(nullptr), 22 | nbytes_(0) {} 23 | MixedMemory(const TypeMeta& meta, const size_t nbytes) 24 | : state_(UNINITIALIZED), meta_(meta), 25 | cpu_ptr_(nullptr), cuda_ptr_(nullptr), 26 | nbytes_(nbytes) {} 27 | ~MixedMemory(); 28 | 29 | const void* cpu_data(); 30 | const void* cuda_data(); 31 | void* mutable_cpu_data(); 32 | void* mutable_cuda_data(); 33 | #ifdef WITH_CUDA 34 | void async_cuda_data(const cudaStream_t& stream); 35 | #endif 36 | 37 | void SwitchToDevice(); 38 | 39 | inline size_t nbytes() const { return nbytes_; } 40 | 41 | inline void* cpu_ptr() { state_ = STATE_AT_CPU; return cpu_ptr_; } 42 | inline void* cuda_ptr() { state_ = STATE_AT_CUDA; return cuda_ptr_; } 43 | 44 | inline State state() { return state_; } 45 | 46 | private: 47 | void ToCUDA(); 48 | void ToCPU(); 49 | 50 | void* cpu_ptr_, *cuda_ptr_; 51 | State state_; 52 | size_t nbytes_; 53 | TypeMeta meta_; 54 | }; 55 | 56 | } // namespace dragon 57 | 58 | #endif 59 | -------------------------------------------------------------------------------- /Dragon/include/core/types.h: 
-------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_CORE_TYPES_H_ 8 | #define DRAGON_CORE_TYPES_H_ 9 | 10 | namespace dragon { 11 | 12 | #ifdef _MSC_VER 13 | 14 | typedef struct __declspec(align(2)) { 15 | unsigned short x; 16 | } float16; 17 | 18 | typedef struct __declspec(align(4)) { 19 | unsigned int x; 20 | } float32; 21 | 22 | #else 23 | 24 | typedef struct { 25 | unsigned short x; 26 | } __attribute__((aligned(2))) float16; 27 | 28 | typedef struct { 29 | unsigned int x; 30 | } __attribute__((aligned(4))) float32; 31 | 32 | #endif 33 | 34 | } // namespace dragon 35 | 36 | #endif // DRAGON_CORE_TYPES_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/activation/dropout_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ACTIVATION_DROPOUT_OP_H_ 8 | #define DRAGON_OPERATORS_ACTIVATION_DROPOUT_OP_H_ 9 | 10 | #include "core/operator.h" 11 | #include "utils/math_functions.h" 12 | 13 | namespace dragon { 14 | 15 | template 16 | class DropoutOp final : public Operator { 17 | public: 18 | DropoutOp(const OperatorDef& op_def, Workspace* ws) 19 | : Operator(op_def, ws), 20 | use_scale(OperatorBase::GetSingleArg("scale", true)) { 21 | GET_ARGUMENT_WITH_DESC(float, prob, 0.5); 22 | } 23 | 24 | void RunOnDevice() override; 25 | template void RunWithType(); 26 | 27 | protected: 28 | DECLARE_ARGUMENT_WITH_DESC(float, prob); 29 | bool use_scale; 30 | Tensor* mask; 31 | }; 32 | 33 | template 34 | class DropoutGradientOp final : public Operator { 35 | public: 36 | DropoutGradientOp(const OperatorDef& op_def, Workspace* ws) 37 | : Operator(op_def, ws), 38 | use_scale(OperatorBase::GetSingleArg("scale", true)) { 39 | GET_ARGUMENT_WITH_DESC(float, prob, 0.5); 40 | DISABLE_SHARE_GRADIENT; 41 | } 42 | 43 | void RunOnDevice() override; 44 | template void RunWithType(); 45 | 46 | protected: 47 | DECLARE_ARGUMENT_WITH_DESC(float, prob); 48 | bool use_scale; 49 | Tensor* mask; 50 | }; 51 | 52 | DEFINE_ARGUMENT_WITH_DESC(float, DropoutOp, prob); 53 | DEFINE_ARGUMENT_WITH_DESC(float, DropoutGradientOp, prob); 54 | 55 | } // namespace dragon 56 | 57 | #endif // DRAGON_OPERATORS_ACTIVATION_DROPOUT_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/activation/prelu_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ACTIVATION_PRELU_OP_H_ 8 | #define DRAGON_OPERATORS_ACTIVATION_PRELU_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class PReluOp : public Operator { 16 | public: 17 | PReluOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | channel_shared(OperatorBase::GetSingleArg("channel_shared", false)), 20 | 
data_format(OperatorBase::GetSingleArg("data_format", "NCHW")) {} 21 | 22 | void RunOnDevice() override; 23 | template void RunWithType(); 24 | 25 | protected: 26 | bool channel_shared; 27 | string data_format; 28 | TIndex channels, dim; 29 | }; 30 | 31 | template 32 | class PReluGradientOp : public Operator { 33 | public: 34 | PReluGradientOp(const OperatorDef& op_def, Workspace* ws) 35 | : Operator(op_def, ws), 36 | channel_shared(OperatorBase::GetSingleArg("channel_shared", false)), 37 | data_format(OperatorBase::GetSingleArg("data_format", "NCHW")) {} 38 | 39 | void RunOnDevice() override; 40 | template void RunWithType(); 41 | 42 | protected: 43 | bool channel_shared; 44 | string data_format; 45 | TIndex channels, dim; 46 | Tensor* bcast_dw, *multiplier; 47 | }; 48 | 49 | } // namespace dragon 50 | 51 | #endif // DRAGON_OPERATORS_ACTIVATION_PRELU_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/activation/selu_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ACTIVATION_SELU_OP_H_ 8 | #define DRAGON_OPERATORS_ACTIVATION_SELU_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class SEluOp : public Operator { 16 | public: 17 | SEluOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws) {} 19 | 20 | void RunOnDevice() override; 21 | template void RunWithType(); 22 | }; 23 | 24 | template 25 | class SEluGradientOp : public Operator { 26 | public: 27 | SEluGradientOp(const OperatorDef& op_def, Workspace* ws) 28 | : Operator(op_def, ws) { 29 | DISABLE_SHARE_GRADIENT; 30 | } 31 | 32 | void RunOnDevice() override; 33 | template void RunWithType(); 34 | }; 35 | 36 | } // namespace dragon 37 | 38 | #endif // DRAGON_OPERATORS_ACTIVATION_SELU_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/arithmetic/bias_add_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ARITHMETIC_BIAS_ADD_OP_H_ 8 | #define DRAGON_OPERATORS_ARITHMETIC_BIAS_ADD_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class BiasAddOp : public Operator { 16 | public: 17 | BiasAddOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | data_format(OperatorBase::GetSingleArg("data_format", "NCHW")) {} 20 | 21 | void RunOnDevice() override; 22 | template void RunWithType(); 23 | 24 | protected: 25 | TIndex outer_dim, dim, inner_dim; 26 | string data_format; 27 | Tensor* bias_multiplier; 28 | }; 29 | 30 | template 31 | class BiasAddGradientOp final : public Operator { 32 | public: 33 | BiasAddGradientOp(const OperatorDef& op_def, Workspace* ws) 34 | : Operator(op_def, ws), 35 | data_format(OperatorBase::GetSingleArg("data_format", "NCHW")) { 36 | DISABLE_SHARE_GRADIENT; 37 | } 38 | 39 | void RunOnDevice() override; 40 | template void RunWithType(); 41 | 42 | protected: 43 | int outer_dim, dim, inner_dim; 44 | string data_format; 45 | Tensor* 
bias_multiplier; 46 | }; 47 | 48 | } // namespace dragon 49 | 50 | #endif // DRAGON_OPERATORS_ARITHMETIC_BIAS_ADD_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/arithmetic/clip_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ARITHMETIC_CLIP_OP_H_ 8 | #define DRAGON_OPERATORS_ARITHMETIC_CLIP_OP_H_ 9 | 10 | #include 11 | #include "core/operator.h" 12 | 13 | namespace dragon { 14 | 15 | template 16 | class ClipOp final : public Operator { 17 | public: 18 | ClipOp(const OperatorDef& op_def, Workspace* ws) 19 | : Operator(op_def, ws), 20 | low(OperatorBase::GetSingleArg("low", -FLT_MAX)), 21 | high(OperatorBase::GetSingleArg("high", FLT_MAX)) {} 22 | 23 | void RunOnDevice() override; 24 | template void RunWithType(); 25 | 26 | protected: 27 | float low, high; 28 | Tensor* mask; 29 | }; 30 | 31 | template 32 | class ClipGradientOp final : public Operator { 33 | public: 34 | USE_SIMPLE_CTOR_DTOR(ClipGradientOp); 35 | 36 | void RunOnDevice() override; 37 | template void RunWithType(); 38 | 39 | protected: 40 | Tensor* mask; 41 | }; 42 | 43 | } // namespace dragon 44 | 45 | #endif // DRAGON_OPERATORS_ARITHMETIC_CLIP_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/arithmetic/dot_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ARITHMETIC_DOT_OP_H_ 8 | #define DRAGON_OPERATORS_ARITHMETIC_DOT_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class DotOp final : public Operator { 16 | public: 17 | DotOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | transA(OperatorBase::GetSingleArg("TransA", false)), 20 | transB(OperatorBase::GetSingleArg("TransB", false)) {} 21 | 22 | void RunOnDevice() override; 23 | template void DotRunWithType(); 24 | template void GemmRunWithType(); 25 | template void GemvRunWithType(); 26 | 27 | protected: 28 | bool transA, transB; 29 | TIndex M, K1, K2, N1, N2; 30 | }; 31 | 32 | template 33 | class DotGradientOp final : public Operator { 34 | public: 35 | DotGradientOp(const OperatorDef& op_def, Workspace* ws) 36 | : Operator(op_def, ws), 37 | transA(OperatorBase::GetSingleArg("TransA", false)), 38 | transB(OperatorBase::GetSingleArg("TransB", false)) {} 39 | 40 | void ShareGradient() override; 41 | void RunOnDevice() override; 42 | template void DotRunWithType(); 43 | template void GemmRunWithType(); 44 | template void GemvRunWithType(); 45 | 46 | protected: 47 | bool transA, transB; 48 | TIndex M, K1, K2, N1, N2; 49 | }; 50 | 51 | } // namespace dragon 52 | 53 | #endif // DRAGON_OPERATORS_ARITHMETIC_DOT_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/arithmetic/exp_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // 
Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ARITHMETIC_EXP_OP_H_ 8 | #define DRAGON_OPERATORS_ARITHMETIC_EXP_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class ExpOp final : public Operator { 16 | public: 17 | USE_SIMPLE_CTOR_DTOR(ExpOp); 18 | 19 | void RunOnDevice() override; 20 | template void RunWithType(); 21 | }; 22 | 23 | template 24 | class ExpGradientOp final : public Operator { 25 | public: 26 | USE_SIMPLE_CTOR_DTOR(ExpGradientOp); 27 | 28 | void RunOnDevice() override; 29 | template void RunWithType(); 30 | }; 31 | 32 | } // namespace dragon 33 | 34 | #endif // DRAGON_OPERATORS_ARITHMETIC_EXP_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/arithmetic/gram_matrix_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ARITHMETIC_GRAM_MATRIX_OP_H_ 8 | #define DRAGON_OPERATORS_ARITHMETIC_GRAM_MATRIX_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class GramMatrixOp final : public Operator { 16 | public: 17 | GramMatrixOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | axis(OperatorBase::GetSingleArg("axis", 1)) {} 20 | 21 | void RunOnDevice() override; 22 | template void RunWithType(); 23 | 24 | protected: 25 | TIndex axis, outer_dim, dim, inner_dim; 26 | TIndex x_offset, y_offset; 27 | }; 28 | 29 | template 30 | class GramMatrixGradientOp final : public Operator { 31 | public: 32 | GramMatrixGradientOp(const OperatorDef& op_def, Workspace* ws) 33 | : Operator(op_def, ws), 34 | axis(OperatorBase::GetSingleArg("axis", 1)) {} 35 | 36 | void RunOnDevice() override; 37 | template void RunWithType(); 38 | 39 | protected: 40 | TIndex axis, outer_dim, dim, inner_dim; 41 | TIndex x_offset, y_offset; 42 | }; 43 | 44 | } // namespace dragon 45 | 46 | #endif // DRAGON_OPERATORS_ARITHMETIC_GRAM_MATRIX_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/arithmetic/inner_product_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ARITHMETIC_INNER_PRODUCT_OP_H_ 8 | #define DRAGON_OPERATORS_ARITHMETIC_INNER_PRODUCT_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class InnerProductOp: public Operator { 16 | public: 17 | InnerProductOp(const OperatorDef& op_def, Workspace *ws) 18 | : Operator(op_def, ws), 19 | axis(OperatorBase::GetSingleArg("axis", 1)), 20 | num_output(OperatorBase::GetSingleArg("num_output", 0)), 21 | transW(OperatorBase::GetSingleArg("TransW", true)) {} 22 | 23 | void RunOnDevice(); 24 | template void TransRunWithType(); 25 | template void NoTransRunWithType(); 26 | 27 | protected: 28 | TIndex axis, num_output, M, K; 29 | bool transW; 30 | Tensor* bias_multiplier; 31 | }; 32 | 33 | template 34 | class InnerProductGradientOp final : public Operator { 35 | public: 36 | 
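// NOTE: inferred reading, not stated by this header -- the arguments appear to
// follow the Caffe-style fully-connected contract: the input is flattened to an
// [M, K] matrix at `axis`, `num_output` gives the output width N, and `TransW`
// (default true) selects whether the weight is stored as [N, K] (computing
// Y = X * W^T) or as [K, N] (computing Y = X * W).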
InnerProductGradientOp(const OperatorDef& op_def, Workspace *ws) 37 | : Operator(op_def, ws), 38 | axis(OperatorBase::GetSingleArg("axis", 1)), 39 | num_output(OperatorBase::GetSingleArg("num_output", 0)), 40 | transW(OperatorBase::GetSingleArg("TransW", true)) {} 41 | 42 | void RunOnDevice() override; 43 | template void RunWithType(); 44 | 45 | protected: 46 | TIndex axis, num_output, M, K; 47 | bool transW; 48 | Tensor* bias_multiplier; 49 | }; 50 | 51 | } // namespace dragon 52 | 53 | #endif // DRAGON_OPERATORS_ARITHMETIC_INNER_PRODUCT_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/arithmetic/log_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ARITHMETIC_LOG_OP_H_ 8 | #define DRAGON_OPERATORS_ARITHMETIC_LOG_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class LogOp final : public Operator { 16 | public: 17 | USE_SIMPLE_CTOR_DTOR(LogOp); 18 | 19 | void RunOnDevice() override; 20 | template void RunWithType(); 21 | }; 22 | 23 | template 24 | class LogGradientOp final : public Operator { 25 | public: 26 | USE_SIMPLE_CTOR_DTOR(LogGradientOp); 27 | 28 | void RunOnDevice() override; 29 | template void RunWithType(); 30 | }; 31 | 32 | } // namespace dragon 33 | 34 | #endif // DRAGON_OPERATORS_ARITHMETIC_LOG_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/arithmetic/matmul_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ARITHMETIC_MATMUL_OP_H_ 8 | #define DRAGON_OPERATORS_ARITHMETIC_MATMUL_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class MatmulOp final : public Operator { 16 | public: 17 | MatmulOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | transA(OperatorBase::GetSingleArg("TransA", false)), 20 | transB(OperatorBase::GetSingleArg("TransB", false)) {} 21 | 22 | void RunOnDevice() override; 23 | template void RunWithType(); 24 | 25 | protected: 26 | bool transA, transB; 27 | TIndex n, x1_offset, x2_offset, y_offset; 28 | TIndex M, K1, K2, N; 29 | }; 30 | 31 | template 32 | class MatmulGradientOp final : public Operator { 33 | public: 34 | MatmulGradientOp(const OperatorDef& op_def, Workspace* ws) 35 | : Operator(op_def, ws), 36 | transA(OperatorBase::GetSingleArg("TransA", false)), 37 | transB(OperatorBase::GetSingleArg("TransB", false)) {} 38 | 39 | void ShareGradient() override; 40 | void RunOnDevice() override; 41 | template void RunWithType(); 42 | 43 | protected: 44 | bool transA, transB; 45 | TIndex n, x1_offset, x2_offset, y_offset; 46 | TIndex M, K1, K2, N; 47 | }; 48 | 49 | } // namespace dragon 50 | 51 | #endif // DRAGON_OPERATORS_ARITHMETIC_MATMUL_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/arithmetic/pow_op.h: -------------------------------------------------------------------------------- 1 | // 
-------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ARITHMETIC_POW_OP_H_ 8 | #define DRAGON_OPERATORS_ARITHMETIC_POW_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class PowOp: public Operator { 16 | public: 17 | PowOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | scale(OperatorBase::GetSingleArg("scale", 1.0)), 20 | shift(OperatorBase::GetSingleArg("shift", 0.0)), 21 | power(OperatorBase::GetSingleArg("power", 1.0)) { 22 | power_scale = power * scale; 23 | } 24 | 25 | void RunOnDevice() override; 26 | template void RunWithType(); 27 | 28 | protected: 29 | float scale, shift, power, power_scale; 30 | }; 31 | 32 | template 33 | class PowGradientOp final : public Operator { 34 | public: 35 | PowGradientOp(const OperatorDef& op_def, Workspace* ws) 36 | : Operator(op_def, ws), 37 | scale(OperatorBase::GetSingleArg("scale", 1.0)), 38 | shift(OperatorBase::GetSingleArg("shift", 0.0)), 39 | power(OperatorBase::GetSingleArg("power", 1.0)) { 40 | power_scale = power * scale; 41 | } 42 | 43 | void RunOnDevice() override; 44 | template void RunWithType(); 45 | 46 | protected: 47 | float scale, shift, power, power_scale; 48 | }; 49 | 50 | } // namespace dragon 51 | 52 | #endif // DRAGON_OPERATORS_ARITHMETIC_POW_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/arithmetic/scale_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ARITHMETIC_SCALE_OP_H_ 8 | #define DRAGON_OPERATORS_ARITHMETIC_SCALE_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class ScaleOp : public Operator { 16 | public: 17 | ScaleOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | axis(OperatorBase::GetSingleArg("axis", 1)), 20 | num_axes(OperatorBase::GetSingleArg("num_axes", 1)) {} 21 | 22 | void RunOnDevice() override; 23 | template void RunWithType(); 24 | 25 | protected: 26 | TIndex axis, start_axis, num_axes; 27 | TIndex inner_dim; 28 | Tensor* bias_multiplier; 29 | }; 30 | 31 | template 32 | class ScaleGradientOp final : public Operator { 33 | public: 34 | ScaleGradientOp(const OperatorDef& op_def, Workspace* ws) 35 | : Operator(op_def, ws), 36 | axis(OperatorBase::GetSingleArg("axis", 1)), 37 | num_axes(OperatorBase::GetSingleArg("num_axes", -1)) {} 38 | 39 | void RunOnDevice() override; 40 | template void BiasRunWithType(); 41 | template void ScaleRunWithType(); 42 | template void RunWithType(); 43 | 44 | protected: 45 | TIndex axis, start_axis, num_axes; 46 | TIndex outer_dim, inner_dim, scale_dim, sum_dim, dim; 47 | Tensor* bias_multiplier, *sum_multiplier; 48 | Tensor sum_result; 49 | }; 50 | 51 | } // namespace dragon 52 | 53 | #endif // DRAGON_OPERATORS_ARITHMETIC_SCALE_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/arithmetic/square_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // 
Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_ARITHMETIC_SQUARE_OP_H_ 8 | #define DRAGON_OPERATORS_ARITHMETIC_SQUARE_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class SquareOp final : public Operator { 16 | public: 17 | USE_SIMPLE_CTOR_DTOR(SquareOp); 18 | 19 | void RunOnDevice() override; 20 | template void RunWithType(); 21 | }; 22 | 23 | template 24 | class SquareGradientOp final : public Operator { 25 | public: 26 | USE_SIMPLE_CTOR_DTOR(SquareGradientOp); 27 | 28 | void RunOnDevice() override; 29 | template void RunWithType(); 30 | }; 31 | 32 | } // namespace dragon 33 | 34 | #endif // DRAGON_OPERATORS_ARITHMETIC_SQUARE_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/cast/float2half_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_CAST_FLOAT2HALF_OP_H_ 8 | #define DRAGON_OPERATORS_CAST_FLOAT2HALF_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class FloatToHalfOp final : public Operator { 16 | public: 17 | USE_SIMPLE_CTOR_DTOR(FloatToHalfOp); 18 | void RunOnDevice() override; 19 | }; 20 | 21 | } // namespace dragon 22 | 23 | #endif // DRAGON_OPERATORS_CAST_FLOAT2HALF_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/control_flow/compare_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_CONTROL_FLOW_COMPARE_OP_H_ 8 | #define DRAGON_OPERATORS_CONTROL_FLOW_COMPARE_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class CompareOp final : public Operator { 16 | public: 17 | CompareOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | operation(OperatorBase::GetSingleArg("operation", "NONE")) {} 20 | 21 | void RunOnDevice() override; 22 | template void EqualRunWithType(); 23 | 24 | protected: 25 | string operation; 26 | }; 27 | 28 | } // namespace dragon 29 | 30 | #endif // DRAGON_OPERATORS_CONTROL_FLOW_COMPARE_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/control_flow/copy_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_CONTROL_FLOW_COPY_OP_H_ 8 | #define DRAGON_OPERATORS_CONTROL_FLOW_COPY_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class CopyOp final : public Operator { 16 | public: 17 | USE_SIMPLE_CTOR_DTOR(CopyOp); 18 | void RunOnDevice() override; 19 | template void RunWithType(); 20 | }; 21 | 22 | } // namespace dragon 23 | 24 | #endif // 
DRAGON_OPERATORS_CONTROL_FLOW_COPY_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/loss/l1_loss_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_LOSS_L1_LOSS_OP_H_ 8 | #define DRAGON_OPERATORS_LOSS_L1_LOSS_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class L1LossOp : public Operator { 16 | public: 17 | L1LossOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | normalization(OperatorBase::GetSingleArg("normalization", "BATCH_SIZE")) {} 20 | 21 | void RunOnDevice() override; 22 | template void RunWithType(); 23 | 24 | protected: 25 | Tensor* diff; 26 | string normalization; 27 | }; 28 | 29 | template 30 | class L1LossGradientOp final : public Operator { 31 | public: 32 | L1LossGradientOp(const OperatorDef& op_def, Workspace* ws) 33 | : Operator(op_def, ws), 34 | normalization(OperatorBase::GetSingleArg("normalization", "BATCH_SIZE")) {} 35 | 36 | void ShareGradient() override; 37 | void RunOnDevice() override; 38 | template void RunWithType(); 39 | 40 | protected: 41 | Tensor* diff; 42 | string normalization; 43 | }; 44 | 45 | } // namespace dragon 46 | 47 | #endif // DRAGON_OPERATORS_LOSS_L1_LOSS_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/loss/l2_loss_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_LOSS_L2_LOSS_OP_H_ 8 | #define DRAGON_OPERATORS_LOSS_L2_LOSS_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class L2LossOp : public Operator { 16 | public: 17 | L2LossOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | normalization(OperatorBase::GetSingleArg("normalization", "BATCH_SIZE")) {} 20 | 21 | void RunOnDevice() override; 22 | template void RunWithType(); 23 | 24 | protected: 25 | Tensor* diff; 26 | string normalization; 27 | }; 28 | 29 | template 30 | class L2LossGradientOp final : public Operator { 31 | public: 32 | L2LossGradientOp(const OperatorDef& op_def, Workspace* ws) 33 | : Operator(op_def, ws), 34 | normalization(OperatorBase::GetSingleArg("normalization", "BATCH_SIZE")) {} 35 | 36 | void ShareGradient() override; 37 | void RunOnDevice() override; 38 | template void RunWithType(); 39 | 40 | protected: 41 | Tensor* diff; 42 | string normalization; 43 | }; 44 | 45 | } // namespace dragon 46 | 47 | #endif // DRAGON_OPERATORS_LOSS_L2_LOSS_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/loss/sigmoid_cross_entropy_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_LOSS_SIGMOID_CROSS_ENTROPY_OP_H_ 8 | #define 
DRAGON_OPERATORS_LOSS_SIGMOID_CROSS_ENTROPY_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class SigmoidCrossEntropyOp final : public Operator { 16 | public: 17 | SigmoidCrossEntropyOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | normalization(OperatorBase::GetSingleArg("normalization", "VALID")) {} 20 | 21 | void RunOnDevice() override; 22 | template void RunWithType(); 23 | 24 | protected: 25 | Tensor valid, losses; 26 | string normalization; 27 | }; 28 | 29 | template 30 | class SigmoidCrossEntropyGradientOp final : public Operator { 31 | public: 32 | SigmoidCrossEntropyGradientOp(const OperatorDef& op_def, Workspace* ws) 33 | : Operator(op_def, ws), 34 | normalization(OperatorBase::GetSingleArg("normalization", "VALID")) {} 35 | 36 | void RunOnDevice() override; 37 | template void RunWithType(); 38 | 39 | protected: 40 | Tensor valid; 41 | string normalization; 42 | }; 43 | 44 | } // namespace dragon 45 | 46 | #endif // DRAGON_OPERATORS_LOSS_SIGMOID_CROSS_ENTROPY_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/loss/smooth_l1_loss_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_LOSS_SMOOTH_L1_LOSS_OP_H_ 8 | #define DRAGON_OPERATORS_LOSS_SMOOTH_L1_LOSS_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class SmoothL1LossOp final : public Operator { 16 | public: 17 | SmoothL1LossOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | sigma2(OperatorBase::GetSingleArg("sigma", 1.0)), 20 | normalization(OperatorBase::GetSingleArg("normalization", "BATCH_SIZE")) { 21 | sigma2 *= sigma2; 22 | } 23 | 24 | void RunOnDevice() override; 25 | template void RunWithType(); 26 | 27 | protected: 28 | float sigma2; 29 | Tensor* diff, *error; 30 | string normalization; 31 | }; 32 | 33 | template 34 | class SmoothL1LossGradientOp final : public Operator { 35 | public: 36 | SmoothL1LossGradientOp(const OperatorDef& op_def, Workspace* ws) 37 | : Operator(op_def, ws), 38 | sigma2(OperatorBase::GetSingleArg("sigma", 1.0)), 39 | normalization(OperatorBase::GetSingleArg("normalization", "BATCH_SIZE")) { 40 | sigma2 *= sigma2; 41 | } 42 | 43 | void RunOnDevice() override; 44 | template void RunWithType(); 45 | 46 | protected: 47 | float sigma2; 48 | Tensor* diff; 49 | string normalization; 50 | }; 51 | 52 | } // namespace dragon 53 | 54 | #endif // DRAGON_OPERATORS_LOSS_SMOOTH_L1_LOSS_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/misc/accuracy_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_MISC_ACCURACY_OP_H_ 8 | #define DRAGON_OPERATORS_MISC_ACCURACY_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class AccuracyOp final: public Operator { 16 | public: 17 | AccuracyOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | 
top_k(OperatorBase::GetSingleArg("top_k", 1)), 20 | axis(OperatorBase::GetSingleArg("axis", 1)) { 21 | vector args = OperatorBase::GetRepeatedArg("ignore_labels"); 22 | if (args.size()) { 23 | ignore_labels.Reshape(vector(1, args.size())); 24 | int* ignore_data = ignore_labels.mutable_data(); 25 | for (int i = 0; i < args.size(); i++) ignore_data[i] = args[i]; 26 | } 27 | } 28 | 29 | void RunOnDevice() override; 30 | template void RunWithType(); 31 | 32 | protected: 33 | TIndex top_k, axis, outer_dim, inner_dim, num_classes; 34 | Tensor ignore_labels; 35 | }; 36 | 37 | } // namespace dragon 38 | 39 | #endif // DRAGON_OPERATORS_MISC_ACCURACY_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/misc/python_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_MISC_PYTHON_OP_H_ 8 | #define DRAGON_OPERATORS_MISC_PYTHON_OP_H_ 9 | 10 | #include 11 | 12 | #include "core/operator.h" 13 | 14 | #ifdef WITH_PYTHON3 15 | #define PyBytes_FromStringAndSize PyUnicode_FromStringAndSize 16 | #endif 17 | 18 | namespace dragon { 19 | 20 | template 21 | class RunOp : public Operator { 22 | public: 23 | RunOp(const OperatorDef& op_def, Workspace* ws); 24 | PyObject* String(const char* str) { 25 | return PyBytes_FromStringAndSize(str, string(str).size()); 26 | } 27 | 28 | void RunOnDevice() override; 29 | 30 | protected: 31 | PyObject* self, *inputs, *outputs; 32 | string module, op, param_str; 33 | }; 34 | 35 | template 36 | class TemplateOp : public RunOp { 37 | public: 38 | TemplateOp(const OperatorDef& op_def, Workspace* ws) 39 | : RunOp(op_def, ws) {} 40 | }; 41 | 42 | template 43 | class TemplateGradientOp : public TemplateOp { 44 | public: 45 | TemplateGradientOp(const OperatorDef& op_def, Workspace* ws) 46 | : TemplateOp(op_def, ws) { 47 | DISABLE_SHARE_GRADIENT; 48 | } 49 | void RunOnDevice() override; 50 | }; 51 | 52 | 53 | } // namespace dragon 54 | 55 | #endif // DRAGON_OPERATORS_MISC_PYTHON_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/mpi/mpi_broadcast_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_MPI_MPI_BROADCAST_OP_H_ 8 | #define DRAGON_OPERATORS_MPI_MPI_BROADCAST_OP_H_ 9 | 10 | #ifdef WITH_MPI 11 | 12 | #include "operators/mpi/base_mpi_op.h" 13 | 14 | namespace dragon { 15 | 16 | template 17 | class MPIBroadcastOp final : public ModelMPIBase { 18 | public: 19 | MPIBroadcastOp(const OperatorDef& op_def, Workspace* ws) 20 | : ModelMPIBase(op_def, ws) {} 21 | 22 | void RunOnDevice() override; 23 | template void RunWithType(); 24 | }; 25 | 26 | template 27 | class MPIBroadcastGradientOp final : public ModelMPIBase { 28 | public: 29 | MPIBroadcastGradientOp(const OperatorDef& op_def, Workspace* ws) 30 | : ModelMPIBase(op_def, ws) { 31 | DISABLE_SHARE_GRADIENT; 32 | } 33 | 34 | void RunOnDevice() override; 35 | template void RunWithType(); 36 | }; 37 | 38 | } // namespace dragon 39 | 40 | #endif // WITH_MPI 41 | 42 | #endif 
// DRAGON_OPERATORS_MPI_MPI_BROADCAST_OP_H_ 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /Dragon/include/operators/mpi/mpi_gather_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_MPI_MPI_GATHER_OP_H_ 8 | #define DRAGON_OPERATORS_MPI_MPI_GATHER_OP_H_ 9 | 10 | #ifdef WITH_MPI 11 | 12 | #include "operators/mpi/base_mpi_op.h" 13 | 14 | namespace dragon { 15 | 16 | template 17 | class MPIGatherOp final : public ModelMPIBase { 18 | public: 19 | MPIGatherOp(const OperatorDef& op_def, Workspace *ws) 20 | : ModelMPIBase(op_def, ws) {} 21 | 22 | void RunOnDevice() override; 23 | template void RunWithType(); 24 | }; 25 | 26 | template 27 | class MPIGatherGradientOp final : public ModelMPIBase { 28 | public: 29 | MPIGatherGradientOp(const OperatorDef& op_def, Workspace *ws) 30 | : ModelMPIBase(op_def, ws) { 31 | DISABLE_SHARE_GRADIENT; 32 | } 33 | 34 | void RunOnDevice() override; 35 | template void RunWithType(); 36 | }; 37 | 38 | } // namespace dragon 39 | 40 | #endif // WITH_MPI 41 | 42 | #endif // DRAGON_OPERATORS_MPI_MPI_GATHER_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/ndarray/arange_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_NDARRAY_ARANGE_OP_H_ 8 | #define DRAGON_OPERATORS_NDARRAY_ARANGE_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class ArangeOp final : public Operator { 16 | public: 17 | ArangeOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | dtype(OperatorBase::GetSingleArg("dtype", "FLOAT32")) { 20 | GET_ARGUMENT_WITH_DESC(int, start, 0); 21 | GET_ARGUMENT_WITH_DESC(int, stop, 0); 22 | GET_ARGUMENT_WITH_DESC(int, step, 1); 23 | } 24 | 25 | void RunOnDevice() override; 26 | template void RunWithType(); 27 | 28 | protected: 29 | DECLARE_ARGUMENT_WITH_DESC(int, start); 30 | DECLARE_ARGUMENT_WITH_DESC(int, stop); 31 | DECLARE_ARGUMENT_WITH_DESC(int, step); 32 | string dtype; 33 | }; 34 | 35 | DEFINE_ARGUMENT_WITH_DESC(int, ArangeOp, start); 36 | DEFINE_ARGUMENT_WITH_DESC(int, ArangeOp, stop); 37 | DEFINE_ARGUMENT_WITH_DESC(int, ArangeOp, step); 38 | 39 | } // namespace dragon 40 | 41 | #endif // DRAGON_OPERATORS_NDARRAY_ARANGE_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/ndarray/argmax_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_NDARRAY_ARGMAX_OP_H_ 8 | #define DRAGON_OPERATORS_NDARRAY_ARGMAX_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class ArgmaxOp final : public Operator { 16 | public: 17 | ArgmaxOp(const OperatorDef& op_def, Workspace* ws) 18 | : 
Operator(op_def, ws), 19 | axis(OperatorBase::GetSingleArg("axis", -1)), 20 | keep_dims(OperatorBase::GetSingleArg("keep_dims", false)), 21 | top_k(OperatorBase::GetSingleArg("top_k", 1)) {} 22 | 23 | void RunOnDevice() override; 24 | template void RunWithType(); 25 | 26 | protected: 27 | TIndex axis, axis_dim, top_k, count, inner_dim; 28 | bool keep_dims; 29 | }; 30 | 31 | } // namespace dragon 32 | 33 | #endif // DRAGON_OPERATORS_NDARRAY_ARGMAX_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/ndarray/argmin_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_NDARRAY_ARGMIN_OP_H_ 8 | #define DRAGON_OPERATORS_NDARRAY_ARGMIN_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class ArgminOp final : public Operator { 16 | public: 17 | ArgminOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | axis(OperatorBase::GetSingleArg("axis", -1)), 20 | keep_dims(OperatorBase::GetSingleArg("keep_dims", false)), 21 | top_k(OperatorBase::GetSingleArg("top_k", 1)) {} 22 | 23 | void RunOnDevice() override; 24 | template void RunWithType(); 25 | 26 | protected: 27 | TIndex axis, axis_dim, top_k, count, inner_dim; 28 | bool keep_dims; 29 | }; 30 | 31 | } // namespace dragon 32 | 33 | #endif // DRAGON_OPERATORS_NDARRAY_ARGMIN_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/ndarray/concat_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_NDARRAY_CONCAT_OP_H_ 8 | #define DRAGON_OPERATORS_NDARRAY_CONCAT_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class ConcatOp : public Operator { 16 | public: 17 | ConcatOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | axis(OperatorBase::GetSingleArg("axis", 1)), 20 | nin(OperatorBase::GetSingleArg("num_input", 1)) {} 21 | 22 | void RunOnDevice() override; 23 | template void RunWithType(); 24 | 25 | protected: 26 | TIndex axis, nin, outer_dim, inner_dim, x_concat_dim, y_concat_dim; 27 | TIndex x_offset, y_offset, concat_offset; 28 | vector concat_dims; 29 | }; 30 | 31 | template 32 | class ConcatGradientOp : public Operator { 33 | public: 34 | ConcatGradientOp(const OperatorDef& op_def, Workspace* ws) 35 | : Operator(op_def, ws), 36 | axis(OperatorBase::GetSingleArg("axis", 1)), 37 | nin(OperatorBase::GetSingleArg("num_input", 1)) { 38 | DISABLE_SHARE_GRADIENT; 39 | } 40 | 41 | void RunOnDevice() override; 42 | template void RunWithType(); 43 | 44 | protected: 45 | TIndex axis, nin, outer_dim, inner_dim, x_concat_dim, y_concat_dim; 46 | TIndex x_offset, y_offset, concat_offset; 47 | vector concat_dims; 48 | }; 49 | 50 | } // namespace dragon 51 | 52 | #endif // DRAGON_OPERATORS_NDARRAY_CONCAT_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/ndarray/expand_dims_op.h: 
-------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_NDARRAY_EXPAND_DIMS_OP_H_ 8 | #define DRAGON_OPERATORS_NDARRAY_EXPAND_DIMS_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class ExpandDimsOp final : public Operator { 16 | public: 17 | ExpandDimsOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | axis(OperatorBase::GetSingleArg("axis", -1)) {} 20 | 21 | void RunOnDevice() override; 22 | 23 | protected: 24 | TIndex axis; 25 | }; 26 | 27 | template 28 | class ExpandDimsGradientOp final : public Operator { 29 | public: 30 | ExpandDimsGradientOp(const OperatorDef& op_def, Workspace* ws) 31 | : Operator(op_def, ws) { 32 | DISABLE_SHARE_GRADIENT; 33 | } 34 | 35 | void RunOnDevice() override; 36 | }; 37 | 38 | } // namespace dragon 39 | 40 | #endif // DRAGON_OPERATORS_NDARRAY_EXPAND_DIMS_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/ndarray/flatten_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_NDARRAY_FLATTEN_OP_H_ 8 | #define DRAGON_OPERATORS_NDARRAY_FLATTEN_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class FlattenOp final : public Operator { 16 | public: 17 | FlattenOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | axis(OperatorBase::GetSingleArg("axis", 0)), 20 | num_axes(OperatorBase::GetSingleArg("num_axes", -1)), 21 | keep_axes(OperatorBase::GetSingleArg("keep_axes", INT_MAX)) {} 22 | 23 | void RunOnDevice() override; 24 | void SqueezeRun(); 25 | void KeepRun(); 26 | 27 | protected: 28 | TIndex axis, num_axes, keep_axes; 29 | }; 30 | 31 | template 32 | class FlattenGradientOp final : public Operator { 33 | public: 34 | FlattenGradientOp(const OperatorDef& op_def, Workspace* ws) 35 | : Operator(op_def, ws) { 36 | DISABLE_SHARE_GRADIENT; 37 | } 38 | 39 | void RunOnDevice() override; 40 | }; 41 | 42 | } // namespace dragon 43 | 44 | #endif // DRAGON_OPERATORS_NDARRAY_FLATTEN_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/ndarray/gather_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_NDARRAY_GATHER_OP_H_ 8 | #define DRAGON_OPERATORS_NDARRAY_GATHER_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class GatherOp final : public Operator { 16 | public: 17 | GatherOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | axis(OperatorBase::GetSingleArg("axis", 0)) {} 20 | 21 | void RunOnDevice() override; 22 | template void RunWithType(); 23 | 24 | protected: 25 | TIndex axis, outer_dim, inner_dim, x_slice_dim, y_slice_dim; 26 | vector 
output_dims; 27 | }; 28 | 29 | template 30 | class GatherGradientOp final : public Operator { 31 | public: 32 | GatherGradientOp(const OperatorDef& op_def, Workspace* ws) 33 | : Operator(op_def, ws), 34 | axis(OperatorBase::GetSingleArg("axis", 0)), 35 | acc_grad(OperatorBase::GetSingleArg("acc_gradient", false)) {} 36 | 37 | void RunOnDevice() override; 38 | template void RunWithType(); 39 | 40 | protected: 41 | TIndex axis, outer_dim, inner_dim, x_slice_dim, y_slice_dim; 42 | bool acc_grad; 43 | }; 44 | 45 | } // namespace dragon 46 | 47 | #endif // DRAGON_OPERATORS_NDARRAY_GATHER_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/ndarray/one_hot_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_NDARRAY_ONE_HOT_OP_H_ 8 | #define DRAGON_OPERATORS_NDARRAY_ONE_HOT_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class OneHotOp final : public Operator < Context > { 16 | public: 17 | OneHotOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator(op_def, ws), 19 | depth(OperatorBase::GetSingleArg("depth", -1)), 20 | on_value(OperatorBase::GetSingleArg("on_value", 1)), 21 | off_value(OperatorBase::GetSingleArg("off_value", 0)) {} 22 | 23 | void RunOnDevice() override; 24 | template void RunWithType(); 25 | 26 | protected: 27 | TIndex depth, on_value, off_value; 28 | }; 29 | 30 | } // namespace dragon 31 | 32 | #endif // DRAGON_OPERATORS_NDARRAY_ONE_HOT_OP_H_ -------------------------------------------------------------------------------- /Dragon/include/operators/ndarray/random_pick_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_OPERATORS_NDARRAY_RANDOM_PICK_OP_H_ 8 | #define DRAGON_OPERATORS_NDARRAY_RANDOM_PICK_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template 15 | class RandomPickOp : public Operator { 16 | public: 17 | RandomPickOp(const OperatorDef& op_def, Workspace* ws) : 18 | Operator(op_def, ws), 19 | axis(OperatorBase::GetSingleArg("axis", 0)), 20 | max_samples(OperatorBase::GetSingleArg("max_samples", 1)) {} 21 | 22 | void RunOnDevice() override; 23 | template void RunWithType(); 24 | 25 | protected: 26 | TIndex axis, max_samples; 27 | TIndex outer_dim, inner_dim, x_slice_dim, y_slice_dim; 28 | vector output_dims; 29 | Tensor* pick_indices; 30 | }; 31 | 32 | template 33 | class RandomPickGradientOp final : public Operator { 34 | public: 35 | RandomPickGradientOp(const OperatorDef& op_def, Workspace* ws) 36 | : Operator(op_def, ws), 37 | axis(OperatorBase::GetSingleArg("axis", 0)) { 38 | DISABLE_SHARE_GRADIENT; 39 | } 40 | 41 | void RunOnDevice() override; 42 | template void RunWithType(); 43 | 44 | protected: 45 | TIndex axis; 46 | TIndex outer_dim, inner_dim, x_slice_dim, y_slice_dim; 47 | Tensor* pick_indices; 48 | }; 49 | 50 | } // namespace dragon 51 | 52 | #endif // DRAGON_OPERATORS_NDARRAY_RANDOM_PICK_OP_H_ -------------------------------------------------------------------------------- 
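The ndarray headers above all share the same two-level dispatch: a virtual `RunOnDevice()` entry point plus a `RunWithType<T>()` member template that runs the math once the element type is known. The definitions live in the `.cc`/`.cu` sources, which are not part of this dump, so the following is only a self-contained toy mirroring the pattern, not Dragon's implementation; `TensorLike`, `ToyOp`, and the `typeid`-based dtype check are illustrative stand-ins.

```cpp
#include <cstdio>
#include <typeinfo>

// Toy stand-in for a tensor that only knows its runtime element type.
struct TensorLike {
    const std::type_info* meta;
};

// Hypothetical operator: the virtual entry point inspects the runtime
// dtype once, then forwards to the typed template body.
template <class Context>
class ToyOp {
 public:
    explicit ToyOp(TensorLike* input) : input_(input) {}

    virtual void RunOnDevice() {
        if (*input_->meta == typeid(float)) RunWithType<float>();
        else if (*input_->meta == typeid(int)) RunWithType<int>();
        else std::printf("unsupported input type\n");
    }

    template <typename T>
    void RunWithType() {
        // A real op would launch its typed kernel here.
        std::printf("running kernel for %s\n", typeid(T).name());
    }

 protected:
    TensorLike* input_;
};

int main() {
    TensorLike t{&typeid(float)};
    ToyOp<int> op(&t);   // the Context parameter is irrelevant in this toy
    op.RunOnDevice();    // takes the float branch
}
```

The design keeps dtype resolution in one virtual call while the per-type bodies stay templates, so each operator compiles one kernel per supported type without a virtual call in the inner loop.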
--------------------------------------------------------------------------------
/Dragon/include/operators/ndarray/reduce_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_NDARRAY_REDUCE_OP_H_
#define DRAGON_OPERATORS_NDARRAY_REDUCE_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class ReduceOp final : public Operator<Context> {
 public:
    ReduceOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          axis(OperatorBase::GetSingleArg<int>("axis", -1)),
          operation(OperatorBase::GetSingleArg<string>("operation", "NONE")),
          keep_dims(OperatorBase::GetSingleArg<bool>("keep_dims", false)) {}

    void RunOnDevice() override;
    template <typename T> void SumRunWithType();
    template <typename T> void MeanRunWithType();

 protected:
    bool keep_dims;
    string operation;
    TIndex axis, axis_dim, count, inner_dim;
    Tensor* multiplier;
};

template <class Context>
class ReduceGradientOp final : public Operator<Context> {
 public:
    ReduceGradientOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          axis(OperatorBase::GetSingleArg<int>("axis", -1)),
          operation(OperatorBase::GetSingleArg<string>("operation", "NONE")) {}

    void RunOnDevice() override;
    template <typename T> void SumRunWithType();
    template <typename T> void MeanRunWithType();

 protected:
    string operation;
    TIndex axis, axis_dim, count, inner_dim;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_NDARRAY_REDUCE_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/ndarray/repeat_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_NDARRAY_REPEAT_OP_H_
#define DRAGON_OPERATORS_NDARRAY_REPEAT_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class RepeatOp : public Operator<Context> {
 public:
    RepeatOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          axis(OperatorBase::GetSingleArg<int>("axis", -1)) {
        GET_ARGUMENT_WITH_DESC(int, repeats, 1);
    }

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    DECLARE_ARGUMENT_WITH_DESC(int, repeats);
    TIndex axis, outer_dim, dim, inner_dim;
};

template <class Context>
class RepeatGradientOp : public Operator<Context> {
 public:
    RepeatGradientOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          axis(OperatorBase::GetSingleArg<int>("axis", -1)) {
        GET_ARGUMENT_WITH_DESC(int, repeats, 1);
    }

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    DECLARE_ARGUMENT_WITH_DESC(int, repeats);
    TIndex axis, outer_dim, dim, inner_dim, reps;
};

DEFINE_ARGUMENT_WITH_DESC(int, RepeatOp, repeats);
DEFINE_ARGUMENT_WITH_DESC(int, RepeatGradientOp, repeats);

}    // namespace dragon

#endif // DRAGON_OPERATORS_NDARRAY_REPEAT_OP_H_
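ArangeOp, RepeatOp, and TileOp read some arguments through the `GET/DECLARE/DEFINE_ARGUMENT_WITH_DESC` macro family instead of plain members. The macro definitions (presumably in core/operator.h) are not included in this dump, so the sketch below is only a guess at their intent: an argument that may be supplied either as a literal value or as a "desc" naming a tensor to be read when the op runs, which is what lets values like `repeats` or `start/stop/step` be data-dependent. Every name here is hypothetical.

```cpp
#include <cstdio>
#include <string>
#include <unordered_map>

// Stand-in "workspace": maps tensor names to scalar contents.
static std::unordered_map<std::string, int> toy_workspace = {{"repeats_t", 3}};

// Assumed semantics of an ARGUMENT_WITH_DESC (illustrative only):
// hold either a literal value or the name of a tensor resolved at run time.
struct ArgWithDesc {
    int value = 1;        // literal fallback, set at graph-construction time
    std::string desc;     // optional tensor name, resolved per run

    int resolve() const {
        if (desc.empty()) return value;     // static argument
        return toy_workspace.at(desc);      // dynamic: fetched when the op runs
    }
};

int main() {
    ArgWithDesc repeats;
    repeats.value = 2;                       // literal form
    std::printf("static repeats = %d\n", repeats.resolve());

    repeats.desc = "repeats_t";              // desc form
    std::printf("dynamic repeats = %d\n", repeats.resolve());
}
```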
--------------------------------------------------------------------------------
/Dragon/include/operators/ndarray/reshape_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_NDARRAY_RESHAPE_OP_H_
#define DRAGON_OPERATORS_NDARRAY_RESHAPE_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class ReshapeOp final : public Operator<Context> {
 public:
    ReshapeOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          shape(OperatorBase::GetRepeatedArg<int>("shape")) {
        new_shape.resize(shape.size());
    }

    void RunOnDevice() override;

 protected:
    vector<int> shape;
    vector<TIndex> new_shape;
};

template <class Context>
class ReshapeGradientOp final : public Operator<Context> {
 public:
    ReshapeGradientOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws) {
        DISABLE_SHARE_GRADIENT;
    }

    void RunOnDevice() override;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_NDARRAY_RESHAPE_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/ndarray/shape_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_NDARRAY_SHAPE_OP_H_
#define DRAGON_OPERATORS_NDARRAY_SHAPE_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class ShapeOp final : public Operator<Context> {
 public:
    USE_SIMPLE_CTOR_DTOR(ShapeOp);
    void RunOnDevice() override;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_NDARRAY_SHAPE_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/ndarray/slice_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_NDARRAY_SLICE_OP_H_
#define DRAGON_OPERATORS_NDARRAY_SLICE_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class SliceOp : public Operator<Context> {
 public:
    SliceOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          axis(OperatorBase::GetSingleArg<int>("axis", 1)),
          nout(OperatorBase::GetSingleArg<int>("num_output", 1)) {}

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    TIndex axis, nout, steps;
    TIndex outer_dim, inner_dim, x_slice_dim, y_slice_dim;
    TIndex slice_offset;
    vector<TIndex> slice_dims;
};

template <class Context>
class SliceGradientOp final : public Operator<Context> {
 public:
    SliceGradientOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          axis(OperatorBase::GetSingleArg<int>("axis", 1)),
          nout(OperatorBase::GetSingleArg<int>("num_output", 1)) {
        DISABLE_SHARE_GRADIENT;
    }

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    TIndex axis, nout;
    TIndex outer_dim, inner_dim, x_slice_dim, y_slice_dim;
    TIndex x_offset, y_offset, slice_offset;
    vector<TIndex> slice_dims;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_NDARRAY_SLICE_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/ndarray/stack_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_NDARRAY_STACK_OP_H_
#define DRAGON_OPERATORS_NDARRAY_STACK_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class StackOp : public Operator<Context> {
 public:
    StackOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          axis(OperatorBase::GetSingleArg<int>("axis", 0)),
          nin(OperatorBase::GetSingleArg<int>("num_input", 1)) {}

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    TIndex axis, nin, outer_dim, inner_dim, x_concat_dim, y_concat_dim;
    TIndex x_offset, y_offset, concat_offset;
    vector<TIndex> stack_dims, concat_dims;
};

template <class Context>
class StackGradientOp : public Operator<Context> {
 public:
    StackGradientOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          axis(OperatorBase::GetSingleArg<int>("axis", 0)),
          nin(OperatorBase::GetSingleArg<int>("num_input", 1)) {}

    void ShareGradient() override;
    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    TIndex axis, nin, outer_dim, inner_dim, x_concat_dim, y_concat_dim;
    TIndex x_offset, y_offset, concat_offset;
    vector<TIndex> concat_dims;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_NDARRAY_STACK_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/ndarray/tile_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_NDARRAY_TILE_OP_H_
#define DRAGON_OPERATORS_NDARRAY_TILE_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class TileOp : public Operator<Context> {
 public:
    TileOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws) {
        GET_ARGUMENTS_WITH_DESC(int, multiples);
    }

    void RunOnDevice() override;
    template <typename T> void TileRunWithType();

 protected:
    DECLARE_ARGUMENTS_WITH_DESC(int, multiples);
    TIndex axis, multiple, outer_dim, ex_inner_dim;
    Tensor *dest, *source;
};

template <class Context>
class TileGradientOp : public Operator<Context> {
 public:
    TileGradientOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws) {
        GET_ARGUMENTS_WITH_DESC(int, multiples);
        DISABLE_SHARE_GRADIENT;
    }

    void RunOnDevice() override;
    template <typename T> void TileRunWithType();

 protected:
    DECLARE_ARGUMENTS_WITH_DESC(int, multiples);
    TIndex axis, multiple, outer_dim, ex_inner_dim;
    Tensor *dest, *source;
};

DEFINE_ARGUMENTS_WITH_DESC(int, TileOp, multiples);
DEFINE_ARGUMENTS_WITH_DESC(int, TileGradientOp, multiples);

}    // namespace dragon

#endif // DRAGON_OPERATORS_NDARRAY_TILE_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/ndarray/transpose_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_NDARRAY_TRANSPOSE_OP_H_
#define DRAGON_OPERATORS_NDARRAY_TRANSPOSE_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class TransposeOp final : public Operator<Context> {
 public:
    TransposeOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          perms(OperatorBase::GetRepeatedArg<int>("perms")) {
        if (perms.size() > 0) reverse_dims = false;
        else reverse_dims = true;
    }

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    vector<int> perms;
    bool reverse_dims;
    Tensor *order, *old_steps, *new_steps;
};

template <class Context>
class TransposeGradientOp final : public Operator<Context> {
 public:
    TransposeGradientOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws) {}

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    Tensor *order, *old_steps, *new_steps;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_NDARRAY_TRANSPOSE_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/recurrent/lstm_unit_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_RECURRENT_LSTM_UNIT_OP_H_
#define DRAGON_OPERATORS_RECURRENT_LSTM_UNIT_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class LSTMUnitOp : public Operator<Context> {
 public:
    LSTMUnitOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          has_cont(OperatorBase::GetSingleArg<string>("cont_t", "")) {}

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    TIndex num, channels;
    string has_cont;
    Tensor* cont_t;
};

template <class Context>
class LSTMUnitGradientOp : public Operator<Context> {
 public:
    LSTMUnitGradientOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws) {
        this->allow_share_grads_ = false;
    }

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    TIndex num, channels;
    Tensor* zeros;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_RECURRENT_LSTM_UNIT_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/update/adam_update_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_UPDATE_ADAM_UPDATE_OP_H_
#define DRAGON_OPERATORS_UPDATE_ADAM_UPDATE_OP_H_

#include "operators/update/update_op_base.h"

namespace dragon {

template <class Context>
class AdamUpdateOp final : public UpdateOpBase<Context> {
 public:
    AdamUpdateOp(const OperatorDef& op_def, Workspace* ws)
        : UpdateOpBase<Context>(op_def, ws),
          t(0),
          eps(param("eps")),
          beta1(param("beta1")),
          beta2(param("beta2")) {}

    void ComputeRunWithFloat() override;

 protected:
    float lr, beta1, beta2, eps, coeff;
    int t;
    unique_ptr<Tensor> m, v;
    Tensor temp;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_UPDATE_ADAM_UPDATE_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/update/collective_update_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_UPDATE_COLLECTIVE_UPDATE_OP_H_
#define DRAGON_OPERATORS_UPDATE_COLLECTIVE_UPDATE_OP_H_

#include "core/operator.h"

namespace dragon {

#ifdef WITH_MPI

template <class Context>
class CollectiveUpdateOp : public Operator<Context> {
 public:
    CollectiveUpdateOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          mode(OperatorBase::GetSingleArg<string>("mode", "UNKNOWN")) {
        InitMPI();
        if (mode.find("NCCL") != string::npos) InitNCCL();
    }

    void InitMPI();
    void InitNCCL();

    void RunOnDevice() override;
    void MPIAllReduceWithFloat();
    void NCCLAllReduceWithFloat();
    void MPIBcastWithFloat();
    void NCCLBcastWithFloat();

 protected:
    int comm_size, comm_rank, comm_root;
    int world_size, world_rank;
    Tensor* buffer;
    string mode;

    MPI_Comm comm;
    MPI_Group group;

#ifdef WITH_MPI_NCCL
    ncclComm_t nccl_comm;
    cudaStream_t stream;
#endif
};

#endif // WITH_MPI

}    // namespace dragon

#endif // DRAGON_OPERATORS_UPDATE_COLLECTIVE_UPDATE_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/update/moving_average_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_UPDATE_MOVING_AVERAGE_OP_H_
#define DRAGON_OPERATORS_UPDATE_MOVING_AVERAGE_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class MovingAverageOp final : public Operator<Context> {
 public:
    MovingAverageOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          decay(OperatorBase::GetSingleArg<float>("decay", 1.0)) {}

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    float decay;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_UPDATE_MOVING_AVERAGE_OP_H_
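The rule MovingAverageOp applies is spelled out later in this dump (docs/contents/operators/misc.rst): Variable = Decay * Variable + (1 - Decay) * Value. The header only carries the `decay` argument, so here is a self-contained sketch of that per-element update on plain arrays; the real operator works on `dragon::Tensor` inputs instead, and `MovingAverageUpdate` is a name invented for illustration.

```cpp
#include <cstdio>
#include <vector>

// Exponential moving average:
//   variable = decay * variable + (1 - decay) * value
// applied element-wise, as the misc.rst formula describes.
void MovingAverageUpdate(std::vector<float>& variable,
                         const std::vector<float>& value,
                         float decay) {
    for (size_t i = 0; i < variable.size(); ++i)
        variable[i] = decay * variable[i] + (1.0f - decay) * value[i];
}

int main() {
    std::vector<float> shadow = {0.0f, 0.0f};   // running average
    std::vector<float> fresh  = {1.0f, 2.0f};   // new observation
    MovingAverageUpdate(shadow, fresh, 0.9f);
    std::printf("%.2f %.2f\n", shadow[0], shadow[1]);  // prints 0.10 0.20
}
```

With `decay` close to 1 the running value changes slowly, which is why such ops typically maintain "shadow" copies of weights for evaluation.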
--------------------------------------------------------------------------------
/Dragon/include/operators/update/nesterov_update_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_UPDATE_NESTEROV_UPDATE_OP_H_
#define DRAGON_OPERATORS_UPDATE_NESTEROV_UPDATE_OP_H_

#include "operators/update/update_op_base.h"

namespace dragon {

template <class Context>
class NesterovUpdateOp final : public UpdateOpBase<Context> {
 public:
    NesterovUpdateOp(const OperatorDef& op_def, Workspace* ws)
        : UpdateOpBase<Context>(op_def, ws),
          momentum(param("momentum")) {}

    void ComputeRunWithFloat() override;

 protected:
    float lr, momentum;
    unique_ptr<Tensor> history;
    Tensor temp;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_UPDATE_NESTEROV_UPDATE_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/update/rmsprop_update_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_UPDATE_RMSPROP_UPDATE_OP_H_
#define DRAGON_OPERATORS_UPDATE_RMSPROP_UPDATE_OP_H_

#include "operators/update/update_op_base.h"

namespace dragon {

template <class Context>
class RMSPropUpdateOp final : public UpdateOpBase<Context> {
 public:
    RMSPropUpdateOp(const OperatorDef& op_def, Workspace* ws)
        : UpdateOpBase<Context>(op_def, ws),
          eps(param("eps")),
          decay(param("decay")) {}

    void ComputeRunWithFloat() override;

 protected:
    float lr, decay, eps;
    unique_ptr<Tensor> history;
    Tensor temp;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_UPDATE_RMSPROP_UPDATE_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/update/sgd_update_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_UPDATE_SGD_UPDATE_OP_H_
#define DRAGON_OPERATORS_UPDATE_SGD_UPDATE_OP_H_

#include "operators/update/update_op_base.h"

namespace dragon {

template <class Context>
class SGDUpdateOp final : public UpdateOpBase<Context> {
 public:
    SGDUpdateOp(const OperatorDef& op_def, Workspace* ws)
        : UpdateOpBase<Context>(op_def, ws),
          momentum(param("momentum")) {}

    void ComputeRunWithFloat() override;

 protected:
    float lr, momentum;
    unique_ptr<Tensor> history;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_UPDATE_SGD_UPDATE_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/update/update_op_base.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_UPDATE_UPDATE_OP_BASE_H_
#define DRAGON_OPERATORS_UPDATE_UPDATE_OP_BASE_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class UpdateOpBase : public Operator<Context> {
 public:
    UpdateOpBase(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          lr_mult(OperatorBase::GetSingleArg<float>("lr_mult", 1.0)),
          decay_mult(OperatorBase::GetSingleArg<float>("decay_mult", 1.0)),
          domain(OperatorBase::GetSingleArg<string>("domain", "_")) {}

    float param(const string& name) const;

    void RunOnDevice() override;
    template <typename T> void PreprocessRunWithType();
    virtual void ComputeRunWithFloat() = 0;
    template <typename T> void UpdateRunWithType();

 protected:
    float lr_mult, decay_mult;
    float l2_decay, clip_thresh, scale_factor;
    string domain;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_UPDATE_UPDATE_OP_BASE_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/vision/dense_concat_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_VISION_DENSE_CONCAT_OP_H_
#define DRAGON_OPERATORS_VISION_DENSE_CONCAT_OP_H_

#include "operators/ndarray/concat_op.h"

namespace dragon {

template <class Context>
class DenseConcatOp final : public ConcatOp<Context> {
 public:
    DenseConcatOp(const OperatorDef& op_def, Workspace* ws)
        : ConcatOp<Context>(op_def, ws) {}
};

template <class Context>
class DenseConcatGradientOp : public ConcatGradientOp<Context> {
 public:
    DenseConcatGradientOp(const OperatorDef& op_def, Workspace* ws)
        : ConcatGradientOp<Context>(op_def, ws),
          growth_rate(OperatorBase::GetSingleArg<int>("growth_rate", 0)) {}

    void ElimateCorruption() override;
    template <typename T> void RestoreX1();

 protected:
    TIndex growth_rate;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_VISION_DENSE_CONCAT_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/operators/vision/roi_pooling_op.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_OPERATORS_VISION_ROI_POOLING_OP_H_
#define DRAGON_OPERATORS_VISION_ROI_POOLING_OP_H_

#include "core/operator.h"

namespace dragon {

template <class Context>
class ROIPoolingOp : public Operator<Context> {
 public:
    ROIPoolingOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          pool_h(OperatorBase::GetSingleArg<int>("pool_h", 0)),
          pool_w(OperatorBase::GetSingleArg<int>("pool_w", 0)),
          spatial_scale(OperatorBase::GetSingleArg<float>("spatial_scale", 1.0)) {
        CHECK_GT(pool_h, 0) << "\npool_h must > 0";
        CHECK_GT(pool_w, 0) << "\npool_w must > 0";
    }

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    int pool_h, pool_w;
    float spatial_scale;
    Tensor* mask;
};

template <class Context>
class ROIPoolingGradientOp final : public Operator<Context> {
 public:
    ROIPoolingGradientOp(const OperatorDef& op_def, Workspace* ws)
        : Operator<Context>(op_def, ws),
          pool_h(OperatorBase::GetSingleArg<int>("pool_h", 0)),
          pool_w(OperatorBase::GetSingleArg<int>("pool_w", 0)),
          spatial_scale(OperatorBase::GetSingleArg<float>("spatial_scale", 1.0)) {}

    void RunOnDevice() override;
    template <typename T> void RunWithType();

 protected:
    int pool_h, pool_w;
    float spatial_scale;
    Tensor* mask;
};

}    // namespace dragon

#endif // DRAGON_OPERATORS_VISION_ROI_POOLING_OP_H_
--------------------------------------------------------------------------------
/Dragon/include/utils/omp_alternative.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_UTILS_OMP_ALTERNATIVE_H_
#define DRAGON_UTILS_OMP_ALTERNATIVE_H_

#ifdef WITH_OMP

#include <algorithm>
#include <omp.h>

namespace dragon {

#define OMP_MIN_ITERATORS_PER_CORE 200000

inline int GET_OMP_THREADS(const int N) {
    int threads = std::max(N / OMP_MIN_ITERATORS_PER_CORE, 1);
    return std::min(threads, omp_get_num_procs());
}

}    // namespace dragon

#endif // WITH_OMP

#endif // DRAGON_UTILS_OMP_ALTERNATIVE_H_
--------------------------------------------------------------------------------
/Dragon/include/utils/sse_alternative.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_UTILS_SSE_ALTERNATIVE_H_
#define DRAGON_UTILS_SSE_ALTERNATIVE_H_

#ifdef WITH_SSE

#include "utils/sse_device.h"

namespace dragon {

namespace sse {

/******************** Level-0 ********************/

template <typename T>
void Set(const int n, const T alpha, T* x);

/******************** Level-1 ********************/

template <typename T>
void Add(const int n, const T* a, const T* b, T* y);

template <typename T>
void Sub(const int n, const T* a, const T* b, T* y);

template <typename T>
void Mul(const int n, const T* a, const T* b, T* y);

template <typename T>
void Div(const int n, const T* a, const T* b, T* y);

/******************** Level-2 ********************/

template <typename T>
void Scal(const int n, const T alpha, T* y);

template <typename T>
void Scale(const int n, const T alpha, const T* x, T* y);

template <typename T>
T Dot(const int n, const T* a, const T* b);

template <typename T>
T ASum(const int n, const T* x);

template <typename T>
void AddScalar(const int n, const T alpha, T* y);

template <typename T>
void MulScalar(const int n, const T alpha, T* y);

template <typename T>
void Axpy(const int n, const T alpha, const T* x, T* y);

template <typename T>
void Axpby(const int n, const T alpha, const T* x, const T beta, T* y);

}    // namespace sse

}    // namespace dragon

#endif // WITH_SSE

#endif // DRAGON_UTILS_SSE_ALTERNATIVE_H_
--------------------------------------------------------------------------------
/Dragon/include/utils/sse_device.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_UTILS_SSE_DEVICE_H_
#define DRAGON_UTILS_SSE_DEVICE_H_

#ifdef WITH_SSE

#include <xmmintrin.h>
#include <emmintrin.h>

namespace dragon {

#define SSE_LOOP1(i, n) \
    for (i = 0; i < n - 4; i += 4)

#define SSE_LOOP2(i, n) \
    for (; i < n; ++i)

#define SSE_FP32_LOAD _mm_loadu_ps
#define SSE_FP32_STORE _mm_storeu_ps
#define SSE_FP32_ADD _mm_add_ps
#define SSE_FP32_SUB _mm_sub_ps
#define SSE_FP32_MUL _mm_mul_ps
#define SSE_FP32_DIV _mm_div_ps
#define SSE_FP32_MAX _mm_max_ps
#define SSE_FP32_ZERO _mm_setzero_ps()
#define SSE_FP32_SCALAR _mm_setscalar_ps

#define SSE_INT32_SCALAR _mm_setscalar_epi
#define SSE_INT128_STORE _mm_storeu_si128

inline __m128 _mm_setscalar_ps(const float scalar) {
    return _mm_set_ps(scalar, scalar, scalar, scalar);
}

inline __m128i _mm_setscalar_epi(const int scalar) {
    return _mm_set_epi32(scalar, scalar, scalar, scalar);
}

}    // namespace dragon

#endif // WITH_SSE

#endif // DRAGON_UTILS_SSE_DEVICE_H_
--------------------------------------------------------------------------------
/Dragon/include/utils/string.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_UTILS_STRING_H_
#define DRAGON_UTILS_STRING_H_

#include <string>
#include <vector>
#include <sstream>
#include <cstdlib>

#include "cast.h"

namespace dragon {

inline std::vector<std::string> SplitString(const std::string& str,
                                            const std::string& c) {
    std::vector<std::string> ret;
    std::string temp(str);
    size_t pos;
    while (pos = temp.find(c), pos != std::string::npos) {
        ret.push_back(temp.substr(0, pos));
        temp.erase(0, pos + 1);
    }
    ret.push_back(temp);
    return ret;
}

template <> inline std::string dragon_cast<std::string, int>(int val) {
    std::stringstream ss;
    ss << val;
    return ss.str();
}

template <> inline int dragon_cast<int, std::string>(std::string val) {
    return atoi(val.c_str());
}

}    // namespace dragon

#endif // DRAGON_UTILS_STRING_H_
--------------------------------------------------------------------------------
/Dragon/include/utils/thread.h:
--------------------------------------------------------------------------------
// --------------------------------------------------------
// Dragon
// Copyright(c) 2017 SeetaTech
// Written by Ting Pan
// --------------------------------------------------------

#ifndef DRAGON_UTILS_THREAD_H_
#define DRAGON_UTILS_THREAD_H_

#include <thread>
#include <mutex>
#include <condition_variable>
#include <functional>
#include <memory>

namespace dragon {

struct thread_interrupted {};

class InterruptionPoint {
 public:
    InterruptionPoint() : stop(false) {}
    void Interrupt() {
        std::unique_lock<std::mutex> lock(mutex);
        stop = true;
    }
    void InterruptionRequested() {
        std::unique_lock<std::mutex> lock(mutex);
        if (stop) throw thread_interrupted();
    }

 protected:
    bool stop;
    std::mutex mutex;
    std::condition_variable cond;
};

class BaseThread {
 public:
    ~BaseThread() { Stop(); }
    void Start() {
        thread = std::unique_ptr<std::thread>(
            new std::thread(std::bind(&BaseThread::ThreadRun, this)));
    }
    void Stop() {
        interruption_point.Interrupt();
        thread->join();
    }
    bool must_stop() {
        interruption_point.InterruptionRequested();
        return false;
    }

 protected:
    virtual void ThreadRun() {
        try {
            while (!must_stop()) {}
        } catch (const thread_interrupted&) {}
    }

 private:
    std::unique_ptr<std::thread> thread;
    InterruptionPoint interruption_point;
};

}    // namespace dragon

#endif // DRAGON_UTILS_THREAD_H_
--------------------------------------------------------------------------------
/Dragon/modules/python/CMakeLists.txt:
--------------------------------------------------------------------------------
FILE(GLOB_RECURSE MODULE_FILES *.h *.hpp *.c *.cpp *.cu *.cc)
FILE(GLOB_RECURSE SRC_FILES ../../src/*.c ../../src/*.cpp ../../src/*.cu ../../src/*.cc)

# ---[ compiler
if (WITH_CUDA)
    CUDA_ADD_LIBRARY(${PROJECT_NAME}_python SHARED ${MODULE_FILES} ${SRC_FILES})
    TARGET_LINK_LIBRARIES(${PROJECT_NAME}_python ${CUDA_LIBRARIES} ${CUDA_cublas_LIBRARY} ${CUDA_curand_LIBRARY})
else ()
    ADD_LIBRARY(${PROJECT_NAME}_python SHARED ${MODULE_FILES} ${SRC_FILES})
endif()

# ---[ link basics
FILE(GLOB targets ${3RDPARTY_LIBS}/*.so ${3RDPARTY_LIBS}/*.lib)
foreach(target ${targets})
    TARGET_LINK_LIBRARIES(${PROJECT_NAME}_python ${target})
endforeach()

# ---[ link optional libs
if (UNIX AND WITH_CUDNN)
    TARGET_LINK_LIBRARIES(${PROJECT_NAME}_python cudnn)
endif()
if (UNIX AND WITH_BLAS)
    TARGET_LINK_LIBRARIES(${PROJECT_NAME}_python openblas)
endif()
if (UNIX AND WITH_MPI_NCCL)
    TARGET_LINK_LIBRARIES(${PROJECT_NAME}_python nccl)
endif()

# ---[ link platforms
if(UNIX)
    TARGET_LINK_LIBRARIES(${PROJECT_NAME}_python protobuf pthread)
endif()
if(WIN32)
    TARGET_LINK_LIBRARIES(${PROJECT_NAME}_python shlwapi.lib)
endif()

set_target_properties(${PROJECT_NAME}_python PROPERTIES OUTPUT_NAME dragon)

# ---[ install
install (TARGETS ${PROJECT_NAME}_python DESTINATION ${PROJECT_BINARY_DIR}/../lib)
--------------------------------------------------------------------------------
/Dragon/python/dragon/__init__.py:
--------------------------------------------------------------------------------
# --------------------------------------------------------
# Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------

# config
from dragon.config import *
import dragon.config as config

# core
from dragon.core.tensor import Tensor
import dragon.core.workspace as workspace

# ops
from dragon.ops import *

# updaters
from dragon.updaters import *

# theano utilities
from dragon.vm.theano.compile.function import function as function
from dragon.vm.theano.tensor import grad as grad

# scope
from dragon.core.scope import TensorScope as name_scope
from dragon.core.scope import PhaseScope as phase_scope
from dragon.core.scope import DeviceScope as device_scope
--------------------------------------------------------------------------------
/Dragon/python/dragon/core/__init__.py:
--------------------------------------------------------------------------------
# --------------------------------------------------------
# Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/_templates/localtoc.html:
--------------------------------------------------------------------------------
{{ toctree(maxdepth=theme_globaltoc_depth|toint, collapse=True,includehidden=theme_globaltoc_includehidden|tobool) }}
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/conf.py:
--------------------------------------------------------------------------------
# --------------------------------------------------------
# Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------

import sys
import os
import sphinx_bootstrap_theme

# basic
html_static_path = ['_static']
templates_path = ['_templates']
exclude_patterns = ['_build']

source_suffix = '.rst'
master_doc = 'index'
pygments_style = 'sphinx'
todo_include_todos = True

# extensions
sys.path.insert(0, os.path.abspath('_extensions'))
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    #'sphinx.ext.viewcode',
    'mathmacro',
]

# project
project = ''
copyright = '2017, Ting Pan'
author = 'Ting Pan'
html_logo = "dragon.png"
html_title = ""
html_short_title = ""
html_favicon = 'images/favicon.png'

version = ''
release = ''
language = None

# theme
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()

html_show_sourcelink = False
html_show_sphinx = False
html_show_copyright = False

html_theme_options = {
    'globaltoc_depth': -1,
    'navbar_class': "navbar navbar-inverse",
    'navbar_fixed_top': "true",
    'bootswatch_theme': "yeti",
}

html_sidebars = {'index': ['localtoc.html'],
                 'install': ['localtoc.html'],
                 'contents/**': ['localtoc.html']}

# overloads
def setup(app):
    app.config.values['autodoc_member_order'] = ('bysource', True)
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/contents/config.rst:
--------------------------------------------------------------------------------
====================
:mod:`dragon.config`
====================

.. toctree::
   :hidden:

Quick Shortcut
--------------

==================== =============================================================================
List                 Brief
==================== =============================================================================
`EnableCPU`_         Enable CPU mode globally.
`EnableCUDA`_        Enable CUDA mode globally.
`SetRandomSeed`_     Set the global random seed.
`GetRandomSeed`_     Get the global random seed.
`SetGPU`_            Set the global id of GPU.
`GetGPU`_            Get the global id of GPU.
`SetDebugMode`_      Enable Debug mode globally.
`LogMetaGraph`_      Enable logging of the meta graph globally.
`LogOptimizedGraph`_ Enable logging of the optimized graph globally.
`ExportMetaGraph`_   Enable exporting all runnable meta graphs into text files.
`SetLoggingLevel`_   Set the minimum level of logging.
`SetLoggingFile`_    Redirect logging into the specified file.
==================== =============================================================================

API Reference
-------------

.. automodule:: dragon.config
    :members:

.. _EnableCPU: #dragon.config.EnableCPU
.. _EnableCUDA: #dragon.config.EnableCUDA
.. _SetRandomSeed: #dragon.config.SetRandomSeed
.. _GetRandomSeed: #dragon.config.GetRandomSeed
.. _SetGPU: #dragon.config.SetGPU
.. _GetGPU: #dragon.config.GetGPU
.. _SetDebugMode: #dragon.config.SetDebugMode
.. _LogMetaGraph: #dragon.config.LogMetaGraph
.. _LogOptimizedGraph: #dragon.config.LogOptimizedGraph
.. _ExportMetaGraph: #dragon.config.ExportMetaGraph
.. _SetLoggingLevel: #dragon.config.SetLoggingLevel
.. _SetLoggingFile: #dragon.config.SetLoggingFile
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/contents/core.rst:
--------------------------------------------------------------------------------
==================
:mod:`dragon.core`
==================

Data Structure
--------------

.. toctree::
   :hidden:

   core/tensor
   core/scope

============================== =======================================================================
List                           Brief
============================== =======================================================================
`dragon.core.scope`_           The Scope and Namespace.
`dragon.core.tensor`_          The basic data structure of the VM.
============================== =======================================================================

C++ Binding Wrapper
-------------------

.. toctree::
   :hidden:

   core/workspace
   core/mpi
   core/gradient_maker

============================== =======================================================================
List                           Brief
============================== =======================================================================
`dragon.core.workspace`_       The interfaces of Workspace, most of which are wrappers of C++.
`dragon.core.gradient_maker`_  The generator of GradientOps.
`dragon.core.mpi`_             The MPI utilities.
============================== =======================================================================

.. _dragon.core.mpi: core/mpi.html
.. _dragon.core.scope: core/scope.html
.. _dragon.core.tensor: core/tensor.html
.. _dragon.core.workspace: core/workspace.html
.. _dragon.core.gradient_maker: core/gradient_maker.html
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/contents/core/gradient_maker.rst:
--------------------------------------------------------------------------------
====================
:mod:`GradientMaker`
====================

.. toctree::
   :hidden:

.. currentmodule:: dragon.core.gradient_maker

.. autoclass:: GraphGradientMaker
    :members:

.. _theano.function(*args, **kwargs): ../vm/theano/compile.html#dragon.vm.theano.compile.function.function
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/contents/core/scope.rst:
--------------------------------------------------------------------------------
============
:mod:`Scope`
============

.. toctree::
   :hidden:

.. currentmodule:: dragon.core.scope

.. autoclass:: dragon.core.scope.TensorScope
    :members:

.. autoclass:: dragon.core.scope.PhaseScope
    :members:

.. autoclass:: dragon.core.scope.DeviceScope
    :members:
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/contents/io.rst:
--------------------------------------------------------------------------------
================
:mod:`dragon.io`
================

Wrapper
-------

.. toctree::
   :hidden:

   io/data_batch

========================== =====================================================================
List                       Brief
========================== =====================================================================
`dragon.io.data_batch`_    Efficient I/O based on `LMDB`_.
========================== =====================================================================

Component
---------

.. toctree::
   :hidden:

   io/data_reader
   io/data_transformer
   io/blob_fetcher

============================== =====================================================================
List                           Brief
============================== =====================================================================
`dragon.io.data_reader`_       Queue encoded strings from `LMDB`_.
`dragon.io.data_transformer`_  Queue transformed images from `DataReader`_.
`dragon.io.blob_fetcher`_      Queue blobs from `DataTransformer`_.
============================== =====================================================================


.. _LMDB: http://lmdb.readthedocs.io/en/release
.. _DataReader: io/data_reader.html#dragon.io.data_reader
.. _DataTransformer: io/data_transformer.html#dragon.io.data_transformer
.. _dragon.io.data_batch: io/data_batch.html
.. _dragon.io.data_reader: io/data_reader.html
.. _dragon.io.data_transformer: io/data_transformer.html
.. _dragon.io.blob_fetcher: io/blob_fetcher.html
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/contents/io/blob_fetcher.rst:
--------------------------------------------------------------------------------
==================
:mod:`BlobFetcher`
==================

.. toctree::
   :hidden:

.. currentmodule:: dragon.io.blob_fetcher

.. autoclass:: BlobFetcher
    :members:

    .. automethod:: __init__

.. _DataTransformer: data_transformer.html#dragon.io.data_transformer
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/contents/io/data_batch.rst:
--------------------------------------------------------------------------------
================
:mod:`DataBatch`
================

.. toctree::
   :hidden:

.. currentmodule:: dragon.io.data_batch

.. autoclass:: DataBatch
    :members:

    .. automethod:: __init__
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/contents/io/data_reader.rst:
--------------------------------------------------------------------------------
=================
:mod:`DataReader`
=================

.. toctree::
   :hidden:

.. currentmodule:: dragon.io.data_reader

.. autoclass:: DataReader
    :members:

    .. automethod:: __init__

.. _LMDB: http://lmdb.readthedocs.io/en/release
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/contents/io/data_transformer.rst:
--------------------------------------------------------------------------------
======================
:mod:`DataTransformer`
======================

.. toctree::
   :hidden:

.. currentmodule:: dragon.io.data_transformer

.. autoclass:: DataTransformer
    :members:

    .. automethod:: __init__

.. _DataReader: data_reader.html#dragon.io.data_reader
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/contents/memonger.rst:
--------------------------------------------------------------------------------
======================
:mod:`dragon.memonger`
======================

.. toctree::
   :hidden:

Quick Shortcut
--------------

==================== =============================================================================
List                 Brief
==================== =============================================================================
`ShareGrads`_        Enable gradients sharing globally.
`Drop`_              Drop (share) the inputs for the outputs.
==================== =============================================================================

API Reference
-------------

.. automodule:: dragon.memonger
    :members:

.. _ShareGrads: #dragon.memonger.ShareGrads
.. _Drop: #dragon.memonger.Drop
--------------------------------------------------------------------------------
/Dragon/python/dragon/docs/contents/operators/activation.rst:
--------------------------------------------------------------------------------
=================
:mod:`Activation`
=================

.. toctree::
   :hidden:

.. automodule:: dragon.operators.activation
    :members:

.. |sigmoid_function| mathmacro:: \, y = \frac{1}{1 + {e}^{-x}}

.. |tanh_function| mathmacro:: \, y = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}

.. |relu_function| mathmacro:: \, y = \left\{ \begin{array}{ll} x & (x > 0) \\ 0 & (x \le 0) \end{array} \right.

.. |lrelu_function| mathmacro:: \, y = \left\{ \begin{array}{ll} x & (x > 0) \\ Slope * x & (x \le 0) \end{array} \right.

.. |prelu_function| mathmacro:: \, y_{i} = \left\{ \begin{array}{ll} x_{i} & (x_{i} > 0) \\ \alpha_{i} * x_{i} & (x_{i} \le 0) \end{array} \right.

.. |elu_function| mathmacro:: \, y = \left\{ \begin{array}{ll} x & (x > 0) \\ Alpha * (e^{x} - 1) & (x \le 0) \end{array} \right.

.. |selu_function| mathmacro:: \, y = 1.0507 \left\{ \begin{array}{ll} x & (x > 0) \\ 1.6733 * (e^{x} - 1) & (x \le 0) \end{array} \right.

.. |dropout_function| mathmacro:: \, y = x * Bernoulli(p=1 - prob)
|softmax_function| mathmacro:: \, y = \frac{e^{x_{i}}}{\sum e^{x_{j}}} 28 | -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/arithmetic.rst: -------------------------------------------------------------------------------- 1 | ================= 2 | :mod:`Arithmetic` 3 | ================= 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.arithmetic 9 | :members: 10 | 11 | .. |power_function| mathmacro:: \\ \, y = [(Scale * x) + Shift]^{Power} 12 | 13 | .. |scale_function| mathmacro:: \\ [Axis, Axis + NumAxes) 14 | 15 | .. |gram_matrix_function| mathmacro:: \\ \, y = xx^{T} -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/cast.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | :mod:`Cast` 3 | =========== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.cast 9 | :members: -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/contrib/rcnn.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | :mod:`R-CNN` 3 | ============ 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.contrib.rcnn.ops 9 | :members: -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/control_flow.rst: -------------------------------------------------------------------------------- 1 | =================== 2 | :mod:`Control Flow` 3 | =================== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.control_flow 9 | :members: -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/custom/data_process.rst: -------------------------------------------------------------------------------- 1 | ================== 2 | :mod:`DataProcess` 3 | ================== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. currentmodule:: dragon.operators.custom.data_process 9 | 10 | .. autoclass:: DataProcessOp 11 | :members: -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/custom/minibatch.rst: -------------------------------------------------------------------------------- 1 | ================ 2 | :mod:`MiniBatch` 3 | ================ 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. currentmodule:: dragon.operators.custom.minibatch 9 | 10 | .. autoclass:: MiniBatchOp 11 | :members: 12 | 13 | .. _dragon.io: ../../io.html -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/custom/vec_mult.rst: -------------------------------------------------------------------------------- 1 | ============== 2 | :mod:`VecMult` 3 | ============== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. currentmodule:: dragon.operators.custom.vec_mult 9 | 10 | .. autoclass:: VecMultOp 11 | :members: -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/data.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | :mod:`Data` 3 | =========== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. 
automodule:: dragon.operators.data 9 | :members: 10 | 11 | .. _LMDB: http://lmdb.readthedocs.io/en/release 12 | .. _DataBatch: ../io/data_batch.html#dragon.io.data_batch 13 | .. _DataReader: ../io/data_reader.html#dragon.io.data_reader 14 | .. _DataTransformer: ../io/data_transformer.html#dragon.io.data_transformer 15 | .. _BlobFetcher: ../io/blob_fetcher.html#dragon.io.blob_fetcher -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/initializer.rst: -------------------------------------------------------------------------------- 1 | ================== 2 | :mod:`Initializer` 3 | ================== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.initializer 9 | :members: 10 | 11 | .. |glorot_uniform_bounds| mathmacro:: \\ (-\sqrt{\frac{Scale}{Fan}}, \sqrt{\frac{Scale}{Fan}}) 12 | 13 | .. |glorot_normal_parameters| mathmacro:: \\ (\mu = 0, \sigma = \sqrt{\frac{Scale}{Fan}}) 14 | 15 | .. |truncated_normal_bounds| mathmacro:: \\ (\mu - 2\sigma, \mu + 2\sigma) 16 | -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/loss.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | :mod:`Loss` 3 | =========== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.loss 9 | :members: 10 | 11 | .. |l1_loss_function| mathmacro:: \, Loss = \frac{ \sum \left| Weight * (Input - Target) \right|}{ Normalization} 12 | 13 | .. |l2_loss_function| mathmacro:: \, Loss = \frac{ \sum \frac{1}{2}\left|\left| Weight * (Input - Target) \right|\right|^{2}}{ Normalization} -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/misc.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | :mod:`Misc` 3 | =========== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.misc 9 | :members: 10 | 11 | .. _Template(*args, **kwargs): #dragon.operators.misc.Template 12 | .. _DataProcessOp: custom/data_process.html 13 | .. _VecMultOp: custom/vec_mult.html 14 | 15 | .. |moving_average_function| mathmacro:: \\ \, \\ Variable = Decay * Variable + (1 - Decay) * Value -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/mpi.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | :mod:`MPI` 3 | ========== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.mpi 9 | :members: -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/ndarray.rst: -------------------------------------------------------------------------------- 1 | ============== 2 | :mod:`NDArray` 3 | ============== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.ndarray 9 | :members: 10 | 11 | .. _ops.Reduce(*args, **kwargs): #dragon.operators.ndarray.Reduce -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/norm.rst: -------------------------------------------------------------------------------- 1 | ==================== 2 | :mod:`Normalization` 3 | ==================== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.norm 9 | :members: 10 | 11 | ..
|batchnorm_function| mathmacro:: \\ \, \\ \mu_{B} = \frac{1}{m} \sum_{i=1}^{m}x_{i} \\ 12 | \sigma_{B}^{2} = \frac{1}{m} \sum_{i=1}^{m}(x_{i} - \mu_{B})^{2} \\ 13 | \hat{x}_{i} = \frac{x_{i} - \mu_{B}}{\sqrt{\sigma_{B}^{2} + \epsilon}} \\ \, 14 | 15 | .. |batchnorm_scale_function| mathmacro:: \\ \, \\ \mu_{B} = \frac{1}{m} \sum_{i=1}^{m}x_{i} \\ 16 | \sigma_{B}^{2} = \frac{1}{m} \sum_{i=1}^{m}(x_{i} - \mu_{B})^{2} \\ 17 | \hat{x}_{i} = \frac{x_{i} - \mu_{B}}{\sqrt{\sigma_{B}^{2} + \epsilon}} \\ y_{i} = \gamma\hat{x}_{i} + \beta \\ \, 18 | 19 | .. |batchrenorm_function| mathmacro:: \\ \, \\ \mu_{B} = \frac{1}{m} \sum_{i=1}^{m}x_{i} \\ 20 | \sigma_{B}^{2} = \frac{1}{m} \sum_{i=1}^{m}(x_{i} - \mu_{B})^{2} \\ 21 | \hat{x}_{i} = \frac{x_{i} - \mu_{B}}{\sqrt{\sigma_{B}^{2} + \epsilon}} \cdot r + d \\ \, 22 | 23 | .. |default_moving_average_function| mathmacro:: \\ \, \\ x_{moving} \leftarrow Momentum * x_{moving} + (1 - Momentum) * x_{stat} \\ \, 24 | 25 | .. |caffe_moving_average_function| mathmacro:: \\ \, \\ x_{moving} \leftarrow Momentum * x_{moving} + x_{stat} \\ \, 26 | 27 | 28 | .. _ops.Scale(*args, **kwargs): arithmetic.html#dragon.operators.arithmetic.Scale 29 | 30 | .. _Caffe: https://github.com/BVLC/caffe/ 31 | -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/recurrent.rst: -------------------------------------------------------------------------------- 1 | ================ 2 | :mod:`Recurrent` 3 | ================ 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.recurrent 9 | :members: -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/operators/vision.rst: -------------------------------------------------------------------------------- 1 | ============= 2 | :mod:`Vision` 3 | ============= 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | .. automodule:: dragon.operators.vision 9 | :members: 10 | 11 | .. |conv_output_dim| mathmacro:: \\ DilatedKernelSize = Dilation * (KernelSize - 1) + 1 \\ 12 | OutputDim = (InputDim + 2 * Pad - DilatedKernelSize) / Stride + 1 13 | 14 | .. |deconv_output_dim| mathmacro:: \\ DilatedKernelSize = Dilation * (KernelSize - 1) + 1 \\ 15 | OutputDim = Stride * (InputDim - 1) + DilatedKernelSize - 2 * Pad 16 | 17 | .. |pooling_output_dim| mathmacro:: \\ OutputDim = Ceil((InputDim + 2 * Pad - KernelSize) / Stride) + 1 -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/tools.rst: -------------------------------------------------------------------------------- 1 | =================== 2 | :mod:`dragon.tools` 3 | =================== 4 | 5 | Overview 6 | -------- 7 | 8 | |para| In this section, we will introduce several tools for flexible Deep Learning tasks. 9 | 10 | |para| Those tools are built on existing Python packages, 11 | which are easy to install (via `pip`_) and adapt to all OS platforms. We will never rebuild them 12 | in the C++ backend, because they would make our kernels dirty and messy (especially the **Sequential Databases**). 13 | 14 | 15 | ToolBox 16 | ------- 17 | 18 | ..
toctree:: 19 | :hidden: 20 | 21 | tools/db 22 | tools/im2db 23 | tools/summary_writer 24 | 25 | ==================== ==================================================================================== 26 | List Brief 27 | ==================== ==================================================================================== 28 | `LMDB`_ A wrapper of the LMDB package. 29 | `IM2DB`_ Make the sequential database for images. 30 | `SummaryWriter`_ Write summaries for DragonBoard. 31 | ==================== ==================================================================================== 32 | 33 | 34 | .. |para| raw:: html 35 | 36 |

37 | 38 | .. _pip: https://pypi.python.org/pypi/pip 39 | 40 | .. _LMDB: tools/db.html 41 | .. _IM2DB: tools/im2db.html 42 | .. _SummaryWriter: tools/summary_writer.html 43 | -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/tools/db.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | :mod:`LMDB` 3 | =========== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | Quick Shortcut 9 | -------------- 10 | 11 | ==================== ============================================================================= 12 | List Brief 13 | ==================== ============================================================================= 14 | `LMDB.open`_ Open the database. 15 | `LMDB.put`_ Put the item. 16 | `LMDB.commit`_ Commit all items that have been put. 17 | `LMDB.set`_ Set the cursor to the given key. 18 | `LMDB.get`_ Get the value of the given key. 19 | `LMDB.next`_ Move the cursor to the next item. 20 | `LMDB.key`_ Get the key under the current cursor. 21 | `LMDB.value`_ Get the value under the current cursor. 22 | `LMDB.close`_ Close the database. 23 | ==================== ============================================================================= 24 | 25 | API Reference 26 | ------------- 27 | 28 | .. currentmodule:: dragon.tools.db 29 | 30 | .. autoclass:: LMDB 31 | :members: 32 | 33 | .. automethod:: __init__ 34 | 35 | .. _LMDB.open: #dragon.tools.db.LMDB.open 36 | .. _LMDB.put: #dragon.tools.db.LMDB.put 37 | .. _LMDB.commit: #dragon.tools.db.LMDB.commit 38 | .. _LMDB.set: #dragon.tools.db.LMDB.set 39 | .. _LMDB.get: #dragon.tools.db.LMDB.get 40 | .. _LMDB.next: #dragon.tools.db.LMDB.next 41 | .. _LMDB.key: #dragon.tools.db.LMDB.key 42 | .. _LMDB.value: #dragon.tools.db.LMDB.value 43 | .. _LMDB.close: #dragon.tools.db.LMDB.close -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/tools/im2db.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | :mod:`IM2DB` 3 | ============ 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | Quick Shortcut 9 | -------------- 10 | 11 | ==================== ============================================================================= 12 | List Brief 13 | ==================== ============================================================================= 14 | `resize_image`_ Resize the image by the shortest edge. 15 | `make_db`_ Make the image database. 16 | ==================== ============================================================================= 17 | 18 | 19 | API Reference 20 | ------------- 21 | 22 | .. automodule:: dragon.tools.im2db 23 | :members: 24 | 25 | .. _resize_image: #dragon.tools.im2db.resize_image 26 | .. _make_db: #dragon.tools.im2db.make_db -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/tools/summary_writer.rst: -------------------------------------------------------------------------------- 1 | ==================== 2 | :mod:`SummaryWriter` 3 | ==================== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | Quick Shortcut 9 | -------------- 10 | 11 | ==================== ============================================================================= 12 | List Brief 13 | ==================== ============================================================================= 14 | `ScalarSummary`_ Write scalar summary.
15 | ==================== ============================================================================= 16 | 17 | 18 | API Reference 19 | ------------- 20 | 21 | .. currentmodule:: dragon.tools.summary_writer 22 | 23 | .. autoclass:: ScalarSummary 24 | :members: 25 | 26 | .. automethod:: __init__ 27 | 28 | .. _ScalarSummary: #dragon.tools.summary_writer.ScalarSummary -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/vm.rst: -------------------------------------------------------------------------------- 1 | ================ 2 | :mod:`dragon.vm` 3 | ================ 4 | 5 | Overview 6 | -------- 7 | 8 | |para| Although the proposed `TinyDragon`_ can serve as a framework by itself, we still desire it to be more approachable. 9 | An interesting idea is that the basic primitives of `Theano`_ can be used to build `Caffe`_ or `TensorFlow`_; 10 | thus, these modern frameworks can share a common backend, provided that enough operator kernels are available. 11 | 12 | |para| In this section, we demonstrate that a cross-framework frontend is feasible, and furthermore, each framework can 13 | benefit from the crucial interfaces of the others, especially when one of its own is not well designed. 14 | 15 | VirtualBox 16 | ---------- 17 | 18 | .. toctree:: 19 | :hidden: 20 | 21 | vm/caffe 22 | vm/theano 23 | 24 | ==================== ==================================================================================== 25 | List Brief 26 | ==================== ==================================================================================== 27 | `Theano`_ **Theano** is a progenitor of the modern deep learning frameworks. 28 | `Caffe`_ **Caffe** is one of the most famous deep learning frameworks for Computer Vision. 29 | ==================== ==================================================================================== 30 | 31 | .. |para| raw:: html 32 | 33 |

34 | 35 | 36 | .. _TinyDragon: ../index.html#tinydragon 37 | .. _Theano: vm/theano.html 38 | .. _Caffe: vm/caffe.html 39 | .. _TensorFlow: ../index.html#tensorflow 40 | -------------------------------------------------------------------------------- /Dragon/python/dragon/docs/contents/vm/theano/compile.rst: -------------------------------------------------------------------------------- 1 | ============== 2 | :mod:`Compile` 3 | ============== 4 | 5 | .. toctree:: 6 | :hidden: 7 | 8 | 9 | Quick Shortcut 10 | -------------- 11 | 12 | ============================== ======================================================================= 13 | List Brief 14 | ============================== ======================================================================= 15 | `function`_ Return a callable function that will compute outputs. 16 | `shared`_ Construct a Tensor initialized with numerical values. 17 | `scan`_ Run a dynamic loop of the given one-step function. 18 | ============================== ======================================================================= 19 | 20 | 21 | API Reference 22 | ------------- 23 | 24 | .. automodule:: dragon.vm.theano.compile.function 25 | :members: 26 | 27 | .. automodule:: dragon.vm.theano.compile.sharedvalue 28 | :members: 29 | 30 | .. automodule:: dragon.vm.theano.compile.scan 31 | :members: 32 | 33 | .. _config.SetDebugMode(*args, **kwargs): ../../config.html#dragon.config.SetDebugMode 34 | .. _memonger.share_grads(*args, **kwargs): ../../memonger.html#dragon.memonger.share_grads 35 | .. _config.EnableCPU(): ../../config.html#dragon.config.EnableCPU 36 | .. _config.EnableCUDA(*args, **kwargs): ../../config.html#dragon.config.EnableCUDA 37 | .. _config.SetRandomSeed(*args, **kwargs): ../../config.html#dragon.config.SetRandomSeed 38 | .. _T.grad(*args, **kwargs): tensor.html#dragon.vm.theano.gradient.grad 39 | 40 | .. _function: #dragon.vm.theano.compile.function.function 41 | .. _shared: #dragon.vm.theano.compile.sharedvalue.shared 42 | .. _scan: #dragon.vm.theano.compile.scan.scan 43 | 44 | 45 | -------------------------------------------------------------------------------- /Dragon/python/dragon/import_c_apis.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import sys 12 | import logging 13 | 14 | try: 15 | from dragon.libdragon import * 16 | except ImportError as e: 17 | logging.critical( 18 | 'cannot load dragon.
Error: {0}'.format(str(e))) 19 | sys.exit(1) -------------------------------------------------------------------------------- /Dragon/python/dragon/io/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from .data_batch import DataBatch 8 | 9 | 10 | -------------------------------------------------------------------------------- /Dragon/python/dragon/io/utils.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | def GetProperty(kwargs, name, default): 8 | return kwargs[name] \ 9 | if name in kwargs else default 10 | -------------------------------------------------------------------------------- /Dragon/python/dragon/memonger.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | def ShareGrads(enabled=True): 8 | """Enable gradients sharing globally. 9 | 10 | Parameters 11 | ---------- 12 | enabled : boolean 13 | Whether to share grads. 14 | 15 | Returns 16 | ------- 17 | None 18 | 19 | Examples 20 | -------- 21 | >>> import dragon.memonger as opt 22 | >>> opt.ShareGrads() 23 | 24 | """ 25 | from dragon.config import option 26 | option['share_grads'] = enabled 27 | 28 | 29 | def Drop(op_func, *args, **kwargs): 30 | """Drop(Share) the inputs for outputs. 31 | 32 | Parameters 33 | ---------- 34 | op_func : lambda 35 | The function of any operator. 36 | args : list 37 | The args of this operator. 38 | kwargs : dict 39 | The kwargs of this operator. 40 | 41 | Returns 42 | ------- 43 | Tensor or list of Tensor 44 | As the ``op_func`` returns. 45 | 46 | Examples 47 | -------- 48 | >>> from dragon.core.tensor import Tensor 49 | >>> import dragon.ops as ops 50 | >>> import dragon.memonger as opt 51 | >>> data = Tensor().Variable() 52 | >>> conv_1 = ops.Conv2D(data, num_output=8) 53 | >>> conv_1_bn = opt.Drop(ops.BatchNorm, [conv_1, Tensor().Variable(), Tensor().Variable()]) 54 | >>> conv_1_relu = opt.Drop(ops.Relu, conv_1_bn) 55 | 56 | """ 57 | kwargs['mirror_stage'] = True 58 | return op_func(*args, **kwargs) -------------------------------------------------------------------------------- /Dragon/python/dragon/operators/cast.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | from . import * 12 | 13 | 14 | def FloatToHalf(inputs, **kwargs): 15 | """Cast the type of tensor from ``float32`` to ``float16``. 16 | 17 | Parameters 18 | ---------- 19 | inputs : Tensor 20 | The ``float32`` tensor. 21 | 22 | Returns 23 | ------- 24 | Tensor 25 | The ``float16`` tensor.
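Examples
--------
A minimal usage sketch (assuming the op is also exported via ``dragon.ops``):

>>> import dragon.ops as ops
>>> from dragon.core.tensor import Tensor
>>> x = Tensor().Variable()  # assumed to hold float32 data
>>> y = ops.FloatToHalf(x)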
26 | 27 | """ 28 | CheckInputs(inputs, 1) 29 | arguments = ParseArguments(locals()) 30 | 31 | output = Tensor.CreateOperator(nout=1, op_type='FloatToHalf', **arguments) 32 | 33 | if inputs.shape is not None: 34 | output.shape = inputs.shape[:] 35 | 36 | return output -------------------------------------------------------------------------------- /Dragon/python/dragon/operators/contrib/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | # R-CNN ops 12 | from dragon.operators.contrib.rcnn.ops import * 13 | 14 | -------------------------------------------------------------------------------- /Dragon/python/dragon/operators/contrib/rcnn/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/operators/control_flow.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | from . import * 12 | 13 | 14 | def Copy(inputs, **kwargs): 15 | """Copy A to B. 16 | 17 | Parameters 18 | ---------- 19 | inputs : list or Tensor 20 | The inputs, represent A and B respectively. 21 | 22 | Returns 23 | ------- 24 | Tensor 25 | The output tensor, i.e., B(taking values of A). 26 | 27 | """ 28 | CheckInputs(inputs, 2) 29 | arguments = ParseArguments(locals()) 30 | arguments['existing_outputs'] = [arguments['inputs'][1]] 31 | arguments['inputs'] = [arguments['inputs'][0]] 32 | 33 | output = Tensor.CreateOperator(nout=1, op_type='Copy', **arguments) 34 | 35 | if inputs[0].shape is not None: 36 | output.shape = inputs[0].shape[:] 37 | 38 | return output 39 | 40 | 41 | def Equal(inputs, **kwargs): 42 | """Equal Comparing between A and B. 43 | 44 | Parameters 45 | ---------- 46 | inputs : list of Tensor 47 | The inputs, represent A and B respectively. 48 | 49 | Returns 50 | ------- 51 | Tensor 52 | The comparing results. 
53 | 54 | """ 55 | CheckInputs(inputs, 2) 56 | arguments = ParseArguments(locals()) 57 | 58 | output = Tensor.CreateOperator(nout=1, op_type='Compare', 59 | operation='EQUAL', **arguments) 60 | 61 | if all(input.shape is not None for input in inputs): 62 | output.shape = inputs[0].shape[:] 63 | 64 | return output -------------------------------------------------------------------------------- /Dragon/python/dragon/operators/custom/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/operators/custom/minibatch.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | import dragon.core.workspace as ws 8 | from dragon.io.data_batch import DataBatch 9 | 10 | class MiniBatchOp(object): 11 | """ 12 | How to form a minibatch based on the `dragon.io`_ package. 13 | """ 14 | def setup(self, inputs, outputs): 15 | """Setup for params or options. 16 | 17 | Parameters 18 | ---------- 19 | inputs : list of str 20 | Indicating the name of input tensors. 21 | outputs : list of str 22 | Indicating the name of output tensors. 23 | 24 | Returns 25 | ------- 26 | None 27 | 28 | """ 29 | kwargs = eval(self.param_str) 30 | self._data_batch = DataBatch(**kwargs) 31 | 32 | 33 | def run(self, inputs, outputs): 34 | """Run method, i.e., forward pass. 35 | 36 | Parameters 37 | ---------- 38 | inputs : list of str 39 | Indicating the name of input tensors. 40 | outputs : list of str 41 | Indicating the name of output tensors. 42 | 43 | Returns 44 | ------- 45 | None 46 | 47 | """ 48 | blobs = self._data_batch.get() 49 | for idx, blob in enumerate(blobs): 50 | ws.FeedTensor(outputs[idx], blob) -------------------------------------------------------------------------------- /Dragon/python/dragon/operators/recurrent.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | from . import * 12 | 13 | 14 | def LSTMUnit(c_t_1, gate_input, cont_t=None, **kwargs): 15 | """Simple LSTMCell module. 16 | 17 | Parameters 18 | ---------- 19 | c_t_1 : Tensor 20 | The initial state of the cell. 21 | gate_input : Tensor 22 | The concatenated input for 4 gates. 23 | cont_t : Tensor 24 | The mask to discard specific instances. Default is ``None``. 25 | 26 | Returns 27 | ------- 28 | tuple 29 | The LSTM outputs, representing ``c`` and ``h`` respectively.
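Examples
--------
A minimal sketch (the tensor contents are assumed; ``gate_input`` is expected to pack the 4 gate pre-activations):

>>> from dragon.core.tensor import Tensor
>>> c_t_1, gate_input = Tensor().Variable(), Tensor().Variable()
>>> c, h = LSTMUnit(c_t_1, gate_input)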
30 | 31 | """ 32 | arguments = ParseArguments(locals()) 33 | if cont_t is not None: 34 | if not isinstance(cont_t, Tensor): 35 | raise TypeError('The type of cont_t should be Tensor.') 36 | arguments['cont_t'] = cont_t.name 37 | return Tensor.CreateOperator(inputs=[c_t_1, gate_input], nout=2, 38 | op_type='LSTMUnit', **arguments) -------------------------------------------------------------------------------- /Dragon/python/dragon/protos/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/tools/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/tools/board/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neopenx/Dragon/0e639a7319035ddc81918bd3df059230436ee0a1/Dragon/python/dragon/tools/board/__init__.py -------------------------------------------------------------------------------- /Dragon/python/dragon/tools/summary_writer.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.core.tensor import Tensor 8 | import dragon.core.workspace as ws 9 | import os 10 | 11 | class ScalarSummary(object): 12 | """Write scalar summary. 13 | 14 | Examples 15 | -------- 16 | >>> sw = ScalarSummary(log_dir='logs') 17 | >>> sw.add_summary(('loss', 2.333), 0) 18 | 19 | """ 20 | def __init__(self, log_dir='logs'): 21 | """Construct a ScalarSummary writer. 22 | 23 | Parameters 24 | ---------- 25 | log_dir : str 26 | The root folder of logs. 27 | 28 | Returns 29 | ------- 30 | ScalarSummary 31 | The scalar writer. 32 | 33 | """ 34 | self.log_dir = os.path.join(log_dir, 'scalar') 35 | if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) 36 | 37 | def add_summary(self, scalar, global_step): 38 | """Add a summary. 39 | 40 | Parameters 41 | ---------- 42 | scalar : tuple or Tensor 43 | The scalar. 44 | global_step : int 45 | The time step of this summary.
46 | 47 | Returns 48 | ------- 49 | None 50 | 51 | """ 52 | if isinstance(scalar, Tensor): 53 | key, value = scalar.name, ws.FetchTensor(scalar)[0] 54 | elif isinstance(scalar, tuple): key, value = scalar 55 | else: raise TypeError() 56 | key = key.replace('/', '_') 57 | 58 | with open(os.path.join(self.log_dir, key + '.txt'), 'a') as f: 59 | f.write(str(global_step) + ' ' + str(value) + '\n') -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/caffe/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Caffe @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from .solver import SGDSolver, NesterovSolver, RMSPropSolver, AdamSolver 8 | from .net import Net, PartialNet 9 | from .misc import set_mode_cpu, set_mode_gpu, set_device, set_random_seed, \ 10 | root_solver, set_root_solver 11 | 12 | Layer = object 13 | TRAIN = "TRAIN" 14 | TEST = "TEST" 15 | 16 | from .net_spec import layers, params, NetSpec, to_proto -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/caffe/layers/mpi.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Caffe @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.ops import MPIBroadcast, MPIGather 8 | 9 | from ..layer import Layer 10 | 11 | class MPIBroadcastLayer(Layer): 12 | """The implementation of ``MPIBroadcastLayer``. 13 | 14 | Parameters 15 | ---------- 16 | root : int 17 | The world rank of root. Refer `MPIParameter.root`_. 18 | 19 | """ 20 | def __init__(self, LayerParameter): 21 | super(MPIBroadcastLayer, self).__init__(LayerParameter) 22 | param = LayerParameter.mpi_param 23 | self._param = {'root': param.root} 24 | 25 | def Setup(self, bottom): 26 | super(MPIBroadcastLayer, self).Setup(bottom) 27 | input = bottom[0] if isinstance(bottom, list) else bottom 28 | return MPIBroadcast(input, **self._param) 29 | 30 | 31 | class MPIGatherLayer(Layer): 32 | """The implementation of ``MPIGatherLayer``. 33 | 34 | Parameters 35 | ---------- 36 | root : int 37 | The world rank of root. Refer `MPIParameter.root`_. 
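Examples
--------
A hypothetical sketch (``param`` stands for a parsed ``LayerParameter`` whose ``mpi_param.root`` is set; ``bottom`` is an existing Tensor):

>>> layer = MPIGatherLayer(param)
>>> top = layer.Setup([bottom])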
38 | 39 | """ 40 | def __init__(self, LayerParameter): 41 | super(MPIGatherLayer, self).__init__(LayerParameter) 42 | param = LayerParameter.mpi_param 43 | self._param = {'root': param.root} 44 | 45 | def Setup(self, bottom): 46 | super(MPIGatherLayer, self).Setup(bottom) 47 | input = bottom[0] if isinstance(bottom, list) else bottom 48 | return MPIGather(input, nout=len(self._top), **self._param) -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/caffe/proto/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Caffe @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/caffe/timer.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Caffe @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | import time 8 | 9 | class Timer(object): 10 | """A simple timer.""" 11 | def __init__(self): 12 | self.total_time = 0. 13 | self.calls = 0 14 | self.start_time = 0. 15 | self.diff = 0. 16 | self.average_time = 0. 17 | 18 | def tic(self): 19 | # use time.time instead of time.clock because time.clock 20 | # does not normalize for multithreading 21 | self.start_time = time.time() 22 | 23 | def toc(self, average=True): 24 | self.diff = time.time() - self.start_time 25 | self.total_time += self.diff 26 | self.calls += 1 27 | self.average_time = self.total_time / self.calls 28 | if average: 29 | return self.average_time 30 | else: 31 | return self.diff 32 | -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/caffe/utils.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Caffe @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | def ToFillerArgs(FillerParameter): 8 | kwargs = \ 9 | {'value' : FillerParameter.value, 10 | 'low': FillerParameter.min, 11 | 'high': FillerParameter.max, 12 | 'mean': FillerParameter.mean, 13 | 'std': FillerParameter.std} 14 | return kwargs 15 | 16 | -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | # Framework 8 | from dragon.vm.tensorflow.framework.framework_lib import * 9 | 10 | # Session 11 | from dragon.vm.tensorflow.client.client_lib import * 12 | 13 | # Ops 14 | from dragon.vm.tensorflow.ops.standard_ops import * 15 | 16 | # Bring in subpackages. 17 | from dragon.vm.tensorflow.ops import nn 18 | 19 | # Import the names from training.py as train.Name.
20 | from dragon.vm.tensorflow.training import training as train 21 | 22 | #from .utils.gradients import * 23 | 24 | # Export modules and constants 25 | from dragon.vm.tensorflow.layers import layers 26 | 27 | 28 | -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/client/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neopenx/Dragon/0e639a7319035ddc81918bd3df059230436ee0a1/Dragon/python/dragon/vm/tensorflow/client/__init__.py -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/client/client_lib.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.vm.tensorflow.client.session import Session 8 | 9 | from dragon.vm.tensorflow.client.session import get_default_session -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/contrib/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from .layers import * -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/contrib/framework/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/contrib/framework/ops/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/contrib/framework/ops/variables.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.vm.tensorflow.framework import ops 8 | from dragon.vm.tensorflow.ops import var_scope as variable_scope 9 | 10 | 11 | def get_variables(scope=None, suffix=None, 12 | collection=ops.GraphKeys.GLOBAL_VARIABLES): 13 | if isinstance(scope, variable_scope.VariableScope): 14 | scope = scope.name 15 | if suffix is not None: 16 | scope = (scope or '') + '.*' + suffix 17 | return ops.get_collection(collection, scope) -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/contrib/layers/__init__.py: 
-------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from .layers import * -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/contrib/learn/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neopenx/Dragon/0e639a7319035ddc81918bd3df059230436ee0a1/Dragon/python/dragon/vm/tensorflow/contrib/learn/__init__.py -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/contrib/learn/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/contrib/slim/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.vm.tensorflow.contrib.framework.ops.variables import * 8 | 9 | from dragon.vm.tensorflow.contrib.layers.layers import \ 10 | convolution, convolution2d, conv2d 11 | 12 | from dragon.vm.tensorflow.contrib.layers.layers import \ 13 | fully_connected 14 | 15 | from dragon.vm.tensorflow.contrib.layers.layers import \ 16 | batch_norm 17 | 18 | from dragon.vm.tensorflow.contrib.layers.layers import \ 19 | avg_pool2d, max_pool2d 20 | 21 | from dragon.vm.tensorflow.contrib.layers.layers import \ 22 | flatten -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/examples/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/examples/tutorials/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/examples/tutorials/mnist/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | -------------------------------------------------------------------------------- 
/Dragon/python/dragon/vm/tensorflow/examples/tutorials/mnist/input_data.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.vm.tensorflow.contrib.learn.datasets.mnist import read_data_sets -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/framework/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/framework/framework_lib.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from .ops import Graph 8 | 9 | # Utilities used when building a Graph. 10 | from dragon.vm.tensorflow.framework.ops import device 11 | from dragon.vm.tensorflow.framework.ops import name_scope 12 | from dragon.vm.tensorflow.framework.ops import get_default_graph 13 | from dragon.vm.tensorflow.framework.ops import add_to_collection 14 | from dragon.vm.tensorflow.framework.ops import get_collection 15 | from dragon.vm.tensorflow.framework.ops import convert_to_tensor 16 | from dragon.vm.tensorflow.framework.ops import GraphKeys 17 | from dragon.vm.tensorflow.framework.constant_op import * 18 | 19 | from dragon.vm.tensorflow.framework.dtypes import * 20 | 21 | 22 | from dragon.vm.tensorflow.framework.tensor_shape import Dimension 23 | from dragon.vm.tensorflow.framework.tensor_shape import TensorShape -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/framework/random_seed.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | import dragon.config as config 8 | 9 | 10 | DEFAULT_GRAPH_SEED = 87654321 11 | _MAXINT32 = 2**31 - 1 12 | 13 | 14 | def _truncate_seed(seed): 15 | return seed % _MAXINT32  # Truncate to fit into 32-bit integer 16 | 17 | 18 | def get_seed(op_seed): 19 | """Return the global random seed. 20 | 21 | Parameters 22 | ---------- 23 | op_seed : int 24 | The optional seed to use. 25 | 26 | Returns 27 | ------- 28 | tuple 29 | A tuple of two ints to use. 30 | 31 | """ 32 | graph_seed = config.GetRandomSeed() 33 | if graph_seed is not None: 34 | if op_seed is None: 35 | # pylint: disable=protected-access 36 | op_seed = graph_seed 37 | seeds = _truncate_seed(graph_seed), _truncate_seed(op_seed) 38 | else: 39 | if op_seed is not None: 40 | seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed) 41 | else: 42 | seeds = None, None 43 | return seeds 44 | 45 | 46 | def set_random_seed(seed): 47 | """Set the global random seed.
48 | 49 | Parameters 50 | ---------- 51 | seed : int 52 | The seed to use. 53 | 54 | """ 55 | config.SetRandomSeed(seed) 56 | 57 | -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/layers/layers.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.vm.tensorflow.layers.convolutional import conv2d 8 | 9 | from dragon.vm.tensorflow.layers.core import dense 10 | 11 | from dragon.vm.tensorflow.layers.normalization import \ 12 | batch_normalization, batch_norm, BatchNorm 13 | 14 | from dragon.vm.tensorflow.layers.pooling import \ 15 | average_pooling2d, max_pooling2d -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/ops/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/ops/control_flow_ops.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | __all__ = ['equal'] 8 | 9 | import dragon.ops as ops 10 | 11 | 12 | def equal(a, b, name=None): 13 | 14 | return ops.Equal([a, b]) -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/ops/dtypes.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | import numpy as np 8 | 9 | __all__ = ['int32', 'int64', 'float32', 'bool'] 10 | 11 | int32 = np.int32 12 | int64 = np.int64 13 | float32 = np.float32 14 | bool = np.bool -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/ops/gradients_impl.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.core.tensor import Tensor 8 | import dragon.vm.theano.tensor as T 9 | 10 | def gradients(ys, xs, **kwargs): 11 | """Compute the gradients for variables with 
respect to the cost. 12 | 13 | Parameters 14 | ---------- 15 | ys : Tensor or list of Tensor 16 | The tensor(s) to be differentiated. 17 | xs : Tensor or list of Tensor 18 | The tensor(s) to be used for differentiation. 19 | 20 | Returns 21 | ------- 22 | Tensor or list of Tensor 23 | The gradients of variables. 24 | 25 | """ 26 | if not isinstance(ys, list): 27 | ys = [ys] 28 | for y in ys: 29 | dxs = T.grad(y, xs) 30 | return dxs -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/ops/nn.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.vm.tensorflow.ops.nn_ops import * 8 | from dragon.vm.tensorflow.ops.nn_impl import * 9 | -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/ops/nn_impl.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.core.tensor import Tensor 8 | import dragon.ops as ops 9 | 10 | def batch_normalization(x, mean, variance, 11 | offset, scale, 12 | decay=0.9, variance_epsilon=1e-3, name=None): 13 | raise NotImplementedError('Deprecated. Use tf.layer.batch_normalization.') 14 | 15 | 16 | 17 | def batch_norm_with_global_normalization(t, m, v, 18 | beta, gamma, 19 | decay=0.9, variance_epsilon=1e-3, 20 | scale_after_normalization=True, name=None): 21 | raise NotImplementedError('Deprecated.
Use tf.layer.batch_normalization.') 22 | 23 | 24 | def l2_normalize(x, dim, epsilon=1e-12, name=None): 25 | return ops.L2Norm(inputs=x, 26 | axis=dim, 27 | num_axes=1, 28 | eps=epsilon) 29 | -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/ops/random_ops.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | __all__ = [ 8 | 'random_normal', 9 | 'truncated_normal', 10 | 'random_uniform' 11 | ] 12 | 13 | import dragon.ops as ops 14 | 15 | from dragon.vm.tensorflow.framework import dtypes 16 | 17 | 18 | def random_normal(shape, 19 | mean=0.0, 20 | stddev=1.0, 21 | dtype=dtypes.float32, 22 | seed=None, 23 | name=None): 24 | return ops.RandomNormal(shape, mean, stddev) 25 | 26 | 27 | def truncated_normal(shape, 28 | mean=0.0, 29 | stddev=1.0, 30 | dtype=dtypes.float32, 31 | seed=None, 32 | name=None): 33 | return ops.TruncatedNormal(shape, mean, stddev) 34 | 35 | 36 | def random_uniform(shape, 37 | minval=0, 38 | maxval=None, 39 | dtype=dtypes.float32, 40 | seed=None, 41 | name=None): 42 | return ops.RandomUniform(shape, minval, maxval) -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/ops/standard_ops.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.vm.tensorflow.ops.variables import * 8 | from dragon.vm.tensorflow.ops.var_scope import * 9 | 10 | # Ops 11 | from dragon.vm.tensorflow.ops.init_ops import * 12 | from dragon.vm.tensorflow.ops.random_ops import * 13 | from dragon.vm.tensorflow.ops.math_ops import * 14 | from dragon.vm.tensorflow.ops.array_ops import * 15 | from dragon.vm.tensorflow.ops.control_flow_ops import * 16 | from dragon.vm.tensorflow.ops.nn_ops import * 17 | from dragon.vm.tensorflow.ops.gradients_impl import gradients -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/protobuf/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neopenx/Dragon/0e639a7319035ddc81918bd3df059230436ee0a1/Dragon/python/dragon/vm/tensorflow/protobuf/__init__.py -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/protobuf/types.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | 3 | package tensorflow; 4 | 5 | enum DataType { 6 | // Not a legal value for DataType. Used to indicate a DataType field 7 | // has not been set. 8 | DT_INVALID = 0; 9 | 10 | // Data types that all computation devices are expected to be 11 | // capable to support. 
12 | DT_FLOAT = 1; 13 | DT_DOUBLE = 2; 14 | DT_INT32 = 3; 15 | DT_UINT8 = 4; 16 | DT_INT16 = 5; 17 | DT_INT8 = 6; 18 | DT_STRING = 7; 19 | DT_COMPLEX64 = 8; // Single-precision complex 20 | DT_INT64 = 9; 21 | DT_BOOL = 10; 22 | DT_QINT8 = 11; // Quantized int8 23 | DT_QUINT8 = 12; // Quantized uint8 24 | DT_QINT32 = 13; // Quantized int32 25 | DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops. 26 | DT_QINT16 = 15; // Quantized int16 27 | DT_QUINT16 = 16; // Quantized uint16 28 | DT_UINT16 = 17; 29 | DT_COMPLEX128 = 18; // Double-precision complex 30 | DT_HALF = 19; 31 | DT_RESOURCE = 20; 32 | DT_VARIANT = 21; // Arbitrary C++ data types 33 | DT_UINT32 = 22; 34 | DT_UINT64 = 23; 35 | 36 | // Do not use! These are only for parameters. Every enum above 37 | // should have a corresponding value below (verified by types_test). 38 | DT_FLOAT_REF = 101; 39 | DT_DOUBLE_REF = 102; 40 | DT_INT32_REF = 103; 41 | DT_UINT8_REF = 104; 42 | DT_INT16_REF = 105; 43 | DT_INT8_REF = 106; 44 | DT_STRING_REF = 107; 45 | DT_COMPLEX64_REF = 108; 46 | DT_INT64_REF = 109; 47 | DT_BOOL_REF = 110; 48 | DT_QINT8_REF = 111; 49 | DT_QUINT8_REF = 112; 50 | DT_QINT32_REF = 113; 51 | DT_BFLOAT16_REF = 114; 52 | DT_QINT16_REF = 115; 53 | DT_QUINT16_REF = 116; 54 | DT_UINT16_REF = 117; 55 | DT_COMPLEX128_REF = 118; 56 | DT_HALF_REF = 119; 57 | DT_RESOURCE_REF = 120; 58 | DT_VARIANT_REF = 121; 59 | DT_UINT32_REF = 122; 60 | DT_UINT64_REF = 123; 61 | } -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/training/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/training/saver.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | __all__ = ['Saver'] 8 | 9 | import dragon.core.workspace as ws 10 | from dragon.core.tensor import Tensor 11 | 12 | class Saver(object): 13 | def __init__(self, 14 | var_list=None, 15 | max_to_keep=5, 16 | name=None,): 17 | self.var_list = var_list 18 | 19 | def save(self, 20 | sess, 21 | save_path, 22 | global_step=None): 23 | from ..core.variables import VARIABLES 24 | global VARIABLES 25 | var_list = VARIABLES if self.var_list is None else self.var_list 26 | filename = save_path 27 | if global_step is not None: 28 | if isinstance(global_step, Tensor): 29 | __ndarray__global_step = ws.FetchTensor(global_step) 30 | if __ndarray__global_step.size != 1: 31 | raise ValueError('global step must be a scalar of length 1.') 32 | filename += '-' + str(__ndarray__global_step.flatten()[0]) 33 | ws.Snapshot(var_list.values(), filename=filename, suffix='') 34 | 35 | def restore(self, sess, save_path): 36 | ws.Restore(save_path) -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/training/train.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # 
TensorFlow for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from .optimizer import * 8 | from .saver import * -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/training/training.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.vm.tensorflow.training.optimizer import GradientDescentOptimizer, \ 8 | MomentumOptimizer, \ 9 | RMSPropOptimizer, \ 10 | AdamOptimizer -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neopenx/Dragon/0e639a7319035ddc81918bd3df059230436ee0a1/Dragon/python/dragon/vm/tensorflow/util/__init__.py -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/tensorflow/util/nest.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # TensorFlow @ Dragon 3 | # Copyright 2016 The TensorFlow Authors 4 | # -------------------------------------------------------- 5 | 6 | import collections as _collections 7 | import six as _six 8 | 9 | def is_sequence(seq): 10 | if isinstance(seq, dict): 11 | return True 12 | return (isinstance(seq, _collections.Sequence) 13 | and not isinstance(seq, _six.string_types)) 14 | 15 | 16 | def _yield_value(iterable): 17 | if isinstance(iterable, dict): 18 | for key in sorted(_six.iterkeys(iterable)): 19 | yield iterable[key] 20 | else: 21 | for value in iterable: 22 | yield value 23 | 24 | 25 | def _yield_flat_nest(nest): 26 | for n in _yield_value(nest): 27 | if is_sequence(n): 28 | for ni in _yield_flat_nest(n): 29 | yield ni 30 | else: 31 | yield n 32 | 33 | 34 | def flatten(nest): 35 | if is_sequence(nest): 36 | return list(_yield_flat_nest(nest)) 37 | else: 38 | return [nest] 39 | -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/theano/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Theano @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from .compile import ( 8 | function, 9 | scan, 10 | shared) 11 | 12 | from .configdefaults import config -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/theano/compile/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Theano @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from .function import function 8 | from .scan import scan 9 | from .sharedvalue import shared -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/theano/compile/sharedvalue.py: 
-------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Theano @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | import numpy as np 8 | 9 | import dragon.core.workspace as ws 10 | from dragon.core.tensor import Tensor, GetTensorName 11 | 12 | def shared(value, name=None, **kwargs): 13 | """Construct a Tensor initialized with ``value``. 14 | 15 | Parameters 16 | ---------- 17 | value : basic type, list or numpy.ndarray 18 | The numerical values. 19 | name : str 20 | The name of tensor. 21 | 22 | Returns 23 | ------- 24 | Tensor 25 | The initialized tensor. 26 | 27 | """ 28 | if not isinstance(value, (int, float, list, np.ndarray)): 29 | raise TypeError("Unsupported type of value: {}".format(type(value))) 30 | if name is None: name = GetTensorName() 31 | 32 | tensor = Tensor(name).Variable() 33 | ws.FeedTensor(tensor, value) 34 | return tensor -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/theano/configdefaults.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Theano @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | class TheanoConfig(object): 8 | floatX = 'float32' 9 | 10 | config = TheanoConfig() -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/theano/gradient.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Theano @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.core.tensor import Tensor 8 | import dragon.ops as ops 9 | 10 | def grad(cost, wrt, **kwargs): 11 | """Compute the gradients for variables with respect to the cost. 12 | 13 | Parameters 14 | ---------- 15 | cost : Tensor 16 | The cost. 17 | wrt : Tensor or list of Tensor 18 | The variables w.r.t the cost. 19 | 20 | Returns 21 | ------- 22 | Tensor or list of Tensor 23 | The gradients of variables. 24 | 25 | Examples 26 | -------- 27 | >>> x = Tensor('x').Variable() 28 | >>> y = x * 2 29 | >>> dx = grad(y, x) 30 | 31 | >>> z = Tensor('z').Variable() 32 | >>> y = x + z 33 | >>> dx, dz = grad(y, [x, z]) 34 | 35 | """ 36 | grads = [] 37 | if not isinstance(wrt, list): wrt = [wrt] 38 | for w in wrt: 39 | cost.grad_wrts.append(w.name) 40 | w.grad_objs.append(cost) 41 | w_grad = Tensor(w.name + '_grad') 42 | w_grad.extra_targets.add(cost.name) 43 | w_grad.expressions = cost.expressions 44 | w_grad.grad_wrts.append(w.name) 45 | grads.append(w_grad) 46 | if len(grads) == 1: return grads[0] 47 | return grads 48 | 49 | 50 | def disconnected_grad(x): 51 | """Return the identity of input with truncated gradient flow. 52 | 53 | The expression itself is unaffected, but the gradient is stopped. 54 | 55 | Parameters 56 | ---------- 57 | x : Tensor 58 | The input tensor. 59 | 60 | Returns 61 | ------- 62 | Tensor 63 | The identity of input. 
64 | 65 | """ 66 | return ops.StopGradient(x) 67 | -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/theano/tensor/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Theano @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from .basic import * 8 | from .extra_ops import * 9 | 10 | from . import nnet 11 | 12 | from ..gradient import grad, disconnected_grad -------------------------------------------------------------------------------- /Dragon/python/dragon/vm/theano/tensor/extra_ops.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Theano @ Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from dragon.core.tensor import Tensor 8 | import dragon.ops as ops 9 | 10 | def cumsum(x, axis=None): 11 | """Compute the cumulative sum along the given axis. 12 | 13 | Parameters 14 | ---------- 15 | x : Tensor 16 | The input tensor. 17 | axis : int 18 | The axis to sum along. Default is ``None`` (along all axes). 19 | 20 | """ 21 | raise NotImplementedError() 22 | 23 | 24 | def cumprod(x, axis=None): 25 | """Compute the cumulative product along the given axis. 26 | 27 | Parameters 28 | ---------- 29 | x : Tensor 30 | The input tensor. 31 | axis : int 32 | The axis to multiply along. Default is ``None`` (along all axes). 33 | 34 | """ 35 | raise NotImplementedError() 36 | 37 | 38 | def to_one_hot(y, nb_class, **kwargs): 39 | """Generate a matrix where each row corresponds to the one hot encoding. 40 | 41 | The ``y`` should be a 1d vector. 42 | 43 | Parameters 44 | ---------- 45 | y: Tensor 46 | The input tensor. 47 | nb_class : int 48 | The number of classes. 49 | 50 | Returns 51 | ------- 52 | Tensor 53 | The one hot matrix. 54 | 55 | """ 56 | flat_y = ops.Flatten(y, keep_axes=1) 57 | return ops.OneHot(flat_y, depth=nb_class) 58 | --------------------------------------------------------------------------------
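For readers following the Theano shim, here is a NumPy reference of the result that `to_one_hot` delegates to `ops.Flatten`/`ops.OneHot` above (`reference_to_one_hot` is an illustrative helper for this document, not part of the Dragon sources):

```python
import numpy as np

def reference_to_one_hot(y, nb_class):
    # A sketch of the expected result: a (len(y), nb_class) matrix
    # with one_hot[i, y[i]] = 1 and zeros elsewhere.
    y = np.asarray(y).flatten().astype(np.int64)
    one_hot = np.zeros((y.size, nb_class), dtype=np.float32)
    one_hot[np.arange(y.size), y] = 1.0
    return one_hot

print(reference_to_one_hot([0, 2, 1], nb_class=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]
```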
/Dragon/python/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | six 3 | protobuf 4 | lmdb 5 | opencv-python 6 | Pillow -------------------------------------------------------------------------------- /Dragon/python/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | import os.path, sys 3 | import shutil 4 | 5 | packages = [] 6 | 7 | def find_packages(root_dir): 8 | filenames = os.listdir(root_dir) 9 | for filename in filenames: 10 | filepath = os.path.join(root_dir, filename) 11 | if os.path.isdir(filepath): 12 | find_packages(filepath) 13 | else: 14 | if filename == '__init__.py': 15 | packages.append(root_dir) 16 | 17 | def find_modules(): 18 | dragon_c_lib_win32 = '../lib/dragon.dll' 19 | dragon_c_lib_other = '../lib/libdragon.so' 20 | if os.path.exists(dragon_c_lib_win32): 21 | shutil.copy(dragon_c_lib_win32, 'dragon/libdragon.pyd') 22 | elif os.path.exists(dragon_c_lib_other): 23 | shutil.copy(dragon_c_lib_other, 'dragon/libdragon.so') 24 | else: 25 | print('ERROR: Unable to find the built modules. Build Dragon using CMake first.') 26 | sys.exit() 27 | 28 | 29 | def find_resources(): 30 | c_lib = ['libdragon.*'] 31 | protos = ['protos/*.proto', 'vm/caffe/proto/*.proto'] 32 | others = [] 33 | return c_lib + protos + others 34 | 35 | find_packages('dragon') 36 | find_modules() 37 | 38 | setup(name = 'dragon', 39 | version='0.2.1.11', 40 | description = 'Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework', 41 | url='https://github.com/neopenx/Dragon', 42 | author='Ting Pan', 43 | license='BSD 2-Clause', 44 | packages=packages, 45 | package_dir={'dragon': 'dragon'}, 46 | package_data={'dragon': find_resources()}) 47 | -------------------------------------------------------------------------------- /Dragon/src/contrib/rcnn/proposal_op.h: -------------------------------------------------------------------------------- 1 | // -------------------------------------------------------- 2 | // Dragon 3 | // Copyright(c) 2017 SeetaTech 4 | // Written by Ting Pan 5 | // -------------------------------------------------------- 6 | 7 | #ifndef DRAGON_CONTRIB_RCNN_PROPOSAL_OP_H_ 8 | #define DRAGON_CONTRIB_RCNN_PROPOSAL_OP_H_ 9 | 10 | #include "core/operator.h" 11 | 12 | namespace dragon { 13 | 14 | template <class Context> 15 | class ProposalOp final : public Operator<Context> { 16 | public: 17 | ProposalOp(const OperatorDef& op_def, Workspace* ws) 18 | : Operator<Context>(op_def, ws), 19 | strides(OperatorBase::GetRepeatedArg<int>("strides")), 20 | ratios(OperatorBase::GetRepeatedArg<float>("ratios")), 21 | scales(OperatorBase::GetRepeatedArg<float>("scales")), 22 | pre_nms_top_n(OperatorBase::GetSingleArg("pre_nms_top_n", 6000)), 23 | post_nms_top_n(OperatorBase::GetSingleArg("post_nms_top_n", 300)), 24 | nms_thresh(OperatorBase::GetSingleArg("nms_thresh", (float)0.7)), 25 | min_size(OperatorBase::GetSingleArg("min_size", 16)), 26 | min_level(OperatorBase::GetSingleArg("min_level", 2)), 27 | max_level(OperatorBase::GetSingleArg("max_level", 5)), 28 | canonical_level(OperatorBase::GetSingleArg("canonical_level", 4)), 29 | canonical_scale(OperatorBase::GetSingleArg("canonical_scale", 224)) {} 30 | 31 | void RunOnDevice() override; 32 | template <typename T> void RunWithType(); 33 | 34 | protected: 35 | vector<int> strides; 36 | vector<float> ratios, scales; 37 | TIndex pre_nms_top_n, post_nms_top_n, min_size, num_images; 38 | TIndex min_level, max_level, canonical_level, canonical_scale; 39 | float nms_thresh; 40 | Tensor anchors_, proposals_, roi_indices_, nms_mask_; 41 | }; 42 | 43 | } // namespace dragon 44 | 45 | #endif // DRAGON_CONTRIB_RCNN_PROPOSAL_OP_H_ -------------------------------------------------------------------------------- /Dragon/src/core/context.cc: -------------------------------------------------------------------------------- 1 | #include "core/context.h" 2 | #include "core/context_cuda.h" 3 | 4 | namespace dragon { 5 | 6 | CPUObject CPUContext::cpu_object_; 7 | #ifdef WITH_CUDA 8 | CUDAObject CUDAContext::cuda_object_; 9 | #endif // WITH_CUDA 10 | 11 | // cpu <- gpu 12 | template<> void CPUContext::Memcpy<CPUContext, CUDAContext>( 13 | size_t nbytes, void* dst, const void* src) { 14 | #ifdef WITH_CUDA 15 | CUDAContext ctx(POINTER_DEVICE(src)); 16 | ctx.Memcpy<CPUContext, CUDAContext>(nbytes, dst, src); 17 | #else 18 | LOG(FATAL) << "CUDA was not compiled."; 19 | #endif 20 | } 21 | 22 | // gpu <- cpu 23 | template<> void CPUContext::Memcpy<CUDAContext, CPUContext>( 24 | size_t nbytes, void* dst, const void* src) { 25 | #ifdef WITH_CUDA 26 | CUDAContext ctx(POINTER_DEVICE(dst)); 27 | ctx.Memcpy<CUDAContext, CPUContext>(nbytes, dst, src); 28 | #else 29 | LOG(FATAL) << "CUDA was not compiled."; 30 | #endif 31 | } 32 | 33 | } // namespace dragon 34 | --------------------------------------------------------------------------------
/Dragon/src/core/workspace.cc: -------------------------------------------------------------------------------- 1 | #include "core/operator.h" 2 | #include "core/graph.h" 3 | #include "core/workspace.h" 4 | 5 | namespace dragon { 6 | 7 | GraphBase* Workspace::CreateGraph(const GraphDef& meta_graph) { 8 | CHECK(meta_graph.has_name()) 9 | << "The name of the given meta graph should not be empty."; 10 | if (graph_map_.count(meta_graph.name())) 11 | return graph_map_[meta_graph.name()].get(); 12 | LOG(DEBUG) << "Create Graph: " << meta_graph.name(); 13 | graph_map_[meta_graph.name()] = unique_ptr<GraphBase>(NewGraph(meta_graph, this)); 14 | return graph_map_[meta_graph.name()].get(); 15 | } 16 | 17 | Workspace::~Workspace() { 18 | for (int i = 0; i < WORKSPACE_MAX_CORRUPTED_SIZE; i++) { 19 | string name = "/opt/mirror_stage/buffer_" + dragon_cast<string, int>(i); 20 | if (tensor_map_.count(name) > 0) { 21 | MixedMemory* mem = tensor_map_[name]->memory(); 22 | if (mem != nullptr) delete mem; 23 | } 24 | } 25 | } 26 | 27 | } // namespace dragon -------------------------------------------------------------------------------- /Dragon/src/operators/cast/float2half.cc: -------------------------------------------------------------------------------- 1 | #include "operators/cast/float2half_op.h" 2 | #include "core/workspace.h" 3 | #include "utils/op_kernel.h" 4 | 5 | namespace dragon { 6 | 7 | #ifdef WITH_CUDA_FP16 8 | 9 | template <class Context> 10 | void FloatToHalfOp<Context>::RunOnDevice() { 11 | CHECK(input(0).template IsType<float>()) 12 | << "The type of input should be float32."; 13 | output(0)->ReshapeLike(input(0)); 14 | 15 | // cast 16 | auto* Xdata = input(0).template data<float, Context>(); 17 | auto* Ydata = output(0)->template mutable_data<float16, Context>(); 18 | kernel::Float2Half<float, Context>(output(0)->count(), Xdata, Ydata); 19 | 20 | // release & share 21 | input(0).Reset(); 22 | input(0).ReshapeLike(*output(0)); 23 | input(0).Share(*output(0)); 24 | } 25 | 26 | #ifdef WITH_CUDA 27 | DEPLOY_CUDA(FloatToHalf); 28 | #endif 29 | OPERATOR_SCHEMA(FloatToHalf).NumInputs(1).NumOutputs(1); 30 | 31 | NO_GRADIENT(FloatToHalf); 32 | 33 | #endif 34 | 35 | } // namespace dragon -------------------------------------------------------------------------------- /Dragon/src/operators/control_flow/compare_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/control_flow/compare_op.h" 2 | #include "utils/op_kernel.h" 3 | 4 | namespace dragon { 5 | 6 | template <class Context> template <typename T> 7 | void CompareOp<Context>::EqualRunWithType() { 8 | auto* X1data = input(0).template data<T, Context>(); 9 | auto* X2data = input(1).template data<T, Context>(); 10 | auto* Ydata = output(0)->template mutable_data<T, Context>(); 11 | kernel::Equal<T, Context>(output(0)->count(), X1data, X2data, Ydata); 12 | } 13 | 14 | template <class Context> 15 | void CompareOp<Context>::RunOnDevice() { 16 | CHECK_EQ(input(0).count(), input(1).count()) 17 | << "Both compared tensors should have the same number of elements."; 18 | output(0)->ReshapeLike(input(0)); 19 | 20 | if (operation == "EQUAL") { 21 | if (input(0).template IsType<float>()) EqualRunWithType<float>(); 22 | else LOG(FATAL) << "Unsupported input types."; 23 | } 24 | else { 25 | LOG(FATAL) << "Unsupported operation: [" << operation << "]."; 26 | } 27 | } 28 | 29 | DEPLOY_CPU(Compare); 30 | #ifdef WITH_CUDA 31 | DEPLOY_CUDA(Compare); 32 | #endif 33 | OPERATOR_SCHEMA(Compare).NumInputs(2).NumOutputs(1); 34 | 35 | NO_GRADIENT(Compare); 36 | 37 | } // namespace dragon --------------------------------------------------------------------------------
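As a reference for the "EQUAL" branch of `CompareOp` above, the following NumPy sketch mirrors what `kernel::Equal` is expected to compute (illustrative only; the output encoding is assumed to be 1/0 in the input dtype):

```python
import numpy as np

def reference_equal(x1, x2):
    x1, x2 = np.asarray(x1), np.asarray(x2)
    # both inputs must hold the same number of elements, as the CHECK_EQ enforces
    assert x1.size == x2.size
    return (x1 == x2).astype(x1.dtype)  # elementwise 1.0 / 0.0

print(reference_equal([1., 2., 3.], [1., 0., 3.]))  # [1. 0. 1.]
```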
/Dragon/src/operators/control_flow/copy_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/control_flow/copy_op.h" 2 | 3 | namespace dragon { 4 | 5 | template <class Context> template <typename T> 6 | void CopyOp<Context>::RunWithType() { 7 | auto* Xdata = input(0).template data<T, Context>(); 8 | auto* Ydata = output(0)->template mutable_data<T, Context>(); 9 | ctx().template Copy<T, Context, Context>(output(0)->count(), Ydata, Xdata); 10 | } 11 | 12 | template <class Context> 13 | void CopyOp<Context>::RunOnDevice() { 14 | output(0)->ReshapeLike(input(0)); 15 | if (input(0).template IsType<float>()) RunWithType<float>(); 16 | else if (input(0).template IsType<double>()) RunWithType<double>(); 17 | else if (input(0).template IsType<float16>()) RunWithType<float16>(); 18 | else if (input(0).template IsType<int>()) RunWithType<int>(); 19 | else if (input(0).template IsType<int64_t>()) RunWithType<int64_t>(); 20 | else if (input(0).template IsType<uint8_t>()) RunWithType<uint8_t>(); 21 | else LOG(FATAL) << "Unsupported input types."; 22 | } 23 | 24 | DEPLOY_CPU(Copy); 25 | #ifdef WITH_CUDA 26 | DEPLOY_CUDA(Copy); 27 | #endif 28 | OPERATOR_SCHEMA(Copy).NumInputs(1).NumOutputs(1); 29 | NO_GRADIENT(Copy); 30 | 31 | } // namespace dragon -------------------------------------------------------------------------------- /Dragon/src/operators/ndarray/arange_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/ndarray/arange_op.h" 2 | #include "core/workspace.h" 3 | #include "utils/op_kernel.h" 4 | 5 | namespace dragon { 6 | 7 | template <class Context> template <typename T> 8 | void ArangeOp<Context>::RunWithType() { 9 | TIndex start_ = start(), step_ = step(), stop_ = stop(), count; 10 | if (stop_ == 0) { stop_ = start_; start_ = 0; } 11 | count = (stop_ - start_ - 1) / step_ + 1; 12 | output(0)->Reshape(vector<TIndex>(1, count)); 13 | auto* Ydata = output(0)->template mutable_data<T, Context>(); 14 | kernel::Arange<T, Context>(count, start_, step_, Ydata); 15 | } 16 | 17 | template <class Context> 18 | void ArangeOp<Context>::RunOnDevice() { 19 | if (dtype == "FLOAT32") RunWithType<float>(); 20 | else if (dtype == "INT32") RunWithType<int>(); 21 | else LOG(FATAL) << "Unsupported data types."; 22 | } 23 | 24 | DEPLOY_CPU(Arange); 25 | #ifdef WITH_CUDA 26 | DEPLOY_CUDA(Arange); 27 | #endif 28 | OPERATOR_SCHEMA(Arange).NumInputs(0).NumOutputs(1); 29 | 30 | NO_GRADIENT(Arange); 31 | 32 | } // namespace dragon --------------------------------------------------------------------------------
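The element-count rule in `ArangeOp::RunWithType` is easy to misread, so here it is restated in Python (a worked example only, using integer division as the C++ `TIndex` arithmetic does):

```python
def arange_count(start, stop, step):
    # Arange(n) with stop == 0 is shorthand for start=0, stop=n
    if stop == 0:
        start, stop = 0, start
    return (stop - start - 1) // step + 1

print(arange_count(0, 10, 3))  # 4 -> elements 0, 3, 6, 9
print(arange_count(5, 0, 1))   # 5 -> Arange(5) yields 0, 1, 2, 3, 4
```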
/Dragon/src/operators/ndarray/argmax_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/ndarray/argmax_op.h" 2 | #include "utils/op_kernel.h" 3 | 4 | namespace dragon { 5 | 6 | template <class Context> template <typename T> 7 | void ArgmaxOp<Context>::RunWithType() { 8 | if (top_k != 1) { 9 | // it's difficult to implement device code when top_k > 1 10 | auto* Xdata = input(0).template data<T, CPUContext>(); 11 | auto* Ydata = output(0)->template mutable_data<T, CPUContext>(); 12 | kernel::Argmax<T, CPUContext>(count, axis_dim, inner_dim, top_k, Xdata, Ydata); 13 | } else { 14 | auto* Xdata = input(0).template data<T, Context>(); 15 | auto* Ydata = output(0)->template mutable_data<T, Context>(); 16 | kernel::Argmax<T, Context>(count, axis_dim, inner_dim, top_k, Xdata, Ydata); 17 | } 18 | } 19 | 20 | template <class Context> 21 | void ArgmaxOp<Context>::RunOnDevice() { 22 | if (axis != -1) { 23 | axis_dim = input(0).dim(axis); 24 | inner_dim = input(0).count(axis) / axis_dim; 25 | } else { 26 | axis_dim = input(0).count(); 27 | inner_dim = 1; 28 | } 29 | count = input(0).count() / axis_dim; 30 | vector<TIndex> dims = input(0).dims(); 31 | if (!keep_dims) { 32 | if (axis != -1) { 33 | if (top_k == 1) dims.erase(dims.begin() + axis); 34 | else dims[axis] = top_k; 35 | } else { 36 | dims = vector<TIndex>(1, top_k); 37 | } 38 | } else { 39 | if (axis == -1) dims = vector<TIndex>(input(0).ndim(), 1); 40 | dims[axis] = top_k; 41 | } 42 | output(0)->Reshape(dims); 43 | 44 | if (input(0).template IsType<float>()) RunWithType<float>(); 45 | else LOG(FATAL) << "Unsupported input types."; 46 | } 47 | 48 | DEPLOY_CPU(Argmax); 49 | #ifdef WITH_CUDA 50 | DEPLOY_CUDA(Argmax); 51 | #endif 52 | OPERATOR_SCHEMA(Argmax).NumInputs(1).NumOutputs(1); 53 | 54 | NO_GRADIENT(Argmax); 55 | 56 | } // namespace dragon -------------------------------------------------------------------------------- /Dragon/src/operators/ndarray/expand_dims_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/ndarray/expand_dims_op.h" 2 | 3 | namespace dragon { 4 | 5 | template <class Context> 6 | void ExpandDimsOp<Context>::RunOnDevice() { 7 | vector<TIndex> dims = input(0).dims(); 8 | if (axis == -1 || axis >= (int)dims.size()) dims.push_back(1); 9 | else dims.insert(dims.begin() + axis, 1); 10 | output(0)->Reshape(dims); 11 | output(0)->Share(input(0)); 12 | } 13 | 14 | DEPLOY_CPU(ExpandDims); 15 | #ifdef WITH_CUDA 16 | DEPLOY_CUDA(ExpandDims); 17 | #endif 18 | OPERATOR_SCHEMA(ExpandDims).NumInputs(1).NumOutputs(1); 19 | 20 | template <class Context> 21 | void ExpandDimsGradientOp<Context>::RunOnDevice() { 22 | output(0)->ReshapeLike(input(0)); 23 | output(0)->Share(input(-1)); 24 | } 25 | 26 | DEPLOY_CPU(ExpandDimsGradient); 27 | #ifdef WITH_CUDA 28 | DEPLOY_CUDA(ExpandDimsGradient); 29 | #endif 30 | OPERATOR_SCHEMA(ExpandDimsGradient).NumInputs(2).NumOutputs(1); 31 | 32 | class GetExpandDimsGradient final : public GradientMakerBase { 33 | public: 34 | GRADIENT_MAKER_CTOR(GetExpandDimsGradient); 35 | vector<OperatorDef> MakeDefs() override { 36 | return SingleDef(def.type() + "Gradient", "", 37 | vector<string> {I(0), GO(0)}, 38 | vector<string> {GI(0)}); 39 | } 40 | }; 41 | REGISTER_GRADIENT(ExpandDims, GetExpandDimsGradient); 42 | 43 | } // namespace dragon --------------------------------------------------------------------------------
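The output-shape logic of `ArgmaxOp::RunOnDevice` above, restated in Python for clarity (an illustrative port for this document, not a Dragon API):

```python
def argmax_output_dims(input_dims, axis, top_k, keep_dims):
    dims = list(input_dims)
    if not keep_dims:
        if axis != -1:
            if top_k == 1:
                dims.pop(axis)      # the reduced axis disappears
            else:
                dims[axis] = top_k  # the reduced axis keeps top-k entries
        else:
            dims = [top_k]          # global argmax flattens the result
    else:
        if axis == -1:
            dims = [1] * len(input_dims)
        dims[axis] = top_k
    return dims

print(argmax_output_dims([2, 3, 4], axis=1, top_k=1, keep_dims=False))  # [2, 4]
print(argmax_output_dims([2, 3, 4], axis=1, top_k=2, keep_dims=True))   # [2, 2, 4]
```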
/Dragon/src/operators/ndarray/one_hot_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/ndarray/one_hot_op.h" 2 | #include "utils/math_functions.h" 3 | #include "utils/op_kernel.h" 4 | 5 | namespace dragon { 6 | 7 | template <class Context> template <typename T> 8 | void OneHotOp<Context>::RunWithType() { 9 | auto* Xdata = input(0).template data<T, Context>(); 10 | auto* Ydata = output(0)->template mutable_data<T, Context>(); 11 | math::Set<T, Context>(output(0)->count(), 12 | dragon_cast<T, float>(float(off_value)), 13 | Ydata); 14 | 15 | kernel::OneHot<T, Context>(input(0).count(), depth, on_value, Xdata, Ydata); 16 | } 17 | 18 | template <class Context> 19 | void OneHotOp<Context>::RunOnDevice() { 20 | vector<TIndex> dims = input(0).dims(); 21 | dims.push_back(depth); 22 | output(0)->Reshape(dims); 23 | 24 | if (input(0).template IsType<float>()) RunWithType<float>(); 25 | else LOG(FATAL) << "Unsupported input types."; 26 | } 27 | 28 | DEPLOY_CPU(OneHot); 29 | #ifdef WITH_CUDA 30 | DEPLOY_CUDA(OneHot); 31 | #endif 32 | OPERATOR_SCHEMA(OneHot).NumInputs(1).NumOutputs(1); 33 | 34 | NO_GRADIENT(OneHot); 35 | 36 | } // namespace dragon -------------------------------------------------------------------------------- /Dragon/src/operators/ndarray/shape_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/ndarray/shape_op.h" 2 | 3 | namespace dragon { 4 | 5 | template <class Context> 6 | void ShapeOp<Context>::RunOnDevice() { 7 | // reshape 8 | output(0)->Reshape(vector<TIndex>(1, input(0).ndim())); 9 | 10 | // forward 11 | auto* Ydata = output(0)->template mutable_data<float, CPUContext>(); 12 | for (int i = 0; i < input(0).ndim(); i++) Ydata[i] = input(0).dim(i); 13 | } 14 | 15 | DEPLOY_CPU(Shape); 16 | #ifdef WITH_CUDA 17 | DEPLOY_CUDA(Shape); 18 | #endif 19 | OPERATOR_SCHEMA(Shape).NumInputs(1).NumOutputs(1); 20 | 21 | NO_GRADIENT(Shape); 22 | 23 | } // namespace dragon -------------------------------------------------------------------------------- /Dragon/src/operators/update/adam_update_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/update/adam_update_op.h" 2 | #include "utils/op_kernel.h" 3 | 4 | namespace dragon { 5 | 6 | template <class Context> 7 | void AdamUpdateOp<Context>::ComputeRunWithFloat() { 8 | if (!m.get()) { 9 | m.reset(new Tensor()); m->ReshapeLike(input(0)); 10 | v.reset(new Tensor()); v->ReshapeLike(input(0)); 11 | } 12 | t++; 13 | coeff = sqrt(1. - pow(beta2, t)) / (1. - pow(beta1, t)); 14 | lr = param("base_lr") * coeff * this->lr_mult; 15 | kernel::AdamUpdate<float, Context>(&input(0), 16 | m.get(), 17 | v.get(), 18 | &temp, 19 | beta1, 20 | beta2, 21 | eps, 22 | lr); 23 | } 24 | 25 | DEPLOY_CPU(AdamUpdate); 26 | #ifdef WITH_CUDA 27 | DEPLOY_CUDA(AdamUpdate); 28 | #endif 29 | OPERATOR_SCHEMA(AdamUpdate).NumInputs(1).NumOutputs(1); 30 | 31 | NO_GRADIENT(AdamUpdate); 32 | 33 | } // namespace dragon --------------------------------------------------------------------------------
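The learning-rate scaling in `AdamUpdateOp` above folds Adam's bias correction into a single coefficient, lr = base_lr * sqrt(1 - beta2^t) / (1 - beta1^t) * lr_mult. A small numeric check (standard beta defaults assumed for illustration):

```python
from math import sqrt

def adam_coeff(t, beta1=0.9, beta2=0.999):
    return sqrt(1. - beta2 ** t) / (1. - beta1 ** t)

print(round(adam_coeff(1), 4))     # 0.3162 -> early steps are damped strongly
print(round(adam_coeff(1000), 4))  # 0.7952 -> the correction fades toward 1 as t grows
```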
/Dragon/src/operators/update/moving_average_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/update/moving_average_op.h" 2 | #include "utils/math_functions.h" 3 | 4 | namespace dragon { 5 | 6 | template <class Context> template <typename T> 7 | void MovingAverageOp<Context>::RunWithType() { 8 | auto* Xdata = input(0).template data<T, Context>(); 9 | auto* Ydata = output(0)->template mutable_data<T, Context>(); 10 | math::Axpby<T, Context>(output(0)->count(), 1.0 - decay, Xdata, decay, Ydata); 11 | } 12 | 13 | template <class Context> 14 | void MovingAverageOp<Context>::RunOnDevice() { 15 | CHECK(input(0).count() == output(0)->count()); 16 | 17 | if (input(0).template IsType<float>()) RunWithType<float>(); 18 | else if (input(0).template IsType<float16>()) RunWithType<float16>(); 19 | else LOG(FATAL) << "Unsupported input types."; 20 | } 21 | 22 | DEPLOY_CPU(MovingAverage); 23 | #ifdef WITH_CUDA 24 | DEPLOY_CUDA(MovingAverage); 25 | #endif 26 | OPERATOR_SCHEMA(MovingAverage).NumInputs(1).NumOutputs(1); 27 | 28 | NO_GRADIENT(MovingAverage); 29 | 30 | } // namespace dragon -------------------------------------------------------------------------------- /Dragon/src/operators/update/nesterov_update_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/update/nesterov_update_op.h" 2 | #include "utils/math_functions.h" 3 | #include "utils/op_kernel.h" 4 | 5 | namespace dragon { 6 | 7 | template <class Context> 8 | void NesterovUpdateOp<Context>::ComputeRunWithFloat() { 9 | if (!history.get()) { 10 | history.reset(new Tensor()); 11 | history->ReshapeLike(input(0)); 12 | } 13 | lr = param("base_lr") * this->lr_mult; 14 | auto* dXdata = input(0).template mutable_data<float, Context>(); 15 | auto* Hdata = history->template mutable_data<float, Context>(); 16 | kernel::NesterovUpdate<float, Context>(input(0).count(), 17 | dXdata, 18 | Hdata, 19 | &temp, 20 | momentum, 21 | lr, 22 | &ctx()); 23 | } 24 | 25 | DEPLOY_CPU(NesterovUpdate); 26 | #ifdef WITH_CUDA 27 | DEPLOY_CUDA(NesterovUpdate); 28 | #endif 29 | OPERATOR_SCHEMA(NesterovUpdate).NumInputs(1).NumOutputs(1); 30 | 31 | NO_GRADIENT(NesterovUpdate); 32 | 33 | } // namespace dragon -------------------------------------------------------------------------------- /Dragon/src/operators/update/rmsprop_update_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/update/rmsprop_update_op.h" 2 | #include "core/workspace.h" 3 | #include "utils/op_kernel.h" 4 | 5 | namespace dragon { 6 | 7 | template <class Context> 8 | void RMSPropUpdateOp<Context>::ComputeRunWithFloat() { 9 | if (!history.get()) { 10 | string slot = OperatorBase::GetSingleArg("slot", ""); 11 | if (slot.empty()) history.reset(new Tensor()); 12 | else history.reset(ws()->CreateTensor("/mnt/" + name() + "/history")); 13 | history->ReshapeLike(input(0)); 14 | } 15 | lr = param("base_lr") * this->lr_mult; 16 | auto* dXdata = input(0).template mutable_data<float, Context>(); 17 | auto* Hdata = history->template mutable_data<float, Context>(); 18 | kernel::RMSPropUpdate<float, Context>(input(0).count(), 19 | dXdata, 20 | Hdata, 21 | &temp, 22 | decay, 23 | eps, 24 | lr); 25 | } 26 | 27 | DEPLOY_CPU(RMSPropUpdate); 28 | #ifdef WITH_CUDA 29 | DEPLOY_CUDA(RMSPropUpdate); 30 | #endif 31 | OPERATOR_SCHEMA(RMSPropUpdate).NumInputs(1).NumOutputs(1); 32 | 33 | NO_GRADIENT(RMSPropUpdate); 34 | 35 | } // namespace dragon -------------------------------------------------------------------------------- /Dragon/src/operators/update/sgd_update_op.cc: -------------------------------------------------------------------------------- 1 | #include "operators/update/sgd_update_op.h" 2 | #include "utils/math_functions.h" 3 | 4 | namespace dragon { 5 | 6 | template <class Context> 7 | void SGDUpdateOp<Context>::ComputeRunWithFloat() { 8 | if (!history.get()) { 9 | history.reset(new Tensor()); 10 | history->ReshapeLike(input(0)); 11 | } 12 | lr = param("base_lr") * this->lr_mult; 13 | auto* dXdata = input(0).template mutable_data<float, Context>(); 14 | auto* Hdata = history->template mutable_data<float, Context>(); 15 | math::Axpby<float, Context>(history->count(), lr, dXdata, momentum, Hdata); 16 | ctx().template Copy<float, Context, Context>(history->count(), dXdata, Hdata); 17 | } 18 | 19 | DEPLOY_CPU(SGDUpdate); 20 | #ifdef WITH_CUDA 21 | DEPLOY_CUDA(SGDUpdate); 22 | #endif 23 | OPERATOR_SCHEMA(SGDUpdate).NumInputs(1).NumOutputs(1); 24 | 25 | NO_GRADIENT(SGDUpdate); 26 | 27 | } // namespace dragon -------------------------------------------------------------------------------- /Dragon/src/protos/caffemodel.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | 3 | message BlobShape { 4 | repeated int64 dim = 1 [packed = true]; 5 | } 6 | 7 | message BlobProto { 8 | optional BlobShape shape = 7; 9 | repeated float data = 5 [packed = true]; 10 | optional int32 num = 1 [default = 0]; 11 | optional int32 channels = 2 [default = 0]; 12 | optional int32 height = 3 [default = 0]; 13 | optional int32 width = 4 [default = 0]; 14 | } 15 | 16 | message NetParameter { 17 | optional string name = 1; 18 | repeated LayerParameter layer = 100; 19 | } 20 | 21 | message LayerParameter { 22 | optional string name = 1; 23 | repeated BlobProto blobs = 7; 24 | } 25 | 26 | 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017, SeetaTech. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | 7 | 2.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | 9 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework 2 | ![](http://dragon.seetatech.com/static/images/styles-dragon.png) 3 | ----- 4 | 5 | ## Deprecated. See [seetaresearch/Dragon](http://github.com/seetaresearch/Dragon). 6 | -------------------------------------------------------------------------------- /examples/GA3C/GA3C.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # GA3C for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from Config import Config 8 | from Server import Server 9 | 10 | # Adjust configs for Play mode 11 | if Config.PLAY_MODE: 12 | Config.AGENTS = 1 13 | Config.PREDICTORS = 1 14 | Config.TRAINERS = 1 15 | Config.DYNAMIC_SETTINGS = False 16 | 17 | Config.LOAD_CHECKPOINT = True 18 | Config.TRAIN_MODELS = False 19 | Config.SAVE_MODELS = False 20 | 21 | if __name__ == "__main__": 22 | 23 | import dragon.config 24 | dragon.config.EnableCUDA() 25 | 26 | Server().main() 27 | -------------------------------------------------------------------------------- /examples/GA3C/GameManager.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # GA3C for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | import gym 8 | 9 | 10 | class GameManager: 11 | def __init__(self, game_name, display): 12 | self.game_name = game_name 13 | self.display = display 14 | 15 | self.env = gym.make(game_name) 16 | self.reset() 17 | 18 | def reset(self): 19 | observation = self.env.reset() 20 | return observation 21 | 22 | def step(self, action): 23 | self._update_display() 24 | observation, reward, done, info = self.env.step(action) 25 | return observation, reward, done, info 26 | 27 | def _update_display(self): 28 | if self.display: 29 | self.env.render() 30 | -------------------------------------------------------------------------------- /examples/GA3C/ThreadPredictor.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # GA3C for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written 
by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from threading import Thread 8 | import numpy as np 9 | 10 | from Config import Config 11 | 12 | 13 | class ThreadPredictor(Thread): 14 | def __init__(self, server, id): 15 | super(ThreadPredictor, self).__init__() 16 | self.setDaemon(True) 17 | 18 | self.id = id 19 | self.server = server 20 | self.exit_flag = False 21 | 22 | def run(self): 23 | ids = np.zeros(Config.PREDICTION_BATCH_SIZE, dtype=np.uint16) 24 | states = np.zeros( 25 | (Config.PREDICTION_BATCH_SIZE, Config.STACKED_FRAMES, Config.IMAGE_HEIGHT, Config.IMAGE_WIDTH, ), 26 | dtype=np.float32) 27 | 28 | while not self.exit_flag: 29 | ids[0], states[0] = self.server.prediction_q.get() 30 | 31 | size = 1 32 | while size < Config.PREDICTION_BATCH_SIZE and not self.server.prediction_q.empty(): 33 | ids[size], states[size] = self.server.prediction_q.get() 34 | size += 1 35 | 36 | batch = states[:size] 37 | p, v = self.server.model.predict_p_and_v(batch) 38 | 39 | for i in range(size): 40 | if ids[i] < len(self.server.agents): 41 | self.server.agents[ids[i]].wait_q.put((p[i], v[i])) 42 | -------------------------------------------------------------------------------- /examples/GA3C/ThreadTrainer.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # GA3C for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | from threading import Thread 8 | import numpy as np 9 | 10 | from Config import Config 11 | 12 | 13 | class ThreadTrainer(Thread): 14 | def __init__(self, server, id): 15 | super(ThreadTrainer, self).__init__() 16 | self.setDaemon(True) 17 | 18 | self.id = id 19 | self.server = server 20 | self.exit_flag = False 21 | 22 | def run(self): 23 | while not self.exit_flag: 24 | batch_size = 0 25 | while batch_size <= Config.TRAINING_MIN_BATCH_SIZE: 26 | x_, r_, a_ = self.server.training_q.get() 27 | if batch_size == 0: 28 | x__ = x_ 29 | r__ = r_ 30 | a__ = a_ 31 | else: 32 | x__ = np.concatenate((x__, x_)) 33 | r__ = np.concatenate((r__, r_)) 34 | a__ = np.concatenate((a__, a_)) 35 | batch_size += x_.shape[0] 36 | 37 | if Config.TRAIN_MODELS: 38 | self.server.train_model(x__, r__, a__) -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Dragon Zoo 2 | 3 | This page contains various example implementations for Dragon.
4 | 5 | We demonstrate that our framework makes it much easier to reuse work that has already been done, 6 | 7 | as described in our arXiv paper: [Dragon: A Computation Graph Virtual Machine Based Deep Learning Framework](https://arxiv.org/abs/1707.08265) 8 | 9 | ## List of examples 10 | 11 | * [cifar10](https://github.com/neopenx/Dragon/tree/master/examples/cifar10) - How to train/infer a basic classification network [*Caffe1 Style*] 12 | 13 | * [Seg-FCN](https://github.com/neopenx/Dragon/tree/master/examples/Seg-FCN) - Fully Convolutional Networks for Semantic Segmentation [*Caffe1 Style*] 14 | 15 | * [GA3C](https://github.com/neopenx/Dragon/tree/master/examples/GA3C) - A hybrid CPU/GPU version of the A3C algorithm [*TinyDragon Style*] 16 | -------------------------------------------------------------------------------- /examples/Seg-FCN/colors/pascal_voc.act: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neopenx/Dragon/0e639a7319035ddc81918bd3df059230436ee0a1/examples/Seg-FCN/colors/pascal_voc.act -------------------------------------------------------------------------------- /examples/Seg-FCN/data/demo/001763.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neopenx/Dragon/0e639a7319035ddc81918bd3df059230436ee0a1/examples/Seg-FCN/data/demo/001763.jpg -------------------------------------------------------------------------------- /examples/Seg-FCN/transplants/VGG16/solve.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Seg-FCN for Dragon 3 | # Copyright (c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Transplant the fully-connected caffemodel into a fully-convolutional version.
""" 8 | 9 | import surgery 10 | import dragon.vm.caffe as caffe 11 | 12 | if __name__ == '__main__': 13 | 14 | net = caffe.Net('net.prototxt', 'VGG16.v2.caffemodel', caffe.TEST) 15 | new_net = caffe.Net('new_net.prototxt', caffe.TEST) 16 | surgery.transplant(new_net, net) 17 | new_net.save('VGG16.fcn.caffemodel') -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn16s/caffemodel-url: -------------------------------------------------------------------------------- 1 | http://dl.caffe.berkeleyvision.org/fcn16s-heavy-pascal.caffemodel -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn16s/solve.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Seg-FCN for Dragon 3 | # Copyright (c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Train a FCN-16s(PASCAL VOC) network """ 8 | 9 | import dragon.vm.caffe as caffe 10 | import surgery 11 | 12 | weights = '../voc-fcn32s/snapshot/train_iter_100000.caffemodel' 13 | 14 | if __name__ == '__main__': 15 | 16 | # init 17 | caffe.set_mode_gpu() 18 | caffe.set_device(0) 19 | 20 | solver = caffe.SGDSolver('solver.prototxt') 21 | solver.net.copy_from(weights) 22 | 23 | # surgeries 24 | interp_layers = [k for k in solver.net.params.keys() if 'up' in k] 25 | surgery.interp(solver.net, interp_layers) 26 | 27 | for _ in range(25): 28 | solver.step(4000) -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn16s/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "train.prototxt" 2 | test_net: "val.prototxt" 3 | test_iter: 1111 4 | # make test net, but don't invoke it from the solver itself 5 | test_interval: 999999999 6 | display: 20 7 | average_loss: 20 8 | lr_policy: "fixed" 9 | # lr for unnormalized softmax 10 | base_lr: 1e-12 11 | # high momentum 12 | momentum: 0.99 13 | # no gradient accumulation 14 | iter_size: 1 15 | max_iter: 100000 16 | weight_decay: 0.0005 17 | snapshot: 4000 18 | snapshot_prefix: "snapshot/train" 19 | test_initialization: false 20 | -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn16s/test.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Seg-FCN for Dragon 3 | # Copyright (c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Test a FCN-16s(PASCAL VOC) network """ 8 | 9 | import dragon.vm.caffe as caffe 10 | import score 11 | import numpy as np 12 | 13 | weights = 'snapshot/train_iter_44000.caffemodel' 14 | 15 | if __name__ == '__main__': 16 | 17 | # init 18 | caffe.set_mode_gpu() 19 | caffe.set_device(0) 20 | 21 | solver = caffe.SGDSolver('solver.prototxt') 22 | solver.net.copy_from(weights) 23 | 24 | # scoring 25 | val = np.loadtxt('../data/seg11valid.txt', dtype=str) 26 | score.seg_tests(solver, 'seg', val) 27 | 28 | -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn32s/caffemodel-url: -------------------------------------------------------------------------------- 1 | http://dl.caffe.berkeleyvision.org/fcn32s-heavy-pascal.caffemodel 
-------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn32s/solve.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Seg-FCN for Dragon 3 | # Copyright (c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Train a FCN-32s(PASCAL VOC) network """ 8 | 9 | import dragon.vm.caffe as caffe 10 | import surgery 11 | import numpy as np 12 | 13 | weights = '../transplants/VGG16/VGG16.fcn.caffemodel' 14 | 15 | if __name__ == '__main__': 16 | 17 | # init 18 | caffe.set_mode_gpu() 19 | caffe.set_device(0) 20 | 21 | solver = caffe.SGDSolver('solver.prototxt') 22 | solver.net.copy_from(weights) 23 | 24 | # surgeries 25 | interp_layers = [k for k in solver.net.params.keys() if 'up' in k] 26 | surgery.interp(solver.net, interp_layers) 27 | 28 | for _ in range(25): 29 | solver.step(4000) -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn32s/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "train.prototxt" 2 | test_net: "val.prototxt" 3 | test_iter: 1111 4 | # make test net, but don't invoke it from the solver itself 5 | test_interval: 999999999 6 | display: 20 7 | average_loss: 20 8 | lr_policy: "fixed" 9 | # lr for unnormalized softmax 10 | base_lr: 1e-10 11 | # high momentum 12 | momentum: 0.99 13 | # no gradient accumulation 14 | iter_size: 1 15 | max_iter: 100000 16 | weight_decay: 0.0005 17 | snapshot: 4000 18 | snapshot_prefix: "snapshot/train" 19 | test_initialization: false 20 | -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn32s/test.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Seg-FCN for Dragon 3 | # Copyright (c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Test a FCN-32s(PASCAL VOC) network """ 8 | 9 | import dragon.vm.caffe as caffe 10 | import score 11 | import numpy as np 12 | 13 | weights = 'snapshot/train_iter_100000.caffemodel' 14 | 15 | if __name__ == '__main__': 16 | 17 | # init 18 | caffe.set_mode_gpu() 19 | caffe.set_device(0) 20 | 21 | solver = caffe.SGDSolver('solver.prototxt') 22 | solver.net.copy_from(weights) 23 | 24 | # scoring 25 | val = np.loadtxt('../data/seg11valid.txt', dtype=str) 26 | score.seg_tests(solver, 'seg', val) 27 | 28 | -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn8s-atonce/caffemodel-url: -------------------------------------------------------------------------------- 1 | http://dl.caffe.berkeleyvision.org/fcn8s-atonce-pascal.caffemodel 2 | -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn8s-atonce/solve.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Seg-FCN for Dragon 3 | # Copyright (c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Train a FCN-8s At Once(PASCAL VOC) network """ 8 | 9 | import dragon.vm.caffe as caffe 10 | import surgery 11 | 12 | weights = '../transplants/VGG16/VGG16.fcn.caffemodel' 13 | 
14 | if __name__ == '__main__': 15 | 16 | # init 17 | caffe.set_mode_gpu() 18 | caffe.set_device(0) 19 | 20 | solver = caffe.SGDSolver('solver.prototxt') 21 | solver.net.copy_from(weights) 22 | 23 | # surgeries 24 | interp_layers = [k for k in solver.net.params.keys() if 'up' in k] 25 | surgery.interp(solver.net, interp_layers) 26 | 27 | for _ in range(75): 28 | solver.step(4000) -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn8s-atonce/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "train.prototxt" 2 | test_net: "val.prototxt" 3 | test_iter: 736 4 | # make test net, but don't invoke it from the solver itself 5 | test_interval: 999999999 6 | display: 20 7 | average_loss: 20 8 | lr_policy: "fixed" 9 | # lr for unnormalized softmax 10 | base_lr: 1e-10 11 | # high momentum 12 | momentum: 0.99 13 | # no gradient accumulation 14 | iter_size: 1 15 | max_iter: 300000 16 | weight_decay: 0.0005 17 | snapshot: 4000 18 | snapshot_prefix: "snapshot/train" 19 | test_initialization: false 20 | -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn8s-atonce/test.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Seg-FCN for Dragon 3 | # Copyright (c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Test a FCN-8s At Once(PASCAL VOC) network """ 8 | 9 | import dragon.vm.caffe as caffe 10 | import score 11 | import numpy as np 12 | 13 | weights = 'snapshot/train_iter_300000.caffemodel' 14 | 15 | if __name__ == '__main__': 16 | 17 | # init 18 | caffe.set_mode_gpu() 19 | caffe.set_device(0) 20 | 21 | solver = caffe.SGDSolver('solver.prototxt') 22 | solver.net.copy_from(weights) 23 | 24 | # scoring 25 | val = np.loadtxt('../data/seg11valid.txt', dtype=str) 26 | score.seg_tests(solver, 'seg', val) 27 | 28 | -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn8s/caffemodel-url: -------------------------------------------------------------------------------- 1 | http://dl.caffe.berkeleyvision.org/fcn8s-heavy-pascal.caffemodel -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn8s/solve.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Seg-FCN for Dragon 3 | # Copyright (c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Train a FCN-8s(PASCAL VOC) network """ 8 | 9 | import dragon.vm.caffe as caffe 10 | import surgery 11 | 12 | weights = '../voc-fcn16s/snapshot/train_iter_100000.caffemodel' 13 | 14 | if __name__ == '__main__': 15 | 16 | # init 17 | caffe.set_mode_gpu() 18 | caffe.set_device(0) 19 | 20 | solver = caffe.SGDSolver('solver.prototxt') 21 | solver.net.copy_from(weights) 22 | 23 | # surgeries 24 | interp_layers = [k for k in solver.net.params.keys() if 'up' in k] 25 | surgery.interp(solver.net, interp_layers) 26 | 27 | for _ in range(25): 28 | solver.step(4000) -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn8s/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: 
"train.prototxt" 2 | test_net: "val.prototxt" 3 | test_iter: 1111 4 | # make test net, but don't invoke it from the solver itself 5 | test_interval: 999999999 6 | display: 20 7 | average_loss: 20 8 | lr_policy: "fixed" 9 | # lr for unnormalized softmax 10 | base_lr: 1e-14 11 | # high momentum 12 | momentum: 0.99 13 | # no gradient accumulation 14 | iter_size: 1 15 | max_iter: 100000 16 | weight_decay: 0.0005 17 | snapshot: 4000 18 | snapshot_prefix: "snapshot/train" 19 | test_initialization: false 20 | -------------------------------------------------------------------------------- /examples/Seg-FCN/voc-fcn8s/test.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Seg-FCN for Dragon 3 | # Copyright (c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Test a FCN-8s(PASCAL VOC) network """ 8 | 9 | import dragon.vm.caffe as caffe 10 | import score 11 | import numpy as np 12 | 13 | weights = 'snapshot/train_iter_100000.caffemodel' 14 | 15 | if __name__ == '__main__': 16 | 17 | # init 18 | caffe.set_mode_gpu() 19 | caffe.set_device(0) 20 | 21 | solver = caffe.SGDSolver('solver.prototxt') 22 | solver.net.copy_from(weights) 23 | 24 | # scoring 25 | val = np.loadtxt('../data/seg11valid.txt', dtype=str) 26 | score.seg_tests(solver, 'D:/seg', val) 27 | 28 | -------------------------------------------------------------------------------- /examples/cifar10/README.md: -------------------------------------------------------------------------------- 1 | CIFAR-10 [Caffe1 style] 2 | ===================================== 3 | 4 | ### Runtime Requirements for Python 5 | 6 | 0. Package: lmdb 7 | 1. Package: python-opencv 8 | 9 | ----- 10 | 11 | Prepare the Dataset 12 | ------------------- 13 | 14 | - download ``cifar-10-python.tar.gz`` from [http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz](http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz) 15 | 16 | - copy to data folder 17 | 18 | ```Shell 19 | cp cifar-10-python.tar.gz cifar/data 20 | ``` 21 | 22 | - gen db files 23 | 24 | ```Shell 25 | cd cifar10 26 | python gen_lmdb.py 27 | ``` 28 | 29 | Train "Quick/Full" Model 30 | ------------------- 31 | 32 | - Quick 33 | 34 | ```Shell 35 | cd cifar10 36 | python solve_quick.py 37 | ``` 38 | 39 | - Full 40 | 41 | ```Shell 42 | cd cifar10 43 | python solve_full.py 44 | ``` 45 | 46 | Infer "Quick" Model after Training 47 | ------------------- 48 | 49 | ```Shell 50 | cd cifar10 51 | python infer.py 52 | ``` 53 | -------------------------------------------------------------------------------- /examples/cifar10/cifar10_full_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "cifar10_full_train_test.prototxt" 2 | 3 | test_iter: 100 4 | test_interval: 1000 5 | test_initialization: false 6 | 7 | base_lr: 0.001 8 | momentum: 0.9 9 | weight_decay: 0.004 10 | 11 | lr_policy: "multistep" 12 | stepvalue: 60000 13 | stepvalue: 65000 14 | gamma: 0.1 15 | 16 | display: 200 17 | max_iter: 70000 18 | snapshot: 10000 19 | snapshot_prefix: "snapshots/cifar10_full" -------------------------------------------------------------------------------- /examples/cifar10/cifar10_quick_solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "cifar10_quick_train_test.prototxt" 2 | 3 | test_iter: 100 4 | test_interval: 500 5 | test_initialization: false 6 | 7 | 
base_lr: 0.001 8 | momentum: 0.9 9 | weight_decay: 0.004 10 | 11 | lr_policy: "step" 12 | stepsize: 4000 13 | gamma: 0.1 14 | 15 | display: 100 16 | max_iter: 5000 17 | snapshot: 500 18 | snapshot_prefix: "snapshots/cifar10_quick" -------------------------------------------------------------------------------- /examples/cifar10/data/README.md: -------------------------------------------------------------------------------- 1 | This directory holds (*after you download them*): 2 | - cifar-10-python.tar.gz ([download link](http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz)) 3 | - demo (folder contains demo images) 4 | - extract (folder contains image files and labels, generated by **gen_lmdb.py**) 5 | - train_lmdb (db file, generated by **gen_lmdb.py**) 6 | - test_lmdb (db file, generated by **gen_lmdb.py**) 7 | -------------------------------------------------------------------------------- /examples/cifar10/data/demo/cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neopenx/Dragon/0e639a7319035ddc81918bd3df059230436ee0a1/examples/cifar10/data/demo/cat.jpg -------------------------------------------------------------------------------- /examples/cifar10/infer.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Cifar-10 for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Infer a single image and show the result """ 8 | 9 | import dragon.vm.caffe as caffe 10 | import numpy as np 11 | import cv2 12 | 13 | classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 14 | 'dog', 'frog', 'horse', 'ship', 'truck'] 15 | 16 | # init 17 | caffe.set_mode_gpu() 18 | caffe.set_device(0) 19 | # load net 20 | net = caffe.Net("cifar10_quick_deploy.prototxt", 21 | 'snapshots/cifar10_quick_iter_5000.caffemodel', caffe.TEST) 22 | 23 | 24 | def load_image(filename): 25 | # load image, subtract the BGR channel means, and make dims 1 x 3 x H x W 26 | im = cv2.imread(filename) 27 | im = cv2.resize(im, (32, 32)) 28 | im = np.array(im, dtype=np.float32) 29 | im -= np.array((104.0, 116.0, 122.0)) 30 | im = im.transpose((2,0,1)) 31 | return im[np.newaxis, :, :, :] 32 | 33 | 34 | def run(filename): 35 | 36 | # infer 37 | im = load_image(filename) 38 | net.forward(**{'data': im}) 39 | score = net.blobs['ip2'].data.get_value()[0] 40 | pred = score.argmax(0) 41 | 42 | # show 43 | print(classes[pred]) 44 | 45 | 46 | if __name__ == '__main__': 47 | 48 | run('data/demo/cat.jpg') 49 | -------------------------------------------------------------------------------- /examples/cifar10/solve_full.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Cifar-10 for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Train a cifar-10 net """ 8 | 9 | import dragon.vm.caffe as caffe 10 | 11 | if __name__ == '__main__': 12 | 13 | # init 14 | caffe.set_mode_gpu() 15 | caffe.set_device(0) 16 | 17 | # solve 18 | solver = caffe.SGDSolver('cifar10_full_solver.prototxt') 19 | solver.step(70000) 20 | solver.snapshot() 21 | -------------------------------------------------------------------------------- /examples/cifar10/solve_quick.py:
-------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Cifar-10 for Dragon 3 | # Copyright(c) 2017 SeetaTech 4 | # Written by Ting Pan 5 | # -------------------------------------------------------- 6 | 7 | """ Train a cifar-10 net """ 8 | 9 | import dragon.vm.caffe as caffe 10 | 11 | if __name__ == '__main__': 12 | 13 | # init 14 | caffe.set_mode_gpu() 15 | caffe.set_device(0) 16 | 17 | # solve 18 | solver = caffe.SGDSolver('cifar10_quick_solver.prototxt') 19 | solver.step(5000) 20 | solver.snapshot() 21 | --------------------------------------------------------------------------------