├── .Rbuildignore ├── inst ├── include │ └── tinydnn.h ├── COPYRIGHTS └── AUTHORS ├── .gitignore ├── src ├── Makevars ├── optimizers.h ├── tiny_dnn │ ├── util │ │ ├── deform.h │ │ ├── serialization_layer_list.h │ │ ├── nn_error.h │ │ ├── colored_print.h │ │ ├── random.h │ │ ├── macro.h │ │ ├── target_cost.h │ │ ├── aligned_allocator.h │ │ ├── weight_init.h │ │ └── graph_visualizer.h │ ├── core │ │ ├── params │ │ │ ├── params.h │ │ │ ├── fully_params.h │ │ │ ├── deconv_params.h │ │ │ └── maxpool_params.h │ │ ├── kernels │ │ │ ├── nnp_deconv2d_kernel.h │ │ │ ├── avx_deconv2d_kernel.h │ │ │ ├── avx_deconv2d_back_kernel.h │ │ │ ├── maxpool_op_avx.h │ │ │ ├── maxpool_op_internal.h │ │ │ ├── fully_connected_op_nnpack.h │ │ │ ├── maxpool_op_nnpack.h │ │ │ ├── tiny_deconv2d_kernel.h │ │ │ ├── fully_connected_op_internal.h │ │ │ ├── conv2d_op_nnpack.h │ │ │ ├── maxpool_grad_op.h │ │ │ ├── conv2d_grad_op.h │ │ │ ├── conv2d_op.h │ │ │ └── fully_connected_grad_op.h │ │ └── session.h │ ├── io │ │ ├── layer_factory.h │ │ ├── cifar10_parser.h │ │ └── display.h │ ├── models │ │ └── alexnet.h │ ├── layers │ │ ├── layers.h │ │ ├── input_layer.h │ │ ├── feedforward_layer.h │ │ ├── arithmetic_layer.h │ │ ├── linear_layer.h │ │ ├── concat_layer.h │ │ └── power_layer.h │ ├── config.h │ └── tiny_dnn.h ├── utils.h ├── layers.h ├── random_r.h ├── cereal │ ├── external │ │ ├── rapidjson │ │ │ ├── internal │ │ │ │ ├── strfunc.h │ │ │ │ ├── swap.h │ │ │ │ ├── stack.h │ │ │ │ └── ieee754.h │ │ │ ├── license.txt │ │ │ ├── filestream.h │ │ │ ├── stringbuffer.h │ │ │ ├── ostreamwrapper.h │ │ │ ├── memorybuffer.h │ │ │ ├── genericstream.h │ │ │ ├── memorystream.h │ │ │ ├── filereadstream.h │ │ │ └── filewritestream.h │ │ └── rapidxml │ │ │ ├── license.txt │ │ │ └── rapidxml_utils.hpp │ ├── types │ │ ├── functional.hpp │ │ ├── utility.hpp │ │ ├── complex.hpp │ │ ├── list.hpp │ │ ├── deque.hpp │ │ ├── forward_list.hpp │ │ ├── string.hpp │ │ ├── chrono.hpp │ │ ├── stack.hpp │ │ ├── concepts │ │ │ └── 
pair_associative_container.hpp │ │ ├── array.hpp │ │ ├── set.hpp │ │ ├── bitset.hpp │ │ └── unordered_set.hpp │ ├── details │ │ ├── polymorphic_impl_fwd.hpp │ │ ├── util.hpp │ │ └── static_object.hpp │ └── macros.hpp ├── optimizers.cpp ├── classification.cpp ├── regression.cpp └── network.cpp ├── NAMESPACE ├── tinydnn.Rproj ├── DESCRIPTION ├── R ├── optimizers.R └── RcppExports.R └── man └── layers.Rd /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | -------------------------------------------------------------------------------- /inst/include/tinydnn.h: -------------------------------------------------------------------------------- 1 | #include 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .RData 4 | .Ruserdata 5 | src/*.o 6 | src/*.so 7 | src/*.dll 8 | -------------------------------------------------------------------------------- /src/Makevars: -------------------------------------------------------------------------------- 1 | CXX_STD = CXX11 2 | 3 | # PKG_CXXFLAGS = $(SHLIB_OPENMP_CXXFLAGS) 4 | PKG_CPPFLAGS = -I. 
5 | # PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) 6 | -------------------------------------------------------------------------------- /src/optimizers.h: -------------------------------------------------------------------------------- 1 | #ifndef TINYDNN_OPTIMIZERS_H 2 | #define TINYDNN_OPTIMIZERS_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | 9 | std::shared_ptr get_optimizer(Rcpp::List opt); 10 | 11 | 12 | #endif // TINYDNN_OPTIMIZERS_H 13 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | importFrom(Rcpp, evalCpp) 2 | useDynLib(tinydnn) 3 | 4 | export(net_seq) 5 | export(layer_fully_connected, fc) 6 | export(layer_convolutional, conv) 7 | export(layer_average_pooling, ave_pool) 8 | export(layer_max_pooling, max_pool) 9 | 10 | exportMethods("%<%") 11 | 12 | export(adagrad, rmsprop, adam, sgd, momentum) 13 | -------------------------------------------------------------------------------- /inst/COPYRIGHTS: -------------------------------------------------------------------------------- 1 | All contributions by Taiga Nomi 2 | Copyright (c) 2013, Taiga Nomi 3 | All rights reserved. 4 | 5 | All other contributions: 6 | Copyright (c) 2013-2016, the respective contributors. 7 | All rights reserved. 8 | 9 | Each contributor holds copyright over their respective contributions. 10 | The project versioning (Git) records all such contribution source information. 
11 | -------------------------------------------------------------------------------- /tinydnn.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 4 10 | Encoding: UTF-8 11 | 12 | RnwWeave: knitr 13 | LaTeX: XeLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageInstallArgs: --no-multiarch --with-keep.source 20 | PackageRoxygenize: rd 21 | -------------------------------------------------------------------------------- /src/tiny_dnn/util/deform.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2013, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/util/util.h" 11 | 12 | namespace tiny_dnn { 13 | 14 | inline vec_t corrupt(vec_t &&in, float_t corruption_level, float_t min_value) { 15 | for (size_t i = 0; i < in.size(); i++) 16 | if (bernoulli(corruption_level)) in[i] = min_value; 17 | return in; 18 | } 19 | 20 | } // namespace tiny_dnn 21 | -------------------------------------------------------------------------------- /src/utils.h: -------------------------------------------------------------------------------- 1 | #ifndef TINYDNN_UTILS_H 2 | #define TINYDNN_UTILS_H 3 | 4 | #include 5 | #include 6 | 7 | inline Rcpp::NumericVector vec_t_to_rcpp_vector(const tiny_dnn::vec_t* v) 8 | { 9 | Rcpp::NumericVector res(v->size()); 10 | std::copy(v->begin(), v->end(), res.begin()); 11 | 12 | return res; 13 | } 14 | 15 | inline Rcpp::NumericMatrix vec_t_to_rcpp_matrix(const tiny_dnn::vec_t* v, int m, int n) 16 | { 17 | Rcpp::NumericMatrix res(m, n); 18 | std::copy(v->begin(), v->end(), 
res.begin()); 19 | 20 | return res; 21 | } 22 | 23 | 24 | #endif // TINYDNN_UTILS_H 25 | -------------------------------------------------------------------------------- /src/layers.h: -------------------------------------------------------------------------------- 1 | #ifndef TINYDNN_LAYERS_H 2 | #define TINYDNN_LAYERS_H 3 | 4 | #include 5 | #include 6 | 7 | void add_layer_fully_connected( 8 | tiny_dnn::network* net, Rcpp::List layer 9 | ); 10 | 11 | void add_layer_convolutional( 12 | tiny_dnn::network* net, Rcpp::List layer 13 | ); 14 | 15 | void add_layer_average_pooling( 16 | tiny_dnn::network* net, Rcpp::List layer 17 | ); 18 | 19 | void add_layer_max_pooling( 20 | tiny_dnn::network* net, Rcpp::List layer 21 | ); 22 | 23 | #endif // TINYDNN_LAYERS_H 24 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/params/params.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 7 | */ 8 | #pragma once 9 | 10 | namespace tiny_dnn { 11 | namespace core { 12 | 13 | class conv_params; 14 | class fully_params; 15 | class maxpool_params; 16 | 17 | /* Base class to model operation parameters */ 18 | class Params { 19 | public: 20 | Params() {} 21 | 22 | conv_params conv() const; 23 | fully_params fully() const; 24 | maxpool_params &maxpool(); 25 | }; 26 | 27 | } // namespace core 28 | } // namespace tiny_dnn 29 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/params/fully_params.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include "params.h" 11 | 12 | namespace tiny_dnn { 13 | namespace core { 14 | 15 | class fully_params : public Params { 16 | public: 17 | serial_size_t in_size_; 18 | serial_size_t out_size_; 19 | bool has_bias_; 20 | }; 21 | 22 | // TODO(nyanp): can we do better here? 23 | inline fully_params Params::fully() const { 24 | return *(static_cast(this)); 25 | } 26 | 27 | } // namespace core 28 | } // namespace tiny_dnn 29 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: tinydnn 2 | Type: Package 3 | Title: Tiny yet Powerful Deep Neural Networks 4 | Version: 0.1.0 5 | Date: 2017-02-12 6 | Author: Yixuan Qiu, Taiga Noumi (The 'tiny-dnn' library), Andrey Ogurtsov 7 | (Package vignette), and other contributors of the tiny-dnn library. 8 | See file AUTHORS for details. 9 | Maintainer: Yixuan Qiu 10 | Description: R wrapper of the 'tiny-dnn' C++ library for deep learning. 
11 | License: MIT 12 | Copyright: See file COPYRIGHTS 13 | Encoding: UTF-8 14 | LazyData: true 15 | Depends: 16 | methods 17 | Imports: 18 | Rcpp (>= 0.12.9) 19 | Suggests: 20 | prettydoc 21 | LinkingTo: Rcpp 22 | SystemRequirements: C++11 23 | RoxygenNote: 5.0.1 24 | VignetteBuilder: knitr 25 | -------------------------------------------------------------------------------- /src/random_r.h: -------------------------------------------------------------------------------- 1 | // Modified version of , using R's RNG 2 | #pragma once 3 | 4 | #include 5 | 6 | namespace tiny_dnn { 7 | 8 | template 9 | inline int uniform_idx(const Container &t) { 10 | return int(R::unif_rand() * t.size()); 11 | } 12 | 13 | inline bool bernoulli(float_t p) { 14 | return float_t(R::unif_rand()) <= p; 15 | } 16 | 17 | template 18 | void uniform_rand(Iter begin, Iter end, float_t min, float_t max) { 19 | for (Iter it = begin; it != end; ++it) *it = R::runif(min, max); 20 | } 21 | 22 | template 23 | void gaussian_rand(Iter begin, Iter end, float_t mean, float_t sigma) { 24 | for (Iter it = begin; it != end; ++it) *it = R::rnorm(mean, sigma); 25 | } 26 | 27 | } // namespace tiny_dnn 28 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/internal/strfunc.h: -------------------------------------------------------------------------------- 1 | #ifndef RAPIDJSON_INTERNAL_STRFUNC_H_ 2 | #define RAPIDJSON_INTERNAL_STRFUNC_H_ 3 | 4 | namespace rapidjson { 5 | namespace internal { 6 | 7 | //! Custom strlen() which works on different character types. 8 | /*! \tparam Ch Character type (e.g. char, wchar_t, short) 9 | \param s Null-terminated input string. 10 | \return Number of characters in the string. 11 | \note This has the same semantics as strlen(), the return value is not number of Unicode codepoints. 
12 | */ 13 | template 14 | inline SizeType StrLen(const Ch* s) { 15 | const Ch* p = s; 16 | while (*p != '\0') 17 | ++p; 18 | return SizeType(p - s); 19 | } 20 | 21 | } // namespace internal 22 | } // namespace rapidjson 23 | 24 | #endif // RAPIDJSON_INTERNAL_STRFUNC_H_ 25 | -------------------------------------------------------------------------------- /src/tiny_dnn/util/serialization_layer_list.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2017, Taiga Nomi 2 | #ifndef CNN_NO_SERIALIZATION 3 | 4 | CNN_REGISTER_LAYER_WITH_ACTIVATIONS(convolutional_layer, conv); 5 | CNN_REGISTER_LAYER_WITH_ACTIVATIONS(fully_connected_layer, fully_connected); 6 | CNN_REGISTER_LAYER_WITH_ACTIVATIONS(average_pooling_layer, avepool); 7 | CNN_REGISTER_LAYER_WITH_ACTIVATIONS(max_pooling_layer, maxpool); 8 | CNN_REGISTER_LAYER_WITH_ACTIVATIONS(linear_layer, linear); 9 | CNN_REGISTER_LAYER_WITH_ACTIVATIONS(lrn_layer, lrn); 10 | 11 | CNN_REGISTER_LAYER(batch_normalization_layer, batchnorm); 12 | CNN_REGISTER_LAYER(concat_layer, concat); 13 | CNN_REGISTER_LAYER(dropout_layer, dropout); 14 | CNN_REGISTER_LAYER(power_layer, power); 15 | CNN_REGISTER_LAYER(slice_layer, slice); 16 | CNN_REGISTER_LAYER(elementwise_add_layer, elementwise_add); 17 | 18 | #endif 19 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/nnp_deconv2d_kernel.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/core/params/deconv_params.h" 11 | 12 | #ifdef CNN_USE_NNPACK 13 | #include "nnpack.h" 14 | 15 | namespace tiny_dnn { 16 | namespace core { 17 | namespace kernels { 18 | 19 | inline void nnp_deconv2d_kernel(const conv_params ¶ms, 20 | const std::vector &in, 21 | const vec_t &W, 22 | const vec_t &bias, 23 | tensor_t &a) {} 24 | 25 | } // namespace kernels 26 | } // namespace core 27 | } // namespace tiny_dnn 28 | 29 | #endif 30 | -------------------------------------------------------------------------------- /R/optimizers.R: -------------------------------------------------------------------------------- 1 | adagrad = function(lrate = 0.01) 2 | { 3 | list(opt_name = "adagrad", 4 | lrate = lrate) 5 | } 6 | 7 | rmsprop = function(lrate = 0.0001, decay = 0.99) 8 | { 9 | list(opt_name = "rmsprop", 10 | lrate = lrate, 11 | decay = decay) 12 | } 13 | 14 | adam = function(lrate = 0.001, b1 = 0.9, b2 = 0.999, b1_t = 0.9, b2_t = 0.999) 15 | { 16 | list(opt_name = "adam", 17 | lrate = lrate, 18 | b1 = b1, 19 | b2 = b2, 20 | b1_t = b1_t, 21 | b2_t = b2_t) 22 | } 23 | 24 | sgd = function(lrate = 0.01, decay = 0) 25 | { 26 | list(opt_name = "sgd", 27 | lrate = lrate, 28 | decay = decay) 29 | } 30 | 31 | momentum = function(lrate = 0.01, decay = 0, momentum = 0.9) 32 | { 33 | list(opt_name = "momentum", 34 | lrate = lrate, 35 | decay = decay, 36 | momentum = momentum) 37 | } 38 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/params/deconv_params.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | namespace tiny_dnn { 11 | namespace core { 12 | 13 | struct deconv_layer_worker_specific_storage { 14 | const tensor_t *prev_out_; 15 | const tensor_t *curr_out_unpadded_; 16 | tensor_t curr_out_buf_; 17 | tensor_t curr_delta_padded; 18 | }; 19 | 20 | struct deconv_params { 21 | connection_table tbl; 22 | index3d in; 23 | index3d out; 24 | index3d out_unpadded; 25 | index3d weight; 26 | bool has_bias; 27 | padding pad_type; 28 | serial_size_t w_stride; 29 | serial_size_t h_stride; 30 | }; 31 | 32 | } // namespace core 33 | } // namespace tiny_dnn 34 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/avx_deconv2d_kernel.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/core/kernels/tiny_deconv2d_kernel.h" 11 | #include "tiny_dnn/core/params/deconv_params.h" 12 | 13 | namespace tiny_dnn { 14 | namespace core { 15 | namespace kernels { 16 | 17 | inline void avx_deconv2d_kernel(const deconv_params ¶ms, 18 | const tensor_t &in, 19 | const vec_t &W, 20 | const vec_t &bias, 21 | tensor_t &a, 22 | const bool layer_parallelize) { 23 | // fallback to non-avx version 24 | tiny_deconv2d_kernel(params, in, W, bias, a, layer_parallelize); 25 | } 26 | 27 | } // namespace kernels 28 | } // namespace core 29 | } // namespace tiny_dnn 30 | -------------------------------------------------------------------------------- /src/tiny_dnn/io/layer_factory.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2013, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | #include "tiny_dnn/layers/fully_connected_layer.h" 10 | #include "tiny_dnn/util/util.h" 11 | 12 | namespace tiny_dnn { 13 | 14 | /** 15 | * create multi-layer perceptron 16 | */ 17 | template 18 | network make_mlp(Iter first, Iter last) { 19 | typedef network net_t; 20 | net_t n; 21 | 22 | Iter next = first + 1; 23 | for (; next != last; ++first, ++next) 24 | n << fully_connected_layer(*first, *next); 25 | return n; 26 | } 27 | 28 | /** 29 | * create multi-layer perceptron 30 | */ 31 | template 32 | network make_mlp(const std::vector &units) { 33 | return make_mlp(units.begin(), units.end()); 34 | } 35 | 36 | } // namespace tiny_dnn 37 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/session.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 7 | */ 8 | #pragma once 9 | 10 | #include 11 | #include 12 | 13 | #include "tiny_dnn/core/device.h" 14 | 15 | namespace tiny_dnn { 16 | namespace core { 17 | 18 | class session { 19 | public: 20 | explicit session(const std::string name) : name_(name) {} 21 | 22 | std::string get_name() const { return name_; } 23 | size_t get_num_devices() const { return devices_.size(); } 24 | 25 | // will call construct graph 26 | // should we here specify the devices to use? 
27 | void schedule_session(/* network& net */); 28 | 29 | // will call forward or backward methods 30 | void run_session(/* data */); 31 | 32 | private: 33 | std::string name_; 34 | std::vector> devices_; 35 | }; 36 | 37 | } // namespace core 38 | } // namespace tiny_dnn 39 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/avx_deconv2d_back_kernel.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/core/params/deconv_params.h" 11 | 12 | namespace tiny_dnn { 13 | namespace core { 14 | namespace kernels { 15 | 16 | inline void avx_deconv2d_back_kernel(const deconv_params ¶ms, 17 | const tensor_t &prev_out, 18 | const vec_t &W, 19 | tensor_t &dW, 20 | tensor_t &db, 21 | tensor_t &curr_delta, 22 | tensor_t *prev_delta) { 23 | // fallback to non-avx version 24 | tiny_deconv2d_back_kernel(params, prev_out, W, dW, db, curr_delta, 25 | prev_delta); 26 | } 27 | 28 | } // namespace kernels 29 | } // namespace core 30 | } // namespace tiny_dnn 31 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/license.txt: -------------------------------------------------------------------------------- 1 | Copyright (C) 2011 Milo Yip 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The 
above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/maxpool_op_avx.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/core/kernels/maxpool_op_internal.h" 11 | 12 | namespace tiny_dnn { 13 | namespace kernels { 14 | 15 | inline void maxpool_op_avx( 16 | const tensor_t &in_data, 17 | tensor_t &out_data, 18 | std::vector> &max_idx, 19 | const std::vector> &out2in, 20 | const bool layer_parallelize) { 21 | maxpool_op_internal(in_data, out_data, max_idx, out2in, layer_parallelize); 22 | } 23 | 24 | inline void maxpool_grad_op_avx( 25 | tensor_t &prev_delta, 26 | const tensor_t &curr_delta, 27 | std::vector> &max_idx, 28 | const std::vector &in2out, 29 | const bool layer_parallelize) { 30 | maxpool_grad_op_internal(prev_delta, curr_delta, max_idx, in2out, 31 | layer_parallelize); 32 | } 33 | 34 | } // namespace kernels 35 | } // namespace tiny_dnn 36 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/filestream.h: -------------------------------------------------------------------------------- 1 | #ifndef RAPIDJSON_FILESTREAM_H_ 2 | #define RAPIDJSON_FILESTREAM_H_ 3 | 4 | #include 5 | 6 | namespace rapidjson { 7 | 8 | //! Wrapper of C file stream for input or output. 9 | /*! 10 | This simple wrapper does not check the validity of the stream. 11 | \implements Stream 12 | */ 13 | class FileStream { 14 | public: 15 | typedef char Ch; //!< Character type. Only support char. 
16 | 17 | FileStream(FILE* fp) : fp_(fp), count_(0) { Read(); } 18 | 19 | char Peek() const { return current_; } 20 | char Take() { char c = current_; Read(); return c; } 21 | size_t Tell() const { return count_; } 22 | void Put(char c) { fputc(c, fp_); } 23 | 24 | // Not implemented 25 | char* PutBegin() { return 0; } 26 | size_t PutEnd(char*) { return 0; } 27 | 28 | private: 29 | void Read() { 30 | RAPIDJSON_ASSERT(fp_ != 0); 31 | int c = fgetc(fp_); 32 | if (c != EOF) { 33 | current_ = (char)c; 34 | count_++; 35 | } 36 | else 37 | current_ = '\0'; 38 | } 39 | 40 | FILE* fp_; 41 | char current_; 42 | size_t count_; 43 | }; 44 | 45 | } // namespace rapidjson 46 | 47 | #endif // RAPIDJSON_FILESTREAM_H_ 48 | -------------------------------------------------------------------------------- /src/tiny_dnn/models/alexnet.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2013, Taiga Nomi 3 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 4 | All rights reserved. 5 | 6 | Use of this source code is governed by a BSD-style license that can be found 7 | in the LICENSE file. 
8 | */ 9 | 10 | // #include "tiny_dnn/tiny_dnn.h" 11 | 12 | using namespace tiny_dnn::activation; 13 | using namespace tiny_dnn::layers; 14 | 15 | namespace models { 16 | 17 | // Based on: 18 | // https://github.com/DeepMark/deepmark/blob/master/torch/image%2Bvideo/alexnet.lua 19 | class alexnet : public network { 20 | public: 21 | explicit alexnet(const std::string &name = "") : network(name) { 22 | *this << conv(224, 224, 11, 11, 3, 64, padding::valid, true, 4, 4); 23 | *this << max_pool(54, 54, 64, 2); 24 | *this << conv(27, 27, 5, 5, 64, 192, padding::valid, true, 1, 1); 25 | *this << max_pool(23, 23, 192, 1); 26 | *this << conv(23, 23, 3, 3, 192, 384, padding::valid, true, 1, 1); 27 | *this << conv(21, 21, 3, 3, 384, 256, padding::valid, true, 1, 1); 28 | *this << conv(19, 19, 3, 3, 256, 256, padding::valid, true, 1, 1); 29 | *this << max_pool(17, 17, 256, 1); 30 | } 31 | }; 32 | 33 | } // namespace models 34 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/params/maxpool_params.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | #include "tiny_dnn/core/params/params.h" 10 | 11 | namespace tiny_dnn { 12 | namespace core { 13 | 14 | class maxpool_params : public Params { 15 | public: 16 | index3d in; 17 | index3d out; 18 | serial_size_t pool_size_x; 19 | serial_size_t pool_size_y; 20 | serial_size_t stride_x; 21 | serial_size_t stride_y; 22 | padding pad_type; 23 | 24 | /* mapping out => max_index(in) (1:1) */ 25 | std::vector> out2inmax; 26 | /* mapping out => in (1:N) */ 27 | std::vector> out2in; 28 | /* mapping in => out (N:1) */ 29 | std::vector in2out; 30 | }; 31 | 32 | struct max_pooling_layer_worker_specific_storage { 33 | /* mapping out => max_index(in) (1:1) */ 34 | std::vector> out2inmax_; 35 | }; 36 | 37 | // TODO(nyanp): can we do better here? 38 | inline maxpool_params &Params::maxpool() { 39 | return *(static_cast(this)); 40 | } 41 | 42 | } // namespace core 43 | } // namespace tiny_dnn 44 | -------------------------------------------------------------------------------- /src/tiny_dnn/layers/layers.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2013, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | #include "tiny_dnn/layers/arithmetic_layer.h" 10 | #include "tiny_dnn/layers/average_pooling_layer.h" 11 | #include "tiny_dnn/layers/average_unpooling_layer.h" 12 | #include "tiny_dnn/layers/batch_normalization_layer.h" 13 | #include "tiny_dnn/layers/concat_layer.h" 14 | #include "tiny_dnn/layers/convolutional_layer.h" 15 | #include "tiny_dnn/layers/deconvolutional_layer.h" 16 | #include "tiny_dnn/layers/dropout_layer.h" 17 | #include "tiny_dnn/layers/feedforward_layer.h" 18 | #include "tiny_dnn/layers/fully_connected_layer.h" 19 | #include "tiny_dnn/layers/layer.h" 20 | #include "tiny_dnn/layers/linear_layer.h" 21 | #include "tiny_dnn/layers/lrn_layer.h" 22 | #include "tiny_dnn/layers/max_pooling_layer.h" 23 | #include "tiny_dnn/layers/max_unpooling_layer.h" 24 | #include "tiny_dnn/layers/partial_connected_layer.h" 25 | #include "tiny_dnn/layers/power_layer.h" 26 | #include "tiny_dnn/layers/quantized_convolutional_layer.h" 27 | #include "tiny_dnn/layers/quantized_deconvolutional_layer.h" 28 | #include "tiny_dnn/layers/quantized_fully_connected_layer.h" 29 | #include "tiny_dnn/layers/slice_layer.h" 30 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/stringbuffer.h: -------------------------------------------------------------------------------- 1 | #ifndef RAPIDJSON_STRINGBUFFER_H_ 2 | #define RAPIDJSON_STRINGBUFFER_H_ 3 | 4 | #include "rapidjson.h" 5 | #include "internal/stack.h" 6 | 7 | namespace rapidjson { 8 | 9 | //! Represents an in-memory output stream. 10 | /*! 11 | \tparam Encoding Encoding of the stream. 12 | \tparam Allocator type for allocating memory buffer. 
13 | \implements Stream 14 | */ 15 | template 16 | struct GenericStringBuffer { 17 | typedef typename Encoding::Ch Ch; 18 | 19 | GenericStringBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {} 20 | 21 | void Put(Ch c) { *stack_.template Push() = c; } 22 | 23 | void Clear() { stack_.Clear(); } 24 | 25 | const char* GetString() const { 26 | // Push and pop a null terminator. This is safe. 27 | *stack_.template Push() = '\0'; 28 | stack_.template Pop(1); 29 | 30 | return stack_.template Bottom(); 31 | } 32 | 33 | size_t Size() const { return stack_.GetSize(); } 34 | 35 | static const size_t kDefaultCapacity = 256; 36 | mutable internal::Stack stack_; 37 | }; 38 | 39 | typedef GenericStringBuffer > StringBuffer; 40 | 41 | //! Implement specialized version of PutN() with memset() for better performance. 42 | template<> 43 | inline void PutN(GenericStringBuffer >& stream, char c, size_t n) { 44 | memset(stream.stack_.Push(n), c, n * sizeof(c)); 45 | } 46 | 47 | } // namespace rapidjson 48 | 49 | #endif // RAPIDJSON_STRINGBUFFER_H_ 50 | -------------------------------------------------------------------------------- /R/RcppExports.R: -------------------------------------------------------------------------------- 1 | # Generated by using Rcpp::compileAttributes() -> do not edit by hand 2 | # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 3 | 4 | net_seq_classification_fit <- function(net, x, y, batch_size, epochs, opt, verbose) { 5 | .Call('tinydnn_net_seq_classification_fit', PACKAGE = 'tinydnn', net, x, y, batch_size, epochs, opt, verbose) 6 | } 7 | 8 | net_seq_classification_predict <- function(net, x) { 9 | .Call('tinydnn_net_seq_classification_predict', PACKAGE = 'tinydnn', net, x) 10 | } 11 | 12 | net_seq_constructor <- function(name) { 13 | .Call('tinydnn_net_seq_constructor', PACKAGE = 'tinydnn', name) 14 | } 15 | 16 | net_seq_name <- function(net) { 17 | .Call('tinydnn_net_seq_name', PACKAGE = 'tinydnn', 
net) 18 | } 19 | 20 | net_seq_layer_size <- function(net) { 21 | .Call('tinydnn_net_seq_layer_size', PACKAGE = 'tinydnn', net) 22 | } 23 | 24 | net_seq_out_data_size <- function(net) { 25 | .Call('tinydnn_net_seq_out_data_size', PACKAGE = 'tinydnn', net) 26 | } 27 | 28 | net_seq_in_data_size <- function(net) { 29 | .Call('tinydnn_net_seq_in_data_size', PACKAGE = 'tinydnn', net) 30 | } 31 | 32 | net_seq_add_layer <- function(net, layer) { 33 | .Call('tinydnn_net_seq_add_layer', PACKAGE = 'tinydnn', net, layer) 34 | } 35 | 36 | net_seq_regression_fit <- function(net, x, y, batch_size, epochs, opt, verbose) { 37 | .Call('tinydnn_net_seq_regression_fit', PACKAGE = 'tinydnn', net, x, y, batch_size, epochs, opt, verbose) 38 | } 39 | 40 | net_seq_regression_predict <- function(net, x) { 41 | .Call('tinydnn_net_seq_regression_predict', PACKAGE = 'tinydnn', net, x) 42 | } 43 | 44 | -------------------------------------------------------------------------------- /src/tiny_dnn/util/nn_error.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include "tiny_dnn/util/colored_print.h" 13 | 14 | namespace tiny_dnn { 15 | 16 | /** 17 | * error exception class for tiny-dnn 18 | **/ 19 | class nn_error : public std::exception { 20 | public: 21 | explicit nn_error(const std::string &msg) : msg_(msg) {} 22 | const char *what() const throw() override { return msg_.c_str(); } 23 | 24 | private: 25 | std::string msg_; 26 | }; 27 | 28 | /** 29 | * warning class for tiny-dnn (for debug) 30 | **/ 31 | class nn_warn { 32 | public: 33 | explicit nn_warn(const std::string &msg) : msg_(msg) { 34 | #ifdef CNN_USE_STDOUT 35 | coloredPrint(Color::YELLOW, msg_h_ + msg_); 36 | #endif 37 | } 38 | 39 | private: 40 | std::string msg_; 41 | std::string msg_h_ = std::string("[WARNING] "); 42 | }; 43 | 44 | /** 45 | * info class for tiny-dnn (for debug) 46 | **/ 47 | class nn_info { 48 | public: 49 | explicit nn_info(const std::string &msg) : msg_(msg) { 50 | #ifdef CNN_USE_STDOUT 51 | std::cout << msg_h + msg_ << std::endl; 52 | #endif 53 | } 54 | 55 | private: 56 | std::string msg_; 57 | std::string msg_h = std::string("[INFO] "); 58 | }; 59 | 60 | class nn_not_implemented_error : public nn_error { 61 | public: 62 | explicit nn_not_implemented_error(const std::string &msg = "not implemented") 63 | : nn_error(msg) {} 64 | }; 65 | 66 | } // namespace tiny_dnn 67 | -------------------------------------------------------------------------------- /src/tiny_dnn/layers/input_layer.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2013, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | #include "tiny_dnn/layers/layer.h" 10 | 11 | namespace tiny_dnn { 12 | 13 | class input_layer : public layer { 14 | public: 15 | explicit input_layer(const shape3d &shape) 16 | : layer({vector_type::data}, {vector_type::data}), shape_(shape) {} 17 | 18 | explicit input_layer(serial_size_t in_dim) 19 | : layer({vector_type::data}, {vector_type::data}), 20 | shape_(shape3d(in_dim, 1, 1)) {} 21 | 22 | std::vector in_shape() const override { return {shape_}; } 23 | std::vector out_shape() const override { return {shape_}; } 24 | std::string layer_type() const override { return "input"; } 25 | 26 | void forward_propagation(const std::vector &in_data, 27 | std::vector &out_data) override { 28 | *out_data[0] = *in_data[0]; 29 | } 30 | 31 | void back_propagation(const std::vector &in_data, 32 | const std::vector &out_data, 33 | std::vector &out_grad, 34 | std::vector &in_grad) override { 35 | // do nothing 36 | CNN_UNREFERENCED_PARAMETER(in_data); 37 | CNN_UNREFERENCED_PARAMETER(out_data); 38 | CNN_UNREFERENCED_PARAMETER(out_grad); 39 | CNN_UNREFERENCED_PARAMETER(in_grad); 40 | } 41 | 42 | private: 43 | shape3d shape_; 44 | }; 45 | 46 | } // namespace tiny_dnn 47 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/internal/swap.h: -------------------------------------------------------------------------------- 1 | // Tencent is pleased to support the open source community by making RapidJSON available. 2 | // 3 | // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 4 | // 5 | // Licensed under the MIT License (the "License"); you may not use this file except 6 | // in compliance with the License. 
You may obtain a copy of the License at 7 | // 8 | // http://opensource.org/licenses/MIT 9 | // 10 | // Unless required by applicable law or agreed to in writing, software distributed 11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the 13 | // specific language governing permissions and limitations under the License. 14 | 15 | #ifndef CEREAL_RAPIDJSON_INTERNAL_SWAP_H_ 16 | #define CEREAL_RAPIDJSON_INTERNAL_SWAP_H_ 17 | 18 | #include "../rapidjson.h" 19 | 20 | #if defined(__clang__) 21 | CEREAL_RAPIDJSON_DIAG_PUSH 22 | CEREAL_RAPIDJSON_DIAG_OFF(c++98-compat) 23 | #endif 24 | 25 | CEREAL_RAPIDJSON_NAMESPACE_BEGIN 26 | namespace internal { 27 | 28 | //! Custom swap() to avoid dependency on C++ header 29 | /*! \tparam T Type of the arguments to swap, should be instantiated with primitive C++ types only. 30 | \note This has the same semantics as std::swap(). 31 | */ 32 | template 33 | inline void Swap(T& a, T& b) CEREAL_RAPIDJSON_NOEXCEPT { 34 | T tmp = a; 35 | a = b; 36 | b = tmp; 37 | } 38 | 39 | } // namespace internal 40 | CEREAL_RAPIDJSON_NAMESPACE_END 41 | 42 | #if defined(__clang__) 43 | CEREAL_RAPIDJSON_DIAG_POP 44 | #endif 45 | 46 | #endif // CEREAL_RAPIDJSON_INTERNAL_SWAP_H_ 47 | -------------------------------------------------------------------------------- /src/optimizers.cpp: -------------------------------------------------------------------------------- 1 | #include "optimizers.h" 2 | 3 | using namespace tiny_dnn; 4 | using std::shared_ptr; 5 | 6 | std::shared_ptr get_optimizer(Rcpp::List opt) 7 | { 8 | std::string opt_name = Rcpp::as(opt["opt_name"]); 9 | 10 | if(opt_name == "rmsprop") 11 | { 12 | // RMSprop 13 | shared_ptr opt_ptr = std::make_shared(); 14 | opt_ptr->alpha = Rcpp::as(opt["lrate"]); 15 | opt_ptr->mu = Rcpp::as(opt["decay"]); 16 | 17 | return opt_ptr; 18 | } else if(opt_name == "adam") { 19 | // Adam 20 | shared_ptr opt_ptr = 
std::make_shared(); 21 | opt_ptr->alpha = Rcpp::as(opt["lrate"]); 22 | opt_ptr->b1 = Rcpp::as(opt["b1"]); 23 | opt_ptr->b2 = Rcpp::as(opt["b2"]); 24 | opt_ptr->b1_t = Rcpp::as(opt["b1_t"]); 25 | opt_ptr->b2_t = Rcpp::as(opt["b2_t"]); 26 | 27 | return opt_ptr; 28 | } else if(opt_name == "sgd") { 29 | // Stochastic gradient descent without momentum 30 | shared_ptr opt_ptr = std::make_shared(); 31 | opt_ptr->alpha = Rcpp::as(opt["lrate"]); 32 | opt_ptr->lambda = Rcpp::as(opt["decay"]); 33 | 34 | return opt_ptr; 35 | } else if(opt_name == "momentum") { 36 | // Stochastic gradient descent with momentum 37 | shared_ptr opt_ptr = std::make_shared(); 38 | opt_ptr->alpha = Rcpp::as(opt["lrate"]); 39 | opt_ptr->lambda = Rcpp::as(opt["decay"]); 40 | opt_ptr->mu = Rcpp::as(opt["momentum"]); 41 | 42 | return opt_ptr; 43 | } 44 | 45 | // Use Adagrad as a default method 46 | shared_ptr opt_ptr = std::make_shared(); 47 | opt_ptr->alpha = Rcpp::as(opt["lrate"]); 48 | 49 | return opt_ptr; 50 | } 51 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/maxpool_op_internal.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | namespace tiny_dnn { 11 | namespace kernels { 12 | 13 | inline void maxpool_op_internal( 14 | const tensor_t &in_data, 15 | tensor_t &out_data, 16 | std::vector> &max_idx, 17 | const std::vector> &out2in, 18 | const bool layer_parallelize) { 19 | for_i(layer_parallelize, in_data.size(), [&](int sample) { 20 | const vec_t &in = in_data[sample]; 21 | vec_t &a = out_data[sample]; 22 | std::vector &max = max_idx[sample]; 23 | 24 | for (serial_size_t i = 0; i < out2in.size(); i++) { 25 | const auto &in_index = out2in[i]; 26 | float_t max_value = std::numeric_limits::lowest(); 27 | serial_size_t idx = 0; 28 | for (auto j : in_index) { 29 | if (in[j] > max_value) { 30 | max_value = in[j]; 31 | idx = j; 32 | } 33 | } 34 | max[i] = idx; 35 | a[i] = max_value; 36 | } 37 | }); 38 | } 39 | 40 | inline void maxpool_grad_op_internal( 41 | tensor_t &prev_delta, 42 | const tensor_t &curr_delta, 43 | std::vector> &max_idx, 44 | const std::vector &in2out, 45 | const bool layer_parallelize) { 46 | for_i(layer_parallelize, prev_delta.size(), [&](int sample) { 47 | vec_t &prev = prev_delta[sample]; 48 | const vec_t &curr = curr_delta[sample]; 49 | const std::vector &max = max_idx[sample]; 50 | 51 | for (serial_size_t i = 0; i < in2out.size(); i++) { 52 | serial_size_t outi = in2out[i]; 53 | prev[i] = 54 | (max[outi] == static_cast(i)) ? curr[outi] : float_t{0}; 55 | } 56 | }); 57 | } 58 | 59 | } // namespace kernels 60 | } // namespace tiny_dnn 61 | -------------------------------------------------------------------------------- /inst/AUTHORS: -------------------------------------------------------------------------------- 1 | # Authors of tiny-dnn ordered by first contribution. 
2 | 3 | Taiga Noumi 4 | nyanp 5 | mr 6 | Filippo Lazzarini 7 | Marco foco 8 | Marco foco 9 | Craig Henderson 10 | Patrik Huber 11 | Changxu 12 | Yan 13 | Volker Grabe 14 | stozpark 15 | Bálint Fodor 16 | Tony Di Croce 17 | rmsalinas 18 | llerrito 19 | Vojtech Mrazek 20 | Tolga Birdal 21 | Aziz Baibabaev 22 | Marco Foco 23 | Marco Foco 24 | stereomatchingkiss 25 | Andre Holzner 26 | Raphael Isemann 27 | nyanp 28 | Raphael Isemann 29 | Alex Z 30 | Juha Reunanen 31 | edgarriba 32 | Juan Mo 33 | Wangyida 34 | beru 35 | Mikalai Drabovich 36 | Roland Persson 37 | Edgar Riba 38 | Yida Wang 39 | Juha Reunanen 40 | Andrew Murray 41 | H4kor 42 | Masahiro Imai 43 | Lior David 44 | Syoyo Fujita 45 | Jiaolong 46 | KonfrareAlbert 47 | bhack 48 | jichao zhang <1632206636@qq.com> 49 | Evgeniy Zheltonozhskiy 50 | Goran Rauker 51 | azsane 52 | -------------------------------------------------------------------------------- /src/tiny_dnn/util/colored_print.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | #include "tiny_dnn/config.h" 10 | 11 | #ifdef CNN_WINDOWS 12 | #ifndef NOMINMAX 13 | #define NOMINMAX 14 | #endif // ifdef NOMINMAX 15 | #include 16 | #endif 17 | 18 | namespace tiny_dnn { 19 | 20 | enum class Color { RED, GREEN, BLUE, YELLOW }; 21 | 22 | #ifdef CNN_WINDOWS 23 | inline WORD getColorAttr(Color c) { 24 | switch (c) { 25 | case Color::RED: return FOREGROUND_RED; 26 | case Color::GREEN: return FOREGROUND_GREEN; 27 | case Color::BLUE: return FOREGROUND_BLUE; 28 | case Color::YELLOW: return FOREGROUND_GREEN | FOREGROUND_RED; 29 | default: assert(0); return 0; 30 | } 31 | } 32 | #else 33 | inline const char *getColorEscape(Color c) { 34 | switch (c) { 35 | case Color::RED: return "\033[31m"; 36 | case Color::GREEN: return "\033[32m"; 37 | case Color::BLUE: return "\033[34m"; 38 | case Color::YELLOW: return "\033[33m"; 39 | default: assert(0); return ""; 40 | } 41 | } 42 | #endif 43 | 44 | inline void coloredPrint(Color c, const char *fmt, ...) { 45 | va_list args; 46 | va_start(args, fmt); 47 | 48 | #ifdef CNN_WINDOWS 49 | const HANDLE std_handle = ::GetStdHandle(STD_OUTPUT_HANDLE); 50 | 51 | CONSOLE_SCREEN_BUFFER_INFO buffer_info; 52 | ::GetConsoleScreenBufferInfo(std_handle, &buffer_info); 53 | const WORD old_color = buffer_info.wAttributes; 54 | const WORD new_color = getColorAttr(c) | FOREGROUND_INTENSITY; 55 | 56 | fflush(stdout); 57 | ::SetConsoleTextAttribute(std_handle, new_color); 58 | 59 | vprintf(fmt, args); 60 | 61 | fflush(stdout); 62 | ::SetConsoleTextAttribute(std_handle, old_color); 63 | #else 64 | printf("%s", getColorEscape(c)); 65 | vprintf(fmt, args); 66 | printf("\033[m"); 67 | #endif 68 | va_end(args); 69 | } 70 | 71 | inline void coloredPrint(Color c, const std::string &msg) { 72 | coloredPrint(c, msg.c_str()); 73 | } 74 | 75 | } // namespace tiny_dnn 76 | -------------------------------------------------------------------------------- /src/cereal/types/functional.hpp: 
-------------------------------------------------------------------------------- 1 | /*! \file functional.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2016, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_FUNCTIONAL_HPP_ 31 | #define CEREAL_TYPES_FUNCTIONAL_HPP_ 32 | 33 | #include 34 | 35 | namespace cereal 36 | { 37 | //! 
Saving for std::less 38 | template inline 39 | void serialize( Archive &, std::less & ) 40 | { } 41 | } // namespace cereal 42 | 43 | #endif // CEREAL_TYPES_FUNCTIONAL_HPP_ 44 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/fully_connected_op_nnpack.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/core/backend.h" 11 | #include "tiny_dnn/core/params/fully_params.h" 12 | 13 | namespace tiny_dnn { 14 | namespace kernels { 15 | 16 | inline void fully_connected_op_nnpack(const tensor_t &in_data, 17 | const vec_t &W, 18 | const vec_t &bias, 19 | tensor_t &out_data, 20 | const fully_params ¶ms, 21 | const bool layer_parallelize) { 22 | #ifdef CNN_USE_NNPACK 23 | // call singleton to initialize NNPACK 24 | NNPackInitializer::getInstance().initialize(); 25 | 26 | const float *kernel_ptr = W.data(); 27 | const float *input_ptr = in_data[0].data(); 28 | float *output_ptr = out_data[0].data(); 29 | 30 | // TODO: embed it into a class 31 | const size_t num_mkl_threads = 1; 32 | pthreadpool_t threadpool = pthreadpool_create(num_mkl_threads); 33 | 34 | const auto status = 35 | nnp_fully_connected_inference(params.in_size_, params.out_size_, input_ptr, 36 | kernel_ptr, output_ptr, threadpool); 37 | 38 | if (status != nnp_status_success) { 39 | throw nn_error("Could not succeed with nnp_max_pooling_output"); 40 | } 41 | 42 | // TODO: embed it into a class 43 | pthreadpool_destroy(threadpool); 44 | 45 | if (params.has_bias_) { 46 | for_i(layer_parallelize, params.out_size_, 47 | [&](int i) { output_ptr[i] += bias[i]; }); 48 | } 49 | #else 50 | CNN_UNREFERENCED_PARAMETER(in_data); 51 | CNN_UNREFERENCED_PARAMETER(W); 52 | 
CNN_UNREFERENCED_PARAMETER(bias); 53 | CNN_UNREFERENCED_PARAMETER(out_data); 54 | CNN_UNREFERENCED_PARAMETER(params); 55 | CNN_UNREFERENCED_PARAMETER(layer_parallelize); 56 | throw nn_error("TinyDNN has not been compiled with NNPACK support."); 57 | #endif 58 | } 59 | 60 | } // namespace kernels 61 | } // namespace tiny_dnn 62 | -------------------------------------------------------------------------------- /src/tiny_dnn/layers/feedforward_layer.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 7 | */ 8 | #pragma once 9 | #include "tiny_dnn/activations/activation_function.h" 10 | #include "tiny_dnn/layers/layer.h" 11 | 12 | namespace tiny_dnn { 13 | 14 | /** 15 | * single-input, single-output network with activation function 16 | **/ 17 | template 18 | class feedforward_layer : public layer { 19 | public: 20 | explicit feedforward_layer(const std::vector &in_data_type) 21 | : layer(in_data_type, std_output_order(true)) {} 22 | activation::function &activation_function() { return h_; } 23 | std::pair out_value_range() const override { 24 | return h_.scale(); 25 | } 26 | 27 | public: 28 | void forward_activation(tensor_t &a_tensor, tensor_t &out_tensor) { 29 | serial_size_t out_dim = out_shape()[0].size(); 30 | 31 | for_i(a_tensor.size(), [&](int sample) { 32 | vec_t &out = a_tensor[sample]; 33 | vec_t &a = out_tensor[sample]; 34 | out.resize(out_dim); 35 | a.resize(out_dim); 36 | h_.itef(out, a, out_dim); 37 | }); 38 | } 39 | 40 | void backward_activation(const tensor_t &prev_delta, 41 | const tensor_t &this_out, 42 | tensor_t &curr_delta) { 43 | // @todo consider parallelism 44 | for_i(this_out.size(), [&](serial_size_t sample) { 45 | const vec_t &out_vec = this_out[sample]; 46 | const vec_t &prev_delta_vec = prev_delta[sample]; 47 | vec_t &curr_delta_vec = 
curr_delta[sample]; 48 | 49 | const serial_size_t len = 50 | static_cast(prev_delta_vec.size()); 51 | 52 | if (h_.one_hot()) { 53 | h_.itedf(curr_delta_vec, prev_delta_vec, out_vec, len); 54 | } else { 55 | for (serial_size_t c = 0; c < len; c++) { 56 | vec_t df = h_.df(out_vec, c); 57 | curr_delta_vec[c] = vectorize::dot(&prev_delta_vec[0], &df[0], len); 58 | } 59 | } 60 | }); 61 | } 62 | 63 | Activation h_; 64 | }; 65 | 66 | } // namespace tiny_dnn 67 | -------------------------------------------------------------------------------- /src/cereal/types/utility.hpp: -------------------------------------------------------------------------------- 1 | /*! \file utility.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. 
IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_UTILITY_HPP_ 31 | #define CEREAL_TYPES_UTILITY_HPP_ 32 | 33 | #include 34 | #include 35 | 36 | namespace cereal 37 | { 38 | //! Serializing for std::pair 39 | template inline 40 | void CEREAL_SERIALIZE_FUNCTION_NAME( Archive & ar, std::pair & pair ) 41 | { 42 | ar( CEREAL_NVP_("first", pair.first), 43 | CEREAL_NVP_("second", pair.second) ); 44 | } 45 | } // namespace cereal 46 | 47 | #endif // CEREAL_TYPES_UTILITY_HPP_ 48 | -------------------------------------------------------------------------------- /src/tiny_dnn/config.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2013, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include 11 | #include 12 | 13 | /** 14 | * define if you want to use intel TBB library 15 | */ 16 | // #define CNN_USE_TBB 17 | 18 | /** 19 | * define to enable avx vectorization 20 | */ 21 | // #define CNN_USE_AVX 22 | 23 | /** 24 | * define to enable sse2 vectorization 25 | */ 26 | // #define CNN_USE_SSE 27 | 28 | /** 29 | * define to enable OMP parallelization 30 | */ 31 | // #define CNN_USE_OMP 32 | 33 | /** 34 | * define to enable Grand Central Dispatch parallelization 35 | */ 36 | //#define CNN_USE_GCD 37 | 38 | /** 39 | * define to use exceptions 40 | */ 41 | #define CNN_USE_EXCEPTIONS 42 | 43 | /** 44 | * comment out if you want tiny-dnn to be quiet 45 | */ 46 | // #define CNN_USE_STDOUT 47 | 48 | // #define CNN_SINGLE_THREAD 49 | 50 | /** 51 | * disable serialization/deserialization function 52 | * You can uncomment this to speedup compilation & linking time, 53 | * if you don't use network::save / network::load functions. 54 | **/ 55 | #define CNN_NO_SERIALIZATION 56 | 57 | /** 58 | * Enable Image API support. 59 | * Currently we use stb by default. 60 | **/ 61 | //#define DNN_USE_IMAGE_API 62 | 63 | /** 64 | * Enable Gemmlowp support. 65 | **/ 66 | #ifdef USE_GEMMLOWP 67 | #if !defined(_MSC_VER) && !defined(_WIN32) && !defined(WIN32) 68 | #define CNN_USE_GEMMLOWP // gemmlowp doesn't support MSVC/mingw 69 | #endif 70 | #endif // USE_GEMMLOWP 71 | 72 | /** 73 | * number of task in batch-gradient-descent. 74 | * @todo automatic optimization 75 | */ 76 | #ifdef CNN_USE_OMP 77 | #define CNN_TASK_SIZE 100 78 | #else 79 | #define CNN_TASK_SIZE 8 80 | #endif 81 | 82 | namespace tiny_dnn { 83 | 84 | /** 85 | * calculation data type 86 | * you can change it to float, or user defined class (fixed point,etc) 87 | **/ 88 | #ifdef CNN_USE_DOUBLE 89 | typedef double float_t; 90 | #else 91 | typedef float float_t; 92 | #endif 93 | 94 | /** 95 | * size of layer, model, data etc. 
96 | * change to smaller type if memory footprint is severe 97 | **/ 98 | typedef std::uint32_t serial_size_t; 99 | 100 | } // namespace tiny_dnn 101 | -------------------------------------------------------------------------------- /src/classification.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "layers.h" 4 | #include "optimizers.h" 5 | 6 | // [[Rcpp::export]] 7 | SEXP net_seq_classification_fit( 8 | Rcpp::XPtr< tiny_dnn::network > net, 9 | Rcpp::NumericMatrix x, 10 | Rcpp::IntegerVector y, 11 | int batch_size, 12 | int epochs, 13 | Rcpp::List opt, 14 | bool verbose 15 | ) 16 | { 17 | using namespace tiny_dnn; 18 | 19 | const int n = x.nrow(); 20 | const int p = x.ncol(); 21 | 22 | std::vector input; 23 | std::vector output(n); 24 | 25 | input.reserve(n); 26 | 27 | // It looks like that currently tiny-dnn does not shuffle data 28 | // during training, so we provide a shuffled data set to tiny-dnn 29 | Rcpp::IntegerVector ind = Rcpp::sample(n, n, false, R_NilValue, false); 30 | 31 | // Copy data 32 | vec_t rowx(p); 33 | for(int i = 0; i < n; i++) 34 | { 35 | for(int j = 0; j < p; j++) 36 | { 37 | rowx[j] = x(ind[i], j); 38 | } 39 | input.push_back(rowx); 40 | 41 | output[i] = y[ind[i]]; 42 | } 43 | 44 | std::shared_ptr opt_ptr = get_optimizer(opt); 45 | 46 | timer t; 47 | int epoch = 0; 48 | 49 | net->train(*opt_ptr, input, output, batch_size, epochs, 50 | // called for each mini-batch 51 | []() { 52 | 53 | }, 54 | // called for each epoch 55 | [verbose, &t, &epoch]() { 56 | if(verbose) 57 | { 58 | Rcpp::Rcout << "[Epoch " << epoch << "]: " << t.elapsed() << "s" << std::endl; 59 | t.restart(); 60 | epoch++; 61 | } 62 | } 63 | ); 64 | 65 | return R_NilValue; 66 | } 67 | 68 | 69 | 70 | // [[Rcpp::export]] 71 | Rcpp::IntegerVector net_seq_classification_predict( 72 | Rcpp::XPtr< tiny_dnn::network > net, 73 | Rcpp::NumericMatrix x 74 | ) 75 | { 76 | using namespace tiny_dnn; 77 | 78 | 
const int n = x.nrow(); 79 | const int p = x.ncol(); 80 | 81 | Rcpp::IntegerVector pred(n); 82 | vec_t row(p); 83 | 84 | for(int i = 0; i < n; i++) 85 | { 86 | for(int j = 0; j < p; j++) 87 | { 88 | row[j] = x(i, j); 89 | } 90 | pred[i] = net->predict_label(row); 91 | } 92 | 93 | return pred; 94 | } 95 | -------------------------------------------------------------------------------- /src/tiny_dnn/util/random.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2013, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 7 | */ 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | 14 | #include "nn_error.h" 15 | #include "tiny_dnn/config.h" 16 | 17 | namespace tiny_dnn { 18 | 19 | class random_generator { 20 | public: 21 | static random_generator &get_instance() { 22 | static random_generator instance; 23 | return instance; 24 | } 25 | 26 | std::mt19937 &operator()() { return gen_; } 27 | 28 | void set_seed(unsigned int seed) { gen_.seed(seed); } 29 | 30 | private: 31 | // avoid gen_(0) for MSVC known issue 32 | // https://connect.microsoft.com/VisualStudio/feedback/details/776456 33 | random_generator() : gen_(1) {} 34 | std::mt19937 gen_; 35 | }; 36 | 37 | template 38 | inline typename std::enable_if::value, T>::type 39 | uniform_rand(T min, T max) { 40 | std::uniform_int_distribution dst(min, max); 41 | return dst(random_generator::get_instance()()); 42 | } 43 | 44 | template 45 | inline typename std::enable_if::value, T>::type 46 | uniform_rand(T min, T max) { 47 | std::uniform_real_distribution dst(min, max); 48 | return dst(random_generator::get_instance()()); 49 | } 50 | 51 | template 52 | inline typename std::enable_if::value, T>::type 53 | gaussian_rand(T mean, T sigma) { 54 | std::normal_distribution dst(mean, sigma); 55 | return dst(random_generator::get_instance()()); 56 | } 57 | 58 | inline 
void set_random_seed(unsigned int seed) { 59 | random_generator::get_instance().set_seed(seed); 60 | } 61 | 62 | template 63 | inline int uniform_idx(const Container &t) { 64 | return uniform_rand(0, int(t.size() - 1)); 65 | } 66 | 67 | inline bool bernoulli(float_t p) { 68 | return uniform_rand(float_t{0}, float_t{1}) <= p; 69 | } 70 | 71 | template 72 | void uniform_rand(Iter begin, Iter end, float_t min, float_t max) { 73 | for (Iter it = begin; it != end; ++it) *it = uniform_rand(min, max); 74 | } 75 | 76 | template 77 | void gaussian_rand(Iter begin, Iter end, float_t mean, float_t sigma) { 78 | for (Iter it = begin; it != end; ++it) *it = gaussian_rand(mean, sigma); 79 | } 80 | 81 | } // namespace tiny_dnn 82 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/maxpool_op_nnpack.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/core/backend.h" 11 | #include "tiny_dnn/core/params/maxpool_params.h" 12 | 13 | namespace tiny_dnn { 14 | namespace kernels { 15 | 16 | inline void maxpool_op_nnpack(const tensor_t &in_data, 17 | tensor_t &out_data, 18 | const maxpool_params ¶ms) { 19 | #ifdef CNN_USE_NNPACK 20 | // call singleton to initialize NNPACK 21 | NNPackInitializer::getInstance().initialize(); 22 | 23 | const serial_size_t input_channels = params.in.depth_; 24 | 25 | const nnp_size input_size = {static_cast(params.in.width_), 26 | static_cast(params.in.height_)}; 27 | 28 | const nnp_padding input_padding = { 29 | static_cast(0), // top 30 | static_cast(0), // right 31 | static_cast(0), // bottom 32 | static_cast(0) // left 33 | }; 34 | 35 | const nnp_size pooling_size = {static_cast(params.pool_size_x), 36 | static_cast(params.pool_size_y)}; 37 | 38 | const nnp_size pooling_stride = {static_cast(params.stride_x), 39 | static_cast(params.stride_y)}; 40 | 41 | const float *input_ptr = in_data[0].data(); 42 | float *output_ptr = out_data[0].data(); 43 | 44 | // TODO: embed it into a class 45 | const size_t num_mkl_threads = 1; 46 | pthreadpool_t threadpool = pthreadpool_create(num_mkl_threads); 47 | 48 | const size_t batch_size = 1; 49 | 50 | const auto status = nnp_max_pooling_output( 51 | batch_size, input_channels, input_size, input_padding, pooling_size, 52 | pooling_stride, input_ptr, output_ptr, threadpool); 53 | 54 | if (status != nnp_status_success) { 55 | throw nn_error("Could not succeed with nnp_max_pooling_output"); 56 | } 57 | 58 | // TODO: embed it into a class 59 | pthreadpool_destroy(threadpool); 60 | #else 61 | CNN_UNREFERENCED_PARAMETER(in_data); 62 | CNN_UNREFERENCED_PARAMETER(out_data); 63 | CNN_UNREFERENCED_PARAMETER(params); 64 | throw nn_error("TinyDNN has not been compiled with NNPACK support."); 65 | #endif 66 | } 67 | 68 | } // namespace kernels 69 | } // namespace tiny_dnn 70 | 
-------------------------------------------------------------------------------- /src/tiny_dnn/util/macro.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above copyright 10 | notice, this list of conditions and the following disclaimer in the 11 | documentation and/or other materials provided with the distribution. 12 | * Neither the name of the tiny-dnn nor the 13 | names of its contributors may be used to endorse or promote products 14 | derived from this software without specific prior written permission. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 | AND ANY 18 | EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 21 | FOR ANY 22 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 | THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#pragma once

// Cast-to-void idiom: silences "unused parameter" warnings for parameters
// that are intentionally ignored (e.g. no-op back_propagation overrides).
#define CNN_UNREFERENCED_PARAMETER(x) (void)(x)

#if defined(_MSC_VER) && (_MSC_VER <= 1800)
// msvc2013 doesn't have move constructor
#define CNN_DEFAULT_MOVE_CONSTRUCTOR_UNAVAILABLE
#define CNN_DEFAULT_ASSIGNMENT_OPERATOR_UNAVAILABLE
#endif

#if defined(_MSC_VER) && (_MSC_VER <= 1800)
// msvc2013 doesn't have alignof operator
#define CNN_ALIGNOF(x) __alignof(x)
#else
#define CNN_ALIGNOF(x) alignof(x)
#endif

#if !defined(_MSC_VER) || (_MSC_VER >= 1900)  // default generation of move
                                              // constructor is unsupported in
                                              // VS2013
#define CNN_USE_DEFAULT_MOVE_CONSTRUCTORS
#endif

// MinGW provides a POSIX-like environment, so only native Win32 toolchains
// (MSVC) take the CNN_WINDOWS code paths.
#if defined _WIN32 && !defined(__MINGW32__)
#define CNN_WINDOWS
#endif
--------------------------------------------------------------------------------
/src/tiny_dnn/layers/arithmetic_layer.h:
--------------------------------------------------------------------------------
/*
    Copyright (c) 2013, Taiga Nomi
    All rights reserved.

    Use of this source code is governed by a BSD-style license that can be found
    in the LICENSE file.
*/
#pragma once
#include "tiny_dnn/layers/layer.h"
#include "tiny_dnn/util/util.h"

namespace tiny_dnn {

/**
 * element-wise add N vectors ```y_i = x0_i + x1_i + ...
+ xnum_i``` 16 | **/ 17 | class elementwise_add_layer : public layer { 18 | public: 19 | /** 20 | * @param num_args [in] number of inputs 21 | * @param dim [in] number of elements for each input 22 | */ 23 | elementwise_add_layer(serial_size_t num_args, serial_size_t dim) 24 | : layer(std::vector(num_args, vector_type::data), 25 | {vector_type::data}), 26 | num_args_(num_args), 27 | dim_(dim) {} 28 | 29 | std::string layer_type() const override { return "elementwise-add"; } 30 | 31 | std::vector in_shape() const override { 32 | return std::vector(num_args_, shape3d(dim_, 1, 1)); 33 | } 34 | 35 | std::vector out_shape() const override { 36 | return {shape3d(dim_, 1, 1)}; 37 | } 38 | 39 | void forward_propagation(const std::vector &in_data, 40 | std::vector &out_data) override { 41 | const tensor_t &in1 = *in_data[0]; 42 | tensor_t &out = *out_data[0]; 43 | 44 | out = in1; 45 | 46 | // @todo parallelize 47 | for (size_t sample = 0; sample < in1.size(); ++sample) { 48 | for (serial_size_t i = 1; i < num_args_; i++) { 49 | std::transform((*in_data[i])[sample].begin(), 50 | (*in_data[i])[sample].end(), out[sample].begin(), 51 | out[sample].begin(), 52 | [](float_t x, float_t y) { return x + y; }); 53 | } 54 | } 55 | } 56 | 57 | void back_propagation(const std::vector &in_data, 58 | const std::vector &out_data, 59 | std::vector &out_grad, 60 | std::vector &in_grad) override { 61 | CNN_UNREFERENCED_PARAMETER(in_data); 62 | CNN_UNREFERENCED_PARAMETER(out_data); 63 | for (serial_size_t i = 0; i < num_args_; i++) *in_grad[i] = *out_grad[0]; 64 | } 65 | 66 | #ifndef CNN_NO_SERIALIZATION 67 | friend struct serialization_buddy; 68 | #endif 69 | 70 | private: 71 | serial_size_t num_args_; 72 | serial_size_t dim_; 73 | }; 74 | 75 | } // namespace tiny_dnn 76 | -------------------------------------------------------------------------------- /src/cereal/types/complex.hpp: -------------------------------------------------------------------------------- 1 | /*! 
\file complex.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_COMPLEX_HPP_ 31 | #define CEREAL_TYPES_COMPLEX_HPP_ 32 | 33 | #include 34 | 35 | namespace cereal 36 | { 37 | //! 
Serializing (save) for std::complex 38 | template inline 39 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::complex const & comp ) 40 | { 41 | ar( CEREAL_NVP_("real", comp.real()), 42 | CEREAL_NVP_("imag", comp.imag()) ); 43 | } 44 | 45 | //! Serializing (load) for std::complex 46 | template inline 47 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::complex & bits ) 48 | { 49 | T real, imag; 50 | ar( CEREAL_NVP_("real", real), 51 | CEREAL_NVP_("imag", imag) ); 52 | bits = {real, imag}; 53 | } 54 | } // namespace cereal 55 | 56 | #endif // CEREAL_TYPES_COMPLEX_HPP_ 57 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/ostreamwrapper.h: -------------------------------------------------------------------------------- 1 | // Tencent is pleased to support the open source community by making RapidJSON available. 2 | // 3 | // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 4 | // 5 | // Licensed under the MIT License (the "License"); you may not use this file except 6 | // in compliance with the License. You may obtain a copy of the License at 7 | // 8 | // http://opensource.org/licenses/MIT 9 | // 10 | // Unless required by applicable law or agreed to in writing, software distributed 11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the 13 | // specific language governing permissions and limitations under the License. 14 | 15 | #include "stream.h" 16 | #include 17 | 18 | #ifdef __clang__ 19 | CEREAL_RAPIDJSON_DIAG_PUSH 20 | CEREAL_RAPIDJSON_DIAG_OFF(padded) 21 | #endif 22 | 23 | CEREAL_RAPIDJSON_NAMESPACE_BEGIN 24 | 25 | //! Wrapper of \c std::basic_ostream into RapidJSON's Stream concept. 26 | /*! 
27 | The classes can be wrapped including but not limited to: 28 | 29 | - \c std::ostringstream 30 | - \c std::stringstream 31 | - \c std::wpstringstream 32 | - \c std::wstringstream 33 | - \c std::ifstream 34 | - \c std::fstream 35 | - \c std::wofstream 36 | - \c std::wfstream 37 | 38 | \tparam StreamType Class derived from \c std::basic_ostream. 39 | */ 40 | 41 | template 42 | class BasicOStreamWrapper { 43 | public: 44 | typedef typename StreamType::char_type Ch; 45 | BasicOStreamWrapper(StreamType& stream) : stream_(stream) {} 46 | 47 | void Put(Ch c) { 48 | stream_.put(c); 49 | } 50 | 51 | void Flush() { 52 | stream_.flush(); 53 | } 54 | 55 | // Not implemented 56 | char Peek() const { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 57 | char Take() { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 58 | size_t Tell() const { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 59 | char* PutBegin() { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 60 | size_t PutEnd(char*) { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 61 | 62 | private: 63 | BasicOStreamWrapper(const BasicOStreamWrapper&); 64 | BasicOStreamWrapper& operator=(const BasicOStreamWrapper&); 65 | 66 | StreamType& stream_; 67 | }; 68 | 69 | typedef BasicOStreamWrapper OStreamWrapper; 70 | typedef BasicOStreamWrapper WOStreamWrapper; 71 | 72 | #ifdef __clang__ 73 | CEREAL_RAPIDJSON_DIAG_POP 74 | #endif 75 | 76 | CEREAL_RAPIDJSON_NAMESPACE_END 77 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/internal/stack.h: -------------------------------------------------------------------------------- 1 | #ifndef RAPIDJSON_INTERNAL_STACK_H_ 2 | #define RAPIDJSON_INTERNAL_STACK_H_ 3 | 4 | namespace rapidjson { 5 | namespace internal { 6 | 7 | /////////////////////////////////////////////////////////////////////////////// 8 | // Stack 9 | 10 | //! A type-unsafe stack for storing different types of data. 11 | /*! \tparam Allocator Allocator for allocating stack memory. 
12 | */ 13 | template 14 | class Stack { 15 | public: 16 | Stack(Allocator* allocator, size_t stack_capacity) : allocator_(allocator), own_allocator_(0), stack_(0), stack_top_(0), stack_end_(0), stack_capacity_(stack_capacity) { 17 | RAPIDJSON_ASSERT(stack_capacity_ > 0); 18 | if (!allocator_) 19 | own_allocator_ = allocator_ = new Allocator(); 20 | stack_top_ = stack_ = (char*)allocator_->Malloc(stack_capacity_); 21 | stack_end_ = stack_ + stack_capacity_; 22 | } 23 | 24 | ~Stack() { 25 | Allocator::Free(stack_); 26 | delete own_allocator_; // Only delete if it is owned by the stack 27 | } 28 | 29 | void Clear() { /*stack_top_ = 0;*/ stack_top_ = stack_; } 30 | 31 | template 32 | T* Push(size_t count = 1) { 33 | // Expand the stack if needed 34 | if (stack_top_ + sizeof(T) * count >= stack_end_) { 35 | size_t new_capacity = stack_capacity_ * 2; 36 | size_t size = GetSize(); 37 | size_t new_size = GetSize() + sizeof(T) * count; 38 | if (new_capacity < new_size) 39 | new_capacity = new_size; 40 | stack_ = (char*)allocator_->Realloc(stack_, stack_capacity_, new_capacity); 41 | stack_capacity_ = new_capacity; 42 | stack_top_ = stack_ + size; 43 | stack_end_ = stack_ + stack_capacity_; 44 | } 45 | T* ret = (T*)stack_top_; 46 | stack_top_ += sizeof(T) * count; 47 | return ret; 48 | } 49 | 50 | template 51 | T* Pop(size_t count) { 52 | RAPIDJSON_ASSERT(GetSize() >= count * sizeof(T)); 53 | stack_top_ -= count * sizeof(T); 54 | return (T*)stack_top_; 55 | } 56 | 57 | template 58 | T* Top() { 59 | RAPIDJSON_ASSERT(GetSize() >= sizeof(T)); 60 | return (T*)(stack_top_ - sizeof(T)); 61 | } 62 | 63 | template 64 | T* Bottom() { return (T*)stack_; } 65 | 66 | Allocator& GetAllocator() { return *allocator_; } 67 | size_t GetSize() const { return stack_top_ - stack_; } 68 | size_t GetCapacity() const { return stack_capacity_; } 69 | 70 | private: 71 | Allocator* allocator_; 72 | Allocator* own_allocator_; 73 | char *stack_; 74 | char *stack_top_; 75 | char *stack_end_; 76 | 
size_t stack_capacity_; 77 | }; 78 | 79 | } // namespace internal 80 | } // namespace rapidjson 81 | 82 | #endif // RAPIDJSON_STACK_H_ 83 | -------------------------------------------------------------------------------- /src/cereal/types/list.hpp: -------------------------------------------------------------------------------- 1 | /*! \file list.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. 
IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_LIST_HPP_ 31 | #define CEREAL_TYPES_LIST_HPP_ 32 | 33 | #include 34 | #include 35 | 36 | namespace cereal 37 | { 38 | //! Saving for std::list 39 | template inline 40 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::list const & list ) 41 | { 42 | ar( make_size_tag( static_cast(list.size()) ) ); 43 | 44 | for( auto const & i : list ) 45 | ar( i ); 46 | } 47 | 48 | //! Loading for std::list 49 | template inline 50 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::list & list ) 51 | { 52 | size_type size; 53 | ar( make_size_tag( size ) ); 54 | 55 | list.resize( static_cast( size ) ); 56 | 57 | for( auto & i : list ) 58 | ar( i ); 59 | } 60 | } // namespace cereal 61 | 62 | #endif // CEREAL_TYPES_LIST_HPP_ 63 | -------------------------------------------------------------------------------- /src/cereal/types/deque.hpp: -------------------------------------------------------------------------------- 1 | /*! \file deque.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 
12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_DEQUE_HPP_ 31 | #define CEREAL_TYPES_DEQUE_HPP_ 32 | 33 | #include 34 | #include 35 | 36 | namespace cereal 37 | { 38 | //! Saving for std::deque 39 | template inline 40 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::deque const & deque ) 41 | { 42 | ar( make_size_tag( static_cast(deque.size()) ) ); 43 | 44 | for( auto const & i : deque ) 45 | ar( i ); 46 | } 47 | 48 | //! 
Loading for std::deque 49 | template inline 50 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::deque & deque ) 51 | { 52 | size_type size; 53 | ar( make_size_tag( size ) ); 54 | 55 | deque.resize( static_cast( size ) ); 56 | 57 | for( auto & i : deque ) 58 | ar( i ); 59 | } 60 | } // namespace cereal 61 | 62 | #endif // CEREAL_TYPES_DEQUE_HPP_ 63 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/tiny_deconv2d_kernel.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/core/params/deconv_params.h" 11 | 12 | namespace tiny_dnn { 13 | namespace core { 14 | namespace kernels { 15 | 16 | inline void tiny_deconv2d_kernel(const deconv_params ¶ms, 17 | const tensor_t &in, 18 | const vec_t &W, 19 | const vec_t &bias, 20 | tensor_t &a, 21 | const bool layer_parallelize) { 22 | for_i(layer_parallelize, in.size(), [&](int sample) { 23 | for (serial_size_t o = 0; o < params.out.depth_; o++) { 24 | for (serial_size_t inc = 0; inc < params.in.depth_; inc++) { 25 | if (!params.tbl.is_connected(o, inc)) continue; 26 | 27 | serial_size_t idx = 0; 28 | idx = params.in.depth_ * o + inc; 29 | idx = params.weight.get_index(0, 0, idx); 30 | assert(idx < W.size()); 31 | const float_t *pw = &W[idx]; 32 | 33 | idx = params.in.get_index(0, 0, inc); 34 | assert(static_cast(sample) < in.size() && 35 | idx < in[sample].size()); 36 | const float_t *pi = &in[sample][idx]; 37 | 38 | idx = params.out.get_index(0, 0, o); 39 | assert(static_cast(sample) < a.size() && 40 | idx < a[sample].size()); 41 | float_t *pa = &a[sample][idx]; 42 | 43 | for (serial_size_t y = 0; y < params.in.height_; y++) { 44 | for (serial_size_t x = 0; x < params.in.width_; x++) { 45 | const 
float_t *ppw = pw; 46 | const float_t *ppi = pi + y * params.in.width_ + x; 47 | // should be optimized for small kernel(3x3,5x5) 48 | for (serial_size_t wy = 0; wy < params.weight.height_; wy++) { 49 | for (serial_size_t wx = 0; wx < params.weight.width_; wx++) { 50 | pa[(y * params.h_stride + wy) * params.out.width_ + 51 | (x * params.w_stride + wx)] += 52 | ppw[wy * params.weight.width_ + wx] * (*ppi); 53 | } 54 | } 55 | } 56 | } 57 | } 58 | 59 | if (params.has_bias) { 60 | float_t *pa = &a[sample][params.out.get_index(0, 0, o)]; 61 | float_t *paa = pa + params.out.width_ * params.out.height_; 62 | std::for_each(pa, paa, [&](float_t &f) { f += bias[o]; }); 63 | } 64 | } 65 | }); 66 | } 67 | 68 | } // namespace kernels 69 | } // namespace core 70 | } // namespace tiny_dnn 71 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/memorybuffer.h: -------------------------------------------------------------------------------- 1 | // Tencent is pleased to support the open source community by making RapidJSON available. 2 | // 3 | // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 4 | // 5 | // Licensed under the MIT License (the "License"); you may not use this file except 6 | // in compliance with the License. You may obtain a copy of the License at 7 | // 8 | // http://opensource.org/licenses/MIT 9 | // 10 | // Unless required by applicable law or agreed to in writing, software distributed 11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the 13 | // specific language governing permissions and limitations under the License. 14 | 15 | #ifndef CEREAL_RAPIDJSON_MEMORYBUFFER_H_ 16 | #define CEREAL_RAPIDJSON_MEMORYBUFFER_H_ 17 | 18 | #include "stream.h" 19 | #include "internal/stack.h" 20 | 21 | CEREAL_RAPIDJSON_NAMESPACE_BEGIN 22 | 23 | //! 
Represents an in-memory output byte stream. 24 | /*! 25 | This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream. 26 | 27 | It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file. 28 | 29 | Differences between MemoryBuffer and StringBuffer: 30 | 1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer. 31 | 2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator. 32 | 33 | \tparam Allocator type for allocating memory buffer. 34 | \note implements Stream concept 35 | */ 36 | template 37 | struct GenericMemoryBuffer { 38 | typedef char Ch; // byte 39 | 40 | GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {} 41 | 42 | void Put(Ch c) { *stack_.template Push() = c; } 43 | void Flush() {} 44 | 45 | void Clear() { stack_.Clear(); } 46 | void ShrinkToFit() { stack_.ShrinkToFit(); } 47 | Ch* Push(size_t count) { return stack_.template Push(count); } 48 | void Pop(size_t count) { stack_.template Pop(count); } 49 | 50 | const Ch* GetBuffer() const { 51 | return stack_.template Bottom(); 52 | } 53 | 54 | size_t GetSize() const { return stack_.GetSize(); } 55 | 56 | static const size_t kDefaultCapacity = 256; 57 | mutable internal::Stack stack_; 58 | }; 59 | 60 | typedef GenericMemoryBuffer<> MemoryBuffer; 61 | 62 | //! Implement specialized version of PutN() with memset() for better performance. 
63 | template<> 64 | inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) { 65 | std::memset(memoryBuffer.stack_.Push(n), c, n * sizeof(c)); 66 | } 67 | 68 | CEREAL_RAPIDJSON_NAMESPACE_END 69 | 70 | #endif // CEREAL_RAPIDJSON_MEMORYBUFFER_H_ 71 | -------------------------------------------------------------------------------- /src/cereal/external/rapidxml/license.txt: -------------------------------------------------------------------------------- 1 | Use of this software is granted under one of the following two licenses, 2 | to be chosen freely by the user. 3 | 4 | 1. Boost Software License - Version 1.0 - August 17th, 2003 5 | =============================================================================== 6 | 7 | Copyright (c) 2006, 2007 Marcin Kalicinski 8 | 9 | Permission is hereby granted, free of charge, to any person or organization 10 | obtaining a copy of the software and accompanying documentation covered by 11 | this license (the "Software") to use, reproduce, display, distribute, 12 | execute, and transmit the Software, and to prepare derivative works of the 13 | Software, and to permit third-parties to whom the Software is furnished to 14 | do so, all subject to the following: 15 | 16 | The copyright notices in the Software and this entire statement, including 17 | the above license grant, this restriction and the following disclaimer, 18 | must be included in all copies of the Software, in whole or in part, and 19 | all derivative works of the Software, unless such copies or derivative 20 | works are solely in the form of machine-executable object code generated by 21 | a source language processor. 22 | 23 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 | FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT 26 | SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE 27 | FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, 28 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 29 | DEALINGS IN THE SOFTWARE. 30 | 31 | 2. The MIT License 32 | =============================================================================== 33 | 34 | Copyright (c) 2006, 2007 Marcin Kalicinski 35 | 36 | Permission is hereby granted, free of charge, to any person obtaining a copy 37 | of this software and associated documentation files (the "Software"), to deal 38 | in the Software without restriction, including without limitation the rights 39 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 40 | of the Software, and to permit persons to whom the Software is furnished to do so, 41 | subject to the following conditions: 42 | 43 | The above copyright notice and this permission notice shall be included in all 44 | copies or substantial portions of the Software. 45 | 46 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 47 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 48 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 49 | THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 50 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 51 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 52 | IN THE SOFTWARE. 
53 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/genericstream.h: -------------------------------------------------------------------------------- 1 | // Generic*Stream code from https://code.google.com/p/rapidjson/issues/detail?id=20 2 | #ifndef RAPIDJSON_GENERICSTREAM_H_ 3 | #define RAPIDJSON_GENERICSTREAM_H_ 4 | 5 | #include "rapidjson.h" 6 | #include 7 | 8 | #ifdef _MSC_VER 9 | #pragma warning(push) 10 | #pragma warning(disable: 4127) // conditional expression is constant 11 | #pragma warning(disable: 4512) // assignment operator could not be generated 12 | #pragma warning(disable: 4100) // unreferenced formal parameter 13 | #endif 14 | 15 | namespace rapidjson { 16 | 17 | //! Wrapper of std::istream for input. 18 | class GenericReadStream { 19 | public: 20 | typedef char Ch; //!< Character type (byte). 21 | 22 | //! Constructor. 23 | /*! 24 | \param is Input stream. 25 | */ 26 | GenericReadStream(std::istream & is) : is_(&is) { 27 | } 28 | 29 | 30 | Ch Peek() const { 31 | if(is_->eof()) return '\0'; 32 | return static_cast(is_->peek()); 33 | } 34 | 35 | Ch Take() { 36 | if(is_->eof()) return '\0'; 37 | return static_cast(is_->get()); 38 | } 39 | 40 | size_t Tell() const { 41 | return (int)is_->tellg(); 42 | } 43 | 44 | // Not implemented 45 | void Put(Ch) { RAPIDJSON_ASSERT(false); } 46 | void Flush() { RAPIDJSON_ASSERT(false); } 47 | Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } 48 | size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } 49 | 50 | std::istream * is_; 51 | }; 52 | 53 | 54 | //! Wrapper of std::ostream for output. 55 | class GenericWriteStream { 56 | public: 57 | typedef char Ch; //!< Character type. Only support char. 58 | 59 | //! Constructor 60 | /*! 61 | \param os Output stream. 
62 | */ 63 | GenericWriteStream(std::ostream& os) : os_(os) { 64 | } 65 | 66 | void Put(char c) { 67 | os_.put(c); 68 | } 69 | 70 | void PutN(char c, size_t n) { 71 | for (size_t i = 0; i < n; ++i) { 72 | Put(c); 73 | } 74 | } 75 | 76 | void Flush() { 77 | os_.flush(); 78 | } 79 | 80 | size_t Tell() const { 81 | return (int)os_.tellp(); 82 | } 83 | 84 | // Not implemented 85 | char Peek() const { RAPIDJSON_ASSERT(false); } 86 | char Take() { RAPIDJSON_ASSERT(false); } 87 | char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } 88 | size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; } 89 | 90 | private: 91 | std::ostream& os_; 92 | }; 93 | 94 | template<> 95 | inline void PutN(GenericWriteStream& stream, char c, size_t n) { 96 | stream.PutN(c, n); 97 | } 98 | 99 | } // namespace rapidjson 100 | 101 | // On MSVC, restore warnings state 102 | #ifdef _MSC_VER 103 | #pragma warning(pop) 104 | #endif 105 | #endif // RAPIDJSON_GENERICSTREAM_H_ 106 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/fully_connected_op_internal.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/core/params/fully_params.h" 11 | 12 | namespace tiny_dnn { 13 | namespace kernels { 14 | 15 | inline void fully_connected_op_internal(const tensor_t &in_data, 16 | const vec_t &W, 17 | const vec_t &bias, 18 | tensor_t &out_data, 19 | const fully_params ¶ms, 20 | const bool layer_parallelize) { 21 | for_i(layer_parallelize, in_data.size(), [&](int sample) { 22 | const vec_t &in = in_data[sample]; 23 | vec_t &out = out_data[sample]; 24 | 25 | for (serial_size_t i = 0; i < params.out_size_; i++) { 26 | out[i] = float_t{0}; 27 | for (serial_size_t c = 0; c < params.in_size_; c++) { 28 | out[i] += W[c * params.out_size_ + i] * in[c]; 29 | } 30 | 31 | if (params.has_bias_) { 32 | out[i] += bias[i]; 33 | } 34 | } 35 | }); 36 | } 37 | 38 | inline void fully_connected_op_internal(const tensor_t &prev_out, 39 | const vec_t &W, 40 | tensor_t &dW, 41 | tensor_t &db, 42 | tensor_t &curr_delta, 43 | tensor_t &prev_delta, 44 | const fully_params ¶ms, 45 | const bool layer_parallelize) { 46 | for (serial_size_t sample = 0; sample < prev_out.size(); sample++) { 47 | for (serial_size_t c = 0; c < params.in_size_; c++) { 48 | // propagate delta to previous layer 49 | // prev_delta[c] += current_delta[r] * W_[c * out_size_ + r] 50 | prev_delta[sample][c] += vectorize::dot( 51 | &curr_delta[sample][0], &W[c * params.out_size_], params.out_size_); 52 | } 53 | 54 | for_(layer_parallelize, 0, size_t(params.out_size_), 55 | [&](const blocked_range &r) { 56 | // accumulate weight-step using delta 57 | // dW[c * out_size + i] += current_delta[i] * prev_out[c] 58 | for (serial_size_t c = 0; c < params.in_size_; c++) { 59 | vectorize::muladd(&curr_delta[sample][r.begin()], 60 | prev_out[sample][c], r.end() - r.begin(), 61 | &dW[sample][c * params.out_size_ + r.begin()]); 62 | } 63 | 64 | if (params.has_bias_) { 65 | // vec_t& db = *in_grad[2]; 66 | for (int i = r.begin(); i < r.end(); i++) { 67 | db[sample][i] += curr_delta[sample][i]; 68 | } 
69 | } 70 | }); 71 | } 72 | } 73 | 74 | } // namespace kernels 75 | } // namespace tiny_dnn 76 | -------------------------------------------------------------------------------- /src/tiny_dnn/layers/linear_layer.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2013, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 7 | */ 8 | #pragma once 9 | 10 | #include 11 | 12 | #include "tiny_dnn/util/util.h" 13 | 14 | namespace tiny_dnn { 15 | 16 | /** 17 | * element-wise operation: ```f(x) = h(scale*x+bias)``` 18 | */ 19 | template 20 | class linear_layer : public feedforward_layer { 21 | public: 22 | CNN_USE_LAYER_MEMBERS; 23 | 24 | typedef feedforward_layer Base; 25 | 26 | /** 27 | * @param dim [in] number of elements 28 | * @param scale [in] factor by which to multiply 29 | * @param bias [in] bias term 30 | **/ 31 | explicit linear_layer(serial_size_t dim, 32 | float_t scale = float_t{1}, 33 | float_t bias = float_t{0}) 34 | : Base({vector_type::data}), dim_(dim), scale_(scale), bias_(bias) {} 35 | 36 | std::vector in_shape() const override { 37 | return {shape3d(dim_, 1, 1)}; 38 | } 39 | 40 | std::vector out_shape() const override { 41 | return {shape3d(dim_, 1, 1), shape3d(dim_, 1, 1)}; 42 | } 43 | 44 | std::string layer_type() const override { return "linear"; } 45 | 46 | void forward_propagation(const std::vector &in_data, 47 | std::vector &out_data) override { 48 | const tensor_t &in = *in_data[0]; 49 | tensor_t &out = *out_data[0]; 50 | tensor_t &a = *out_data[1]; 51 | 52 | // do nothing 53 | CNN_UNREFERENCED_PARAMETER(out); 54 | 55 | // @todo revise the parallelism strategy 56 | for_i(parallelize_, dim_, [&](int i) { 57 | for (serial_size_t sample = 0, 58 | sample_count = static_cast(in.size()); 59 | sample < sample_count; ++sample) 60 | a[sample][i] = scale_ * in[sample][i] + bias_; 61 | }); 62 | 
this->forward_activation(*out_data[0], *out_data[1]); 63 | } 64 | 65 | void back_propagation(const std::vector &in_data, 66 | const std::vector &out_data, 67 | std::vector &out_grad, 68 | std::vector &in_grad) override { 69 | tensor_t &prev_delta = *in_grad[0]; 70 | tensor_t &curr_delta = *out_grad[1]; 71 | 72 | CNN_UNREFERENCED_PARAMETER(in_data); 73 | 74 | this->backward_activation(*out_grad[0], *out_data[0], curr_delta); 75 | 76 | // @todo revise parallelism strategy 77 | for (serial_size_t sample = 0; 78 | sample < static_cast(prev_delta.size()); ++sample) { 79 | for_i(parallelize_, dim_, [&](int i) { 80 | prev_delta[sample][i] = curr_delta[sample][i] * scale_; 81 | }); 82 | } 83 | } 84 | 85 | #ifndef CNN_NO_SERIALIZATION 86 | friend struct serialization_buddy; 87 | #endif 88 | 89 | protected: 90 | serial_size_t dim_; 91 | float_t scale_, bias_; 92 | }; 93 | 94 | } // namespace tiny_dnn 95 | -------------------------------------------------------------------------------- /src/cereal/types/forward_list.hpp: -------------------------------------------------------------------------------- 1 | /*! \file forward_list.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 
18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_FORWARD_LIST_HPP_ 31 | #define CEREAL_TYPES_FORWARD_LIST_HPP_ 32 | 33 | #include 34 | #include 35 | 36 | namespace cereal 37 | { 38 | //! Saving for std::forward_list all other types 39 | template inline 40 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::forward_list const & forward_list ) 41 | { 42 | // write the size - note that this is slow because we need to traverse 43 | // the entire list. there are ways we could avoid this but this was chosen 44 | // since it works in the most general fashion with any archive type 45 | size_type const size = std::distance( forward_list.begin(), forward_list.end() ); 46 | 47 | ar( make_size_tag( size ) ); 48 | 49 | // write the list 50 | for( const auto & i : forward_list ) 51 | ar( i ); 52 | } 53 | 54 | //! 
Loading for std::forward_list all other types from 55 | template 56 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::forward_list & forward_list ) 57 | { 58 | size_type size; 59 | ar( make_size_tag( size ) ); 60 | 61 | forward_list.resize( static_cast( size ) ); 62 | 63 | for( auto & i : forward_list ) 64 | ar( i ); 65 | } 66 | } // namespace cereal 67 | 68 | #endif // CEREAL_TYPES_FORWARD_LIST_HPP_ 69 | -------------------------------------------------------------------------------- /src/cereal/types/string.hpp: -------------------------------------------------------------------------------- 1 | /*! \file string.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. 
IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_STRING_HPP_ 31 | #define CEREAL_TYPES_STRING_HPP_ 32 | 33 | #include 34 | #include 35 | 36 | namespace cereal 37 | { 38 | //! Serialization for basic_string types, if binary data is supported 39 | template inline 40 | typename std::enable_if, Archive>::value, void>::type 41 | CEREAL_SAVE_FUNCTION_NAME(Archive & ar, std::basic_string const & str) 42 | { 43 | // Save number of chars + the data 44 | ar( make_size_tag( static_cast(str.size()) ) ); 45 | ar( binary_data( str.data(), str.size() * sizeof(CharT) ) ); 46 | } 47 | 48 | //! Serialization for basic_string types, if binary data is supported 49 | template inline 50 | typename std::enable_if, Archive>::value, void>::type 51 | CEREAL_LOAD_FUNCTION_NAME(Archive & ar, std::basic_string & str) 52 | { 53 | size_type size; 54 | ar( make_size_tag( size ) ); 55 | str.resize(static_cast(size)); 56 | ar( binary_data( const_cast( str.data() ), static_cast(size) * sizeof(CharT) ) ); 57 | } 58 | } // namespace cereal 59 | 60 | #endif // CEREAL_TYPES_STRING_HPP_ 61 | 62 | -------------------------------------------------------------------------------- /src/cereal/details/polymorphic_impl_fwd.hpp: -------------------------------------------------------------------------------- 1 | /*! 
\file polymorphic_impl_fwd.hpp 2 | \brief Internal polymorphism support forward declarations 3 | \ingroup Internal */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | 31 | /* This code is heavily inspired by the boost serialization implementation by the following authors 32 | 33 | (C) Copyright 2002 Robert Ramey - http://www.rrsd.com . 34 | Use, modification and distribution is subject to the Boost Software 35 | License, Version 1.0. 
(See http://www.boost.org/LICENSE_1_0.txt) 36 | 37 | See http://www.boost.org for updates, documentation, and revision history. 38 | 39 | (C) Copyright 2006 David Abrahams - http://www.boost.org. 40 | 41 | See /boost/serialization/export.hpp and /boost/archive/detail/register_archive.hpp for their 42 | implementation. 43 | */ 44 | 45 | #ifndef CEREAL_DETAILS_POLYMORPHIC_IMPL_FWD_HPP_ 46 | #define CEREAL_DETAILS_POLYMORPHIC_IMPL_FWD_HPP_ 47 | 48 | namespace cereal 49 | { 50 | namespace detail 51 | { 52 | //! Forward declaration, see polymorphic_impl.hpp for more information 53 | template 54 | struct RegisterPolymorphicCaster; 55 | 56 | //! Forward declaration, see polymorphic_impl.hpp for more information 57 | struct PolymorphicCasters; 58 | 59 | //! Forward declaration, see polymorphic_impl.hpp for more information 60 | template 61 | struct PolymorphicRelation; 62 | } // namespace detail 63 | } // namespace cereal 64 | 65 | #endif // CEREAL_DETAILS_POLYMORPHIC_IMPL_FWD_HPP_ 66 | -------------------------------------------------------------------------------- /src/cereal/types/chrono.hpp: -------------------------------------------------------------------------------- 1 | /*! \file chrono.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 
15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_CHRONO_HPP_ 31 | #define CEREAL_TYPES_CHRONO_HPP_ 32 | 33 | #include 34 | 35 | namespace cereal 36 | { 37 | //! Saving std::chrono::duration 38 | template inline 39 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::chrono::duration const & dur ) 40 | { 41 | ar( CEREAL_NVP_("count", dur.count()) ); 42 | } 43 | 44 | //! Loading std::chrono::duration 45 | template inline 46 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::chrono::duration & dur ) 47 | { 48 | R count; 49 | ar( CEREAL_NVP_("count", count) ); 50 | 51 | dur = std::chrono::duration{count}; 52 | } 53 | 54 | //! Saving std::chrono::time_point 55 | template inline 56 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::chrono::time_point const & dur ) 57 | { 58 | ar( CEREAL_NVP_("time_since_epoch", dur.time_since_epoch()) ); 59 | } 60 | 61 | //! 
Loading std::chrono::time_point 62 | template inline 63 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::chrono::time_point & dur ) 64 | { 65 | D elapsed; 66 | ar( CEREAL_NVP_("time_since_epoch", elapsed) ); 67 | 68 | dur = std::chrono::time_point{elapsed}; 69 | } 70 | } // namespace cereal 71 | 72 | #endif // CEREAL_TYPES_CHRONO_HPP_ 73 | -------------------------------------------------------------------------------- /src/cereal/types/stack.hpp: -------------------------------------------------------------------------------- 1 | /*! \file stack.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. 
IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_STACK_HPP_ 31 | #define CEREAL_TYPES_STACK_HPP_ 32 | 33 | #include 34 | #include 35 | 36 | // The default container for stack is deque, so let's include that too 37 | #include 38 | 39 | namespace cereal 40 | { 41 | namespace stack_detail 42 | { 43 | //! Allows access to the protected container in stack 44 | template inline 45 | C const & container( std::stack const & stack ) 46 | { 47 | struct H : public std::stack 48 | { 49 | static C const & get( std::stack const & s ) 50 | { 51 | return s.*(&H::c); 52 | } 53 | }; 54 | 55 | return H::get( stack ); 56 | } 57 | } 58 | 59 | //! Saving for std::stack 60 | template inline 61 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::stack const & stack ) 62 | { 63 | ar( CEREAL_NVP_("container", stack_detail::container( stack )) ); 64 | } 65 | 66 | //! 
Loading for std::stack 67 | template inline 68 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::stack & stack ) 69 | { 70 | C container; 71 | ar( CEREAL_NVP_("container", container) ); 72 | stack = std::stack( std::move( container ) ); 73 | } 74 | } // namespace cereal 75 | 76 | #endif // CEREAL_TYPES_STACK_HPP_ 77 | -------------------------------------------------------------------------------- /src/regression.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "layers.h" 4 | #include "optimizers.h" 5 | 6 | // [[Rcpp::export]] 7 | SEXP net_seq_regression_fit( 8 | Rcpp::XPtr< tiny_dnn::network > net, 9 | Rcpp::NumericMatrix x, 10 | Rcpp::NumericMatrix y, 11 | int batch_size, 12 | int epochs, 13 | Rcpp::List opt, 14 | bool verbose 15 | ) 16 | { 17 | using namespace tiny_dnn; 18 | 19 | const int n = x.nrow(); 20 | const int px = x.ncol(); 21 | const int py = y.ncol(); 22 | 23 | std::vector input; 24 | std::vector output; 25 | 26 | input.reserve(n); 27 | output.reserve(n); 28 | 29 | // It looks like that currently tiny-dnn does not shuffle data 30 | // during training, so we provide a shuffled data set to tiny-dnn 31 | Rcpp::IntegerVector ind = Rcpp::sample(n, n, false, R_NilValue, false); 32 | 33 | // Copy data 34 | vec_t rowx(px); 35 | vec_t rowy(py); 36 | for(int i = 0; i < n; i++) 37 | { 38 | // Fill input 39 | for(int j = 0; j < px; j++) 40 | { 41 | rowx[j] = x(ind[i], j); 42 | } 43 | input.push_back(rowx); 44 | 45 | // Fill output 46 | if(py == 1) 47 | { 48 | rowy[0] = y[ind[i]]; 49 | } else { 50 | for(int j = 0; j < py; j++) 51 | { 52 | rowy[j] = y(ind[i], j); 53 | } 54 | } 55 | output.push_back(rowy); 56 | } 57 | 58 | std::shared_ptr opt_ptr = get_optimizer(opt); 59 | 60 | timer t; 61 | int epoch = 0; 62 | 63 | net->fit(*opt_ptr, input, output, batch_size, epochs, 64 | // called for each mini-batch 65 | []() { 66 | 67 | }, 68 | // called for each epoch 69 | [verbose, &t, &epoch]() { 
70 | if(verbose) 71 | { 72 | Rcpp::Rcout << "[Epoch " << epoch << "]: " << t.elapsed() << "s" << std::endl; 73 | t.restart(); 74 | epoch++; 75 | } 76 | } 77 | ); 78 | 79 | return R_NilValue; 80 | } 81 | 82 | 83 | 84 | // [[Rcpp::export]] 85 | Rcpp::NumericMatrix net_seq_regression_predict( 86 | Rcpp::XPtr< tiny_dnn::network > net, 87 | Rcpp::NumericMatrix x 88 | ) 89 | { 90 | using namespace tiny_dnn; 91 | 92 | const int n = x.nrow(); 93 | const int px = x.ncol(); 94 | const int py = net->out_data_size(); 95 | 96 | Rcpp::NumericMatrix pred(n, py); 97 | vec_t rowx(px); 98 | 99 | for(int i = 0; i < n; i++) 100 | { 101 | for(int j = 0; j < px; j++) 102 | { 103 | rowx[j] = x(i, j); 104 | } 105 | 106 | vec_t rowy = net->predict(rowx); 107 | if(py == 1) 108 | { 109 | pred[i] = rowy[0]; 110 | } else { 111 | for(int j = 0; j < py; j++) 112 | { 113 | pred(i, j) = rowy[j]; 114 | } 115 | } 116 | } 117 | 118 | return pred; 119 | } 120 | -------------------------------------------------------------------------------- /src/tiny_dnn/io/cifar10_parser.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2013, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | #include 10 | #include 11 | #include 12 | #include "tiny_dnn/util/util.h" 13 | 14 | #define CIFAR10_IMAGE_DEPTH (3) 15 | #define CIFAR10_IMAGE_WIDTH (32) 16 | #define CIFAR10_IMAGE_HEIGHT (32) 17 | #define CIFAR10_IMAGE_AREA (CIFAR10_IMAGE_WIDTH * CIFAR10_IMAGE_HEIGHT) 18 | #define CIFAR10_IMAGE_SIZE (CIFAR10_IMAGE_AREA * CIFAR10_IMAGE_DEPTH) 19 | 20 | namespace tiny_dnn { 21 | 22 | /** 23 | * parse CIFAR-10 database format images 24 | * 25 | * @param filename [in] filename of database(binary version) 26 | * @param train_images [out] parsed images 27 | * @param train_labels [out] parsed labels 28 | * @param scale_min [in] min-value of output 29 | * @param scale_max [in] max-value of output 30 | * @param x_padding [in] adding border width (left,right) 31 | * @param y_padding [in] adding border width (top,bottom) 32 | **/ 33 | inline void parse_cifar10(const std::string &filename, 34 | std::vector *train_images, 35 | std::vector *train_labels, 36 | float_t scale_min, 37 | float_t scale_max, 38 | int x_padding, 39 | int y_padding) { 40 | if (x_padding < 0 || y_padding < 0) 41 | throw nn_error("padding size must not be negative"); 42 | if (scale_min >= scale_max) 43 | throw nn_error("scale_max must be greater than scale_min"); 44 | 45 | std::ifstream ifs(filename.c_str(), std::ios::in | std::ios::binary); 46 | if (ifs.fail() || ifs.bad()) 47 | throw nn_error("failed to open file:" + filename); 48 | 49 | uint8_t label; 50 | std::vector buf(CIFAR10_IMAGE_SIZE); 51 | 52 | while (ifs.read(reinterpret_cast(&label), 1)) { 53 | vec_t img; 54 | 55 | if (!ifs.read(reinterpret_cast(&buf[0]), CIFAR10_IMAGE_SIZE)) break; 56 | 57 | if (x_padding || y_padding) { 58 | int w = CIFAR10_IMAGE_WIDTH + 2 * x_padding; 59 | int h = CIFAR10_IMAGE_HEIGHT + 2 * y_padding; 60 | 61 | img.resize(w * h * CIFAR10_IMAGE_DEPTH, scale_min); 62 | 63 | for (int c = 0; c < CIFAR10_IMAGE_DEPTH; c++) { 64 | for (int y = 0; y < CIFAR10_IMAGE_HEIGHT; y++) { 65 | for (int x = 0; x 
< CIFAR10_IMAGE_WIDTH; x++) { 66 | img[c * w * h + (y + y_padding) * w + x + x_padding] = 67 | scale_min + 68 | (scale_max - scale_min) * 69 | buf[c * CIFAR10_IMAGE_AREA + y * CIFAR10_IMAGE_WIDTH + x] / 255; 70 | } 71 | } 72 | } 73 | } else { 74 | std::transform(buf.begin(), buf.end(), std::back_inserter(img), 75 | [=](unsigned char c) { 76 | return scale_min + (scale_max - scale_min) * c / 255; 77 | }); 78 | } 79 | 80 | train_images->push_back(img); 81 | train_labels->push_back(label); 82 | } 83 | } 84 | 85 | } // namespace tiny_dnn 86 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/memorystream.h: -------------------------------------------------------------------------------- 1 | // Tencent is pleased to support the open source community by making RapidJSON available. 2 | // 3 | // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 4 | // 5 | // Licensed under the MIT License (the "License"); you may not use this file except 6 | // in compliance with the License. You may obtain a copy of the License at 7 | // 8 | // http://opensource.org/licenses/MIT 9 | // 10 | // Unless required by applicable law or agreed to in writing, software distributed 11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the 13 | // specific language governing permissions and limitations under the License. 
14 | 15 | #ifndef CEREAL_RAPIDJSON_MEMORYSTREAM_H_ 16 | #define CEREAL_RAPIDJSON_MEMORYSTREAM_H_ 17 | 18 | #include "stream.h" 19 | 20 | #ifdef __clang__ 21 | CEREAL_RAPIDJSON_DIAG_PUSH 22 | CEREAL_RAPIDJSON_DIAG_OFF(unreachable-code) 23 | CEREAL_RAPIDJSON_DIAG_OFF(missing-noreturn) 24 | #endif 25 | 26 | #ifdef _MSC_VER 27 | CEREAL_RAPIDJSON_DIAG_PUSH 28 | CEREAL_RAPIDJSON_DIAG_OFF( 4127 ) // ignore assert(false) for triggering exception 29 | #endif 30 | 31 | CEREAL_RAPIDJSON_NAMESPACE_BEGIN 32 | 33 | //! Represents an in-memory input byte stream. 34 | /*! 35 | This class is mainly for being wrapped by EncodedInputStream or AutoUTFInputStream. 36 | 37 | It is similar to FileReadBuffer but the source is an in-memory buffer instead of a file. 38 | 39 | Differences between MemoryStream and StringStream: 40 | 1. StringStream has encoding but MemoryStream is a byte stream. 41 | 2. MemoryStream needs size of the source buffer and the buffer don't need to be null terminated. StringStream assume null-terminated string as source. 42 | 3. MemoryStream supports Peek4() for encoding detection. StringStream is specified with an encoding so it should not have Peek4(). 43 | \note implements Stream concept 44 | */ 45 | struct MemoryStream { 46 | typedef char Ch; // byte 47 | 48 | MemoryStream(const Ch *src, size_t size) : src_(src), begin_(src), end_(src + size), size_(size) {} 49 | 50 | Ch Peek() const { return CEREAL_RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_; } 51 | Ch Take() { return CEREAL_RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_++; } 52 | size_t Tell() const { return static_cast(src_ - begin_); } 53 | 54 | Ch* PutBegin() { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 55 | void Put(Ch) { CEREAL_RAPIDJSON_ASSERT(false); } 56 | void Flush() { CEREAL_RAPIDJSON_ASSERT(false); } 57 | size_t PutEnd(Ch*) { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 58 | 59 | // For encoding detection only. 60 | const Ch* Peek4() const { 61 | return Tell() + 4 <= size_ ? 
src_ : 0; 62 | } 63 | 64 | const Ch* src_; //!< Current read position. 65 | const Ch* begin_; //!< Original head of the string. 66 | const Ch* end_; //!< End of stream. 67 | size_t size_; //!< Size of the stream. 68 | }; 69 | 70 | CEREAL_RAPIDJSON_NAMESPACE_END 71 | 72 | #if defined(__clang__) || defined(_MSC_VER) 73 | CEREAL_RAPIDJSON_DIAG_POP 74 | #endif 75 | 76 | #endif // CEREAL_RAPIDJSON_MEMORYBUFFER_H_ 77 | -------------------------------------------------------------------------------- /src/cereal/details/util.hpp: -------------------------------------------------------------------------------- 1 | /*! \file util.hpp 2 | \brief Internal misc utilities 3 | \ingroup Internal */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. 
IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_DETAILS_UTIL_HPP_ 31 | #define CEREAL_DETAILS_UTIL_HPP_ 32 | 33 | #include 34 | #include 35 | 36 | #ifdef _MSC_VER 37 | namespace cereal 38 | { 39 | namespace util 40 | { 41 | //! Demangles the type encoded in a string 42 | /*! @internal */ 43 | inline std::string demangle( std::string const & name ) 44 | { return name; } 45 | 46 | //! Gets the demangled name of a type 47 | /*! @internal */ 48 | template inline 49 | std::string demangledName() 50 | { return typeid( T ).name(); } 51 | } // namespace util 52 | } // namespace cereal 53 | #else // clang or gcc 54 | #include 55 | #include 56 | namespace cereal 57 | { 58 | namespace util 59 | { 60 | //! Demangles the type encoded in a string 61 | /*! @internal */ 62 | inline std::string demangle(std::string mangledName) 63 | { 64 | int status = 0; 65 | char *demangledName = nullptr; 66 | std::size_t len; 67 | 68 | demangledName = abi::__cxa_demangle(mangledName.c_str(), 0, &len, &status); 69 | 70 | std::string retName(demangledName); 71 | free(demangledName); 72 | 73 | return retName; 74 | } 75 | 76 | //! Gets the demangled name of a type 77 | /*! 
@internal */ 78 | template inline 79 | std::string demangledName() 80 | { return demangle(typeid(T).name()); } 81 | } 82 | } // namespace cereal 83 | #endif // clang or gcc branch of _MSC_VER 84 | #endif // CEREAL_DETAILS_UTIL_HPP_ 85 | -------------------------------------------------------------------------------- /src/tiny_dnn/util/target_cost.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2016, Juha Reunanen 2 | #pragma once 3 | 4 | #include // std::accumulate 5 | 6 | #include "tiny_dnn/util/util.h" 7 | 8 | namespace tiny_dnn { 9 | 10 | // calculate the number of samples for each class label 11 | // - for example, if there are 10 samples having label 0, and 12 | // 20 samples having label 1, returns a vector [10, 20] 13 | inline std::vector calculate_label_counts( 14 | const std::vector &t) { 15 | std::vector label_counts; 16 | for (label_t label : t) { 17 | if (label >= label_counts.size()) { 18 | label_counts.resize(label + 1); 19 | } 20 | label_counts[label]++; 21 | } 22 | assert(std::accumulate(label_counts.begin(), label_counts.end(), 23 | static_cast(0)) == t.size()); 24 | return label_counts; 25 | } 26 | 27 | // calculate the weight of a given sample needed for a balanced target cost 28 | // NB: we call a target cost matrix "balanced", if the cost of each *class* is 29 | // equal 30 | // (this happens when the product weight * sample count is equal between the 31 | // different 32 | // classes, and the sum of these products equals the total number of 33 | // samples) 34 | inline float_t get_sample_weight_for_balanced_target_cost( 35 | serial_size_t classes, 36 | serial_size_t total_samples, 37 | serial_size_t this_class_samples) { 38 | assert(this_class_samples <= total_samples); 39 | return total_samples / static_cast(classes * this_class_samples); 40 | } 41 | 42 | // create a target cost matrix implying equal cost for each *class* (distinct 43 | // label) 44 | // - by default, each *sample* has 
an equal cost, which means e.g. that a 45 | // classifier 46 | // may prefer to always guess the majority class (in case the degree of 47 | // imbalance 48 | // is relatively high, and the classification task is relatively difficult) 49 | // - the parameter w can be used to fine-tune the balance: 50 | // * use 0 to have an equal cost for each *sample* (equal to not supplying 51 | // any target costs at all) 52 | // * use 1 to have an equal cost for each *class* (default behaviour of this 53 | // function) 54 | // * use a value between 0 and 1 to have something between the two extremes 55 | inline std::vector create_balanced_target_cost( 56 | const std::vector &t, float_t w = 1.0) { 57 | const auto label_counts = calculate_label_counts(t); 58 | const serial_size_t total_sample_count = static_cast(t.size()); 59 | const serial_size_t class_count = 60 | static_cast(label_counts.size()); 61 | 62 | std::vector target_cost(t.size()); 63 | 64 | for (serial_size_t i = 0; i < total_sample_count; ++i) { 65 | vec_t &sample_cost = target_cost[i]; 66 | sample_cost.resize(class_count); 67 | const float_t balanced_weight = get_sample_weight_for_balanced_target_cost( 68 | class_count, total_sample_count, label_counts[t[i]]); 69 | const float_t unbalanced_weight = 1; 70 | const float_t sample_weight = 71 | w * balanced_weight + (1 - w) * unbalanced_weight; 72 | std::fill(sample_cost.begin(), sample_cost.end(), sample_weight); 73 | } 74 | 75 | return target_cost; 76 | } 77 | 78 | } // namespace tiny_dnn 79 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/conv2d_op_nnpack.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi, Edgar Riba 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/core/backend.h" 11 | #include "tiny_dnn/core/params/conv_params.h" 12 | 13 | namespace tiny_dnn { 14 | namespace kernels { 15 | 16 | inline void conv2d_op_nnpack(const tensor_t &in_data, 17 | const vec_t &W, 18 | const vec_t &bias, 19 | tensor_t &out_data, 20 | const core::conv_params ¶ms) { 21 | #ifdef CNN_USE_NNPACK 22 | // call singleton to initialize NNPACK 23 | core::NNPackInitializer::getInstance().initialize(); 24 | 25 | const auto algorithm = core::nnp_algorithm(); 26 | const auto kernel_transform_strategy = core::nnp_kts(); 27 | 28 | const serial_size_t input_channels = params.in.depth_; 29 | const serial_size_t output_channels = params.out.depth_; 30 | 31 | // input data passed by convolution layer has been padded already 32 | // set input_size to padded size 33 | const nnp_size input_size = {static_cast(params.in_padded.width_), 34 | static_cast(params.in_padded.height_)}; 35 | 36 | const nnp_size kernel_size = {static_cast(params.weight.width_), 37 | static_cast(params.weight.height_)}; 38 | 39 | // input padded ,so no need to do padding 40 | const float_t dx{0.0}; // params.in_padded.width_ - params.in.width_; 41 | const float_t dy{0.0}; // params.in_padded.height_ - params.in.height_; 42 | 43 | // we'll assume that padding is symmetric 44 | 45 | const nnp_padding padding = { 46 | static_cast(dy / 2), // top 47 | static_cast(dx / 2), // right 48 | static_cast(dy / 2), // bottom 49 | static_cast(dx / 2) // left 50 | }; 51 | 52 | const nnp_size stride = {static_cast(params.w_stride), 53 | static_cast(params.h_stride)}; 54 | 55 | const float *input_ptr = in_data[0].data(); 56 | const float *kernel_ptr = W.data(); 57 | const float *bias_ptr = bias.data(); 58 | 59 | float *output_ptr = out_data[0].data(); 60 | 61 | // TODO: embed it into a class 62 | const size_t num_mkl_threads = 1; 63 | pthreadpool_t threadpool = pthreadpool_create(num_mkl_threads); 64 | 65 | nnp_profile *profile = nullptr; 66 | 67 | 
nnp_status status = nnp_convolution_inference( 68 | algorithm, kernel_transform_strategy, input_channels, output_channels, 69 | input_size, padding, kernel_size, stride, input_ptr, kernel_ptr, bias_ptr, 70 | output_ptr, threadpool, profile); 71 | 72 | if (status != nnp_status_success) { 73 | throw nn_error("Could not succeed with nnp_convolution_inference"); 74 | } 75 | 76 | // TODO: embed it into a class 77 | pthreadpool_destroy(threadpool); 78 | #else 79 | CNN_UNREFERENCED_PARAMETER(in_data); 80 | CNN_UNREFERENCED_PARAMETER(W); 81 | CNN_UNREFERENCED_PARAMETER(bias); 82 | CNN_UNREFERENCED_PARAMETER(out_data); 83 | CNN_UNREFERENCED_PARAMETER(params); 84 | throw nn_error("TinyDNN has not been compiled with NNPACK support."); 85 | #endif 86 | } 87 | 88 | } // namespace kernels 89 | } // namespace tiny_dnn 90 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/internal/ieee754.h: -------------------------------------------------------------------------------- 1 | // Tencent is pleased to support the open source community by making RapidJSON available. 2 | // 3 | // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 4 | // 5 | // Licensed under the MIT License (the "License"); you may not use this file except 6 | // in compliance with the License. You may obtain a copy of the License at 7 | // 8 | // http://opensource.org/licenses/MIT 9 | // 10 | // Unless required by applicable law or agreed to in writing, software distributed 11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the 13 | // specific language governing permissions and limitations under the License. 
14 | 15 | #ifndef CEREAL_RAPIDJSON_IEEE754_ 16 | #define CEREAL_RAPIDJSON_IEEE754_ 17 | 18 | #include "../rapidjson.h" 19 | 20 | CEREAL_RAPIDJSON_NAMESPACE_BEGIN 21 | namespace internal { 22 | 23 | class Double { 24 | public: 25 | Double() {} 26 | Double(double d) : d_(d) {} 27 | Double(uint64_t u) : u_(u) {} 28 | 29 | double Value() const { return d_; } 30 | uint64_t Uint64Value() const { return u_; } 31 | 32 | double NextPositiveDouble() const { 33 | CEREAL_RAPIDJSON_ASSERT(!Sign()); 34 | return Double(u_ + 1).Value(); 35 | } 36 | 37 | bool Sign() const { return (u_ & kSignMask) != 0; } 38 | uint64_t Significand() const { return u_ & kSignificandMask; } 39 | int Exponent() const { return static_cast(((u_ & kExponentMask) >> kSignificandSize) - kExponentBias); } 40 | 41 | bool IsNan() const { return (u_ & kExponentMask) == kExponentMask && Significand() != 0; } 42 | bool IsInf() const { return (u_ & kExponentMask) == kExponentMask && Significand() == 0; } 43 | bool IsNanOrInf() const { return (u_ & kExponentMask) == kExponentMask; } 44 | bool IsNormal() const { return (u_ & kExponentMask) != 0 || Significand() == 0; } 45 | bool IsZero() const { return (u_ & (kExponentMask | kSignificandMask)) == 0; } 46 | 47 | uint64_t IntegerSignificand() const { return IsNormal() ? Significand() | kHiddenBit : Significand(); } 48 | int IntegerExponent() const { return (IsNormal() ? Exponent() : kDenormalExponent) - kSignificandSize; } 49 | uint64_t ToBias() const { return (u_ & kSignMask) ? 
~u_ + 1 : u_ | kSignMask; } 50 | 51 | static unsigned EffectiveSignificandSize(int order) { 52 | if (order >= -1021) 53 | return 53; 54 | else if (order <= -1074) 55 | return 0; 56 | else 57 | return static_cast(order) + 1074; 58 | } 59 | 60 | private: 61 | static const int kSignificandSize = 52; 62 | static const int kExponentBias = 0x3FF; 63 | static const int kDenormalExponent = 1 - kExponentBias; 64 | static const uint64_t kSignMask = CEREAL_RAPIDJSON_UINT64_C2(0x80000000, 0x00000000); 65 | static const uint64_t kExponentMask = CEREAL_RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000); 66 | static const uint64_t kSignificandMask = CEREAL_RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF); 67 | static const uint64_t kHiddenBit = CEREAL_RAPIDJSON_UINT64_C2(0x00100000, 0x00000000); 68 | 69 | union { 70 | double d_; 71 | uint64_t u_; 72 | }; 73 | }; 74 | 75 | } // namespace internal 76 | CEREAL_RAPIDJSON_NAMESPACE_END 77 | 78 | #endif // CEREAL_RAPIDJSON_IEEE754_ 79 | -------------------------------------------------------------------------------- /src/network.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "layers.h" 4 | #include "utils.h" 5 | 6 | // [[Rcpp::export]] 7 | Rcpp::XPtr< tiny_dnn::network > net_seq_constructor(std::string name) 8 | { 9 | using namespace tiny_dnn; 10 | 11 | network* net = new network(name); 12 | 13 | return Rcpp::XPtr< network >(net); 14 | } 15 | 16 | // [[Rcpp::export]] 17 | std::string net_seq_name(Rcpp::XPtr< tiny_dnn::network > net) 18 | { 19 | return net->name(); 20 | } 21 | 22 | // [[Rcpp::export]] 23 | int net_seq_layer_size(Rcpp::XPtr< tiny_dnn::network > net) 24 | { 25 | return net->layer_size(); 26 | } 27 | 28 | // [[Rcpp::export]] 29 | int net_seq_out_data_size(Rcpp::XPtr< tiny_dnn::network > net) 30 | { 31 | return net->out_data_size(); 32 | } 33 | 34 | // [[Rcpp::export]] 35 | int net_seq_in_data_size(Rcpp::XPtr< tiny_dnn::network > net) 36 | { 37 | return 
net->in_data_size(); 38 | } 39 | 40 | 41 | 42 | // [[Rcpp::export]] 43 | SEXP net_seq_add_layer(Rcpp::XPtr< tiny_dnn::network > net, 44 | Rcpp::List layer) 45 | { 46 | int id = layer["layer_id"]; 47 | switch(id) 48 | { 49 | case 0: 50 | add_layer_fully_connected(net, layer); 51 | break; 52 | case 1: 53 | add_layer_convolutional(net, layer); 54 | break; 55 | case 2: 56 | add_layer_average_pooling(net, layer); 57 | break; 58 | case 3: 59 | add_layer_max_pooling(net, layer); 60 | break; 61 | default: 62 | Rcpp::stop("unimplemented layer type"); 63 | } 64 | 65 | return R_NilValue; 66 | } 67 | 68 | // [[Rcpp::export]] 69 | Rcpp::List net_seq_get_weights(Rcpp::XPtr< tiny_dnn::network > net) 70 | { 71 | using tiny_dnn::vec_t; 72 | using tiny_dnn::layer; 73 | 74 | int layer_size = net->layer_size(); 75 | Rcpp::List res(layer_size); 76 | 77 | for(int i = 0; i < layer_size; i++) 78 | { 79 | const layer* net_layer = (*net)[i]; 80 | std::string type = net_layer->layer_type(); 81 | std::vector params = net_layer->weights(); 82 | int nparam = params.size(); 83 | 84 | // For fully-connected layers, output type, weights, and bias 85 | if(type == "fully-connected") 86 | { 87 | Rcpp::List lst = Rcpp::List::create( 88 | Rcpp::Named("type") = type, 89 | Rcpp::Named("weights") = vec_t_to_rcpp_matrix( 90 | params[0], 91 | net_layer->out_data_size(), 92 | net_layer->in_data_size() 93 | ), 94 | Rcpp::Named("bias") = vec_t_to_rcpp_vector(params[1]) 95 | ); 96 | res[i] = lst; 97 | } else { 98 | // (Currently) for other layers, output general parameters 99 | Rcpp::List lst(nparam + 1); 100 | lst[0] = type; 101 | for(int j = 0; j < nparam; j++) 102 | { 103 | lst[j + 1] = vec_t_to_rcpp_vector(params[j]); 104 | } 105 | res[i] = lst; 106 | } 107 | } 108 | 109 | return res; 110 | } 111 | 112 | -------------------------------------------------------------------------------- /src/cereal/types/concepts/pair_associative_container.hpp: 
-------------------------------------------------------------------------------- 1 | /*! \file pair_associative_container.hpp 2 | \brief Support for the PairAssociativeContainer refinement of the 3 | AssociativeContainer concept. 4 | \ingroup TypeConcepts */ 5 | /* 6 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 7 | All rights reserved. 8 | 9 | Redistribution and use in source and binary forms, with or without 10 | modification, are permitted provided that the following conditions are met: 11 | * Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | * Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in the 15 | documentation and/or other materials provided with the distribution. 16 | * Neither the name of cereal nor the 17 | names of its contributors may be used to endorse or promote products 18 | derived from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 21 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 22 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 24 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 25 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 27 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 29 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | */ 31 | #ifndef CEREAL_CONCEPTS_PAIR_ASSOCIATIVE_CONTAINER_HPP_ 32 | #define CEREAL_CONCEPTS_PAIR_ASSOCIATIVE_CONTAINER_HPP_ 33 | 34 | #include 35 | 36 | namespace cereal 37 | { 38 | //! Saving for std-like pair associative containers 39 | template class Map, typename... Args, typename = typename Map::mapped_type> inline 40 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, Map const & map ) 41 | { 42 | ar( make_size_tag( static_cast(map.size()) ) ); 43 | 44 | for( const auto & i : map ) 45 | ar( make_map_item(i.first, i.second) ); 46 | } 47 | 48 | //! Loading for std-like pair associative containers 49 | template class Map, typename... Args, typename = typename Map::mapped_type> inline 50 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, Map & map ) 51 | { 52 | size_type size; 53 | ar( make_size_tag( size ) ); 54 | 55 | map.clear(); 56 | 57 | auto hint = map.begin(); 58 | for( size_t i = 0; i < size; ++i ) 59 | { 60 | typename Map::key_type key; 61 | typename Map::mapped_type value; 62 | 63 | ar( make_map_item(key, value) ); 64 | #ifdef CEREAL_OLDER_GCC 65 | hint = map.insert( hint, std::make_pair(std::move(key), std::move(value)) ); 66 | #else // NOT CEREAL_OLDER_GCC 67 | hint = map.emplace_hint( hint, std::move( key ), std::move( value ) ); 68 | #endif // NOT CEREAL_OLDER_GCC 69 | } 70 | } 71 | } // namespace cereal 72 | 73 | #endif // CEREAL_CONCEPTS_PAIR_ASSOCIATIVE_CONTAINER_HPP_ 74 | -------------------------------------------------------------------------------- /src/tiny_dnn/util/aligned_allocator.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | #include 10 | #ifdef _WIN32 11 | #include 12 | #endif 13 | #ifdef __MINGW32__ 14 | #include 15 | #endif 16 | #include "nn_error.h" 17 | 18 | namespace tiny_dnn { 19 | 20 | template 21 | class aligned_allocator { 22 | public: 23 | typedef T value_type; 24 | typedef T *pointer; 25 | typedef std::size_t size_type; 26 | typedef std::ptrdiff_t difference_type; 27 | typedef T &reference; 28 | typedef const T &const_reference; 29 | typedef const T *const_pointer; 30 | 31 | template 32 | struct rebind { 33 | typedef aligned_allocator other; 34 | }; 35 | 36 | aligned_allocator() {} 37 | 38 | template 39 | aligned_allocator(const aligned_allocator &) {} 40 | 41 | const_pointer address(const_reference value) const { 42 | return std::addressof(value); 43 | } 44 | 45 | pointer address(reference value) const { return std::addressof(value); } 46 | 47 | pointer allocate(size_type size, const void * = nullptr) { 48 | void *p = aligned_alloc(alignment, sizeof(T) * size); 49 | if (!p && size > 0) throw nn_error("failed to allocate"); 50 | return static_cast(p); 51 | } 52 | 53 | size_type max_size() const { 54 | return ~static_cast(0) / sizeof(T); 55 | } 56 | 57 | void deallocate(pointer ptr, size_type) { aligned_free(ptr); } 58 | 59 | template 60 | void construct(U *ptr, const V &value) { 61 | void *p = ptr; 62 | ::new (p) U(value); 63 | } 64 | 65 | #if defined(_MSC_VER) && _MSC_VER <= 1800 66 | // -vc2013 doesn't support variadic templates 67 | #else 68 | template 69 | void construct(U *ptr, Args &&... 
args) { 70 | void *p = ptr; 71 | ::new (p) U(std::forward(args)...); 72 | } 73 | #endif 74 | 75 | template 76 | void construct(U *ptr) { 77 | void *p = ptr; 78 | ::new (p) U(); 79 | } 80 | 81 | template 82 | void destroy(U *ptr) { 83 | ptr->~U(); 84 | } 85 | 86 | private: 87 | void *aligned_alloc(size_type align, size_type size) const { 88 | #if defined(_MSC_VER) 89 | return ::_aligned_malloc(size, align); 90 | #elif defined(__ANDROID__) 91 | return ::memalign(align, size); 92 | #elif defined(__MINGW32__) 93 | return ::_mm_malloc(size, align); 94 | #else // posix assumed 95 | void *p; 96 | if (::posix_memalign(&p, align, size) != 0) { 97 | p = 0; 98 | } 99 | return p; 100 | #endif 101 | } 102 | 103 | void aligned_free(pointer ptr) { 104 | #if defined(_MSC_VER) 105 | ::_aligned_free(ptr); 106 | #elif defined(__MINGW32__) 107 | ::_mm_free(ptr); 108 | #else 109 | ::free(ptr); 110 | #endif 111 | } 112 | }; 113 | 114 | template 115 | inline bool operator==(const aligned_allocator &, 116 | const aligned_allocator &) { 117 | return true; 118 | } 119 | 120 | template 121 | inline bool operator!=(const aligned_allocator &, 122 | const aligned_allocator &) { 123 | return false; 124 | } 125 | 126 | } // namespace tiny_dnn 127 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/filereadstream.h: -------------------------------------------------------------------------------- 1 | // Tencent is pleased to support the open source community by making RapidJSON available. 2 | // 3 | // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 4 | // 5 | // Licensed under the MIT License (the "License"); you may not use this file except 6 | // in compliance with the License. 
You may obtain a copy of the License at 7 | // 8 | // http://opensource.org/licenses/MIT 9 | // 10 | // Unless required by applicable law or agreed to in writing, software distributed 11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the 13 | // specific language governing permissions and limitations under the License. 14 | 15 | #ifndef CEREAL_RAPIDJSON_FILEREADSTREAM_H_ 16 | #define CEREAL_RAPIDJSON_FILEREADSTREAM_H_ 17 | 18 | #include "stream.h" 19 | #include 20 | 21 | #ifdef __clang__ 22 | CEREAL_RAPIDJSON_DIAG_PUSH 23 | CEREAL_RAPIDJSON_DIAG_OFF(padded) 24 | CEREAL_RAPIDJSON_DIAG_OFF(unreachable-code) 25 | CEREAL_RAPIDJSON_DIAG_OFF(missing-noreturn) 26 | #endif 27 | 28 | CEREAL_RAPIDJSON_NAMESPACE_BEGIN 29 | 30 | //! File byte stream for input using fread(). 31 | /*! 32 | \note implements Stream concept 33 | */ 34 | class FileReadStream { 35 | public: 36 | typedef char Ch; //!< Character type (byte). 37 | 38 | //! Constructor. 39 | /*! 40 | \param fp File pointer opened for read. 41 | \param buffer user-supplied buffer. 42 | \param bufferSize size of buffer in bytes. Must >=4 bytes. 
43 | */ 44 | FileReadStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) { 45 | CEREAL_RAPIDJSON_ASSERT(fp_ != 0); 46 | CEREAL_RAPIDJSON_ASSERT(bufferSize >= 4); 47 | Read(); 48 | } 49 | 50 | Ch Peek() const { return *current_; } 51 | Ch Take() { Ch c = *current_; Read(); return c; } 52 | size_t Tell() const { return count_ + static_cast(current_ - buffer_); } 53 | 54 | // Not implemented 55 | void Put(Ch) { CEREAL_RAPIDJSON_ASSERT(false); } 56 | void Flush() { CEREAL_RAPIDJSON_ASSERT(false); } 57 | Ch* PutBegin() { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 58 | size_t PutEnd(Ch*) { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 59 | 60 | // For encoding detection only. 61 | const Ch* Peek4() const { 62 | return (current_ + 4 <= bufferLast_) ? current_ : 0; 63 | } 64 | 65 | private: 66 | void Read() { 67 | if (current_ < bufferLast_) 68 | ++current_; 69 | else if (!eof_) { 70 | count_ += readCount_; 71 | readCount_ = fread(buffer_, 1, bufferSize_, fp_); 72 | bufferLast_ = buffer_ + readCount_ - 1; 73 | current_ = buffer_; 74 | 75 | if (readCount_ < bufferSize_) { 76 | buffer_[readCount_] = '\0'; 77 | ++bufferLast_; 78 | eof_ = true; 79 | } 80 | } 81 | } 82 | 83 | std::FILE* fp_; 84 | Ch *buffer_; 85 | size_t bufferSize_; 86 | Ch *bufferLast_; 87 | Ch *current_; 88 | size_t readCount_; 89 | size_t count_; //!< Number of characters read 90 | bool eof_; 91 | }; 92 | 93 | CEREAL_RAPIDJSON_NAMESPACE_END 94 | 95 | #ifdef __clang__ 96 | CEREAL_RAPIDJSON_DIAG_POP 97 | #endif 98 | 99 | #endif // CEREAL_RAPIDJSON_FILESTREAM_H_ 100 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/maxpool_grad_op.h: -------------------------------------------------------------------------------- 1 | /* 2 | COPYRIGHT 3 | 4 | All contributions by Taiga Nomi 5 | Copyright (c) 2013, Taiga Nomi 6 | All rights 
reserved. 7 | 8 | All other contributions: 9 | Copyright (c) 2013-2016, the respective contributors. 10 | All rights reserved. 11 | 12 | Each contributor holds copyright over their respective contributions. 13 | The project versioning (Git) records all such contribution source 14 | information. 15 | 16 | LICENSE 17 | 18 | The BSD 3-Clause License 19 | 20 | 21 | Redistribution and use in source and binary forms, with or without 22 | modification, are permitted provided that the following conditions are met: 23 | 24 | * Redistributions of source code must retain the above copyright notice, 25 | this 26 | list of conditions and the following disclaimer. 27 | 28 | * Redistributions in binary form must reproduce the above copyright notice, 29 | this list of conditions and the following disclaimer in the documentation 30 | and/or other materials provided with the distribution. 31 | 32 | * Neither the name of tiny-dnn nor the names of its 33 | contributors may be used to endorse or promote products derived from 34 | this software without specific prior written permission. 35 | 36 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 37 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 38 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 39 | ARE 40 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 41 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 42 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 43 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 44 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 45 | LIABILITY, 46 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 47 | USE 48 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
49 | */ 50 | #pragma once 51 | 52 | #include "tiny_dnn/core/framework/op_kernel.h" 53 | 54 | #include "tiny_dnn/core/kernels/maxpool_op_avx.h" 55 | #include "tiny_dnn/core/kernels/maxpool_op_internal.h" 56 | 57 | namespace tiny_dnn { 58 | 59 | class MaxPoolGradOp : public core::OpKernel { 60 | public: 61 | explicit MaxPoolGradOp(const core::OpKernelConstruction &context) 62 | : core::OpKernel(context) {} 63 | 64 | void compute(const core::OpKernelContext &context) override { 65 | auto ¶ms = OpKernel::params_->maxpool(); 66 | 67 | // incoming/outcoming data 68 | tensor_t &prev_delta = context.input_grad(0); 69 | tensor_t &curr_delta = context.output_grad(1); 70 | 71 | // initialize outputs 72 | fill_tensor(prev_delta, float_t{0}); 73 | 74 | // call the algorithm depending on the selected engine type 75 | 76 | const core::backend_t engine = context.engine(); 77 | 78 | if (engine == core::backend_t::internal) { 79 | kernels::maxpool_grad_op_internal(prev_delta, curr_delta, 80 | params.out2inmax, params.in2out, 81 | context.parallelize()); 82 | } else if (engine == core::backend_t::avx) { 83 | kernels::maxpool_grad_op_avx(prev_delta, curr_delta, params.out2inmax, 84 | params.in2out, context.parallelize()); 85 | } else { 86 | throw nn_error("Not supported engine: " + to_string(engine)); 87 | } 88 | } 89 | }; 90 | 91 | } // namespace tiny_dnn 92 | -------------------------------------------------------------------------------- /src/tiny_dnn/util/weight_init.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2015, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/util/util.h" 11 | 12 | namespace tiny_dnn { 13 | namespace weight_init { 14 | 15 | class function { 16 | public: 17 | virtual void fill(vec_t* weight, 18 | serial_size_t fan_in, 19 | serial_size_t fan_out) = 0; 20 | }; 21 | 22 | class scalable : public function { 23 | public: 24 | explicit scalable(float_t value) : scale_(value) {} 25 | 26 | void scale(float_t value) { scale_ = value; } 27 | 28 | protected: 29 | float_t scale_; 30 | }; 31 | 32 | /** 33 | * Use fan-in and fan-out for scaling 34 | * 35 | * X Glorot, Y Bengio, 36 | * Understanding the difficulty of training deep feedforward neural networks 37 | * Proc. AISTATS 10, May 2010, vol.9, pp249-256 38 | **/ 39 | class xavier : public scalable { 40 | public: 41 | xavier() : scalable(float_t(6)) {} 42 | explicit xavier(float_t value) : scalable(value) {} 43 | 44 | void fill(vec_t* weight, 45 | serial_size_t fan_in, 46 | serial_size_t fan_out) override { 47 | const float_t weight_base = std::sqrt(scale_ / (fan_in + fan_out)); 48 | 49 | uniform_rand(weight->begin(), weight->end(), -weight_base, weight_base); 50 | } 51 | }; 52 | 53 | /** 54 | * Use fan-in(number of input weight for each neuron) for scaling 55 | * 56 | * Y LeCun, L Bottou, G B Orr, and K Muller, 57 | * Efficient backprop 58 | * Neural Networks, Tricks of the Trade, Springer, 1998 59 | **/ 60 | class lecun : public scalable { 61 | public: 62 | lecun() : scalable(float_t{1}) {} 63 | explicit lecun(float_t value) : scalable(value) {} 64 | 65 | void fill(vec_t* weight, 66 | serial_size_t fan_in, 67 | serial_size_t fan_out) override { 68 | CNN_UNREFERENCED_PARAMETER(fan_out); 69 | 70 | const float_t weight_base = scale_ / std::sqrt(float_t(fan_in)); 71 | 72 | uniform_rand(weight->begin(), weight->end(), -weight_base, weight_base); 73 | } 74 | }; 75 | 76 | class gaussian : public scalable { 77 | public: 78 | gaussian() : scalable(float_t{1}) {} 79 | explicit gaussian(float_t sigma) : scalable(sigma) {} 
80 | 81 | void fill(vec_t* weight, 82 | serial_size_t fan_in, 83 | serial_size_t fan_out) override { 84 | CNN_UNREFERENCED_PARAMETER(fan_in); 85 | CNN_UNREFERENCED_PARAMETER(fan_out); 86 | 87 | gaussian_rand(weight->begin(), weight->end(), float_t{0}, scale_); 88 | } 89 | }; 90 | 91 | class constant : public scalable { 92 | public: 93 | constant() : scalable(float_t{0}) {} 94 | explicit constant(float_t value) : scalable(value) {} 95 | 96 | void fill(vec_t* weight, 97 | serial_size_t fan_in, 98 | serial_size_t fan_out) override { 99 | CNN_UNREFERENCED_PARAMETER(fan_in); 100 | CNN_UNREFERENCED_PARAMETER(fan_out); 101 | 102 | std::fill(weight->begin(), weight->end(), scale_); 103 | } 104 | }; 105 | 106 | class he : public scalable { 107 | public: 108 | he() : scalable(float_t{2}) {} 109 | explicit he(float_t value) : scalable(value) {} 110 | 111 | void fill(vec_t* weight, 112 | serial_size_t fan_in, 113 | serial_size_t fan_out) override { 114 | CNN_UNREFERENCED_PARAMETER(fan_out); 115 | 116 | const float_t sigma = std::sqrt(scale_ / fan_in); 117 | 118 | gaussian_rand(weight->begin(), weight->end(), float_t{0}, sigma); 119 | } 120 | }; 121 | 122 | } // namespace weight_init 123 | } // namespace tiny_dnn 124 | -------------------------------------------------------------------------------- /src/cereal/external/rapidjson/filewritestream.h: -------------------------------------------------------------------------------- 1 | // Tencent is pleased to support the open source community by making RapidJSON available. 2 | // 3 | // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 4 | // 5 | // Licensed under the MIT License (the "License"); you may not use this file except 6 | // in compliance with the License. 
You may obtain a copy of the License at 7 | // 8 | // http://opensource.org/licenses/MIT 9 | // 10 | // Unless required by applicable law or agreed to in writing, software distributed 11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the 13 | // specific language governing permissions and limitations under the License. 14 | 15 | #ifndef CEREAL_RAPIDJSON_FILEWRITESTREAM_H_ 16 | #define CEREAL_RAPIDJSON_FILEWRITESTREAM_H_ 17 | 18 | #include "stream.h" 19 | #include 20 | 21 | #ifdef __clang__ 22 | CEREAL_RAPIDJSON_DIAG_PUSH 23 | CEREAL_RAPIDJSON_DIAG_OFF(unreachable-code) 24 | #endif 25 | 26 | CEREAL_RAPIDJSON_NAMESPACE_BEGIN 27 | 28 | //! Wrapper of C file stream for input using fread(). 29 | /*! 30 | \note implements Stream concept 31 | */ 32 | class FileWriteStream { 33 | public: 34 | typedef char Ch; //!< Character type. Only support char. 35 | 36 | FileWriteStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferEnd_(buffer + bufferSize), current_(buffer_) { 37 | CEREAL_RAPIDJSON_ASSERT(fp_ != 0); 38 | } 39 | 40 | void Put(char c) { 41 | if (current_ >= bufferEnd_) 42 | Flush(); 43 | 44 | *current_++ = c; 45 | } 46 | 47 | void PutN(char c, size_t n) { 48 | size_t avail = static_cast(bufferEnd_ - current_); 49 | while (n > avail) { 50 | std::memset(current_, c, avail); 51 | current_ += avail; 52 | Flush(); 53 | n -= avail; 54 | avail = static_cast(bufferEnd_ - current_); 55 | } 56 | 57 | if (n > 0) { 58 | std::memset(current_, c, n); 59 | current_ += n; 60 | } 61 | } 62 | 63 | void Flush() { 64 | if (current_ != buffer_) { 65 | size_t result = fwrite(buffer_, 1, static_cast(current_ - buffer_), fp_); 66 | if (result < static_cast(current_ - buffer_)) { 67 | // failure deliberately ignored at this time 68 | // added to avoid warn_unused_result build errors 69 | } 70 | current_ = buffer_; 71 | } 72 | } 73 | 74 | // Not 
implemented 75 | char Peek() const { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 76 | char Take() { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 77 | size_t Tell() const { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 78 | char* PutBegin() { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 79 | size_t PutEnd(char*) { CEREAL_RAPIDJSON_ASSERT(false); return 0; } 80 | 81 | private: 82 | // Prohibit copy constructor & assignment operator. 83 | FileWriteStream(const FileWriteStream&); 84 | FileWriteStream& operator=(const FileWriteStream&); 85 | 86 | std::FILE* fp_; 87 | char *buffer_; 88 | char *bufferEnd_; 89 | char *current_; 90 | }; 91 | 92 | //! Implement specialized version of PutN() with memset() for better performance. 93 | template<> 94 | inline void PutN(FileWriteStream& stream, char c, size_t n) { 95 | stream.PutN(c, n); 96 | } 97 | 98 | CEREAL_RAPIDJSON_NAMESPACE_END 99 | 100 | #ifdef __clang__ 101 | CEREAL_RAPIDJSON_DIAG_POP 102 | #endif 103 | 104 | #endif // CEREAL_RAPIDJSON_FILESTREAM_H_ 105 | -------------------------------------------------------------------------------- /src/cereal/types/array.hpp: -------------------------------------------------------------------------------- 1 | /*! \file array.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 
15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_ARRAY_HPP_ 31 | #define CEREAL_TYPES_ARRAY_HPP_ 32 | 33 | #include 34 | #include 35 | 36 | namespace cereal 37 | { 38 | //! Saving for std::array primitive types 39 | //! using binary serialization, if supported 40 | template inline 41 | typename std::enable_if, Archive>::value 42 | && std::is_arithmetic::value, void>::type 43 | CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::array const & array ) 44 | { 45 | ar( binary_data( array.data(), sizeof(array) ) ); 46 | } 47 | 48 | //! Loading for std::array primitive types 49 | //! using binary serialization, if supported 50 | template inline 51 | typename std::enable_if, Archive>::value 52 | && std::is_arithmetic::value, void>::type 53 | CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::array & array ) 54 | { 55 | ar( binary_data( array.data(), sizeof(array) ) ); 56 | } 57 | 58 | //! 
Saving for std::array all other types 59 | template inline 60 | typename std::enable_if, Archive>::value 61 | || !std::is_arithmetic::value, void>::type 62 | CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::array const & array ) 63 | { 64 | for( auto const & i : array ) 65 | ar( i ); 66 | } 67 | 68 | //! Loading for std::array all other types 69 | template inline 70 | typename std::enable_if, Archive>::value 71 | || !std::is_arithmetic::value, void>::type 72 | CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::array & array ) 73 | { 74 | for( auto & i : array ) 75 | ar( i ); 76 | } 77 | } // namespace cereal 78 | 79 | #endif // CEREAL_TYPES_ARRAY_HPP_ 80 | -------------------------------------------------------------------------------- /src/tiny_dnn/layers/concat_layer.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | #include "tiny_dnn/layers/layer.h" 10 | #include "tiny_dnn/util/util.h" 11 | 12 | namespace tiny_dnn { 13 | 14 | /** 15 | * concat N layers along depth 16 | * 17 | * @code 18 | * // in: [3,1,1],[3,1,1] out: [3,1,2] (in W,H,K order) 19 | * concat_layer l1(2,3); 20 | * 21 | * // in: [3,2,2],[3,2,5] out: [3,2,7] (in W,H,K order) 22 | * concat_layer l2({shape3d(3,2,2),shape3d(3,2,5)}); 23 | * @endcode 24 | **/ 25 | class concat_layer : public layer { 26 | public: 27 | /** 28 | * @param in_shapes [in] shapes of input tensors 29 | */ 30 | explicit concat_layer(const std::vector &in_shapes) 31 | : layer(std::vector(in_shapes.size(), vector_type::data), 32 | {vector_type::data}), 33 | in_shapes_(in_shapes) { 34 | set_outshape(); 35 | } 36 | 37 | /** 38 | * @param num_args [in] number of input tensors 39 | * @param ndim [in] number of elements for each input 40 | */ 41 | concat_layer(serial_size_t num_args, serial_size_t ndim) 42 | : layer(std::vector(num_args, vector_type::data), 43 | {vector_type::data}), 44 | in_shapes_(std::vector(num_args, shape3d(ndim, 1, 1))) { 45 | set_outshape(); 46 | } 47 | 48 | void set_outshape() { 49 | out_shape_ = in_shapes_.front(); 50 | for (size_t i = 1; i < in_shapes_.size(); i++) { 51 | if (in_shapes_[i].area() != out_shape_.area()) 52 | throw nn_error("each input shapes to concat must have same WxH size"); 53 | out_shape_.depth_ += in_shapes_[i].depth_; 54 | } 55 | } 56 | 57 | std::string layer_type() const override { return "concat"; } 58 | 59 | std::vector in_shape() const override { return in_shapes_; } 60 | 61 | std::vector out_shape() const override { return {out_shape_}; } 62 | 63 | void forward_propagation(const std::vector &in_data, 64 | std::vector &out_data) override { 65 | serial_size_t num_samples = 66 | static_cast((*out_data[0]).size()); 67 | 68 | for (serial_size_t s = 0; s < num_samples; s++) { 69 | float_t *outs = &(*out_data[0])[s][0]; 70 | 71 | for (serial_size_t i = 0; i < 
in_shapes_.size(); i++) { 72 | const float_t *ins = &(*in_data[i])[s][0]; 73 | serial_size_t dim = in_shapes_[i].size(); 74 | outs = std::copy(ins, ins + dim, outs); 75 | } 76 | } 77 | } 78 | 79 | void back_propagation(const std::vector &in_data, 80 | const std::vector &out_data, 81 | std::vector &out_grad, 82 | std::vector &in_grad) override { 83 | CNN_UNREFERENCED_PARAMETER(in_data); 84 | CNN_UNREFERENCED_PARAMETER(out_data); 85 | 86 | size_t num_samples = (*out_grad[0]).size(); 87 | 88 | for (size_t s = 0; s < num_samples; s++) { 89 | const float_t *outs = &(*out_grad[0])[s][0]; 90 | 91 | for (serial_size_t i = 0; i < in_shapes_.size(); i++) { 92 | serial_size_t dim = in_shapes_[i].size(); 93 | float_t *ins = &(*in_grad[i])[s][0]; 94 | std::copy(outs, outs + dim, ins); 95 | outs += dim; 96 | } 97 | } 98 | } 99 | 100 | #ifndef CNN_NO_SERIALIZATION 101 | friend struct serialization_buddy; 102 | #endif 103 | 104 | private: 105 | std::vector in_shapes_; 106 | shape3d out_shape_; 107 | }; 108 | 109 | } // namespace tiny_dnn 110 | -------------------------------------------------------------------------------- /src/tiny_dnn/layers/power_layer.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include 11 | 12 | #include "tiny_dnn/layers/layer.h" 13 | #include "tiny_dnn/util/util.h" 14 | 15 | namespace tiny_dnn { 16 | 17 | /** 18 | * element-wise pow: ```y = scale*x^factor``` 19 | **/ 20 | class power_layer : public layer { 21 | public: 22 | typedef layer Base; 23 | 24 | /** 25 | * @param in_shape [in] shape of input tensor 26 | * @param factor [in] floating-point number that specifies a power 27 | * @param scale [in] scale factor for additional multiply 28 | */ 29 | power_layer(const shape3d &in_shape, 30 | float_t factor, 31 | float_t scale = float_t{1.0}) 32 | : layer({vector_type::data}, {vector_type::data}), 33 | in_shape_(in_shape), 34 | factor_(factor), 35 | scale_(scale) {} 36 | 37 | /** 38 | * @param prev_layer [in] previous layer to be connected 39 | * @param factor [in] floating-point number that specifies a power 40 | * @param scale [in] scale factor for additional multiply 41 | */ 42 | power_layer(const layer &prev_layer, 43 | float_t factor, 44 | float_t scale = float_t{1.0}) 45 | : layer({vector_type::data}, {vector_type::data}), 46 | in_shape_(prev_layer.out_shape()[0]), 47 | factor_(factor), 48 | scale_(scale) {} 49 | 50 | std::string layer_type() const override { return "power"; } 51 | 52 | std::vector in_shape() const override { return {in_shape_}; } 53 | 54 | std::vector out_shape() const override { return {in_shape_}; } 55 | 56 | void forward_propagation(const std::vector &in_data, 57 | std::vector &out_data) override { 58 | const tensor_t &x = *in_data[0]; 59 | tensor_t &y = *out_data[0]; 60 | 61 | for (serial_size_t i = 0; i < x.size(); i++) { 62 | std::transform(x[i].begin(), x[i].end(), y[i].begin(), 63 | [=](float_t x) { return scale_ * std::pow(x, factor_); }); 64 | } 65 | } 66 | 67 | void back_propagation(const std::vector &in_data, 68 | const std::vector &out_data, 69 | std::vector &out_grad, 70 | std::vector &in_grad) override { 71 | tensor_t &dx = *in_grad[0]; 72 | const tensor_t &dy = 
*out_grad[0]; 73 | const tensor_t &x = *in_data[0]; 74 | const tensor_t &y = *out_data[0]; 75 | 76 | for (serial_size_t i = 0; i < x.size(); i++) { 77 | for (serial_size_t j = 0; j < x[i].size(); j++) { 78 | // f(x) = scale * x^factor 79 | // -> 80 | // dx = dy * df(x) 81 | // = dy * scale * factor * x^(factor - 1) 82 | // = dy * factor * (scale * x^factor) * 83 | // x^(-1) 84 | // = dy * factor * y / x 85 | if (std::abs(x[i][j]) > 1e-10) { 86 | dx[i][j] = dy[i][j] * factor_ * y[i][j] / x[i][j]; 87 | } else { 88 | dx[i][j] = 89 | dy[i][j] * scale_ * factor_ * std::pow(x[i][j], factor_ - 1.0f); 90 | } 91 | } 92 | } 93 | } 94 | 95 | #ifndef CNN_NO_SERIALIZATION 96 | friend struct serialization_buddy; 97 | #endif 98 | 99 | float_t factor() const { return factor_; } 100 | 101 | float_t scale() const { return scale_; } 102 | 103 | private: 104 | shape3d in_shape_; 105 | float_t factor_; 106 | float_t scale_; 107 | }; 108 | 109 | } // namespace tiny_dnn 110 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/conv2d_grad_op.h: -------------------------------------------------------------------------------- 1 | /* 2 | COPYRIGHT 3 | 4 | All contributions by Taiga Nomi 5 | Copyright (c) 2013, Taiga Nomi 6 | All rights reserved. 7 | 8 | All other contributions: 9 | Copyright (c) 2013-2016, the respective contributors. 10 | All rights reserved. 11 | 12 | Each contributor holds copyright over their respective contributions. 13 | The project versioning (Git) records all such contribution source 14 | information. 15 | 16 | LICENSE 17 | 18 | The BSD 3-Clause License 19 | 20 | 21 | Redistribution and use in source and binary forms, with or without 22 | modification, are permitted provided that the following conditions are met: 23 | 24 | * Redistributions of source code must retain the above copyright notice, 25 | this 26 | list of conditions and the following disclaimer.
27 | 28 | * Redistributions in binary form must reproduce the above copyright notice, 29 | this list of conditions and the following disclaimer in the documentation 30 | and/or other materials provided with the distribution. 31 | 32 | * Neither the name of tiny-dnn nor the names of its 33 | contributors may be used to endorse or promote products derived from 34 | this software without specific prior written permission. 35 | 36 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 37 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 38 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 39 | ARE 40 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 41 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 42 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 43 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 44 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 45 | LIABILITY, 46 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 47 | USE 48 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
49 | */ 50 | #pragma once 51 | 52 | #include "tiny_dnn/core/framework/op_kernel.h" 53 | 54 | #include "tiny_dnn/core/kernels/conv2d_grad_op_avx.h" 55 | #include "tiny_dnn/core/kernels/conv2d_op_internal.h" 56 | 57 | namespace tiny_dnn { 58 | 59 | class Conv2dGradOp : public core::OpKernel { 60 | public: 61 | explicit Conv2dGradOp(const core::OpKernelConstruction &context) 62 | : core::OpKernel(context) {} 63 | 64 | void compute(const core::OpKernelContext &context) override { 65 | auto params = OpKernel::params_->conv(); 66 | 67 | // incoming/outgoing data 68 | const tensor_t &prev_out = context.input(0); 69 | const tensor_t &W = context.input(1); 70 | tensor_t &dW = context.input_grad(1); 71 | tensor_t &db = context.input_grad(2); 72 | tensor_t &prev_delta = context.input_grad(0); 73 | tensor_t &curr_delta = context.output_grad(1); 74 | 75 | // initialize outputs 76 | fill_tensor(prev_delta, float_t{0}); 77 | 78 | // call convolution algorithm depending 79 | // on the selected engine type 80 | 81 | const core::backend_t engine = context.engine(); 82 | 83 | if (engine == core::backend_t::internal) { 84 | kernels::conv2d_op_internal(prev_out, W[0], dW, db, curr_delta, 85 | prev_delta, params, context.parallelize()); 86 | } else if (engine == core::backend_t::avx) { 87 | kernels::conv2d_grad_op_avx(prev_out, W[0], dW, db, curr_delta, 88 | prev_delta, params, context.parallelize()); 89 | } else { 90 | throw nn_error("Not supported engine: " + to_string(engine)); 91 | } 92 | } 93 | }; 94 | 95 | } // namespace tiny_dnn 96 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/conv2d_op.h: -------------------------------------------------------------------------------- 1 | /* 2 | COPYRIGHT 3 | 4 | All contributions by Taiga Nomi 5 | Copyright (c) 2013, Taiga Nomi 6 | All rights reserved. 7 | 8 | All other contributions: 9 | Copyright (c) 2013-2016, the respective contributors. 10 | All rights reserved.
11 | 12 | Each contributor holds copyright over their respective contributions. 13 | The project versioning (Git) records all such contribution source 14 | information. 15 | 16 | LICENSE 17 | 18 | The BSD 3-Clause License 19 | 20 | 21 | Redistribution and use in source and binary forms, with or without 22 | modification, are permitted provided that the following conditions are met: 23 | 24 | * Redistributions of source code must retain the above copyright notice, 25 | this 26 | list of conditions and the following disclaimer. 27 | 28 | * Redistributions in binary form must reproduce the above copyright notice, 29 | this list of conditions and the following disclaimer in the documentation 30 | and/or other materials provided with the distribution. 31 | 32 | * Neither the name of tiny-dnn nor the names of its 33 | contributors may be used to endorse or promote products derived from 34 | this software without specific prior written permission. 35 | 36 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 37 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 38 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 39 | ARE 40 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 41 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 42 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 43 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 44 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 45 | LIABILITY, 46 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 47 | USE 48 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
49 | */ 50 | #pragma once 51 | 52 | #include "tiny_dnn/core/framework/op_kernel.h" 53 | 54 | #include "tiny_dnn/core/kernels/conv2d_op_avx.h" 55 | #include "tiny_dnn/core/kernels/conv2d_op_internal.h" 56 | #include "tiny_dnn/core/kernels/conv2d_op_nnpack.h" 57 | 58 | namespace tiny_dnn { 59 | 60 | class Conv2dOp : public core::OpKernel { 61 | public: 62 | explicit Conv2dOp(const core::OpKernelConstruction &context) 63 | : core::OpKernel(context) {} 64 | 65 | void compute(const core::OpKernelContext &context) override { 66 | auto params = OpKernel::params_->conv(); 67 | 68 | // incoming/outgoing data 69 | const tensor_t &in_data = context.input(0); 70 | const tensor_t &W = context.input(1); 71 | const tensor_t &bias = context.input(2); 72 | tensor_t &out_data = context.output(1); 73 | 74 | // initialize outputs 75 | fill_tensor(out_data, float_t{0}); 76 | 77 | // call convolution algorithm depending 78 | // on the selected engine type 79 | 80 | const core::backend_t engine = context.engine(); 81 | 82 | if (engine == core::backend_t::internal) { 83 | kernels::conv2d_op_internal(in_data, W[0], bias[0], out_data, params, 84 | context.parallelize()); 85 | } else if (engine == core::backend_t::nnpack) { 86 | kernels::conv2d_op_nnpack(in_data, W[0], bias[0], out_data, params); 87 | } else if (engine == core::backend_t::avx) { 88 | kernels::conv2d_op_avx(in_data, W[0], bias[0], out_data, params, 89 | context.parallelize()); 90 | } else { 91 | throw nn_error("Not supported engine: " + to_string(engine)); 92 | } 93 | } 94 | }; 95 | 96 | } // namespace tiny_dnn 97 | -------------------------------------------------------------------------------- /src/cereal/external/rapidxml/rapidxml_utils.hpp: -------------------------------------------------------------------------------- 1 | #ifndef RAPIDXML_UTILS_HPP_INCLUDED 2 | #define RAPIDXML_UTILS_HPP_INCLUDED 3 | 4 | // Copyright (C) 2006, 2009 Marcin Kalicinski 5 | // Version 1.13 6 | // Revision $DateTime: 2009/05/13 01:46:17 $ 7
| //! in certain simple scenarios. They should probably not be used if maximizing performance is the main objective. 8 | 9 | #include "rapidxml.hpp" 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | namespace rapidxml 16 | { 17 | 18 | //! Represents data loaded from a file 19 | template 20 | class file 21 | { 22 | 23 | public: 24 | 25 | //! Loads file into the memory. Data will be automatically destroyed by the destructor. 26 | //! \param filename Filename to load. 27 | file(const char *filename) 28 | { 29 | using namespace std; 30 | 31 | // Open stream 32 | basic_ifstream stream(filename, ios::binary); 33 | if (!stream) 34 | throw runtime_error(string("cannot open file ") + filename); 35 | stream.unsetf(ios::skipws); 36 | 37 | // Determine stream size 38 | stream.seekg(0, ios::end); 39 | size_t size = stream.tellg(); 40 | stream.seekg(0); 41 | 42 | // Load data and add terminating 0 43 | m_data.resize(size + 1); 44 | stream.read(&m_data.front(), static_cast(size)); 45 | m_data[size] = 0; 46 | } 47 | 48 | //! Loads file into the memory. Data will be automatically destroyed by the destructor 49 | //! \param stream Stream to load from 50 | file(std::basic_istream &stream) 51 | { 52 | using namespace std; 53 | 54 | // Load data and add terminating 0 55 | stream.unsetf(ios::skipws); 56 | m_data.assign(istreambuf_iterator(stream), istreambuf_iterator()); 57 | if (stream.fail() || stream.bad()) 58 | throw runtime_error("error reading stream"); 59 | m_data.push_back(0); 60 | } 61 | 62 | //! Gets file data. 63 | //! \return Pointer to data of file. 64 | Ch *data() 65 | { 66 | return &m_data.front(); 67 | } 68 | 69 | //! Gets file data. 70 | //! \return Pointer to data of file. 71 | const Ch *data() const 72 | { 73 | return &m_data.front(); 74 | } 75 | 76 | //! Gets file data size. 77 | //! \return Size of file data, in characters. 
78 | std::size_t size() const 79 | { 80 | return m_data.size(); 81 | } 82 | 83 | private: 84 | 85 | std::vector m_data; // File data 86 | 87 | }; 88 | 89 | //! Counts children of node. Time complexity is O(n). 90 | //! \return Number of children of node 91 | template 92 | inline std::size_t count_children(xml_node *node) 93 | { 94 | xml_node *child = node->first_node(); 95 | std::size_t count = 0; 96 | while (child) 97 | { 98 | ++count; 99 | child = child->next_sibling(); 100 | } 101 | return count; 102 | } 103 | 104 | //! Counts attributes of node. Time complexity is O(n). 105 | //! \return Number of attributes of node 106 | template 107 | inline std::size_t count_attributes(xml_node *node) 108 | { 109 | xml_attribute *attr = node->first_attribute(); 110 | std::size_t count = 0; 111 | while (attr) 112 | { 113 | ++count; 114 | attr = attr->next_attribute(); 115 | } 116 | return count; 117 | } 118 | 119 | } 120 | 121 | #endif 122 | -------------------------------------------------------------------------------- /man/layers.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/layers.R 3 | \name{layer_fully_connected} 4 | \alias{ave_pool} 5 | \alias{conv} 6 | \alias{fc} 7 | \alias{layer_average_pooling} 8 | \alias{layer_convolutional} 9 | \alias{layer_fully_connected} 10 | \alias{layer_max_pooling} 11 | \alias{layers} 12 | \alias{max_pool} 13 | \title{Layers to Build Deep Neural Networks} 14 | \usage{ 15 | layer_fully_connected(in_dim, out_dim, has_bias = TRUE, 16 | activation = "sigmoid") 17 | 18 | fc(in_dim, out_dim, has_bias = TRUE, activation = "sigmoid") 19 | 20 | layer_convolutional(in_width, in_height, window_width, window_height, 21 | in_channels, out_channels, pad_type = c("valid", "same"), has_bias = TRUE, 22 | stride_x = 1L, stride_y = 1L, activation = "sigmoid") 23 | 24 | conv(in_width, in_height, window_width, window_height, in_channels, 
25 | out_channels, pad_type = c("valid", "same"), has_bias = TRUE, 26 | stride_x = 1L, stride_y = 1L, activation = "sigmoid") 27 | 28 | layer_average_pooling(in_width, in_height, in_channels, pool_size_x, 29 | pool_size_y = ifelse(in_height == 1, 1, pool_size_x), 30 | stride_x = pool_size_x, stride_y = pool_size_y, pad_type = c("valid", 31 | "same"), activation = "sigmoid") 32 | 33 | ave_pool(in_width, in_height, in_channels, pool_size_x, 34 | pool_size_y = ifelse(in_height == 1, 1, pool_size_x), 35 | stride_x = pool_size_x, stride_y = pool_size_y, pad_type = c("valid", 36 | "same"), activation = "sigmoid") 37 | 38 | layer_max_pooling(in_width, in_height, in_channels, pool_size_x, 39 | pool_size_y = ifelse(in_height == 1, 1, pool_size_x), 40 | stride_x = pool_size_x, stride_y = pool_size_y, pad_type = c("valid", 41 | "same"), activation = "sigmoid") 42 | 43 | max_pool(in_width, in_height, in_channels, pool_size_x, 44 | pool_size_y = ifelse(in_height == 1, 1, pool_size_x), 45 | stride_x = pool_size_x, stride_y = pool_size_y, pad_type = c("valid", 46 | "same"), activation = "sigmoid") 47 | } 48 | \arguments{ 49 | \item{in_dim}{Number of elements in the input} 50 | 51 | \item{out_dim}{Number of elements in the output} 52 | 53 | \item{has_bias}{Whether to include the bias element} 54 | 55 | \item{activation}{Activation function applied to this layer. 
See section 56 | \strong{Activation Functions} for details.} 57 | 58 | \item{in_width}{Input image width} 59 | 60 | \item{in_height}{Input image height} 61 | 62 | \item{window_width}{Window width of convolution} 63 | 64 | \item{window_height}{Window height of convolution} 65 | 66 | \item{in_channels}{Input image channels (depth)} 67 | 68 | \item{out_channels}{Output image channels (depth)} 69 | 70 | \item{pad_type}{Rounding strategy} 71 | 72 | \item{stride_x}{The horizontal interval at which to apply the filters} 73 | 74 | \item{stride_y}{The vertical interval at which to apply the filters} 75 | 76 | \item{pool_size_x}{The factor by which to downscale in horizontal direction} 77 | 78 | \item{pool_size_y}{The factor by which to downscale in vertical direction} 79 | } 80 | \description{ 81 | Various layers that can be combined to build a deep neural network. 82 | } 83 | \section{List of Layers}{ 84 | 85 | Currently the following layers are supported: 86 | \itemize{ 87 | \item Fully-connected layer: \code{layer_fully_connected()}, or \code{fc()} for short 88 | \item Convolutional layer: \code{layer_convolutional()}, \code{conv()} 89 | \item Average-pooling layer: \code{layer_average_pooling()}, \code{ave_pool()} 90 | \item Max-pooling layer: \code{layer_max_pooling()}, \code{max_pool()} 91 | } 92 | More types of layers are to be added.
93 | } 94 | 95 | \section{Activation Functions}{ 96 | 97 | Currently the following activation functions are supported: 98 | 99 | \itemize{ 100 | \item identity 101 | \item sigmoid 102 | \item relu 103 | \item leaky_relu 104 | \item elu 105 | \item softmax 106 | \item tan_h 107 | \item tan_hp1m2 108 | } 109 | } 110 | 111 | -------------------------------------------------------------------------------- /src/tiny_dnn/core/kernels/fully_connected_grad_op.h: -------------------------------------------------------------------------------- 1 | /* 2 | COPYRIGHT 3 | 4 | All contributions by Taiga Nomi 5 | Copyright (c) 2013, Taiga Nomi 6 | All rights reserved. 7 | 8 | All other contributions: 9 | Copyright (c) 2013-2016, the respective contributors. 10 | All rights reserved. 11 | 12 | Each contributor holds copyright over their respective contributions. 13 | The project versioning (Git) records all such contribution source 14 | information. 15 | 16 | LICENSE 17 | 18 | The BSD 3-Clause License 19 | 20 | 21 | Redistribution and use in source and binary forms, with or without 22 | modification, are permitted provided that the following conditions are met: 23 | 24 | * Redistributions of source code must retain the above copyright notice, 25 | this 26 | list of conditions and the following disclaimer. 27 | 28 | * Redistributions in binary form must reproduce the above copyright notice, 29 | this list of conditions and the following disclaimer in the documentation 30 | and/or other materials provided with the distribution. 31 | 32 | * Neither the name of tiny-dnn nor the names of its 33 | contributors may be used to endorse or promote products derived from 34 | this software without specific prior written permission. 
35 | 36 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 37 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 38 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 39 | ARE 40 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 41 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 42 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 43 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 44 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 45 | LIABILITY, 46 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 47 | USE 48 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 49 | */ 50 | #pragma once 51 | 52 | #include "tiny_dnn/core/framework/op_kernel.h" 53 | 54 | #include "tiny_dnn/core/kernels/fully_connected_op_avx.h" 55 | #include "tiny_dnn/core/kernels/fully_connected_op_internal.h" 56 | 57 | namespace tiny_dnn { 58 | 59 | class FullyConnectedGradOp : public core::OpKernel { 60 | public: 61 | explicit FullyConnectedGradOp(const core::OpKernelConstruction &context) 62 | : core::OpKernel(context) {} 63 | 64 | void compute(const core::OpKernelContext &context) override { 65 | auto params = OpKernel::params_->fully(); 66 | 67 | // incoming/outgoing data 68 | const tensor_t &prev_out = context.input(0); 69 | const tensor_t &W = context.input(1); 70 | tensor_t &dW = context.input_grad(1); 71 | tensor_t *db = params.has_bias_ ?
&context.input_grad(2) : nullptr; 72 | tensor_t &prev_delta = context.input_grad(0); 73 | tensor_t &curr_delta = context.output_grad(1); 74 | tensor_t dummy; // need lvalue for non-const reference 75 | 76 | // initialize outputs 77 | fill_tensor(prev_delta, float_t{0}); 78 | 79 | // call the algorithm depending on the selected engine type 80 | 81 | const core::backend_t engine = context.engine(); 82 | 83 | if (engine == core::backend_t::internal) { 84 | kernels::fully_connected_op_internal( 85 | prev_out, W[0], dW, params.has_bias_ ? *db : dummy, curr_delta, 86 | prev_delta, params, context.parallelize()); 87 | } else if (engine == core::backend_t::avx) { 88 | kernels::fully_connected_op_avx( 89 | prev_out, W[0], dW, params.has_bias_ ? *db : dummy, curr_delta, 90 | prev_delta, params, context.parallelize()); 91 | } else { 92 | throw nn_error("Not supported engine: " + to_string(engine)); 93 | } 94 | } 95 | }; 96 | 97 | } // namespace tiny_dnn 98 | -------------------------------------------------------------------------------- /src/cereal/types/set.hpp: -------------------------------------------------------------------------------- 1 | /*! \file set.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 
15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_SET_HPP_ 31 | #define CEREAL_TYPES_SET_HPP_ 32 | 33 | #include 34 | #include 35 | 36 | namespace cereal 37 | { 38 | namespace set_detail 39 | { 40 | //! @internal 41 | template inline 42 | void save( Archive & ar, SetT const & set ) 43 | { 44 | ar( make_size_tag( static_cast(set.size()) ) ); 45 | 46 | for( const auto & i : set ) 47 | ar( i ); 48 | } 49 | 50 | //! @internal 51 | template inline 52 | void load( Archive & ar, SetT & set ) 53 | { 54 | size_type size; 55 | ar( make_size_tag( size ) ); 56 | 57 | set.clear(); 58 | 59 | auto hint = set.begin(); 60 | for( size_type i = 0; i < size; ++i ) 61 | { 62 | typename SetT::key_type key; 63 | 64 | ar( key ); 65 | #ifdef CEREAL_OLDER_GCC 66 | hint = set.insert( hint, std::move( key ) ); 67 | #else // NOT CEREAL_OLDER_GCC 68 | hint = set.emplace_hint( hint, std::move( key ) ); 69 | #endif // NOT CEREAL_OLDER_GCC 70 | } 71 | } 72 | } 73 | 74 | //! 
Saving for std::set 75 | template inline 76 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::set const & set ) 77 | { 78 | set_detail::save( ar, set ); 79 | } 80 | 81 | //! Loading for std::set 82 | template inline 83 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::set & set ) 84 | { 85 | set_detail::load( ar, set ); 86 | } 87 | 88 | //! Saving for std::multiset 89 | template inline 90 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::multiset const & multiset ) 91 | { 92 | set_detail::save( ar, multiset ); 93 | } 94 | 95 | //! Loading for std::multiset 96 | template inline 97 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::multiset & multiset ) 98 | { 99 | set_detail::load( ar, multiset ); 100 | } 101 | } // namespace cereal 102 | 103 | #endif // CEREAL_TYPES_SET_HPP_ 104 | -------------------------------------------------------------------------------- /src/cereal/details/static_object.hpp: -------------------------------------------------------------------------------- 1 | /*! \file static_object.hpp 2 | \brief Internal polymorphism static object support 3 | \ingroup Internal */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are met: 9 | * Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 11 | * Redistributions in binary form must reproduce the above copyright 12 | notice, this list of conditions and the following disclaimer in the 13 | documentation and/or other materials provided with the distribution. 14 | * Neither the name of cereal nor the 15 | names of its contributors may be used to endorse or promote products 16 | derived from this software without specific prior written permission. 
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 21 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | */ 28 | #ifndef CEREAL_DETAILS_STATIC_OBJECT_HPP_ 29 | #define CEREAL_DETAILS_STATIC_OBJECT_HPP_ 30 | 31 | //! Prevent link optimization from removing non-referenced static objects 32 | /*! Especially for polymorphic support, we create static objects which 33 | may not ever be explicitly referenced. Most linkers will detect this 34 | and remove the code causing various unpleasant runtime errors. These 35 | macros, adopted from Boost (see force_include.hpp) prevent this 36 | (C) Copyright 2002 Robert Ramey - http://www.rrsd.com . 37 | Use, modification and distribution is subject to the Boost Software 38 | License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at 39 | http://www.boost.org/LICENSE_1_0.txt) */ 40 | 41 | #ifdef _MSC_VER 42 | # define CEREAL_DLL_EXPORT __declspec(dllexport) 43 | # define CEREAL_USED 44 | #else // clang or gcc 45 | # define CEREAL_DLL_EXPORT 46 | # define CEREAL_USED __attribute__ ((__used__)) 47 | #endif 48 | 49 | namespace cereal 50 | { 51 | namespace detail 52 | { 53 | //! A static, pre-execution object 54 | /*! 
This class will create a single copy (singleton) of some 55 | type and ensures that merely referencing this type will 56 | cause it to be instantiated and initialized pre-execution. 57 | For example, this is used heavily in the polymorphic pointer 58 | serialization mechanisms to bind various archive types with 59 | different polymorphic classes */ 60 | template 61 | class CEREAL_DLL_EXPORT StaticObject 62 | { 63 | private: 64 | //! Forces instantiation at pre-execution time 65 | static void instantiate( T const & ) {} 66 | 67 | static T & create() 68 | { 69 | static T t; 70 | instantiate(instance); 71 | return t; 72 | } 73 | 74 | StaticObject( StaticObject const & /*other*/ ) {} 75 | 76 | public: 77 | static T & getInstance() 78 | { 79 | return create(); 80 | } 81 | 82 | private: 83 | static T & instance; 84 | }; 85 | 86 | template T & StaticObject::instance = StaticObject::create(); 87 | } // namespace detail 88 | } // namespace cereal 89 | 90 | #endif // CEREAL_DETAILS_STATIC_OBJECT_HPP_ -------------------------------------------------------------------------------- /src/tiny_dnn/util/graph_visualizer.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2016, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/layers/layer.h" 11 | #include "tiny_dnn/network.h" 12 | #include "tiny_dnn/node.h" 13 | 14 | namespace tiny_dnn { 15 | 16 | /** 17 | * utility for graph visualization 18 | **/ 19 | class graph_visualizer { 20 | public: 21 | explicit graph_visualizer(layer *root_node, 22 | const std::string &graph_name = "graph") 23 | : root_(root_node), name_(graph_name) {} 24 | 25 | template 26 | explicit graph_visualizer(network &network, 27 | const std::string &graph_name = "graph") 28 | : root_(network[0]), name_(graph_name) {} 29 | 30 | /** 31 | * generate graph structure in dot language format 32 | **/ 33 | void generate(std::ostream &stream) { 34 | generate_header(stream); 35 | generate_nodes(stream); 36 | generate_footer(stream); 37 | } 38 | 39 | private: 40 | typedef std::unordered_map node2name_t; 41 | 42 | void generate_header(std::ostream &stream) { 43 | stream << "digraph \"" << name_ << "\" {" << std::endl; 44 | stream << " node [ shape=record ];" << std::endl; 45 | } 46 | 47 | void generate_nodes(std::ostream &stream) { 48 | node2name_t node2name; 49 | get_layer_names(node2name); 50 | 51 | graph_traverse( 52 | root_, [&](const layer &l) { generate_layer(stream, l, node2name); }, 53 | [&](const edge &e) { generate_edge(stream, e, node2name); }); 54 | } 55 | 56 | void get_layer_names(node2name_t &node2name) { 57 | std::unordered_map layer_counts; // [layer_type -> num] 58 | 59 | auto namer = [&](const layer &l) { 60 | std::string ltype = l.layer_type(); 61 | 62 | // add quote and sequential-id 63 | node2name[&l] = 64 | "\"" + ltype + to_string(layer_counts[l.layer_type()]++) + "\""; 65 | }; 66 | 67 | graph_traverse(root_, namer, [&](const edge &) {}); 68 | } 69 | 70 | void generate_edge(std::ostream &stream, 71 | const edge &e, 72 | node2name_t &node2name) { 73 | auto next = e.next(); 74 | auto prev = e.prev(); 75 | 76 | for (auto n : next) { 77 | serial_size_t dst_port = n->prev_port(e); 78 | serial_size_t src_port = 
prev->next_port(e); 79 | stream << " " << node2name[prev] << ":out" << src_port << " -> " 80 | << node2name[n] << ":in" << dst_port << ";" << std::endl; 81 | } 82 | } 83 | 84 | void generate_layer(std::ostream &stream, 85 | const layer &layer, 86 | node2name_t &node2name) { 87 | stream << " " << node2name[&layer] << " [" << std::endl; 88 | stream << " label= \""; 89 | stream << layer.layer_type() << "|{{in"; 90 | generate_layer_channels(stream, layer.in_shape(), layer.in_types(), "in"); 91 | stream << "}|{out"; 92 | generate_layer_channels(stream, layer.out_shape(), layer.out_types(), 93 | "out"); 94 | stream << "}}\"" << std::endl; 95 | stream << " ];" << std::endl; 96 | } 97 | 98 | void generate_layer_channels(std::ostream &stream, 99 | const std::vector &shapes, 100 | const std::vector &vtypes, 101 | const std::string &port_prefix) { 102 | CNN_UNREFERENCED_PARAMETER(vtypes); 103 | for (size_t i = 0; i < shapes.size(); i++) { 104 | stream << "|<" << port_prefix << i << ">" << shapes[i] << "(" << vtypes[i] 105 | << ")"; 106 | } 107 | } 108 | 109 | void generate_footer(std::ostream &stream) { stream << "}" << std::endl; } 110 | 111 | layer *root_; 112 | std::string name_; 113 | }; 114 | 115 | } // namespace tiny_dnn 116 | -------------------------------------------------------------------------------- /src/tiny_dnn/tiny_dnn.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2013, Taiga Nomi 3 | All rights reserved. 4 | 5 | Use of this source code is governed by a BSD-style license that can be found 6 | in the LICENSE file. 
7 | */ 8 | #pragma once 9 | 10 | #include "tiny_dnn/config.h" 11 | #include "tiny_dnn/network.h" 12 | #include "tiny_dnn/nodes.h" 13 | 14 | #include "tiny_dnn/core/framework/tensor.h" 15 | 16 | #include "tiny_dnn/core/framework/device.h" 17 | #include "tiny_dnn/core/framework/program_manager.h" 18 | 19 | #include "tiny_dnn/layers/arithmetic_layer.h" 20 | #include "tiny_dnn/layers/average_pooling_layer.h" 21 | #include "tiny_dnn/layers/average_unpooling_layer.h" 22 | #include "tiny_dnn/layers/batch_normalization_layer.h" 23 | #include "tiny_dnn/layers/concat_layer.h" 24 | #include "tiny_dnn/layers/convolutional_layer.h" 25 | #include "tiny_dnn/layers/deconvolutional_layer.h" 26 | #include "tiny_dnn/layers/dropout_layer.h" 27 | #include "tiny_dnn/layers/feedforward_layer.h" 28 | #include "tiny_dnn/layers/fully_connected_layer.h" 29 | #include "tiny_dnn/layers/input_layer.h" 30 | #include "tiny_dnn/layers/linear_layer.h" 31 | #include "tiny_dnn/layers/lrn_layer.h" 32 | #include "tiny_dnn/layers/max_pooling_layer.h" 33 | #include "tiny_dnn/layers/max_unpooling_layer.h" 34 | #include "tiny_dnn/layers/power_layer.h" 35 | #include "tiny_dnn/layers/quantized_convolutional_layer.h" 36 | #include "tiny_dnn/layers/quantized_deconvolutional_layer.h" 37 | #include "tiny_dnn/layers/slice_layer.h" 38 | 39 | #ifdef CNN_USE_GEMMLOWP 40 | #include "tiny_dnn/layers/quantized_fully_connected_layer.h" 41 | #endif // CNN_USE_GEMMLOWP 42 | 43 | #include "tiny_dnn/activations/activation_function.h" 44 | #include "tiny_dnn/lossfunctions/loss_function.h" 45 | #include "tiny_dnn/optimizers/optimizer.h" 46 | 47 | #include "tiny_dnn/util/deform.h" 48 | #include "tiny_dnn/util/graph_visualizer.h" 49 | #include "tiny_dnn/util/product.h" 50 | #include "tiny_dnn/util/weight_init.h" 51 | 52 | #include "tiny_dnn/io/cifar10_parser.h" 53 | #include "tiny_dnn/io/display.h" 54 | #include "tiny_dnn/io/layer_factory.h" 55 | #include "tiny_dnn/io/mnist_parser.h" 56 | 57 | #ifdef DNN_USE_IMAGE_API 58 | 
#include "tiny_dnn/util/image.h" 59 | #endif // DNN_USE_IMAGE_API 60 | 61 | #ifndef CNN_NO_SERIALIZATION 62 | #include "tiny_dnn/util/deserialization_helper.h" 63 | #include "tiny_dnn/util/serialization_helper.h" 64 | #endif // CNN_NO_SERIALIZATION 65 | 66 | #ifdef CNN_USE_CAFFE_CONVERTER 67 | // experimental / require google protobuf 68 | #include "tiny_dnn/io/caffe/layer_factory.h" 69 | #endif 70 | 71 | // shortcut version of layer names 72 | namespace tiny_dnn { 73 | namespace layers { 74 | 75 | template 76 | using conv = tiny_dnn::convolutional_layer; 77 | 78 | template 79 | using q_conv = tiny_dnn::quantized_convolutional_layer; 80 | 81 | template 82 | using max_pool = tiny_dnn::max_pooling_layer; 83 | 84 | template 85 | using ave_pool = tiny_dnn::average_pooling_layer; 86 | 87 | template 88 | using fc = tiny_dnn::fully_connected_layer; 89 | 90 | template 91 | using dense = tiny_dnn::fully_connected_layer; 92 | 93 | using add = tiny_dnn::elementwise_add_layer; 94 | 95 | using dropout = tiny_dnn::dropout_layer; 96 | 97 | using input = tiny_dnn::input_layer; 98 | 99 | template 100 | using lrn = tiny_dnn::lrn_layer; 101 | 102 | using input = tiny_dnn::input_layer; 103 | 104 | using concat = tiny_dnn::concat_layer; 105 | 106 | template 107 | using deconv = tiny_dnn::deconvolutional_layer; 108 | 109 | template 110 | using max_unpool = tiny_dnn::max_unpooling_layer; 111 | 112 | template 113 | using ave_unpool = tiny_dnn::average_unpooling_layer; 114 | 115 | } // namespace layers 116 | 117 | #include "tiny_dnn/models/alexnet.h" 118 | 119 | using batch_norm = tiny_dnn::batch_normalization_layer; 120 | 121 | using slice = tiny_dnn::slice_layer; 122 | 123 | using power = tiny_dnn::power_layer; 124 | 125 | } // namespace tiny_dnn 126 | -------------------------------------------------------------------------------- /src/cereal/types/bitset.hpp: -------------------------------------------------------------------------------- 1 | /*! 
\file bitset.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_BITSET_HPP_ 31 | #define CEREAL_TYPES_BITSET_HPP_ 32 | 33 | #include 34 | #include 35 | 36 | namespace cereal 37 | { 38 | namespace bitset_detail 39 | { 40 | //! The type the bitset is encoded with 41 | /*! 
@internal */ 42 | enum class type : uint8_t 43 | { 44 | ulong, 45 | ullong, 46 | string 47 | }; 48 | } 49 | 50 | //! Serializing (save) for std::bitset 51 | template inline 52 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::bitset const & bits ) 53 | { 54 | try 55 | { 56 | auto const b = bits.to_ulong(); 57 | ar( CEREAL_NVP_("type", bitset_detail::type::ulong) ); 58 | ar( CEREAL_NVP_("data", b) ); 59 | } 60 | catch( std::overflow_error const & ) 61 | { 62 | try 63 | { 64 | auto const b = bits.to_ullong(); 65 | ar( CEREAL_NVP_("type", bitset_detail::type::ullong) ); 66 | ar( CEREAL_NVP_("data", b) ); 67 | } 68 | catch( std::overflow_error const & ) 69 | { 70 | ar( CEREAL_NVP_("type", bitset_detail::type::string) ); 71 | ar( CEREAL_NVP_("data", bits.to_string()) ); 72 | } 73 | } 74 | } 75 | 76 | //! Serializing (load) for std::bitset 77 | template inline 78 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::bitset & bits ) 79 | { 80 | bitset_detail::type t; 81 | ar( CEREAL_NVP_("type", t) ); 82 | 83 | switch( t ) 84 | { 85 | case bitset_detail::type::ulong: 86 | { 87 | unsigned long b; 88 | ar( CEREAL_NVP_("data", b) ); 89 | bits = std::bitset( b ); 90 | break; 91 | } 92 | case bitset_detail::type::ullong: 93 | { 94 | unsigned long long b; 95 | ar( CEREAL_NVP_("data", b) ); 96 | bits = std::bitset( b ); 97 | break; 98 | } 99 | case bitset_detail::type::string: 100 | { 101 | std::string b; 102 | ar( CEREAL_NVP_("data", b) ); 103 | bits = std::bitset( b ); 104 | break; 105 | } 106 | default: 107 | throw Exception("Invalid bitset data representation"); 108 | } 109 | } 110 | } // namespace cereal 111 | 112 | #endif // CEREAL_TYPES_BITSET_HPP_ 113 | -------------------------------------------------------------------------------- /src/cereal/macros.hpp: -------------------------------------------------------------------------------- 1 | /*! 
\file macros.hpp 2 | \brief Preprocessor macros that can customise the cereal library 3 | 4 | By default, cereal looks for serialization functions with very 5 | specific names, that is: serialize, load, save, load_minimal, 6 | or save_minimal. 7 | 8 | This file allows an advanced user to change these names to conform 9 | to some other style or preference. This is implemented using 10 | preprocessor macros. 11 | 12 | As a result of this, in internal cereal code you will see macros 13 | used for these function names. In user code, you should name 14 | the functions like you normally would and not use the macros 15 | to improve readability. 16 | \ingroup utility */ 17 | /* 18 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 19 | All rights reserved. 20 | 21 | Redistribution and use in source and binary forms, with or without 22 | modification, are permitted provided that the following conditions are met: 23 | * Redistributions of source code must retain the above copyright 24 | notice, this list of conditions and the following disclaimer. 25 | * Redistributions in binary form must reproduce the above copyright 26 | notice, this list of conditions and the following disclaimer in the 27 | documentation and/or other materials provided with the distribution. 28 | * Neither the name of cereal nor the 29 | names of its contributors may be used to endorse or promote products 30 | derived from this software without specific prior written permission. 31 | 32 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 33 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 34 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 35 | DISCLAIMED. 
IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 36 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 37 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 38 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 39 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 40 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 41 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 42 | */ 43 | 44 | #ifndef CEREAL_MACROS_HPP_ 45 | #define CEREAL_MACROS_HPP_ 46 | 47 | #ifndef CEREAL_SERIALIZE_FUNCTION_NAME 48 | //! The serialization/deserialization function name to search for. 49 | /*! You can define @c CEREAL_SERIALIZE_FUNCTION_NAME to be different assuming 50 | you do so before this file is included. */ 51 | #define CEREAL_SERIALIZE_FUNCTION_NAME serialize 52 | #endif // CEREAL_SERIALIZE_FUNCTION_NAME 53 | 54 | #ifndef CEREAL_LOAD_FUNCTION_NAME 55 | //! The deserialization (load) function name to search for. 56 | /*! You can define @c CEREAL_LOAD_FUNCTION_NAME to be different assuming you do so 57 | before this file is included. */ 58 | #define CEREAL_LOAD_FUNCTION_NAME load 59 | #endif // CEREAL_LOAD_FUNCTION_NAME 60 | 61 | #ifndef CEREAL_SAVE_FUNCTION_NAME 62 | //! The serialization (save) function name to search for. 63 | /*! You can define @c CEREAL_SAVE_FUNCTION_NAME to be different assuming you do so 64 | before this file is included. */ 65 | #define CEREAL_SAVE_FUNCTION_NAME save 66 | #endif // CEREAL_SAVE_FUNCTION_NAME 67 | 68 | #ifndef CEREAL_LOAD_MINIMAL_FUNCTION_NAME 69 | //! The deserialization (load_minimal) function name to search for. 70 | /*! You can define @c CEREAL_LOAD_MINIMAL_FUNCTION_NAME to be different assuming you do so 71 | before this file is included. 
*/ 72 | #define CEREAL_LOAD_MINIMAL_FUNCTION_NAME load_minimal 73 | #endif // CEREAL_LOAD_MINIMAL_FUNCTION_NAME 74 | 75 | #ifndef CEREAL_SAVE_MINIMAL_FUNCTION_NAME 76 | //! The serialization (save_minimal) function name to search for. 77 | /*! You can define @c CEREAL_SAVE_MINIMAL_FUNCTION_NAME to be different assuming you do so 78 | before this file is included. */ 79 | #define CEREAL_SAVE_MINIMAL_FUNCTION_NAME save_minimal 80 | #endif // CEREAL_SAVE_MINIMAL_FUNCTION_NAME 81 | 82 | #endif // CEREAL_MACROS_HPP_ 83 | -------------------------------------------------------------------------------- /src/cereal/types/unordered_set.hpp: -------------------------------------------------------------------------------- 1 | /*! \file unordered_set.hpp 2 | \brief Support for types found in \ 3 | \ingroup STLSupport */ 4 | /* 5 | Copyright (c) 2014, Randolph Voorhies, Shane Grant 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | * Neither the name of cereal nor the 16 | names of its contributors may be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. 
IN NO EVENT SHALL RANDOLPH VOORHIES OR SHANE GRANT BE LIABLE FOR ANY 23 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | */ 30 | #ifndef CEREAL_TYPES_UNORDERED_SET_HPP_ 31 | #define CEREAL_TYPES_UNORDERED_SET_HPP_ 32 | 33 | #include 34 | #include 35 | 36 | namespace cereal 37 | { 38 | namespace unordered_set_detail 39 | { 40 | //! @internal 41 | template inline 42 | void save( Archive & ar, SetT const & set ) 43 | { 44 | ar( make_size_tag( static_cast(set.size()) ) ); 45 | 46 | for( const auto & i : set ) 47 | ar( i ); 48 | } 49 | 50 | //! @internal 51 | template inline 52 | void load( Archive & ar, SetT & set ) 53 | { 54 | size_type size; 55 | ar( make_size_tag( size ) ); 56 | 57 | set.clear(); 58 | set.reserve( static_cast( size ) ); 59 | 60 | for( size_type i = 0; i < size; ++i ) 61 | { 62 | typename SetT::key_type key; 63 | 64 | ar( key ); 65 | set.emplace( std::move( key ) ); 66 | } 67 | } 68 | } 69 | 70 | //! Saving for std::unordered_set 71 | template inline 72 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::unordered_set const & unordered_set ) 73 | { 74 | unordered_set_detail::save( ar, unordered_set ); 75 | } 76 | 77 | //! Loading for std::unordered_set 78 | template inline 79 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::unordered_set & unordered_set ) 80 | { 81 | unordered_set_detail::load( ar, unordered_set ); 82 | } 83 | 84 | //! 
Saving for std::unordered_multiset 85 | template inline 86 | void CEREAL_SAVE_FUNCTION_NAME( Archive & ar, std::unordered_multiset const & unordered_multiset ) 87 | { 88 | unordered_set_detail::save( ar, unordered_multiset ); 89 | } 90 | 91 | //! Loading for std::unordered_multiset 92 | template inline 93 | void CEREAL_LOAD_FUNCTION_NAME( Archive & ar, std::unordered_multiset & unordered_multiset ) 94 | { 95 | unordered_set_detail::load( ar, unordered_multiset ); 96 | } 97 | } // namespace cereal 98 | 99 | #endif // CEREAL_TYPES_UNORDERED_SET_HPP_ 100 | -------------------------------------------------------------------------------- /src/tiny_dnn/io/display.h: -------------------------------------------------------------------------------- 1 | // addapted from boost progress.hpp, made c++11 only // 2 | 3 | #ifndef PROGRESS_H 4 | #define PROGRESS_H 5 | 6 | #include 7 | #include // for ostream, cout, etc 8 | #include // for string 9 | 10 | namespace tiny_dnn { 11 | 12 | class timer { 13 | public: 14 | timer() : t1(std::chrono::high_resolution_clock::now()) {} 15 | float_t elapsed() { 16 | return std::chrono::duration_cast>( 17 | std::chrono::high_resolution_clock::now() - t1) 18 | .count(); 19 | } 20 | void restart() { t1 = std::chrono::high_resolution_clock::now(); } 21 | void start() { t1 = std::chrono::high_resolution_clock::now(); } 22 | void stop() { t2 = std::chrono::high_resolution_clock::now(); } 23 | float_t total() { 24 | stop(); 25 | return std::chrono::duration_cast>(t2 - t1) 26 | .count(); 27 | } 28 | ~timer() {} 29 | 30 | private: 31 | std::chrono::high_resolution_clock::time_point t1, t2; 32 | }; 33 | 34 | // progress_display --------------------------------------------------------// 35 | 36 | // progress_display displays an appropriate indication of 37 | // progress at an appropriate place in an appropriate form. 
38 | 39 | class progress_display { 40 | public: 41 | explicit progress_display(size_t expected_count_, 42 | std::ostream &os = std::cout, 43 | const std::string &s1 = "\n", // leading strings 44 | const std::string &s2 = "", 45 | const std::string &s3 = "") 46 | // os is hint; implementation may ignore, particularly in embedded systems 47 | : m_os(os), m_s1(s1), m_s2(s2), m_s3(s3) { 48 | restart(expected_count_); 49 | } 50 | 51 | void restart(size_t expected_count_) { 52 | // Effects: display appropriate scale 53 | // Postconditions: count()==0, expected_count()==expected_count_ 54 | _count = _next_tic_count = _tic = 0; 55 | _expected_count = expected_count_; 56 | 57 | m_os << m_s1 << "0% 10 20 30 40 50 60 70 80 90 100%\n" 58 | << m_s2 << "|----|----|----|----|----|----|----|----|----|----|" 59 | << std::endl // endl implies flush, which ensures display 60 | << m_s3; 61 | if (!_expected_count) _expected_count = 1; // prevent divide by zero 62 | } // restart 63 | 64 | size_t operator+=(size_t increment) { 65 | // Effects: Display appropriate progress tic if needed. 66 | // Postconditions: count()== original count() + increment 67 | // Returns: count(). 68 | if ((_count += increment) >= _next_tic_count) { 69 | display_tic(); 70 | } 71 | return _count; 72 | } 73 | 74 | size_t operator++() { return operator+=(1); } 75 | size_t count() const { return _count; } 76 | size_t expected_count() const { return _expected_count; } 77 | 78 | private: 79 | std::ostream &m_os; // may not be present in all imps 80 | const std::string m_s1; // string is more general, safer than 81 | const std::string m_s2; // const char *, and efficiency or size are 82 | const std::string m_s3; // not issues 83 | 84 | size_t _count, _expected_count, _next_tic_count; 85 | size_t _tic; 86 | void display_tic() { 87 | // use of floating point ensures that both large and small counts 88 | // work correctly. static_cast<>() is also used several places 89 | // to suppress spurious compiler warnings. 
90 | size_t tics_needed = static_cast( 91 | (static_cast(_count) / _expected_count) * 50.0); 92 | do { 93 | m_os << '*' << std::flush; 94 | } while (++_tic < tics_needed); 95 | _next_tic_count = static_cast((_tic / 50.0) * _expected_count); 96 | if (_count == _expected_count) { 97 | if (_tic < 51) m_os << '*'; 98 | m_os << std::endl; 99 | } 100 | } // display_tic 101 | 102 | progress_display &operator=(const progress_display &) = delete; 103 | }; 104 | 105 | } // namespace tiny_dnn 106 | 107 | #endif 108 | --------------------------------------------------------------------------------