├── .clang-format ├── .clang-tidy ├── .gitignore ├── CMakeLists.txt ├── LICENSE ├── README.md ├── cmake └── ClangTools.cmake ├── data ├── X_train_sample.csv └── y_train_sample.csv └── src ├── dataset_example.cpp ├── function_approx.cpp ├── hello_world.cpp ├── lstm_example.cpp ├── simple_optimization_example.cpp └── time_serie_prediction.cpp /.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: Chromium 2 | SortIncludes: true 3 | ColumnLimit: 100 4 | -------------------------------------------------------------------------------- /.clang-tidy: -------------------------------------------------------------------------------- 1 | Checks: 'clang-diagnostic-*,clang-analyzer-*,-clang-analyzer-alpha*,google-*,misc-*,readability-*,modernize-*,performance-*,-clang-diagnostic-deprecated-declarations,-modernize-pass-by-value,-clang-diagnostic-reinterpret-base-class,-clang-diagnostic-return-type,-clang-diagnostic-switch' 2 | HeaderFilterRegex: '' 3 | CheckOptions: 4 | # Classes, structs, ... 5 | - key: readability-identifier-naming.NamespaceCase 6 | value: lower_case 7 | - key: readability-identifier-naming.ClassCase 8 | value: CamelCase 9 | - key: readability-identifier-naming.StructCase 10 | value: CamelCase 11 | - key: readability-identifier-naming.EnumCase 12 | value: CamelCase 13 | - key: readability-identifier-naming.UnionCase 14 | value: CamelCase 15 | - key: readability-identifier-naming.TypedefCase 16 | value: CamelCase 17 | 18 | # Variables, member variables, ... 
19 | - key: readability-identifier-naming.ParameterCase 20 | value: lower_case 21 | - key: readability-identifier-naming.VariableCase 22 | value: lower_case 23 | - key: readability-identifier-naming.MemberCase 24 | value: lower_case 25 | - key: readability-identifier-naming.PublicMemberCase 26 | value: lower_case 27 | - key: readability-identifier-naming.ProtectedMemberCase 28 | value: lower_case 29 | - key: readability-identifier-naming.PrivateMemberCase 30 | value: lower_case 31 | - key: readability-identifier-naming.PrivateMemberSuffix 32 | value: _ 33 | 34 | # Functions, methods, ... 35 | - key: readability-identifier-naming.FunctionCase 36 | value: camelBack 37 | - key: readability-identifier-naming.MethodCase 38 | value: camelBack 39 | 40 | # Constants 41 | - key: readability-identifier-naming.ConstantPrefix 42 | value: k 43 | - key: readability-identifier-naming.ConstantCase 44 | value: CamelCase 45 | - key: readability-identifier-naming.ConstantMemberPrefix 46 | value: '' 47 | - key: readability-identifier-naming.ConstantMemberCase 48 | value: lower_case 49 | - key: readability-identifier-naming.ConstantParameterCase 50 | value: lower_case 51 | - key: readability-identifier-naming.ConstantParameterPrefix 52 | value: '' 53 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build/ -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.4) 2 | project(libtorch_examples) 3 | 4 | set(CMAKE_CXX_STANDARD 17) 5 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 6 | 7 | set(libtorch_VERSION 1.4.0) 8 | find_package(Torch ${libtorch_VERSION} EXACT QUIET CONFIG) 9 | if(NOT Torch_FOUND) 10 | message(STATUS "libtorch ${libtorch_VERSION} - not found") 11 | message(STATUS "Fetching libtorch") 12 | 
include(FetchContent) 13 | FetchContent_Declare( 14 | libtorch 15 | URL https://download.pytorch.org/libtorch/cu101/libtorch-cxx11-abi-shared-with-deps-${libtorch_VERSION}.zip 16 | SOURCE_DIR libtorch) 17 | FetchContent_GetProperties(libtorch) 18 | if(NOT libtorch_POPULATED) 19 | unset(FETCHCONTENT_QUIET CACHE) 20 | FetchContent_Populate(libtorch) 21 | list(APPEND CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/libtorch) 22 | endif() 23 | find_package(Torch ${libtorch_VERSION} EXACT CONFIG REQUIRED) 24 | else() 25 | message(STATUS "libtorch ${libtorch_VERSION} - found") 26 | endif() 27 | 28 | macro(add_example name) 29 | add_executable(${name} src/${name}.cpp) 30 | target_include_directories(${name} PRIVATE include) 31 | target_link_libraries(${name} ${TORCH_LIBRARIES}) 32 | endmacro() 33 | 34 | add_example(hello_world) 35 | add_example(simple_optimization_example) 36 | add_example(function_approx) 37 | add_example(time_serie_prediction) 38 | add_example(lstm_example) 39 | add_example(dataset_example) 40 | 41 | # Tools 42 | include(${CMAKE_CURRENT_LIST_DIR}/cmake/ClangTools.cmake OPTIONAL 43 | RESULT_VARIABLE CLANG_TOOLS) 44 | if(CLANG_TOOLS) 45 | file(GLOB_RECURSE SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) 46 | file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h 47 | ${CMAKE_CURRENT_SOURCE_DIR}/src/*.h) 48 | add_format_target(${PROJECT_NAME} FILES ${SOURCES} ${HEADERS}) 49 | add_tidy_target(${PROJECT_NAME} FILES ${SOURCES}) 50 | endif() 51 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2019, Zheng Qu 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. 
Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # libtorch_examples 2 | This repository contains examples of libtorch, which is C++ front end of PyTorch. 3 | * `hello_world.cpp`: A simple example of libtorch. 4 | * `function_approx.cpp`: A feedforward network based function approximator, which trains on `y = cos(x)`. 5 | 6 | ## Compilation 7 | 8 | - Download libtorch with cmake automatically 9 | ```bash 10 | mkdir build && cd build 11 | cmake .. 
-DCMAKE_BUILD_TYPE=Release && cmake --build . 12 | ``` 13 | 14 | - Use an existing local libtorch 15 | ```bash 16 | mkdir build && cd build 17 | cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=/absolute/path/to/libtorch && cmake --build . 18 | ``` 19 | -------------------------------------------------------------------------------- /cmake/ClangTools.cmake: -------------------------------------------------------------------------------- 1 | include(CMakeParseArguments) 2 | 3 | find_program(CLANG_FORMAT_PROG clang-format DOC "'clang-format' executable") 4 | if(CLANG_FORMAT_PROG AND NOT TARGET format) 5 | add_custom_target(format) 6 | add_custom_target(check-format) 7 | endif() 8 | 9 | find_program(CLANG_TIDY_PROG clang-tidy DOC "'clang-tidy' executable") 10 | if(CLANG_TIDY_PROG AND NOT TARGET tidy) 11 | if(NOT CMAKE_EXPORT_COMPILE_COMMANDS) 12 | message(WARNING "Invoke Catkin/CMake with '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON' 13 | to generate compilation database for 'clang-tidy'.") 14 | endif() 15 | add_custom_target(tidy) 16 | add_custom_target(check-tidy) 17 | endif() 18 | 19 | find_program(PARALLEL_PROG parallel DOC "'parallel' executable") 20 | if(PARALLEL_PROG) 21 | set(PARALLEL_DELIMITER ":::") 22 | else() 23 | message(WARNING "parallel is not installed. To enable parallelization of clang-tidy, try with 'sudo apt install parallel'.") 24 | set(PARALLEL_PROG "") 25 | set(PARALLEL_DELIMITER "") 26 | endif() 27 | 28 | function(add_format_target _target) 29 | if(NOT CLANG_FORMAT_PROG) 30 | return() 31 | endif() 32 | cmake_parse_arguments(ARG "" "" "FILES" ${ARGN}) 33 | 34 | add_custom_target(format-${_target} 35 | COMMAND ${CLANG_FORMAT_PROG} -i ${ARG_FILES} 36 | WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/..
37 | COMMENT "Formatting ${_target} source code with clang-format" 38 | VERBATIM 39 | ) 40 | add_dependencies(format format-${_target}) 41 | 42 | add_custom_target(check-format-${_target} 43 | COMMAND ${CLANG_FORMAT_PROG} -output-replacements-xml ${ARG_FILES} | grep " /tmp/log && exit 1 || exit 0 44 | WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/.. 45 | COMMENT "Checking ${_target} code formatting with clang-format" 46 | VERBATIM 47 | ) 48 | add_dependencies(check-format check-format-${_target}) 49 | endfunction() 50 | 51 | function(add_tidy_target _target) 52 | if(NOT CLANG_TIDY_PROG) 53 | return() 54 | endif() 55 | cmake_parse_arguments(ARG "" "" "FILES;DEPENDS" ${ARGN}) 56 | 57 | add_custom_target(tidy-${_target} 58 | COMMAND ${PARALLEL_PROG} ${CLANG_TIDY_PROG} -fix -p=${CMAKE_BINARY_DIR} ${PARALLEL_DELIMITER} ${ARG_FILES} 59 | WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/.. 60 | DEPENDS ${ARG_DEPENDS} 61 | COMMENT "Running clang-tidy for ${_target}" 62 | VERBATIM 63 | ) 64 | add_dependencies(tidy tidy-${_target}) 65 | 66 | add_custom_target(check-tidy-${_target} 67 | COMMAND ${PARALLEL_PROG} ${CLANG_TIDY_PROG} -p=${CMAKE_BINARY_DIR} ${PARALLEL_DELIMITER} ${ARG_FILES} | grep . && exit 1 || exit 0 68 | WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/.. 
69 | DEPENDS ${ARG_DEPENDS} 70 | COMMENT "Running clang-tidy for ${_target}" 71 | VERBATIM 72 | ) 73 | add_dependencies(check-tidy check-tidy-${_target}) 74 | endfunction() 75 | -------------------------------------------------------------------------------- /data/X_train_sample.csv: -------------------------------------------------------------------------------- 1 | row_id,series_id,measurement_number,orientation_X,orientation_Y,orientation_Z,orientation_W,angular_velocity_X,angular_velocity_Y,angular_velocity_Z,linear_acceleration_X,linear_acceleration_Y,linear_acceleration_Z 2 | 0_0,0,0,-0.025773,-0.98864,-0.14801,0.00335,-0.0065237,-0.0010714,-0.02739,0.10043,4.2061,-5.5439 3 | 0_1,0,1,-0.025683,-0.98862,-0.14816,0.003439,-0.11396,0.083987,-0.06059,-0.70889,3.9905,-8.0273 4 | -------------------------------------------------------------------------------- /data/y_train_sample.csv: -------------------------------------------------------------------------------- 1 | series_id,group_id,surface 2 | 0,13,fine_concrete 3 | -------------------------------------------------------------------------------- /src/dataset_example.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | template 9 | std::ostream& operator<<(std::ostream& o, std::vector data) { 10 | std::copy(data.cbegin(), data.cend(), std::ostream_iterator(o, " ")); 11 | return o; 12 | } 13 | 14 | // Parses a token from a csv line 15 | template 16 | auto parse_token(std::istringstream& ss, char sep = ',') -> T { 17 | T result; 18 | std::string token; 19 | std::getline(ss, token, sep); 20 | std::stringstream stoken(token); 21 | stoken >> result; 22 | return result; 23 | } 24 | 25 | // Loads a csv file 26 | template 27 | auto load_csv_file(const std::string& csv_path, bool has_header = true) -> std::vector { 28 | std::vector items; 29 | std::ifstream data(csv_path); 30 | std::string line; 31 | if 
(has_header) 32 | std::getline(data, line); 33 | while (std::getline(data, line)) { 34 | items.emplace_back(line); 35 | } 36 | return items; 37 | } 38 | 39 | // Custom sensor data 40 | struct SensorData { 41 | SensorData() = default; 42 | SensorData(const std::string& csv_line) { 43 | std::istringstream iss(csv_line); 44 | row_id = parse_token(iss); 45 | series_id = parse_token(iss); 46 | measurement_number = parse_token(iss); 47 | while (!iss.eof()) { 48 | sensor_data.push_back(parse_token(iss)); 49 | } 50 | } 51 | std::string row_id; 52 | int series_id; 53 | int measurement_number; 54 | std::vector sensor_data; 55 | 56 | torch::Tensor toTensor() const { 57 | return torch::tensor(torch::ArrayRef(sensor_data.data(), sensor_data.size())).clone(); 58 | } 59 | }; 60 | 61 | struct FloorType { 62 | FloorType() = default; 63 | FloorType(const std::string& csv_line) { 64 | std::istringstream iss(csv_line); 65 | series_id = parse_token(iss); 66 | group_id = parse_token(iss); 67 | surface = parse_token(iss); 68 | }; 69 | 70 | torch::Tensor toTensor(const std::vector& surfaces) const { 71 | auto iter = std::find(surfaces.cbegin(), surfaces.end(), surface); 72 | if (iter == surfaces.end()) { 73 | throw std::logic_error("the surfaces must contain the FloorType::surface"); 74 | } 75 | long id = iter - surfaces.begin(); 76 | return torch::one_hot(torch::tensor({id}, torch::TensorOptions(torch::kLong)), surfaces.size()); 77 | }; 78 | 79 | int series_id; 80 | int group_id; 81 | std::string surface; 82 | }; 83 | 84 | std::ostream& operator<<(std::ostream& o, const SensorData& d) { 85 | o << "row_id: " << d.row_id << ", series_id: " << d.series_id 86 | << ", measurement_number: " << d.measurement_number << ", sensor_data: " << d.sensor_data 87 | << std::endl; 88 | return o; 89 | } 90 | 91 | class CustomDataset : torch::data::Dataset { 92 | public: 93 | CustomDataset(const std::string& x_train_csv, const std::string& y_train_csv) { 94 | // read csv file to sensor_data_ and 
floor_types_; 95 | auto sensor_data = load_csv_file(x_train_csv); 96 | auto floor_types = load_csv_file(y_train_csv);  // NOTE(review): these parsed locals are discarded — sensor_data_ and floor_types_ are never populated, so get()/size() operate on an empty dataset; TODO: convert the parsed rows and assign them to the members 97 | }; 98 | 99 | torch::data::Example<> get(size_t index) override { 100 | return {sensor_data_.at(index).clone(), floor_types_.at(index).clone()}; 101 | }; 102 | 103 | torch::optional size() const override { return floor_types_.size(); }; 104 | 105 | private: 106 | std::vector sensor_data_; 107 | std::vector floor_types_; 108 | }; 109 | 110 | int main(int argc, char* argv[]) { 111 | /* 112 | argv[1] path to X_train.csv 113 | argv[2] path to y_train.csv 114 | */ 115 | 116 | auto x_train_raw = load_csv_file(argv[1]); 117 | auto y_train_raw = load_csv_file(argv[2]); 118 | std::cout << "sensor data number: " << x_train_raw.size() << std::endl; 119 | std::cout << "floor types number: " << y_train_raw.size() << std::endl; 120 | 121 | std::vector floor_types; 122 | for_each(y_train_raw.cbegin(), y_train_raw.cend(), [&floor_types](const FloorType& d) { 123 | if (std::find(floor_types.cbegin(), floor_types.cend(), d.surface) == floor_types.end()) { 124 | floor_types.push_back(d.surface); 125 | } 126 | }); 127 | std::sort(floor_types.begin(), floor_types.end()); 128 | std::cout << "floor types: " << floor_types << std::endl; 129 | // NOTE(review): the .at(1) and .at(4) accesses below assume y_train.csv has at least 5 rows — the bundled data/y_train_sample.csv has only 1 row, which would throw std::out_of_range 130 | std::cout << y_train_raw.at(0).surface << ": " << y_train_raw.at(0).toTensor(floor_types) 131 | << std::endl; 132 | std::cout << y_train_raw.at(1).surface << ": " << y_train_raw.at(1).toTensor(floor_types) 133 | << std::endl; 134 | std::cout << y_train_raw.at(4).surface << ": " << y_train_raw.at(4).toTensor(floor_types) 135 | << std::endl; 136 | return 0; 137 | } 138 | -------------------------------------------------------------------------------- /src/function_approx.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | class OneDimMappingDataset : public torch::data::Dataset { 6 | private: 7 | size_t size_; 8 | double x_min_; 9 | double x_max_;
10 | 11 | public: 12 | explicit OneDimMappingDataset(const size_t size, 13 | const double x_min = -2 * M_PI, 14 | const double x_max = 2 * M_PI) 15 | : size_(size), x_min_(x_min), x_max_(x_max){}; 16 | torch::data::Example<> get(size_t index) override { 17 | torch::Tensor state = torch::rand(1) * (x_max_ - x_min_) + x_min_; 18 | torch::Tensor label = torch::ones(1) * std::cos(state[0].item()); 19 | return {state, label}; 20 | }; 21 | 22 | torch::optional size() const override { return size_; } 23 | }; 24 | 25 | int main(int /*argc*/, char* /*argv*/[]) { 26 | const bool kRestoreFromCheckpoint = true; 27 | const std::string kCheckPointFile = "cos-func-approx-checkpoint.pt"; 28 | const size_t kEpochSize = 1000; 29 | const size_t kBatchSize = 1000; 30 | const int64_t kLogInterval = 10; 31 | const int64_t kCheckpointEvery = 10000; 32 | 33 | // Use GPU when present, CPU otherwise. 34 | torch::Device device(torch::kCPU); 35 | if (torch::cuda::is_available()) { 36 | device = torch::Device(torch::kCUDA); 37 | std::cout << "CUDA is available! Training on GPU." << std::endl; 38 | } 39 | 40 | // Generate a dataset 41 | auto data_set = OneDimMappingDataset(100000).map(torch::data::transforms::Stack<>()); 42 | 43 | const size_t kDataSize = data_set.size().value(); 44 | const int64_t kBatchesPerEpoch = std::ceil(kDataSize / static_cast(kBatchSize)); 45 | 46 | // Generate a data loader. 
47 | auto data_loader = torch::data::make_data_loader( 48 | std::move(data_set), kBatchSize); 49 | 50 | // Define network 51 | torch::nn::Sequential func_approximator( 52 | torch::nn::Linear(torch::nn::LinearOptions(1, 100).bias(true)), 53 | torch::nn::Functional(torch::leaky_relu, 0.2), 54 | torch::nn::Linear(torch::nn::LinearOptions(100, 10).bias(true)), 55 | torch::nn::Functional(torch::leaky_relu, 0.2), 56 | torch::nn::Linear(torch::nn::LinearOptions(10, 1).bias(true)), 57 | torch::nn::Functional(torch::tanh)); 58 | func_approximator->to(device); 59 | 60 | // Define Optimizer 61 | torch::optim::Adam optimizer(func_approximator->parameters(), 62 | torch::optim::AdamOptions(2e-4).beta1(0.5)); 63 | 64 | if (kRestoreFromCheckpoint) { 65 | try { 66 | torch::load(func_approximator, kCheckPointFile); 67 | std::cout << kCheckPointFile << " loaded. Continue with training on the loaded weights." 68 | << std::endl; 69 | } catch (const c10::Error e) { 70 | std::cout << "Warning: " << e.msg_without_backtrace() << std::endl; 71 | std::cout << "Start training from beginning." << std::endl; 72 | } 73 | } 74 | 75 | size_t epoch_idx = 0; 76 | while (epoch_idx < kEpochSize) { 77 | size_t batch_index = 0; 78 | for (torch::data::Example<>& batch : *data_loader) { 79 | // Train discriminator with real images. 
80 | func_approximator->zero_grad(); 81 | auto data = batch.data.to(device); 82 | auto labels = batch.target.to(device); 83 | 84 | torch::Tensor real_output = func_approximator->forward(data); 85 | torch::Tensor d_loss_real = torch::mse_loss(real_output, labels); 86 | d_loss_real.backward(); 87 | optimizer.step(); 88 | 89 | if (batch_index % kLogInterval == 0) { 90 | std::printf("\r[%2ld/%2ld][%3ld/%3ld] loss: %.6f \n", epoch_idx, kEpochSize, batch_index, 91 | kBatchesPerEpoch, d_loss_real.item()); 92 | /* 93 | auto test_x = -2 * M_PI + torch::rand(1) * 4 * M_PI; 94 | auto test_y = func_approximator->forward(test_x.toBackend(c10::Backend::CUDA)); 95 | std::printf("x = %.5f, target y = %.5f, predicted y = %.5f\n ", test_x[0].item(), 96 | std::cos(test_x[0].item()), test_y[0].item()); 97 | */ 98 | } 99 | 100 | if (batch_index % kCheckpointEvery == 0) { 101 | // Checkpoint the model and optimizer state. 102 | torch::save(func_approximator, kCheckPointFile); 103 | } 104 | batch_index++; 105 | } 106 | epoch_idx++; 107 | } 108 | 109 | return 0; 110 | } 111 | -------------------------------------------------------------------------------- /src/hello_world.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | template 5 | void pretty_print(const std::string& info, T&& data) { 6 | std::cout << info << std::endl; 7 | std::cout << data << std::endl << std::endl; 8 | } 9 | 10 | int main() { 11 | // Create an eye tensor 12 | torch::Tensor tensor = torch::eye(3); 13 | pretty_print("Eye tensor: ", tensor); 14 | 15 | // Tensor view is like reshape in numpy, which changes the dimension representation of the tensor 16 | // without touching its underlying memory structure. 
17 | tensor = torch::range(1, 9, 1); 18 | pretty_print("Tensor range 1x9: ", tensor); 19 | pretty_print("Tensor view 3x3: ", tensor.view({3, 3})); 20 | pretty_print("Tensor view 3x3 with D0 and D1 transposed: ", tensor.view({3, 3}).transpose(0, 1)); 21 | tensor = torch::range(1, 27, 1); 22 | pretty_print("Tensor range 1x27: ", tensor); 23 | pretty_print("Tensor view 3x3x3: ", tensor.view({3, 3, 3})); 24 | pretty_print("Tensor view 3x3x3 with D0 and D1 transposed: ", 25 | tensor.view({3, 3, 3}).transpose(0, 1)); 26 | pretty_print("Tensor view 3x1x9: ", tensor.view({3, 1, -1})); 27 | } 28 | -------------------------------------------------------------------------------- /src/lstm_example.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #define INPUTS 1 6 | #define SEQUENCE 3 7 | #define BATCH 1 8 | #define LAYERS 3 9 | #define HIDDEN 2 10 | #define DIRECTIONS 2 11 | #define OUTPUTS 1 12 | 13 | struct BLSTM_Model : torch::nn::Module { 14 | torch::nn::LSTM lstm{nullptr}; 15 | torch::nn::LSTM reverse_lstm{nullptr}; 16 | torch::nn::Linear linear{nullptr}; 17 | 18 | BLSTM_Model(uint64_t layers, uint64_t hidden, uint64_t inputs) { 19 | lstm = register_module("lstm", 20 | torch::nn::LSTM(torch::nn::LSTMOptions(inputs, hidden).layers(layers))); 21 | reverse_lstm = register_module( 22 | "rlstm", torch::nn::LSTM(torch::nn::LSTMOptions(inputs, hidden).layers(layers))); 23 | linear = register_module("linear", torch::nn::Linear(hidden * DIRECTIONS, OUTPUTS)); 24 | } 25 | 26 | torch::Tensor forward(torch::Tensor x) { 27 | // Reverse and feed into LSTM + Reversed LSTM 28 | auto lstm1 = lstm->forward(x.view({x.size(0), BATCH, -1})); 29 | //[SEQUENCE,BATCH,FEATURE] 30 | auto lstm2 = reverse_lstm->forward(torch::flip(x, 0).view({x.size(0), BATCH, -1})); 31 | // Reverse Output from Reversed LSTM + Combine Outputs into one Tensor 32 | auto cat = torch::empty({DIRECTIONS, BATCH, x.size(0), HIDDEN}); 33 | 
//[DIRECTIONS,BATCH,SEQUENCE,FEATURE] 34 | cat[0] = lstm1.output.view({BATCH, x.size(0), HIDDEN}); 35 | cat[1] = torch::flip(lstm2.output.view({BATCH, x.size(0), HIDDEN}), 1); 36 | // Feed into Linear Layer 37 | auto out = torch::sigmoid(linear->forward(cat.view({BATCH, x.size(0), HIDDEN * DIRECTIONS}))); 38 | //[BATCH,SEQUENCE,FEATURE] 39 | return out; 40 | } 41 | }; 42 | 43 | int main() { 44 | // Input: 0.1, 0.2, 0.3 -> Expected Output: 0.4, 0.5, 0.6 45 | BLSTM_Model model = BLSTM_Model(LAYERS, HIDDEN, INPUTS); 46 | torch::optim::Adam optimizer(model.parameters(), torch::optim::AdamOptions(0.0001)); 47 | // Input 48 | torch::Tensor input = torch::empty({SEQUENCE, INPUTS}); 49 | auto input_acc = input.accessor(); 50 | size_t count = 0; 51 | for (float i = 0.1; i < 0.4; i += 0.1) { 52 | input_acc[count][0] = i; 53 | count++; 54 | } 55 | // Target 56 | torch::Tensor target = torch::empty({SEQUENCE, OUTPUTS}); 57 | auto target_acc = target.accessor(); 58 | count = 0; 59 | for (float i = 0.4; i < 0.7; i += 0.1) { 60 | target_acc[count][0] = i; 61 | count++; 62 | } 63 | // Train 64 | for (size_t i = 0; i < 6000; i++) { 65 | torch::Tensor output = model.forward(input); 66 | auto loss = torch::mse_loss(output.view({SEQUENCE, OUTPUTS}), target); 67 | std::cout << "Loss " << i << " : " << loss.item() << std::endl; 68 | optimizer.zero_grad(); loss.backward(); // zero_grad() before backward(): without it, gradients accumulate across all 6000 iterations 69 | optimizer.step(); 70 | } 71 | // Test: Response should be about (0.4, 0.5, 0.6) 72 | torch::Tensor output = model.forward(input); 73 | std::cout << output << std::endl; 74 | return EXIT_SUCCESS; 75 | } 76 | -------------------------------------------------------------------------------- /src/simple_optimization_example.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | constexpr double kLearningRate = 0.001; 6 | constexpr int kMaxIterations = 100000; 7 | 8 | void native_run(double minimal) { 9 | // Initial x value 10 | auto x = torch::randn({1, 1},
torch::requires_grad(true)); 11 | 12 | for (size_t t = 0; t < kMaxIterations; t++) { 13 | // Expression/value to be minimized 14 | auto y = (x - minimal) * (x - minimal); 15 | if (y.item() < 1e-3) { 16 | break; 17 | } 18 | // Calculate gradient 19 | y.backward(); 20 | 21 | // Step x value without considering gradient 22 | torch::NoGradGuard no_grad_guard; 23 | x -= kLearningRate * x.grad(); 24 | 25 | // Reset the gradient of variable x 26 | x.grad().reset(); 27 | } 28 | 29 | std::cout << "[native] Actual minimal x value: " << minimal 30 | << ", calculated optimal x value: " << x.item() << std::endl; 31 | } 32 | 33 | void optimizer_run(double minimal) { 34 | // Initial x value 35 | std::vector x; 36 | x.push_back(torch::randn({1, 1}, torch::requires_grad(true))); 37 | auto opt = torch::optim::SGD(x, torch::optim::SGDOptions(kLearningRate)); 38 | 39 | for (size_t t = 0; t < kMaxIterations; t++) { 40 | // Expression/value to be minimized 41 | auto y = (x[0] - minimal) * (x[0] - minimal); 42 | if (y.item() < 1e-3) { 43 | break; 44 | } 45 | // Calculate gradient 46 | y.backward(); 47 | 48 | // Step x value without considering gradient 49 | opt.step(); 50 | // Reset the gradient of variable x 51 | opt.zero_grad(); 52 | } 53 | 54 | std::cout << "[optimizer] Actual minimal x value: " << minimal 55 | << ", calculated optimal x value: " << x[0].item() << std::endl; 56 | } 57 | 58 | // optimize y = (x - 10)^2 59 | int main(int argc, char* argv[]) { 60 | if (argc < 2) { 61 | std::cout << "Usage: " << argv[0] << " minimal_value\n"; 62 | return 1; 63 | } 64 | native_run(atof(argv[1])); 65 | optimizer_run(atof(argv[1])); 66 | return 0; 67 | } 68 | -------------------------------------------------------------------------------- /src/time_serie_prediction.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | template 4 | void pretty_print(const std::string& info, T&& data) { 5 | std::cout << info << std::endl; 6 | std::cout << 
data << std::endl << std::endl; 7 | } 8 | 9 | int main(int /*argc*/, char* /*argv*/[]) { 10 | // Use GPU when present, CPU otherwise. 11 | torch::Device device(torch::kCPU); 12 | if (torch::cuda::is_available()) { 13 | device = torch::Device(torch::kCUDA); 14 | std::cout << "CUDA is available! Training on GPU." << std::endl; 15 | } 16 | 17 | const size_t kSequenceLen = 1; 18 | const size_t kInputDim = 1; 19 | const size_t kHiddenDim = 5; 20 | const size_t kOuputDim = 1; 21 | auto time_serie_detector = torch::nn::LSTM(torch::nn::LSTMOptions(kInputDim, kHiddenDim) 22 | .dropout(0.2) 23 | .layers(kSequenceLen) 24 | .bidirectional(false)); 25 | time_serie_detector->to(device); 26 | std::cout << time_serie_detector << std::endl; 27 | 28 | torch::Tensor input = torch::empty({kSequenceLen, kInputDim}); 29 | torch::Tensor state = torch::zeros({2, kSequenceLen, kHiddenDim}); 30 | auto input_acc = input.accessor(); 31 | size_t count = 0; 32 | for (float i = 0.1; i < 0.4; i += 0.1) { 33 | input_acc[count][0] = i; 34 | count++; 35 | } 36 | input = input.to(device);  // was toBackend(c10::Backend::CUDA): crashed on CPU-only machines despite the device check above 37 | state = state.to(device); 38 | std::cout << "input = " << input << std::endl; 39 | time_serie_detector->zero_grad(); 40 | 41 | auto i_tmp = input.view({input.size(0), 1, -1}); 42 | auto s_tmp = state.view({2, state.size(0) / 2, 1, -1}); 43 | 44 | pretty_print("input: ", i_tmp); 45 | pretty_print("state: ", s_tmp); 46 | 47 | auto rnn_output = time_serie_detector->forward(i_tmp, s_tmp); 48 | pretty_print("rnn_output/output: ", rnn_output.output); 49 | pretty_print("rnn_output/state: ", rnn_output.state); 50 | 51 | return 0; 52 | } 53 | --------------------------------------------------------------------------------