├── .gitmodules ├── cleanup ├── configure.win ├── tools ├── .gitignore └── winlibs.R ├── .github ├── .gitignore └── workflows │ ├── extra-checks.yaml │ ├── pkgdown.yaml │ ├── check.yaml │ └── test-coverage.yaml ├── vignettes ├── .gitignore ├── examples │ ├── .gitignore │ ├── img │ │ └── torch.png │ └── torch.Rmd └── articles │ ├── .gitignore │ ├── read.Rmd │ ├── hparams.Rmd │ └── images.Rmd ├── .gitattributes ├── LICENSE ├── cran-comments.md ├── R ├── utils-pipe.R ├── package.R ├── event_writer.R ├── utils.R ├── RcppExports.R ├── text.R ├── scalar.R ├── audio.R ├── histogram.R ├── tensor.R ├── log.R ├── image.R ├── events.R └── read.R ├── src ├── .gitignore ├── tfevents_types.h ├── na.h ├── Makevars.in ├── record_writer.h ├── code.cpp ├── utils.h ├── reader.h ├── event_writer.h ├── write.cpp ├── crc32c.h ├── event_writer.cpp ├── proto │ ├── plugins │ │ ├── text │ │ │ └── plugin_data.proto │ │ ├── scalar │ │ │ └── plugin_data.proto │ │ ├── histogram │ │ │ └── plugin_data.proto │ │ ├── hparams │ │ │ ├── hparams_util.proto │ │ │ └── plugin_data.proto │ │ ├── image │ │ │ └── plugin_data.proto │ │ └── audio │ │ │ └── plugin_data.proto │ ├── resource_handle.proto │ ├── tensor_shape.proto │ ├── types.proto │ ├── tensor.proto │ ├── event.proto │ └── summary.proto ├── Makevars.win ├── reader.cpp ├── utils.cpp ├── record_writer.cpp ├── RcppExports.cpp ├── tensor.h ├── wrap.h └── as.h ├── codecov.yml ├── tests ├── testthat │ ├── resources │ │ ├── img.png │ │ └── test-audio.wav │ ├── helper-tbparse.R │ ├── test-events.R │ ├── test-text.R │ ├── test-histogram.R │ ├── test-tensor.R │ ├── test-audio.R │ ├── test-log.R │ ├── test-hparams.R │ ├── test-image.R │ └── test-read.R └── testthat.R ├── man ├── figures │ └── README-tensorboard-1.png ├── event.Rd ├── as_event.Rd ├── summary_metadata.Rd ├── summary_scalar.Rd ├── log_event.Rd ├── get_global_step.Rd ├── summary_text.Rd ├── get_default_logdir.Rd ├── hparams_metric.Rd ├── value.Rd ├── hparams_hparam.Rd ├── log_hparams.Rd ├── 
collect_events.Rd ├── summary_audio.Rd ├── log_hparams_config.Rd ├── summary_histogram.Rd └── summary_image.Rd ├── .gitignore ├── NEWS.md ├── tfevents.Rproj ├── .Rbuildignore ├── _pkgdown.yml ├── LICENSE.md ├── DESCRIPTION ├── NAMESPACE ├── README.md ├── README.Rmd └── configure /.gitmodules: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cleanup: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /configure.win: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tools/.gitignore: -------------------------------------------------------------------------------- 1 | debug.R 2 | -------------------------------------------------------------------------------- /.github/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | -------------------------------------------------------------------------------- /vignettes/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | *.R 3 | -------------------------------------------------------------------------------- /vignettes/examples/.gitignore: -------------------------------------------------------------------------------- 1 | mnist 2 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | inst/tp/* linguist-vendored 2 | -------------------------------------------------------------------------------- /vignettes/articles/.gitignore: -------------------------------------------------------------------------------- 1 | webshot.png 2 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | YEAR: 2022 2 | COPYRIGHT HOLDER: tfevents authors 3 | -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | Re-submission fixing examples for CRAN-UBSAN checks. 2 | -------------------------------------------------------------------------------- /R/utils-pipe.R: -------------------------------------------------------------------------------- 1 | #' @keywords internal 2 | #' @importFrom zeallot %<-% 3 | NULL 4 | -------------------------------------------------------------------------------- /src/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | *.so 3 | *.dll 4 | generated 5 | build 6 | libs 7 | Makevars 8 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | - inst/tp 3 | - src/generated 4 | - src/zlib 5 | - src/crc32c 6 | -------------------------------------------------------------------------------- /tests/testthat/resources/img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlverse/tfevents/HEAD/tests/testthat/resources/img.png -------------------------------------------------------------------------------- /vignettes/examples/img/torch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlverse/tfevents/HEAD/vignettes/examples/img/torch.png -------------------------------------------------------------------------------- /man/figures/README-tensorboard-1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlverse/tfevents/HEAD/man/figures/README-tensorboard-1.png -------------------------------------------------------------------------------- /tests/testthat/resources/test-audio.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mlverse/tfevents/HEAD/tests/testthat/resources/test-audio.wav -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .Rdata 4 | .httr-oauth 5 | .DS_Store 6 | .vscode 7 | logs 8 | docs 9 | configure.log 10 | windows 11 | -------------------------------------------------------------------------------- /src/tfevents_types.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include "event_writer.h" 3 | #include "reader.h" 4 | #include "tensor.h" 5 | #include "as.h" 6 | #include "wrap.h" 7 | #include "hparams.h" 8 | 9 | -------------------------------------------------------------------------------- /src/na.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | static bool r_is_na (SEXP x) { 4 | static auto pkg = Rcpp::Environment::namespace_env("tfevents"); 5 | auto isna = Rcpp::Function(pkg["is_na"]); 6 | return Rcpp::as(isna(x)); 7 | } 8 | -------------------------------------------------------------------------------- /R/package.R: -------------------------------------------------------------------------------- 1 | ## usethis namespace: start 2 | #' @importFrom Rcpp sourceCpp 3 | ## usethis namespace: end 4 | NULL 5 | 6 | ## usethis namespace: start 7 | #' @useDynLib tfevents, .registration = TRUE 8 | ## usethis namespace: end 9 | NULL 10 | 
-------------------------------------------------------------------------------- /src/Makevars.in: -------------------------------------------------------------------------------- 1 | PKG_CPPFLAGS=@cflags@ -Igenerated 2 | PKG_CXXFLAGS=$(C_VISIBILITY) 3 | PKG_LIBS=@libs@ 4 | 5 | CPP_SOURCES=@cppsrc@ 6 | CC_SOURCES=@pbsrc@ 7 | C_SOURCES=@csrc@ 8 | 9 | OBJECTS=$(CPP_SOURCES:.cpp=.o) $(CC_SOURCES:.cc=.o) $(C_SOURCES:.c=.o) 10 | -------------------------------------------------------------------------------- /src/record_writer.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | class RecordWriter { 4 | public: 5 | std::string path; 6 | std::ofstream writer; 7 | RecordWriter (std::string path); 8 | ~RecordWriter (); 9 | bool write_record (std::string data); 10 | void flush(); 11 | }; 12 | -------------------------------------------------------------------------------- /R/event_writer.R: -------------------------------------------------------------------------------- 1 | create_event_writer <- function(logdir) { 2 | fs::dir_create(logdir, recurse = TRUE) 3 | fname <- file.path( 4 | logdir, 5 | paste0("events.out.tfevents.", as.integer(Sys.time()), ".v2") 6 | ) 7 | fs::file_create(fname) 8 | event_writer(fname) 9 | } 10 | -------------------------------------------------------------------------------- /src/code.cpp: -------------------------------------------------------------------------------- 1 | #include "generated/event.pb.h" 2 | #include "generated/summary.pb.h" 3 | #include "plugins/scalar/plugin_data.pb.h" 4 | #include "event_writer.h" 5 | #include "generated/tensor.pb.h" 6 | #include "generated/tensor_shape.pb.h" 7 | #include "utils.h" 8 | 9 | #include 10 | 11 | -------------------------------------------------------------------------------- /tests/testthat/helper-tbparse.R: -------------------------------------------------------------------------------- 1 | skip_if_tbparse_not_available <- function() { 2 | 
skip_if(inherits(try(reticulate::import("tbparse"), silent = TRUE), "try-error")) 3 | } 4 | 5 | if (inherits(try(reticulate::import("tbparse"), silent = TRUE), "try-error")) { 6 | tbparse <- NULL 7 | } else { 8 | tbparse <- reticulate::import("tbparse") 9 | } 10 | -------------------------------------------------------------------------------- /src/utils.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include "generated/plugins/scalar/plugin_data.pb.h" 3 | #include "generated/summary.pb.h" 4 | #include 5 | #include "generated/plugins/hparams/plugin_data.pb.h" 6 | 7 | long get_wall_time (); 8 | tensorboard::SummaryMetadata::PluginData make_plugin_data (std::string plugin_name, 9 | SEXP plugin_content); 10 | -------------------------------------------------------------------------------- /src/reader.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include "generated/summary.pb.h" 6 | 7 | class EventFileIterator { 8 | public: 9 | std::ifstream file; 10 | std::uint64_t current_pos = 0; 11 | std::string path; 12 | std::string run_name; 13 | EventFileIterator (const std::string& path, const std::string& run_name); 14 | tensorboard::Event get_next (); 15 | }; 16 | 17 | -------------------------------------------------------------------------------- /NEWS.md: -------------------------------------------------------------------------------- 1 | # tfevents (development version) 2 | 3 | # tfevents 0.0.4 4 | 5 | # tfevents 0.0.3 6 | 7 | * Fixed GCC 13 compilation issues. (#44) 8 | * Added support for arm64 Windows. (#42) 9 | * Removed usage of deprecated `rlang::squash_if`. 10 | 11 | # tfevents 0.0.2 12 | 13 | * Fix CRAN warnings for GCC13 and UBSAN issues. 14 | 15 | # tfevents 0.0.1 16 | 17 | * Added a `NEWS.md` file to track changes to the package. 
18 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | # This file is part of the standard setup for testthat. 2 | # It is recommended that you do not modify it. 3 | # 4 | # Where should you do additional test configuration? 5 | # Learn more about the roles of various files in: 6 | # * https://r-pkgs.org/tests.html 7 | # * https://testthat.r-lib.org/reference/test_package.html#special-files 8 | 9 | library(testthat) 10 | library(tfevents) 11 | 12 | test_check("tfevents") 13 | -------------------------------------------------------------------------------- /tfevents.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: No 4 | SaveWorkspace: No 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | LineEndingConversion: Posix 18 | 19 | BuildType: Package 20 | PackageUseDevtools: Yes 21 | PackageInstallArgs: --no-multiarch --with-keep.source 22 | PackageRoxygenize: rd,collate,namespace 23 | -------------------------------------------------------------------------------- /src/event_writer.h: -------------------------------------------------------------------------------- 1 | #include "record_writer.h" 2 | #include "generated/event.pb.h" 3 | 4 | // Prefix of version string present in the first entry of every event file. 
5 | static constexpr const char* kVersionPrefix = "brain.Event:"; 6 | static constexpr const int kCurrentVersion = 2; 7 | 8 | class EventWriter { 9 | public: 10 | std::unique_ptr record_writer; 11 | EventWriter(const std::string& file); 12 | ~EventWriter(); 13 | bool write_event(const tensorboard::Event& event); 14 | void flush(); 15 | }; 16 | -------------------------------------------------------------------------------- /tests/testthat/test-events.R: -------------------------------------------------------------------------------- 1 | test_that("Can't have untagged summaries", { 2 | 3 | temp <- tempfile() 4 | 5 | expect_error(with_logdir(temp, { 6 | log_event(1) 7 | }), regexp = "must have a tag") 8 | 9 | expect_error(with_logdir(temp, { 10 | log_event(summary_scalar(1)) 11 | }), regexp = "must have a tag") 12 | 13 | expect_error(with_logdir(temp, { 14 | log_event(summary_scalar(1, tag = "hello")) 15 | }), regexp = NA) 16 | 17 | scalars <- collect_events(temp, type = "scalar") 18 | expect_equal(scalars$tag, "hello") 19 | 20 | }) 21 | -------------------------------------------------------------------------------- /src/write.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "generated/event.pb.h" 3 | #include "event_writer.h" 4 | 5 | // [[Rcpp::export]] 6 | Rcpp::XPtr event_writer (std::string file) { 7 | return Rcpp::XPtr(new EventWriter(file)); 8 | } 9 | 10 | 11 | // [[Rcpp::export]] 12 | bool write_events (std::vector events, Rcpp::List writers) { 13 | for(size_t i = 0; i < events.size(); i++) { 14 | auto writer = Rcpp::as>(writers[i]); 15 | auto event = events[i]; 16 | writer->write_event(events[i]); 17 | } 18 | return true; 19 | } 20 | -------------------------------------------------------------------------------- /R/utils.R: -------------------------------------------------------------------------------- 1 | map2 <- function(x, f, ...) 
{ 2 | out <- vector(mode="list") 3 | nms <- names2(x) 4 | # because of unnamed object, and the possibility of duplicated names 5 | # we iterate via integer indexes and then re-add the names. 6 | for(i in seq_along(nms)) { 7 | out[[i]] <- f(x[[i]], nms[i], ...) 8 | } 9 | names(out) <- nms 10 | out 11 | } 12 | 13 | # Unnamed object will now have names="" for all obejcts. 14 | # this is similar to the approach of `names(c(1, x = 2))` 15 | # that makes the name for the first element an empty character. 16 | names2 <- function(x) { 17 | if (is.null(names(x))) { 18 | return(rep("", length(x))) 19 | } else { 20 | names(x) 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /R/RcppExports.R: -------------------------------------------------------------------------------- 1 | # Generated by using Rcpp::compileAttributes() -> do not edit by hand 2 | # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 3 | 4 | create_event_file_iterator <- function(path, run_name) { 5 | .Call(`_tfevents_create_event_file_iterator`, path, run_name) 6 | } 7 | 8 | event_file_iterator_next <- function(iter) { 9 | .Call(`_tfevents_event_file_iterator_next`, iter) 10 | } 11 | 12 | get_wall_time <- function() { 13 | .Call(`_tfevents_get_wall_time`) 14 | } 15 | 16 | event_writer <- function(file) { 17 | .Call(`_tfevents_event_writer`, file) 18 | } 19 | 20 | write_events <- function(events, writers) { 21 | .Call(`_tfevents_write_events`, events, writers) 22 | } 23 | 24 | -------------------------------------------------------------------------------- /src/crc32c.h: -------------------------------------------------------------------------------- 1 | // crc32c.h -- header for crc32c.c 2 | // Copyright (C) 2015 Mark Adler 3 | // See crc32c.c for the license. 4 | 5 | #include 6 | 7 | // Return the CRC-32C of buf[0..len-1] given the starting CRC crc. 
This can be 8 | // used to calculate the CRC of a sequence of bytes a chunk at a time, using 9 | // the previously returned crc in the next call. The first call must be with 10 | // crc == 0. crc32c() uses the Intel crc32 hardware instruction if available. 11 | uint32_t crc32c(uint32_t crc, void const *buf, size_t len); 12 | 13 | // crc32c_sw() is the same, but does not use the hardware instruction, even if 14 | // available. 15 | uint32_t crc32c_sw(uint32_t crc, void const *buf, size_t len); 16 | -------------------------------------------------------------------------------- /.github/workflows/extra-checks.yaml: -------------------------------------------------------------------------------- 1 | name: Extra CRAN checks 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | inpcont: 7 | description: 'Check container' 8 | required: true 9 | type: choice 10 | options: 11 | - 'atlas' 12 | - 'gcc13' 13 | - 'nold' 14 | - 'clang-asan' 15 | 16 | jobs: 17 | check: 18 | runs-on: ubuntu-latest 19 | container: 20 | image: ghcr.io/r-hub/containers/${{ github.event.inputs.inpcont }}:latest 21 | 22 | steps: 23 | - uses: actions/checkout@v4 24 | - name: Install dependencies 25 | run: | 26 | R -q -e 'pak::pkg_install(c("deps::.", "any::rcmdcheck"), dependencies = TRUE)' 27 | - uses: r-lib/actions/check-r-package@v2 28 | with: 29 | upload-results: true 30 | -------------------------------------------------------------------------------- /src/event_writer.cpp: -------------------------------------------------------------------------------- 1 | #include "event_writer.h" 2 | #include "utils.h" 3 | 4 | EventWriter::EventWriter(const std::string& file) : 5 | record_writer(new RecordWriter(file)) { 6 | long time_in_seconds = get_wall_time(); 7 | { 8 | // Write the first event with the current version, and flush 9 | // right away so the file contents will be easily determined. 
10 | tensorboard::Event event; 11 | event.set_wall_time(time_in_seconds); 12 | event.set_file_version(std::string(kVersionPrefix) + std::to_string(kCurrentVersion)); 13 | write_event(event); 14 | } 15 | } 16 | 17 | EventWriter::~EventWriter() { 18 | 19 | } 20 | 21 | bool EventWriter::write_event(const tensorboard::Event& event) { 22 | return this->record_writer->write_record(event.SerializeAsString()); 23 | } 24 | 25 | void EventWriter::flush () { 26 | this->record_writer->flush(); 27 | } 28 | -------------------------------------------------------------------------------- /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^tfevents\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^\.github$ 4 | ^src/generated$ 5 | ^src/build$ 6 | ^src/libs$ 7 | ^src/Makevars$ 8 | ^inst/tp/pb/java$ 9 | ^inst/tp/pb/benchmarks$ 10 | ^inst/tp/pb/kokoro$ 11 | ^inst/tp/pb/python$ 12 | ^inst/tp/pb/ruby$ 13 | ^inst/tp/pb/csharp$ 14 | ^inst/tp/pb/php$ 15 | ^inst/tp/pb/objectivec$ 16 | ^inst/tp/pb/\.git$ 17 | ^inst/tp/pb/\.bazelignore$ 18 | ^inst/tp/pb/\.readthedocs.yml$ 19 | ^inst/tp/pb/\.github$ 20 | ^inst/tp/pb/src/solaris/libstdc\+\+\.la$ 21 | ^inst/tp/pb/src/google/protobuf/testing$ 22 | ^inst/tp/pb/third_party/googletest$ 23 | ^inst/tp/pb/third_party/benchmark$ 24 | ^\.DS_Store$ 25 | ^src/\.DS_Store$ 26 | ^LICENSE\.md$ 27 | ^codecov\.yml$ 28 | ^vignettes/articles$ 29 | ^vignettes/examples$ 30 | ^logs$ 31 | ^.vscode$ 32 | ^README\.Rmd$ 33 | ^_pkgdown\.yml$ 34 | ^docs$ 35 | ^pkgdown$ 36 | ^cran-comments\.md$ 37 | ^CRAN-SUBMISSION$ 38 | ^configure\.log$ 39 | -------------------------------------------------------------------------------- /man/event.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/events.R 3 | \name{event} 4 | \alias{event} 5 | \title{Creates events} 6 | \usage{ 7 | event(run, wall_time, step, ..., summary = NA, 
file_version = NA) 8 | } 9 | \description{ 10 | We try to match events as closely as possible to the protobuf messages. 11 | The hierarchy looks like: 12 | 13 | \if{html}{\out{
}}\preformatted{event (): 14 | - run () 15 | - wall_time () 16 | - step () 17 | - summary ( aka list_of): 18 | - values (list): 19 | - : 20 | - metadata () 21 | - tag () 22 | - value () 23 | - image () 24 | - buffer () 25 | - width () 26 | - height () 27 | - colorspace () 28 | }\if{html}{\out{
}} 29 | } 30 | \keyword{internal} 31 | -------------------------------------------------------------------------------- /tools/winlibs.R: -------------------------------------------------------------------------------- 1 | if(!file.exists("../windows/protobuf/include/google/protobuf/descriptor.h")){ 2 | unlink("../windows", recursive = TRUE) 3 | url <- if(grepl("aarch", R.version$platform)){ 4 | "https://github.com/r-windows/bundles/releases/download/protobuf-21.12/protobuf-21.12-clang-aarch64.tar.xz" 5 | } else if(grepl("clang", Sys.getenv('R_COMPILED_BY'))){ 6 | "https://github.com/r-windows/bundles/releases/download/protobuf-21.12/protobuf-21.12-clang-x86_64.tar.xz" 7 | } else if(getRversion() >= "4.3") { 8 | "https://github.com/r-windows/bundles/releases/download/protobuf-21.12/protobuf-21.12-ucrt-x86_64.tar.xz" 9 | } else { 10 | "https://github.com/rwinlib/protobuf/archive/v21.12.tar.gz" 11 | } 12 | download.file(url, basename(url), quiet = TRUE) 13 | dir.create("../windows", showWarnings = FALSE) 14 | untar(basename(url), exdir = "../windows", tar = 'internal') 15 | unlink(basename(url)) 16 | setwd("../windows") 17 | file.rename(list.files(), 'protobuf') 18 | } 19 | -------------------------------------------------------------------------------- /tests/testthat/test-text.R: -------------------------------------------------------------------------------- 1 | test_that("can log text values", { 2 | temp <- tempfile() 3 | with_logdir(temp, { 4 | log_event(x = summary_text("Hello world!")) 5 | log_event(x = "## hello world!\n hello!") 6 | }) 7 | 8 | skip_if_tbparse_not_available() 9 | reader <- tbparse$SummaryReader(log_path = temp) 10 | 11 | expect_equal(nrow(reader$text), 2) 12 | }) 13 | 14 | test_that("errors when passing more than one character value", { 15 | temp <- tempfile() 16 | expect_error( 17 | with_logdir(temp, { 18 | log_event(x = c("hello", "world")) 19 | }), 20 | regexp = "Can't log a character" 21 | ) 22 | expect_error( 23 | with_logdir(temp, { 24 
| log_event(x = summary_text(c("hello", "world"))) 25 | }), 26 | regexp = "Can't log a character" 27 | ) 28 | }) 29 | 30 | test_that("make sure we fail when metadata is not text", { 31 | temp <- tempfile() 32 | expect_error( 33 | with_logdir(temp, { 34 | log_event(x = summary_text("hello", metadata = summary_metadata("text2"))) 35 | }), 36 | regexp = "Plugin name should be " 37 | ) 38 | }) 39 | -------------------------------------------------------------------------------- /man/as_event.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/events.R 3 | \name{as_event} 4 | \alias{as_event} 5 | \title{Coerce an object to a event.} 6 | \usage{ 7 | as_event(x, step, wall_time, ...) 8 | } 9 | \arguments{ 10 | \item{x}{Object that will be coerced to an event.} 11 | 12 | \item{step}{The step that will be used when the event is logged. This is used 13 | by TensorBoard when showing data.} 14 | 15 | \item{wall_time}{The wall time that will be appended to the event. This field 16 | is used by TensorBoard when displaying information based on actual time.} 17 | 18 | \item{...}{currently unused.} 19 | } 20 | \value{ 21 | A event vctr with class . 22 | } 23 | \description{ 24 | Coerce an object to a event. 25 | } 26 | \section{Extending \code{as_event}}{ 27 | 28 | 29 | \code{as_event} is an S3 generic and you can implement method for your own class. 30 | We don't export the \code{event} constructor though, so you should implement it 31 | in terms of other \code{as_event} methods. 
32 | } 33 | 34 | \examples{ 35 | as_event(list(hello = 1), step = 1, wall_time = 1) 36 | 37 | } 38 | -------------------------------------------------------------------------------- /_pkgdown.yml: -------------------------------------------------------------------------------- 1 | url: https://mlverse.github.io/tfevents/ 2 | template: 3 | bootstrap: 5 4 | 5 | reference: 6 | - title: Event logging 7 | desc: > 8 | Functions directly related to event logging. 9 | contents: 10 | - log_event 11 | - as_event 12 | - set_global_step 13 | - set_default_logdir 14 | - with_logdir 15 | - local_logdir 16 | - title: Summaries 17 | desc: > 18 | Types of events that can be logged 19 | contents: 20 | - starts_with("summary_") 21 | - -contains("hparams") 22 | - title: HParams 23 | desc: > 24 | Related to logging hyperparameters 25 | contents: 26 | - contains("hparams") 27 | - title: Reading 28 | desc: > 29 | Related to reading tfevents record files 30 | contents: 31 | - starts_with("collect") 32 | - starts_with("iter") 33 | - value 34 | 35 | articles: 36 | - title: Guides 37 | navbar: Guides 38 | contents: 39 | - articles/images 40 | - articles/hparams 41 | - articles/read 42 | - title: Examples 43 | navbar: Examples 44 | contents: 45 | - examples/torch 46 | 47 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | Copyright (c) 2022 tfevents authors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice 
and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/proto/plugins/text/plugin_data.proto: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | 13 | syntax = "proto3"; 14 | 15 | package tensorboard; 16 | 17 | // Text summaries created by the `tensorboard.plugins.text.summary` 18 | // module will include `SummaryMetadata` whose `plugin_data` field has 19 | // as `content` a binary string that is the encoding of an 20 | // `TextPluginData` proto. 21 | message TextPluginData { 22 | // Version `0` is the only supported version. 
23 | int32 version = 1; 24 | } 25 | -------------------------------------------------------------------------------- /man/summary_metadata.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/events.R 3 | \name{summary_metadata} 4 | \alias{summary_metadata} 5 | \title{Summary metadata} 6 | \usage{ 7 | summary_metadata( 8 | plugin_name, 9 | display_name = NA_character_, 10 | description = NA_character_, 11 | ..., 12 | plugin_content = NA 13 | ) 14 | } 15 | \arguments{ 16 | \item{plugin_name}{The name of the TensorBoard plugin that might use the summary.} 17 | 18 | \item{display_name}{Display name for the summary.} 19 | 20 | \item{description}{A description of the summary.} 21 | 22 | \item{...}{Currently unused. For future expansion.} 23 | 24 | \item{plugin_content}{An optional plugin content. Note that it will only be 25 | used if the C++ function \code{make_plugin_data} is aware of \code{plugin_content} 26 | for the specified plugin name. For advanced use only.} 27 | } 28 | \value{ 29 | A \code{summary_metadata} object. 30 | } 31 | \description{ 32 | Creates a summary metadata that can be passed to multiple \code{summary_} functions. 33 | } 34 | \examples{ 35 | summary <- summary_scalar(1, metadata = summary_metadata("scalars")) 36 | 37 | } 38 | -------------------------------------------------------------------------------- /man/summary_scalar.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/scalar.R 3 | \name{summary_scalar} 4 | \alias{summary_scalar} 5 | \title{Scalar event} 6 | \usage{ 7 | summary_scalar(value, ..., metadata = NULL, tag = NA) 8 | } 9 | \arguments{ 10 | \item{value}{A numeric scalar value to be logged.} 11 | 12 | \item{...}{Currently unused. 
To allow future expansion.} 13 | 14 | \item{metadata}{A \code{metadata} object, as created with \code{\link[=summary_metadata]{summary_metadata()}}. In 15 | most cases you don't need to change the default.} 16 | 17 | \item{tag}{A tag that within the TensorBoard UI. See \code{\link[=log_event]{log_event()}} for other 18 | ways of specifying the tag attribute.} 19 | } 20 | \value{ 21 | A \verb{} object. 22 | } 23 | \description{ 24 | Scalar event 25 | } 26 | \examples{ 27 | temp <- tempfile() 28 | with_logdir(temp, { 29 | log_event(loss = summary_scalar(1)) 30 | }) 31 | } 32 | \seealso{ 33 | Other summary: 34 | \code{\link{summary_audio}()}, 35 | \code{\link{summary_histogram}()}, 36 | \code{\link{summary_image}()}, 37 | \code{\link{summary_text}()} 38 | } 39 | \concept{summary} 40 | -------------------------------------------------------------------------------- /src/proto/plugins/scalar/plugin_data.proto: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 
11 | ==============================================================================*/ 12 | 13 | syntax = "proto3"; 14 | 15 | package tensorboard; 16 | 17 | // Scalar summaries created by the `tensorboard.plugins.scalar.summary` 18 | // module will include `SummaryMetadata` whose `plugin_data` field has 19 | // as `content` a binary string that is the encoding of an 20 | // `ScalarPluginData` proto. 21 | message ScalarPluginData { 22 | // Version `0` is the only supported version. 23 | int32 version = 1; 24 | } 25 | -------------------------------------------------------------------------------- /src/proto/plugins/histogram/plugin_data.proto: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | syntax = "proto3"; 17 | 18 | package tensorboard; 19 | 20 | // Histogram summaries created by the `tensorboard.plugins.histogram.summary` 21 | // module will include `SummaryMetadata` whose `plugin_data` field has 22 | // as `content` a binary string that is the encoding of a 23 | // `HistogramPluginData` proto. 24 | message HistogramPluginData { 25 | // Version `0` is the only supported version. 
26 | int32 version = 1; 27 | } 28 | -------------------------------------------------------------------------------- /src/proto/plugins/hparams/hparams_util.proto: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | 13 | // Messages for passing configuraton data to the hparams_util script as 14 | // command line flags. 15 | 16 | syntax = "proto3"; 17 | 18 | import "google/protobuf/struct.proto"; 19 | import "plugins/hparams/api.proto"; 20 | 21 | package tensorboard.hparams; 22 | 23 | message HParamInfosList { 24 | repeated HParamInfo hparam_infos = 1; 25 | } 26 | 27 | message MetricInfosList { 28 | repeated MetricInfo metric_infos = 1; 29 | } 30 | 31 | message HParams { 32 | map hparams = 1; 33 | } 34 | -------------------------------------------------------------------------------- /man/log_event.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/log.R 3 | \name{log_event} 4 | \alias{log_event} 5 | \title{Log event} 6 | \usage{ 7 | log_event(..., step = get_global_step(increment = TRUE)) 8 | } 9 | \arguments{ 10 | \item{...}{Named values that you want to log. 
They can be possibly nested, 11 | in this case, the enclosing names are considered 'run' names by TensorBoard.} 12 | 13 | \item{step}{The step associated the logs. If \code{NULL}, a managed step counter 14 | will be used, and the global step is increased in every call to \code{\link[=log_event]{log_event()}}.} 15 | } 16 | \value{ 17 | Invisibly returns the logged data. 18 | } 19 | \description{ 20 | Log event 21 | } 22 | \note{ 23 | \code{\link[=log_event]{log_event()}} writes events to the default \code{logdir}. You can query the 24 | default \code{logdir} with \code{\link[=get_default_logdir]{get_default_logdir()}} and modify it with 25 | \code{\link[=set_default_logdir]{set_default_logdir()}}. You can also use the \code{\link[=with_logdir]{with_logdir()}} context switcher 26 | to temporarily modify the logdir. 27 | } 28 | \examples{ 29 | temp <- tempfile() 30 | with_logdir(temp, { 31 | log_event( 32 | train = list(loss = runif(1), acc = runif(1)), 33 | valid = list(loss = runif(1), acc = runif(1)) 34 | ) 35 | }) 36 | } 37 | -------------------------------------------------------------------------------- /man/get_global_step.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/log.R 3 | \name{get_global_step} 4 | \alias{get_global_step} 5 | \alias{set_global_step} 6 | \title{Global step counters} 7 | \usage{ 8 | get_global_step(increment = TRUE) 9 | 10 | set_global_step(step) 11 | } 12 | \arguments{ 13 | \item{increment}{Wether to increment the \code{step} when getting it.} 14 | 15 | \item{step}{New value for \code{step}.} 16 | } 17 | \value{ 18 | The global step value for the default logdir, when \code{get_global_step}, 19 | otherwise returns \code{NULL} invisibly. 
20 | } 21 | \description{ 22 | Global step counters 23 | } 24 | \details{ 25 | \code{tfevents} tracks and automatically increased the step counter whenever 26 | \code{\link[=log_event]{log_event()}} is called. Note that, it maintains a separate step counter for 27 | each root \code{logdir}, thus if you change the \code{logdir} using \code{\link[=set_default_logdir]{set_default_logdir()}} 28 | or \code{\link[=with_logdir]{with_logdir()}}, a different step counter will be used. 29 | } 30 | \section{Functions}{ 31 | \itemize{ 32 | \item \code{set_global_step()}: Set the global step. 33 | 34 | }} 35 | \examples{ 36 | temp <- tempfile() 37 | with_logdir(temp, { 38 | print(get_global_step()) 39 | set_global_step(100) 40 | print(get_global_step()) 41 | }) 42 | print(get_global_step()) 43 | } 44 | -------------------------------------------------------------------------------- /src/proto/plugins/image/plugin_data.proto: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 
11 | ==============================================================================*/ 12 | 13 | syntax = "proto3"; 14 | 15 | package tensorboard; 16 | 17 | // Image summaries created by the `tensorboard.plugins.image.summary` 18 | // module will include `SummaryMetadata` whose `plugin_data` field has 19 | // as `content` a binary string that is the encoding of an 20 | // `ImagePluginData` proto. 21 | message ImagePluginData { 22 | // Version `0` is the only supported version. 23 | int32 version = 1; 24 | 25 | // Indicates whether this time series data was originally represented 26 | // as `Summary.Value.Image` values and has been automatically 27 | // converted to bytestring tensors. 28 | bool converted_to_tensor = 2; 29 | } 30 | -------------------------------------------------------------------------------- /tests/testthat/test-histogram.R: -------------------------------------------------------------------------------- 1 | test_that("can write and visualize histograms", { 2 | 3 | temp <- tempfile() 4 | with_logdir(temp, { 5 | for(i in 1:10) { 6 | log_event(x = summary_histogram(rnorm(10000))) 7 | } 8 | }) 9 | 10 | skip_if_tbparse_not_available() 11 | reader <- tbparse$SummaryReader(temp) 12 | x <- reader$tensors 13 | expect_equal(nrow(x), 10) 14 | 15 | # test for arrays. 
16 | temp <- tempfile() 17 | with_logdir(temp, { 18 | for(i in 1:10) { 19 | log_event(x = summary_histogram(array(rnorm(10000), dim = c(10, 10, 100)))) 20 | } 21 | }) 22 | 23 | skip_if_tbparse_not_available() 24 | reader <- tbparse$SummaryReader(temp) 25 | x <- reader$tensors 26 | expect_equal(nrow(x), 10) 27 | }) 28 | 29 | test_that("edge case where there's no data doesn't fail", { 30 | temp <- tempfile() 31 | with_logdir(temp, { 32 | for(i in 1:10) { 33 | log_event(x = summary_histogram(numeric(0))) 34 | } 35 | }) 36 | 37 | skip_if_tbparse_not_available() 38 | reader <- tbparse$SummaryReader(temp) 39 | x <- reader$tensors 40 | expect_equal(nrow(x), 10) 41 | 42 | temp <- tempfile() 43 | with_logdir(temp, { 44 | for(i in 1:10) { 45 | log_event(x = summary_histogram(rep(0, 100))) 46 | } 47 | }) 48 | 49 | reader <- tbparse$SummaryReader(temp) 50 | x <- reader$tensors 51 | expect_equal(nrow(x), 10) 52 | }) 53 | -------------------------------------------------------------------------------- /src/Makevars.win: -------------------------------------------------------------------------------- 1 | RWINLIB = ../windows/protobuf 2 | PKG_CPPFLAGS= -I$(RWINLIB)/include -Igenerated 3 | PKG_LIBS= -L$(RWINLIB)/lib${subst gcc,,${COMPILED_BY}}${R_ARCH} \ 4 | -L$(RWINLIB)/lib \ 5 | -lprotobuf 6 | BINDIR=$(RWINLIB)/bin$(subst 64,,$(WIN)) 7 | 8 | OBJECTS = \ 9 | generated/event.pb.o \ 10 | generated/plugins/audio/plugin_data.pb.o \ 11 | generated/plugins/histogram/plugin_data.pb.o \ 12 | generated/plugins/hparams/api.pb.o \ 13 | generated/plugins/hparams/hparams_util.pb.o \ 14 | generated/plugins/hparams/plugin_data.pb.o \ 15 | generated/plugins/image/plugin_data.pb.o \ 16 | generated/plugins/scalar/plugin_data.pb.o \ 17 | generated/plugins/text/plugin_data.pb.o \ 18 | generated/resource_handle.pb.o \ 19 | generated/summary.pb.o \ 20 | generated/tensor.pb.o \ 21 | generated/tensor_shape.pb.o \ 22 | generated/types.pb.o \ 23 | reader.o \ 24 | event_writer.o \ 25 | record_writer.o \ 26 
| utils.o \ 27 | write.o \ 28 | code.o \ 29 | RcppExports.o \ 30 | crc32c.o 31 | 32 | #OBJECTS=$(CPP_SOURCES:.cpp=.o) $(CC_SOURCES:.cc=.o) $(C_SOURCES:.c=.o) 33 | 34 | all: clean winlibs 35 | 36 | winlibs: 37 | "${R_HOME}/bin${R_ARCH_BIN}/Rscript.exe" "../tools/winlibs.R" 38 | mkdir generated 39 | find proto -name *.proto -exec bash -c '"${BINDIR}/protoc" -I "proto" --cpp_out="generated" {}' \; 40 | 41 | clean: 42 | rm -f $(OBJECTS) $(SHLIB) 43 | rm -f -r generated 44 | (cd ..; sh cleanup) 45 | -------------------------------------------------------------------------------- /R/text.R: -------------------------------------------------------------------------------- 1 | 2 | #' Creates a text summary 3 | #' @param txt An object that can be converted to a text. 4 | #' @param ... Currently unused. 5 | #' @inheritParams summary_scalar 6 | #' @family summary 7 | #' 8 | #' @examples 9 | #' temp <- tempfile() 10 | #' with_logdir(temp, { 11 | #' log_event( 12 | #' x = "hello world", 13 | #' y = summary_text("hello world") 14 | #' ) 15 | #' }) 16 | #' @returns A summary that can be logged with [log_event()]. 17 | #' @export 18 | summary_text <- function(txt, ..., metadata = NULL, tag = NA) { 19 | UseMethod("summary_text") 20 | } 21 | 22 | #' @describeIn summary_text Creates a summary from a scalar character vector. 23 | #' @export 24 | summary_text.character <- function(txt, ..., metadata = NULL, tag = NA) { 25 | if (is.null(metadata)) { 26 | metadata <- summary_metadata(plugin_name = "text") 27 | } 28 | 29 | if (!rlang::is_scalar_atomic(txt)) { 30 | cli::cli_abort(c( 31 | "Can't log a character vector with length != 1.", 32 | i = "Expected a single value but got a vector with length {.val {length(txt)}}." 
33 | )) 34 | } 35 | 36 | if (!all(field(metadata, "plugin_name") == "text")) { 37 | cli::cli_abort(c( 38 | "Plugin name should be 'text'", 39 | x = "Got {.val {unique(field(metadata, 'plugin_name'))}}" 40 | )) 41 | } 42 | 43 | summary_tensor( 44 | txt, 45 | dtype = "string", 46 | metadata = metadata, 47 | tag = tag 48 | ) 49 | } 50 | -------------------------------------------------------------------------------- /R/scalar.R: -------------------------------------------------------------------------------- 1 | #' Scalar event 2 | #' 3 | #' @param value A numeric scalar value to be logged. 4 | #' @param ... Currently unused. To allow future expansion. 5 | #' @param metadata A `metadata` object, as created with [summary_metadata()]. In 6 | #' most cases you don't need to change the default. 7 | #' @param tag A tag that within the TensorBoard UI. See [log_event()] for other 8 | #' ways of specifying the tag attribute. 9 | #' 10 | #' @returns A `` object. 11 | #' @family summary 12 | #' 13 | #' @examples 14 | #' temp <- tempfile() 15 | #' with_logdir(temp, { 16 | #' log_event(loss = summary_scalar(1)) 17 | #' }) 18 | #' @export 19 | summary_scalar <- function(value, ..., metadata = NULL, tag = NA) { 20 | rlang::check_dots_empty() 21 | new_summary_scalar(value, metadata = metadata, tag = tag) 22 | } 23 | 24 | new_summary_scalar <- function(value = numeric(), ..., metadata = NULL, 25 | tag = character()) { 26 | if (is.null(metadata)) { 27 | metadata <- summary_metadata(plugin_name = "scalars") 28 | } 29 | summary_values(metadata = metadata, value = value, tag = tag, 30 | class = "tfevents_summary_scalar") 31 | } 32 | 33 | #' @export 34 | vec_ptype2.tfevents_summary_values.tfevents_summary_scalar <- function(x, y, ...) { 35 | new_summary_values() 36 | } 37 | #' @export 38 | vec_ptype2.tfevents_summary_scalar.tfevents_summary_values <- function(x, y, ...) 
{ 39 | new_summary_values() 40 | } 41 | -------------------------------------------------------------------------------- /man/summary_text.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/text.R 3 | \name{summary_text} 4 | \alias{summary_text} 5 | \alias{summary_text.character} 6 | \title{Creates a text summary} 7 | \usage{ 8 | summary_text(txt, ..., metadata = NULL, tag = NA) 9 | 10 | \method{summary_text}{character}(txt, ..., metadata = NULL, tag = NA) 11 | } 12 | \arguments{ 13 | \item{txt}{An object that can be converted to a text.} 14 | 15 | \item{...}{Currently unused.} 16 | 17 | \item{metadata}{A \code{metadata} object, as created with \code{\link[=summary_metadata]{summary_metadata()}}. In 18 | most cases you don't need to change the default.} 19 | 20 | \item{tag}{A tag that within the TensorBoard UI. See \code{\link[=log_event]{log_event()}} for other 21 | ways of specifying the tag attribute.} 22 | } 23 | \value{ 24 | A summary that can be logged with \code{\link[=log_event]{log_event()}}. 25 | } 26 | \description{ 27 | Creates a text summary 28 | } 29 | \section{Methods (by class)}{ 30 | \itemize{ 31 | \item \code{summary_text(character)}: Creates a summary from a scalar character vector. 
32 | 33 | }} 34 | \examples{ 35 | temp <- tempfile() 36 | with_logdir(temp, { 37 | log_event( 38 | x = "hello world", 39 | y = summary_text("hello world") 40 | ) 41 | }) 42 | } 43 | \seealso{ 44 | Other summary: 45 | \code{\link{summary_audio}()}, 46 | \code{\link{summary_histogram}()}, 47 | \code{\link{summary_image}()}, 48 | \code{\link{summary_scalar}()} 49 | } 50 | \concept{summary} 51 | -------------------------------------------------------------------------------- /src/proto/resource_handle.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorboard; 4 | 5 | import "tensor_shape.proto"; 6 | import "types.proto"; 7 | 8 | option cc_enable_arenas = true; 9 | option java_outer_classname = "ResourceHandle"; 10 | option java_multiple_files = true; 11 | option java_package = "org.tensorflow.framework"; 12 | option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/resource_handle_go_proto"; 13 | 14 | // Protocol buffer representing a handle to a tensorflow resource. Handles are 15 | // not valid across executions, but can be serialized back and forth from within 16 | // a single run. 17 | message ResourceHandleProto { 18 | // Unique name for the device containing the resource. 19 | string device = 1; 20 | 21 | // Container in which this resource is placed. 22 | string container = 2; 23 | 24 | // Unique name of this resource. 25 | string name = 3; 26 | 27 | // Hash code for the type of the resource. Is only valid in the same device 28 | // and in the same execution. 29 | uint64 hash_code = 4; 30 | 31 | // For debug-only, the name of the type pointed to by this handle, if 32 | // available. 33 | string maybe_type_name = 5; 34 | 35 | // Protocol buffer representing a pair of (data type, tensor shape). 36 | message DtypeAndShape { 37 | DataType dtype = 1; 38 | TensorShapeProto shape = 2; 39 | } 40 | 41 | // Data types and shapes for the underlying resource. 
42 | repeated DtypeAndShape dtypes_and_shapes = 6; 43 | 44 | reserved 7; 45 | } 46 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: tfevents 2 | Title: Write Events for 'TensorBoard' 3 | Version: 0.0.4.9000 4 | Authors@R: c( 5 | person("Daniel", "Falbel", email = "daniel@posit.co", role = c("aut", "cre", "cph")), 6 | person(family = "Posit, PBC", role = c("cph")), 7 | person(family = "The tl::optional authors", role = c("cph"), comment = "For the vendored tl::optional code."), 8 | person("Mark", "Adler", role = c("cph"), comment = "For the included crc32c code.") 9 | ) 10 | Description: Provides a convenient way to log scalars, images, audio, and histograms in the 'tfevent' record file format. 11 | Logged data can be visualized on the fly using 'TensorBoard', a web based tool that focuses on visualizing the training 12 | progress of machine learning models. 
13 | License: MIT + file LICENSE 14 | Encoding: UTF-8 15 | Roxygen: list(markdown = TRUE) 16 | RoxygenNote: 7.2.3 17 | LinkingTo: 18 | Rcpp 19 | Imports: 20 | Rcpp, 21 | withr, 22 | fs, 23 | rlang (>= 1.0.0), 24 | vctrs, 25 | blob, 26 | png, 27 | digest, 28 | cli, 29 | zeallot 30 | Suggests: 31 | testthat (>= 3.0.0), 32 | tibble, 33 | tidyr, 34 | reticulate, 35 | rmarkdown, 36 | ggplot2, 37 | tensorflow, 38 | wav 39 | SystemRequirements: libprotobuf, protobuf-compiler 40 | URL: https://github.com/mlverse/tfevents, 41 | https://mlverse.github.io/tfevents/ 42 | BugReports: https://github.com/mlverse/tfevents/issues 43 | Config/testthat/edition: 3 44 | Config/Needs/website: tensorflow, webshot2, keras 45 | -------------------------------------------------------------------------------- /man/get_default_logdir.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/log.R 3 | \name{get_default_logdir} 4 | \alias{get_default_logdir} 5 | \alias{set_default_logdir} 6 | \alias{with_logdir} 7 | \alias{local_logdir} 8 | \title{Query and modify the logdir} 9 | \usage{ 10 | get_default_logdir() 11 | 12 | set_default_logdir(logdir = "logs") 13 | 14 | with_logdir(logdir, code) 15 | 16 | local_logdir(logdir, .env = parent.frame()) 17 | } 18 | \arguments{ 19 | \item{logdir}{The \code{logdir} that you want to set as default.} 20 | 21 | \item{code}{Expressions that will be evaluated in a context with the \code{new} 22 | \code{logdir} as the default \code{logdir}.} 23 | 24 | \item{.env}{Environment that controls scope of changes. For expert use only.} 25 | } 26 | \value{ 27 | The \code{logdir} for \code{get_default_logdir()} otherwise invisibly returns 28 | \code{NULL} 29 | } 30 | \description{ 31 | \code{\link[=log_event]{log_event()}} has a notion of default logdir, so you don't need to specify it 32 | at every call. 
These functions allow you to query and the current logdir. 33 | } 34 | \section{Functions}{ 35 | \itemize{ 36 | \item \code{set_default_logdir()}: Modifies the default \code{logdir}. 37 | 38 | \item \code{with_logdir()}: Temporarily modify the default \code{logdir}. 39 | 40 | \item \code{local_logdir()}: Temporarily modify thedefault \code{logdir}. 41 | 42 | }} 43 | \examples{ 44 | temp <- tempfile() 45 | get_default_logdir() 46 | with_logdir(temp, { 47 | print(get_default_logdir()) 48 | }) 49 | } 50 | -------------------------------------------------------------------------------- /man/hparams_metric.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hparams.R 3 | \name{hparams_metric} 4 | \alias{hparams_metric} 5 | \title{Defines a Metric} 6 | \usage{ 7 | hparams_metric( 8 | tag, 9 | group = NA, 10 | display_name = tag, 11 | description = tag, 12 | dataset_type = NA 13 | ) 14 | } 15 | \arguments{ 16 | \item{tag}{The tag name of the scalar summary that corresponds to this 17 | metric.} 18 | 19 | \item{group}{An optional string listing the subdirectory under the 20 | session's log directory containing summaries for this metric. 21 | For instance, if summaries for training runs are written to 22 | events files in \code{ROOT_LOGDIR/SESSION_ID/train}, then \code{group} 23 | should be \code{"train"}. Defaults to the empty string: i.e., 24 | summaries are expected to be written to the session logdir.} 25 | 26 | \item{display_name}{An optional human-readable display name.} 27 | 28 | \item{description}{An optional Markdown string with a human-readable 29 | description of this metric, to appear in TensorBoard.} 30 | 31 | \item{dataset_type}{dataset_type: Either \code{"training"} or \verb{"validation}, or 32 | \code{NA}.} 33 | } 34 | \value{ 35 | A \code{hparams_metric} object. 
36 | } 37 | \description{ 38 | Metric objects are passed to \code{\link[=log_hparams_config]{log_hparams_config()}} in order to define the 39 | collection of scalars that will be displayed in the HParams tab in TensorBoard. 40 | } 41 | \examples{ 42 | hparams_metric("loss", group = "train") 43 | hparams_metric("acc") 44 | } 45 | -------------------------------------------------------------------------------- /man/value.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/read.R 3 | \name{value} 4 | \alias{value} 5 | \alias{value.tfevents_summary_values} 6 | \title{Extracts the value of a summary value} 7 | \usage{ 8 | value(x, ...) 9 | 10 | \method{value}{tfevents_summary_values}(x, ..., as_list = FALSE) 11 | } 12 | \arguments{ 13 | \item{x}{A \code{tfevents_summary_values} object.} 14 | 15 | \item{...}{Currently unused. To allow future extension.} 16 | 17 | \item{as_list}{A boolean indicating if the results should be returned in a list. 18 | The default is to return a single value. If you need to extract values from 19 | multiple summaries use \code{as_list = TRUE}.} 20 | } 21 | \value{ 22 | Depending on the type of the summary it returns an image, audio, text or 23 | scalar. 24 | } 25 | \description{ 26 | Summaries are complicated objects because they reflect the Protobuf object 27 | structure that are serialized in the tfevents records files. This function 28 | allows one to easily query vaues from summaries and will dispatch to the 29 | correct way to extract images, audio, text, etc from summary values. 30 | } 31 | \section{Methods (by class)}{ 32 | \itemize{ 33 | \item \code{value(tfevents_summary_values)}: Acess values from \code{summary_values}. 
34 | 35 | }} 36 | \examples{ 37 | temp <- tempfile() 38 | with_logdir(temp, { 39 | for(i in 1:5) { 40 | log_event(my_log = runif(1)) 41 | } 42 | }) 43 | 44 | # iterate over all events 45 | summary <- collect_events(temp, n = 1, type = "summary") 46 | value(summary$summary) 47 | 48 | } 49 | -------------------------------------------------------------------------------- /man/hparams_hparam.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hparams.R 3 | \name{hparams_hparam} 4 | \alias{hparams_hparam} 5 | \title{Defines a HParam} 6 | \usage{ 7 | hparams_hparam(name, domain = NA, display_name = name, description = name) 8 | } 9 | \arguments{ 10 | \item{name}{Name of the hyperparameter.} 11 | 12 | \item{domain}{A list of values that can be assumed by the hyperparameter. 13 | It can be \code{character()}, \code{numeric()} or \code{logical()} vector. You can also 14 | pass a named numeric vector with eg \code{c(min_value = 0, max_value = 10)} in 15 | this case, any value in this range is accepted.} 16 | 17 | \item{display_name}{Display name of the hparameter for the TensorBoard UI. 18 | By default it's identical to the name.} 19 | 20 | \item{description}{Parameter description. Shown in tooltips around the 21 | TensorBoard UI.} 22 | } 23 | \value{ 24 | A \code{hparams_hparam} object. 25 | } 26 | \description{ 27 | Hparam object are used to describe names and domains of hyperparameters so 28 | TensorBoard UI can show additional information about them. 29 | } 30 | \note{ 31 | A list of \code{hparam} values can be passed to \code{\link[=log_hparams_config]{log_hparams_config()}} so 32 | you define the hyperparameters that are tracked by the experiment. 
33 | } 34 | \examples{ 35 | hparams_hparam("optimizer", domain = c("adam", "sgd")) 36 | hparams_hparam("num_units", domain = c(128, 512, 1024)) 37 | hparams_hparam("use_cnn", domain = c(TRUE, FALSE)) 38 | hparams_hparam("dropout", domain = c(min_value = 0, max_value = 0.5)) 39 | } 40 | -------------------------------------------------------------------------------- /tests/testthat/test-tensor.R: -------------------------------------------------------------------------------- 1 | test_that("can write a tensor", { 2 | skip_if_tbparse_not_available() 3 | 4 | tx <- array(0, dim = c(28, 28, 28)) 5 | txc <- array("hello", dim = c(28, 28, 28)) 6 | temp <- tempfile() 7 | with_logdir(temp, { 8 | log_event(x = summary_tensor(tx, "float")) 9 | log_event(y = summary_tensor(tx)) # auto detect type 10 | log_event(z = summary_tensor(txc)) # auto detect type 11 | log_event(a = summary_tensor(list(tx, txc))) # auto detect type 12 | log_event(b = summary_tensor(tx, "double")) 13 | }) 14 | 15 | reader <- tbparse$SummaryReader(temp) 16 | df <- reader$tensors 17 | expect_equal(nrow(df), 6) 18 | }) 19 | 20 | test_that("can write tensors with dimnames", { 21 | tx <- array(0, dim = c(1, 2, 2, 3)) 22 | names(dim(tx)) <- c("b", "w", "h", "c") 23 | 24 | temp <- tempfile() 25 | with_logdir(temp, { 26 | log_event(x = summary_tensor(tx)) 27 | }) 28 | 29 | skip_if_tbparse_not_available() 30 | reader <- tbparse$SummaryReader(temp) 31 | 32 | df <- reader$tensors 33 | # TODO: can't easily test if dimension names we actually writen because 34 | # tbparse ignores them. 
35 | expect_equal(dim(df$value[[1]]), c(1,2,2,3)) 36 | }) 37 | 38 | test_that("can set name within summary_tensor", { 39 | tx <- array(0, dim = c(1, 28, 28, 28)) 40 | temp <- tempfile() 41 | with_logdir(temp, { 42 | log_event(summary_tensor(tx, tag = "hello")) 43 | }) 44 | 45 | skip_if_tbparse_not_available() 46 | reader <- tbparse$SummaryReader(temp) 47 | expect_equal(reader$tags$tensors, "hello") 48 | }) 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /man/log_hparams.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hparams.R 3 | \name{log_hparams} 4 | \alias{log_hparams} 5 | \alias{summary_hparams} 6 | \title{Log hyperaparameters} 7 | \usage{ 8 | log_hparams(..., trial_id = NA, time_created_secs = get_wall_time()) 9 | 10 | summary_hparams(..., trial_id = NA, time_created_secs = get_wall_time()) 11 | } 12 | \arguments{ 13 | \item{...}{Named values of hyperparameters.} 14 | 15 | \item{trial_id}{A name for the current trail. by default it's the hash of the 16 | hparams names and values.} 17 | 18 | \item{time_created_secs}{The time the experiment is created in seconds 19 | since the UNIX epoch.} 20 | } 21 | \value{ 22 | A hyperparameter summary. USed for the side effects of logging the 23 | hyperparameter to the logdir. 24 | } 25 | \description{ 26 | Log hyperaparameters 27 | } 28 | \details{ 29 | This function should only be called once in a logdir and it will 30 | record the set of hyperparameters used in that run. Undefined behavior can 31 | happen if it's called more than once in a logdir - specially how TensorBoard 32 | behaves during visualization. 33 | } 34 | \section{Functions}{ 35 | \itemize{ 36 | \item \code{summary_hparams()}: For advanced users only. It's recommended to use the \code{log_hparams()} 37 | function instead. 
Creates a hyperparameter summary that can be written with \code{log_event()}. 38 | 39 | }} 40 | \examples{ 41 | temp <- tempfile() 42 | with_logdir(temp, { 43 | log_hparams(optimizer = "adam", num_units = 16) 44 | }) 45 | } 46 | \seealso{ 47 | \code{\link[=log_hparams_config]{log_hparams_config()}} 48 | } 49 | -------------------------------------------------------------------------------- /man/collect_events.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/read.R 3 | \name{collect_events} 4 | \alias{collect_events} 5 | \alias{events_logdir} 6 | \title{Collect data from tfevents records} 7 | \usage{ 8 | collect_events( 9 | logdir = get_default_logdir(), 10 | n = NULL, 11 | type = c("any", "summary", "scalar") 12 | ) 13 | 14 | events_logdir(logdir = get_default_logdir()) 15 | } 16 | \arguments{ 17 | \item{logdir}{The log directory that you want to query events from. Either a 18 | file path or a connection created with \code{\link[=events_logdir]{events_logdir()}}.} 19 | 20 | \item{n}{The maximum number of events to read from the connection. If \code{NULL} 21 | then all events are read, the default is \code{NULL}.} 22 | 23 | \item{type}{The kind of events that are to be read. By default all events are 24 | read. If a different type is specified, then the result can include other 25 | columns as well as more lines.} 26 | } 27 | \value{ 28 | A \code{tibble} with the collected events. 29 | } 30 | \description{ 31 | Collects all events of a kind in a single data.frame ready for analysis. 32 | } 33 | \section{Functions}{ 34 | \itemize{ 35 | \item \code{events_logdir()}: Creates a connection to a logdir that can be reused 36 | to read further events later. 
37 | 38 | }} 39 | \examples{ 40 | temp <- tempfile() 41 | with_logdir(temp, { 42 | for(i in 1:5) { 43 | log_event(my_log = runif(1)) 44 | } 45 | }) 46 | # collect all events in files, including file description events 47 | collect_events(temp) 48 | # collect summaries in the logdir 49 | collect_events(temp, type = "summary") 50 | # collect only scalar events 51 | collect_events(temp, type = "scalar") 52 | 53 | } 54 | -------------------------------------------------------------------------------- /.github/workflows/pkgdown.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | branches: [main, master] 8 | release: 9 | types: [published] 10 | workflow_dispatch: 11 | 12 | name: pkgdown 13 | 14 | permissions: read-all 15 | 16 | jobs: 17 | pkgdown: 18 | runs-on: ubuntu-latest 19 | # Only restrict concurrency for non-PR jobs 20 | concurrency: 21 | group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }} 22 | env: 23 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 24 | permissions: 25 | contents: write 26 | steps: 27 | - uses: actions/checkout@v4 28 | with: 29 | submodules: 'true' 30 | 31 | - uses: r-lib/actions/setup-pandoc@v2 32 | 33 | - uses: r-lib/actions/setup-r@v2 34 | with: 35 | use-public-rspm: true 36 | 37 | - uses: r-lib/actions/setup-r-dependencies@v2 38 | with: 39 | extra-packages: any::pkgdown, local::. 
40 | needs: website 41 | 42 | - name: Install Python deps 43 | run: | 44 | reticulate::py_install('tensorflow', pip = TRUE) 45 | shell: Rscript {0} 46 | 47 | - name: Build site 48 | run: pkgdown::build_site_github_pages(new_process = FALSE, install = FALSE) 49 | shell: Rscript {0} 50 | 51 | - name: Deploy to GitHub pages 🚀 52 | if: github.event_name != 'pull_request' 53 | uses: JamesIves/github-pages-deploy-action@v4.5.0 54 | with: 55 | clean: false 56 | branch: gh-pages 57 | folder: docs 58 | 59 | -------------------------------------------------------------------------------- /.github/workflows/check.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | branches: [main, master] 8 | 9 | name: Check 10 | 11 | permissions: read-all 12 | 13 | jobs: 14 | R-CMD-check: 15 | runs-on: ${{ matrix.config.os }} 16 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 17 | 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | config: 22 | - {os: ubuntu-latest, r: 'release'} 23 | - {os: windows-latest, r: 'release'} 24 | - {os: macos-latest, r: 'release'} 25 | 26 | env: 27 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 28 | R_KEEP_PKG_SOURCE: yes 29 | 30 | steps: 31 | - uses: actions/checkout@v4 32 | with: 33 | submodules: 'true' 34 | 35 | - uses: r-lib/actions/setup-pandoc@v2 36 | 37 | - uses: r-lib/actions/setup-r@v2 38 | with: 39 | r-version: ${{ matrix.config.r }} 40 | http-user-agent: ${{ matrix.config.http-user-agent }} 41 | use-public-rspm: true 42 | 43 | - uses: r-lib/actions/setup-r-dependencies@v2 44 | with: 45 | extra-packages: any::rcmdcheck 46 | needs: check 47 | 48 | - name: Install Python libraries used for testing 49 | continue-on-error: true 50 | run: | 51 | 
reticulate::py_install(c("tbparse", "tensorflow"), pip = TRUE) 52 | shell: Rscript {0} 53 | 54 | - uses: r-lib/actions/check-r-package@v2 55 | with: 56 | upload-snapshots: true 57 | build_args: 'c("--no-manual","--compact-vignettes=gs+qpdf")' 58 | -------------------------------------------------------------------------------- /src/proto/tensor_shape.proto: -------------------------------------------------------------------------------- 1 | // Protocol buffer representing the shape of tensors. 2 | 3 | syntax = "proto3"; 4 | option cc_enable_arenas = true; 5 | option java_outer_classname = "TensorShapeProtos"; 6 | option java_multiple_files = true; 7 | option java_package = "org.tensorflow.framework"; 8 | option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_shape_go_proto"; 9 | 10 | package tensorboard; 11 | 12 | // Dimensions of a tensor. 13 | message TensorShapeProto { 14 | // One dimension of the tensor. 15 | message Dim { 16 | // Size of the tensor in that dimension. 17 | // This value must be >= -1, but values of -1 are reserved for "unknown" 18 | // shapes (values of -1 mean "unknown" dimension). Certain wrappers 19 | // that work with TensorShapeProto may fail at runtime when deserializing 20 | // a TensorShapeProto containing a dim value of -1. 21 | int64 size = 1; 22 | 23 | // Optional name of the tensor dimension. 24 | string name = 2; 25 | }; 26 | 27 | // Dimensions of the tensor, such as {"input", 30}, {"output", 40} 28 | // for a 30 x 40 2D tensor. If an entry has size -1, this 29 | // corresponds to a dimension of unknown size. The names are 30 | // optional. 31 | // 32 | // The order of entries in "dim" matters: It indicates the layout of the 33 | // values in the tensor in-memory representation. 34 | // 35 | // The first entry in "dim" is the outermost dimension used to layout the 36 | // values, the last entry is the innermost dimension. This matches the 37 | // in-memory layout of RowMajor Eigen tensors. 
38 | // 39 | // If "dim.size()" > 0, "unknown_rank" must be false. 40 | repeated Dim dim = 2; 41 | 42 | // If true, the number of dimensions in the shape is unknown. 43 | // 44 | // If true, "dim.size()" must be 0. 45 | bool unknown_rank = 3; 46 | }; 47 | -------------------------------------------------------------------------------- /src/proto/plugins/audio/plugin_data.proto: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. 11 | ==============================================================================*/ 12 | 13 | syntax = "proto3"; 14 | 15 | package tensorboard; 16 | 17 | // Audio summaries created by the `tensorboard.plugins.audio.summary` 18 | // module will include `SummaryMetadata` whose `plugin_data` field has 19 | // as `content` a binary string that is the encoding of an 20 | // `AudioPluginData` proto. 21 | message AudioPluginData { 22 | enum Encoding { 23 | // Do not use `UNKNOWN`; it is only present because it must be. 24 | UNKNOWN = 0; 25 | WAV = 11; 26 | } 27 | 28 | // Version `0` is the only supported version. It has the following 29 | // semantics: 30 | // 31 | // - If the tensor shape is rank-2, then `t[:, 0]` represent encoded 32 | // audio data, and `t[:, 1]` represent corresponding UTF-8 encoded 33 | // Markdown labels. 
34 | // - If the tensor shape is rank-1, then `t[:]` represent encoded 35 | // audio data. There are no labels. 36 | int32 version = 1; 37 | 38 | Encoding encoding = 2; 39 | 40 | // Indicates whether this time series data was originally represented 41 | // as `Summary.Value.Audio` values and has been automatically 42 | // converted to bytestring tensors. 43 | bool converted_to_tensor = 3; 44 | } 45 | -------------------------------------------------------------------------------- /src/reader.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "generated/event.pb.h" 4 | #include "reader.h" 5 | 6 | // declare that we know how to cast events into R objects. 7 | template <> 8 | SEXP Rcpp::wrap(const tensorboard::Event& object); 9 | 10 | static auto pkg = Rcpp::Environment::namespace_env("tfevents"); 11 | static auto r_fill_run_field = Rcpp::Function(pkg["fill_run_field"]); 12 | 13 | 14 | EventFileIterator::EventFileIterator (const std::string& path, const std::string& run_name) { 15 | this->path = path; 16 | this->run_name = run_name; 17 | } 18 | 19 | tensorboard::Event EventFileIterator::get_next () { 20 | std::uint64_t length; 21 | std::uint32_t crc; 22 | 23 | if (!file.is_open()) { 24 | file.open(path, std::ios::binary); 25 | file.seekg(current_pos, std::ios::beg); 26 | } 27 | 28 | current_pos = file.tellg(); 29 | 30 | if (file.peek() == EOF) { 31 | file.close(); 32 | Rcpp::stop("File iterator is over."); 33 | } 34 | 35 | file.read(reinterpret_cast(&length), sizeof(std::uint64_t)); 36 | 37 | if (file.eof()) { 38 | file.clear(); 39 | Rcpp::stop("File iterator is over."); 40 | } 41 | 42 | file.read(reinterpret_cast(&crc), sizeof(std::uint32_t)); 43 | 44 | std::vector buffer(length); 45 | file.read(&buffer[0], length); 46 | 47 | tensorboard::Event event; 48 | event.ParseFromString(std::string(buffer.begin(), buffer.end())); 49 | 50 | file.read(reinterpret_cast(&crc), sizeof(std::uint32_t)); 51 | 
52 | return event; 53 | } 54 | 55 | // [[Rcpp::export]] 56 | Rcpp::XPtr create_event_file_iterator (const std::string& path, const std::string& run_name) { 57 | return Rcpp::XPtr(new EventFileIterator(path, run_name)); 58 | } 59 | 60 | // [[Rcpp::export]] 61 | SEXP event_file_iterator_next (Rcpp::XPtr iter) { 62 | auto event = iter->get_next(); 63 | return r_fill_run_field(event, iter->run_name); 64 | } 65 | -------------------------------------------------------------------------------- /R/audio.R: -------------------------------------------------------------------------------- 1 | #' Summary audio 2 | #' 3 | #' Audio summaries can be played withing the TensorBoard UI. 4 | #' 5 | #' @param audio Object that will be written as an audio event in the tfevents record. 6 | #' @inheritParams summary_image 7 | #' @param sample_rate The sample rate in Hz associated to the audio values. 8 | #' @returns An audio summary that can be logged with [log_event()]. 9 | #' @family summary 10 | #' @examples 11 | #' tmp <- tempfile() 12 | #' with_logdir(tmp, { 13 | #' summary_audio(array(runif(100), dim = c(1,100, 1))) 14 | #' }) 15 | #' @export 16 | summary_audio <- function(audio, ..., metadata = NULL, tag = NA) { 17 | UseMethod("summary_audio") 18 | } 19 | 20 | #' @describeIn summary_audio Creates a summary from a 3D array with dimensions 21 | #' `(batch_size, n_samples, n_channels)`. Values must be in the range `[-1, 1]`. 
22 | #' @export 23 | summary_audio.array <- function(audio, ..., sample_rate = 44100, metadata = NULL, 24 | tag = NA) { 25 | 26 | rlang::check_installed("wav") 27 | temp <- tempfile() 28 | raw_audios <- apply(audio, 1, simplify = FALSE, function(x) { 29 | wav::write_wav(t(x), sample_rate = sample_rate, path = temp) 30 | sze <- fs::file_info(temp)$size 31 | readBin(temp, n = sze, what = "raw") 32 | }) 33 | blob_audios <- blob::new_blob(raw_audios) 34 | summary_audio( 35 | blob_audios, 36 | metadata = metadata, 37 | tag = tag 38 | ) 39 | } 40 | 41 | #' @describeIn summary_audio Creates an audio summary from a raw vector containing 42 | #' a WAV encoded audio file. 43 | #' @export 44 | summary_audio.raw <- function(audio, ..., metadata = NULL, tag = NA) { 45 | summary_audio(blob::blob(audio), metadata = metadata, tag = tag) 46 | } 47 | 48 | #' @describeIn summary_audio Creates an audio summary from a blob (ie list of raw vectors) 49 | #' containing WAV encoded audio files. 50 | #' @export 51 | summary_audio.blob <- function(audio, ..., metadata = NULL, tag = NA) { 52 | 53 | if (is.null(metadata)) { 54 | metadata <- summary_metadata(plugin_name = "audio") 55 | } 56 | 57 | summary_tensor( 58 | audio, 59 | dtype = "string", 60 | metadata = metadata, 61 | tag = tag 62 | ) 63 | } 64 | -------------------------------------------------------------------------------- /man/summary_audio.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/audio.R 3 | \name{summary_audio} 4 | \alias{summary_audio} 5 | \alias{summary_audio.array} 6 | \alias{summary_audio.raw} 7 | \alias{summary_audio.blob} 8 | \title{Summary audio} 9 | \usage{ 10 | summary_audio(audio, ..., metadata = NULL, tag = NA) 11 | 12 | \method{summary_audio}{array}(audio, ..., sample_rate = 44100, metadata = NULL, tag = NA) 13 | 14 | \method{summary_audio}{raw}(audio, ..., metadata = NULL, tag = NA) 15 | 
16 | \method{summary_audio}{blob}(audio, ..., metadata = NULL, tag = NA) 17 | } 18 | \arguments{ 19 | \item{audio}{Object that will be written as an audio event in the tfevents record.} 20 | 21 | \item{...}{Currently unused.} 22 | 23 | \item{metadata}{A \code{metadata} object, as created with \code{\link[=summary_metadata]{summary_metadata()}}. In 24 | most cases you don't need to change the default.} 25 | 26 | \item{tag}{A tag that within the TensorBoard UI. See \code{\link[=log_event]{log_event()}} for other 27 | ways of specifying the tag attribute.} 28 | 29 | \item{sample_rate}{The sample rate in Hz associated to the audio values.} 30 | } 31 | \value{ 32 | An audio summary that can be logged with \code{\link[=log_event]{log_event()}}. 33 | } 34 | \description{ 35 | Audio summaries can be played withing the TensorBoard UI. 36 | } 37 | \section{Methods (by class)}{ 38 | \itemize{ 39 | \item \code{summary_audio(array)}: Creates a summary from a 3D array with dimensions 40 | \verb{(batch_size, n_samples, n_channels)}. Values must be in the range \verb{[-1, 1]}. 41 | 42 | \item \code{summary_audio(raw)}: Creates an audio summary from a raw vector containing 43 | a WAV encoded audio file. 44 | 45 | \item \code{summary_audio(blob)}: Creates an audio summary from a blob (ie list of raw vectors) 46 | containing WAV encoded audio files. 
47 | 48 | }} 49 | \examples{ 50 | tmp <- tempfile() 51 | with_logdir(tmp, { 52 | summary_audio(array(runif(100), dim = c(1,100, 1))) 53 | }) 54 | } 55 | \seealso{ 56 | Other summary: 57 | \code{\link{summary_histogram}()}, 58 | \code{\link{summary_image}()}, 59 | \code{\link{summary_scalar}()}, 60 | \code{\link{summary_text}()} 61 | } 62 | \concept{summary} 63 | -------------------------------------------------------------------------------- /src/utils.cpp: -------------------------------------------------------------------------------- 1 | #include "utils.h" 2 | #include "hparams.h" 3 | #include "generated/plugins/image/plugin_data.pb.h" 4 | #include "generated/plugins/text/plugin_data.pb.h" 5 | #include "generated/plugins/audio/plugin_data.pb.h" 6 | #include "generated/plugins/histogram/plugin_data.pb.h" 7 | 8 | tensorboard::SummaryMetadata::PluginData make_plugin_data (std::string plugin_name, 9 | SEXP plugin_content) { 10 | tensorboard::SummaryMetadata::PluginData plugin_data; 11 | plugin_data.set_plugin_name(plugin_name); 12 | 13 | if (r_is_na(plugin_content)) { 14 | if (plugin_name == "scalars") { 15 | auto content = tensorboard::ScalarPluginData(); 16 | content.set_version(0); 17 | plugin_data.set_content(content.SerializeAsString()); 18 | } 19 | if (plugin_name == "images") { 20 | auto content = tensorboard::ImagePluginData(); 21 | content.set_version(0); 22 | plugin_data.set_content(content.SerializeAsString()); 23 | } 24 | if (plugin_name == "text") { 25 | auto content = tensorboard::TextPluginData(); 26 | content.set_version(0); 27 | plugin_data.set_content(content.SerializeAsString()); 28 | } 29 | if (plugin_name == "audio") { 30 | auto content = tensorboard::AudioPluginData(); 31 | content.set_version(0); 32 | content.set_encoding(tensorboard::AudioPluginData_Encoding::AudioPluginData_Encoding_WAV); 33 | plugin_data.set_content(content.SerializeAsString()); 34 | } 35 | if (plugin_name == "histograms") { 36 | auto content = 
tensorboard::HistogramPluginData(); 37 | content.set_version(0); 38 | plugin_data.set_content(content.SerializeAsString()); 39 | } 40 | } else { 41 | if (plugin_name == "hparams") { 42 | auto content = Rcpp::as(plugin_content); 43 | plugin_data.set_content(content.SerializeAsString()); 44 | } 45 | } 46 | 47 | return plugin_data; 48 | } 49 | 50 | // [[Rcpp::export]] 51 | long get_wall_time () { 52 | return std::chrono::duration_cast( 53 | std::chrono::system_clock::now().time_since_epoch() 54 | ).count(); 55 | } 56 | -------------------------------------------------------------------------------- /tests/testthat/test-audio.R: -------------------------------------------------------------------------------- 1 | test_that("can write an audio file", { 2 | f <- wav::read_wav(test_path("resources/test-audio.wav")) 3 | audio <- array(t(f), dim = c(1, rev(dim(f)))) 4 | 5 | temp <- tempfile() 6 | with_logdir(temp, { 7 | log_event(x = summary_audio(audio)) 8 | }) 9 | 10 | skip_if_tbparse_not_available() 11 | reader <- tbparse$SummaryReader(temp) 12 | 13 | # couldn't find a way to decode the binary string directly from memory, 14 | # so we write to disk and read again 15 | temp2 <- tempfile() 16 | w <- reticulate::import_builtins()$open(temp2, "wb") 17 | w$write(reader$tensors$value[[1]][[1]]) 18 | 19 | f2 <- wav::read_wav(temp2) 20 | expect_true(all.equal(as.numeric(f), as.numeric(f2))) 21 | }) 22 | 23 | test_that("can write multiple audio files from a array", { 24 | f <- wav::read_wav(test_path("resources/test-audio.wav")) 25 | f_t <- t(f) 26 | audio <- array(0, dim = c(10, rev(dim(f)))) 27 | for (i in 1:10) { 28 | audio[i,,] <- f_t 29 | } 30 | 31 | temp <- tempfile() 32 | with_logdir(temp, { 33 | log_event(x = summary_audio(audio)) 34 | }) 35 | 36 | skip_if_tbparse_not_available() 37 | reader <- tbparse$SummaryReader(temp) 38 | 39 | for (i in 1:10) { 40 | temp2 <- tempfile() 41 | w <- reticulate::import_builtins()$open(temp2, "wb") 42 | 
w$write(reader$tensors$value[[1]][[i]]) 43 | 44 | f2 <- wav::read_wav(temp2) 45 | expect_true(all.equal(as.numeric(f), as.numeric(f2))) 46 | } 47 | }) 48 | 49 | test_that("can write directly from raw encoded file", { 50 | path <- test_path("resources/test-audio.wav") 51 | audio <- readBin(path, what = raw(), n = fs::file_info(path)$size) 52 | 53 | temp <- tempfile() 54 | with_logdir(temp, { 55 | log_event(x = summary_audio(audio)) 56 | }) 57 | 58 | skip_if_tbparse_not_available() 59 | reader <- tbparse$SummaryReader(temp) 60 | 61 | # couldn't find a way to decode the binary string directly from memory, 62 | # so we write to disk and read again 63 | temp2 <- tempfile() 64 | w <- reticulate::import_builtins()$open(temp2, "wb") 65 | w$write(reader$tensors$value[[1]][[1]]) 66 | 67 | expect_true(all.equal(wav::read_wav(path), wav::read_wav(temp2))) 68 | }) 69 | 70 | -------------------------------------------------------------------------------- /.github/workflows/test-coverage.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | branches: [main, master] 8 | 9 | name: test-coverage 10 | 11 | permissions: read-all 12 | 13 | jobs: 14 | test-coverage: 15 | runs-on: ubuntu-latest 16 | env: 17 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 18 | 19 | steps: 20 | - uses: actions/checkout@v4 21 | with: 22 | submodules: 'true' 23 | 24 | - uses: r-lib/actions/setup-r@v2 25 | with: 26 | use-public-rspm: true 27 | 28 | - uses: r-lib/actions/setup-r-dependencies@v2 29 | with: 30 | extra-packages: any::covr, any::xml2 31 | needs: coverage 32 | 33 | - name: Install Python libraries used for testing 34 | run: | 35 | reticulate::py_install(c("tbparse", "tensorflow"), pip = TRUE) 36 | shell: Rscript {0} 37 | 38 | - name: Test coverage 39 | run: | 40 | cov <- covr::package_coverage( 41 | quiet = FALSE, 42 | clean = FALSE, 43 | install_path = file.path(normalizePath(Sys.getenv("RUNNER_TEMP"), winslash = "/"), "package") 44 | ) 45 | covr::to_cobertura(cov) 46 | shell: Rscript {0} 47 | 48 | - uses: codecov/codecov-action@v4 49 | with: 50 | fail_ci_if_error: ${{ github.event_name != 'pull_request' && true || false }} 51 | file: ./cobertura.xml 52 | plugin: noop 53 | disable_search: true 54 | token: ${{ secrets.CODECOV_TOKEN }} 55 | 56 | - name: Show testthat output 57 | if: always() 58 | run: | 59 | ## -------------------------------------------------------------------- 60 | find '${{ runner.temp }}/package' -name 'testthat.Rout*' -exec cat '{}' \; || true 61 | shell: bash 62 | 63 | - name: Upload test results 64 | if: failure() 65 | uses: actions/upload-artifact@v4 66 | with: 67 | name: coverage-test-failures 68 | path: ${{ runner.temp }}/package 69 | -------------------------------------------------------------------------------- /man/log_hparams_config.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | 
% Please edit documentation in R/hparams.R 3 | \name{log_hparams_config} 4 | \alias{log_hparams_config} 5 | \alias{summary_hparams_config} 6 | \title{Logs hyperparameters configuration} 7 | \usage{ 8 | log_hparams_config(hparams, metrics, time_created_secs = get_wall_time()) 9 | 10 | summary_hparams_config(hparams, metrics, time_created_secs = get_wall_time()) 11 | } 12 | \arguments{ 13 | \item{hparams}{A list of \code{hparams} objects as created by \code{\link[=hparams_hparam]{hparams_hparam()}}.} 14 | 15 | \item{metrics}{A list of \code{metrics} objects as created by \code{\link[=hparams_metric]{hparams_metric()}}. 16 | These metrics will be tracked by TensorBoard UI when displaying the 17 | hyperparameter tuning table.} 18 | 19 | \item{time_created_secs}{The time the experiment is created in seconds 20 | since the UNIX epoch.} 21 | } 22 | \value{ 23 | Invisibly returns the HParam conffuration data as a \code{summary} object. 24 | } 25 | \description{ 26 | Logs the hyperaparameter configuration for a HyperParameter tuning experiment. 27 | It allows you to define the domain for each hyperparameters and what are the 28 | metrics that should be shown in the TensorBoard UI, along with configuring 29 | their display name and descriptions. 30 | } 31 | \section{Functions}{ 32 | \itemize{ 33 | \item \code{summary_hparams_config()}: For advanced users only. Creates a hyperaparameter 34 | configuration summary that can be logged with \code{\link[=log_event]{log_event()}}. 35 | 36 | }} 37 | \section{Recommendations}{ 38 | 39 | 40 | When loging hyperparameter tuning experiments, the log directory organization 41 | is: 42 | 43 | \if{html}{\out{
}}\preformatted{- root: 44 | - log_hparams_config(...) 45 | - run1: 46 | - log_hparams(...) 47 | - log_event(...) 48 | - run2: 49 | - log_hparams(...) 50 | - log_event(...) 51 | }\if{html}{\out{
I.e., you should have a root logdir that will only contain the hyperparameter 54 | config log, as created with \code{\link[=log_hparams_config]{log_hparams_config()}}. Then each run in the 55 | experiment will have its own logdir as a child directory of the root logdir.
31 | Defaults to 30 if not specified.} 32 | } 33 | \value{ 34 | An histogram summary that can be logged with \code{\link[=log_event]{log_event()}}. 35 | } 36 | \description{ 37 | Writes an histogram for later analysis in TensorBoard's Histograms and 38 | Distributions tab. 39 | } 40 | \section{Methods (by class)}{ 41 | \itemize{ 42 | \item \code{summary_histogram(numeric)}: Creates an histogram summary for a numeric vector. 43 | 44 | \item \code{summary_histogram(array)}: Creates an histogram for array data. 45 | 46 | }} 47 | \examples{ 48 | temp <- tempfile() 49 | with_logdir(temp, { 50 | for(i in 1:10) { 51 | log_event(x = summary_histogram(rnorm(10000))) 52 | } 53 | }) 54 | 55 | } 56 | \seealso{ 57 | Other summary: 58 | \code{\link{summary_audio}()}, 59 | \code{\link{summary_image}()}, 60 | \code{\link{summary_scalar}()}, 61 | \code{\link{summary_text}()} 62 | } 63 | \concept{summary} 64 | -------------------------------------------------------------------------------- /src/record_writer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "record_writer.h" 6 | extern "C" { 7 | #include "crc32c.h" 8 | } 9 | 10 | 11 | // helper functions 12 | 13 | // some changes from: 14 | // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/core/coding.cc#L43 15 | void encode_fixed_64(char* buf, std::uint64_t value) { 16 | memcpy(buf, &value, sizeof(value)); 17 | } 18 | 19 | void encode_fixed_32(char* buf, std::uint32_t value) { 20 | memcpy(buf, &value, sizeof(value)); 21 | } 22 | 23 | 24 | // found implementation here: https://bidetly.io/2017/02/08/crc-part-1/ 25 | std::uint32_t crc32_c(const char* first, int len) { 26 | return crc32c(0, (void*) first, len); 27 | } 28 | 29 | // mask delta constant 30 | // https://github.com/tensorflow/tensorflow/blob/754048a0453a04a761e112ae5d99c149eb9910dd/tensorflow/core/lib/hash/crc32c.h#L33 31 | const uint32_t mask_delta = 
0xa282ead8ul; 32 | 33 | // making crc 34 | // https://github.com/tensorflow/tensorflow/blob/754048a0453a04a761e112ae5d99c149eb9910dd/tensorflow/core/lib/hash/crc32c.h#L40 35 | uint32_t mask(uint32_t crc) { 36 | return ((crc >> 15) | (crc << 17)) + mask_delta; 37 | } 38 | 39 | std::uint32_t masked_crc (char * data, std::size_t n) { 40 | return mask(crc32_c(data, n)); 41 | } 42 | 43 | RecordWriter::RecordWriter (std::string path) { 44 | this->path = path; 45 | this->writer.open(path, std::ios::binary); 46 | } 47 | 48 | RecordWriter::~RecordWriter () { 49 | this->writer.flush(); 50 | this->writer.close(); 51 | } 52 | 53 | bool RecordWriter::write_record (std::string data) { 54 | 55 | // Format of a single record: 56 | // uint64 length 57 | // uint32 masked crc of length 58 | // byte data[length] 59 | // uint32 masked crc of data 60 | 61 | char length[sizeof(std::uint64_t)]; 62 | encode_fixed_64(length, data.size()); 63 | 64 | char length_crc[sizeof(std::uint32_t)]; 65 | encode_fixed_32(length_crc, masked_crc(length, sizeof(std::uint64_t))); 66 | 67 | char data_crc[sizeof(std::uint32_t)]; 68 | char * data_array = const_cast(data.c_str()); 69 | encode_fixed_32(data_crc, masked_crc(data_array, data.size())); 70 | 71 | this->writer.write(length, sizeof(length)); 72 | this->writer.write(length_crc, sizeof(length_crc)); 73 | this->writer.write(data.c_str(), data.length()); 74 | this->writer.write(data_crc, sizeof(data_crc)); 75 | this->writer.flush(); 76 | 77 | return true; 78 | } 79 | 80 | void RecordWriter::flush () { 81 | this->writer.flush(); 82 | } 83 | 84 | -------------------------------------------------------------------------------- /src/proto/types.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorboard; 4 | 5 | option cc_enable_arenas = true; 6 | option java_outer_classname = "TypesProtos"; 7 | option java_multiple_files = true; 8 | option java_package = 
"org.tensorflow.framework"; 9 | option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/types_go_proto"; 10 | 11 | // (== suppress_warning documentation-presence ==) 12 | // DISABLED.IfChange 13 | enum DataType { 14 | // Not a legal value for DataType. Used to indicate a DataType field 15 | // has not been set. 16 | DT_INVALID = 0; 17 | 18 | // Data types that all computation devices are expected to be 19 | // capable to support. 20 | DT_FLOAT = 1; 21 | DT_DOUBLE = 2; 22 | DT_INT32 = 3; 23 | DT_UINT8 = 4; 24 | DT_INT16 = 5; 25 | DT_INT8 = 6; 26 | DT_STRING = 7; 27 | DT_COMPLEX64 = 8; // Single-precision complex 28 | DT_INT64 = 9; 29 | DT_BOOL = 10; 30 | DT_QINT8 = 11; // Quantized int8 31 | DT_QUINT8 = 12; // Quantized uint8 32 | DT_QINT32 = 13; // Quantized int32 33 | DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops. 34 | DT_QINT16 = 15; // Quantized int16 35 | DT_QUINT16 = 16; // Quantized uint16 36 | DT_UINT16 = 17; 37 | DT_COMPLEX128 = 18; // Double-precision complex 38 | DT_HALF = 19; 39 | DT_RESOURCE = 20; 40 | DT_VARIANT = 21; // Arbitrary C++ data types 41 | DT_UINT32 = 22; 42 | DT_UINT64 = 23; 43 | 44 | // Do not use! These are only for parameters. Every enum above 45 | // should have a corresponding value below (verified by types_test). 
46 | DT_FLOAT_REF = 101; 47 | DT_DOUBLE_REF = 102; 48 | DT_INT32_REF = 103; 49 | DT_UINT8_REF = 104; 50 | DT_INT16_REF = 105; 51 | DT_INT8_REF = 106; 52 | DT_STRING_REF = 107; 53 | DT_COMPLEX64_REF = 108; 54 | DT_INT64_REF = 109; 55 | DT_BOOL_REF = 110; 56 | DT_QINT8_REF = 111; 57 | DT_QUINT8_REF = 112; 58 | DT_QINT32_REF = 113; 59 | DT_BFLOAT16_REF = 114; 60 | DT_QINT16_REF = 115; 61 | DT_QUINT16_REF = 116; 62 | DT_UINT16_REF = 117; 63 | DT_COMPLEX128_REF = 118; 64 | DT_HALF_REF = 119; 65 | DT_RESOURCE_REF = 120; 66 | DT_VARIANT_REF = 121; 67 | DT_UINT32_REF = 122; 68 | DT_UINT64_REF = 123; 69 | } 70 | // DISABLED.ThenChange( 71 | // https://www.tensorflow.org/code/tensorflow/c/tf_datatype.h, 72 | // https://www.tensorflow.org/code/tensorflow/go/tensor.go, 73 | // https://www.tensorflow.org/code/tensorboard/compat/proto/tensor.cc, 74 | // https://www.tensorflow.org/code/tensorboard/compat/proto/types.h, 75 | // https://www.tensorflow.org/code/tensorboard/compat/proto/types.cc, 76 | // https://www.tensorflow.org/code/tensorboard/compat/proto/dtypes.py, 77 | // https://www.tensorflow.org/code/tensorboard/compat/proto/function.py) 78 | -------------------------------------------------------------------------------- /man/summary_image.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/image.R 3 | \name{summary_image} 4 | \alias{summary_image} 5 | \alias{summary_image.ggplot} 6 | \alias{summary_image.array} 7 | \alias{summary_image.blob} 8 | \alias{summary_image.raw} 9 | \title{Creates a image summary} 10 | \usage{ 11 | summary_image(img, ..., metadata = NULL, tag = NA) 12 | 13 | \method{summary_image}{ggplot}(img, ..., width = 480, height = 480, metadata = NULL, tag = NA) 14 | 15 | \method{summary_image}{array}(img, ..., metadata = NULL, tag = NA) 16 | 17 | \method{summary_image}{blob}(img, ..., width, height, colorspace, metadata = NULL, tag = NA) 
18 | 19 | \method{summary_image}{raw}(img, ..., width, height, colorspace, metadata = NULL, tag = NA) 20 | } 21 | \arguments{ 22 | \item{img}{An object that can be converted to an image.} 23 | 24 | \item{...}{Currently unused.} 25 | 26 | \item{metadata}{A \code{metadata} object, as created with \code{\link[=summary_metadata]{summary_metadata()}}. In 27 | most cases you don't need to change the default.} 28 | 29 | \item{tag}{A tag that within the TensorBoard UI. See \code{\link[=log_event]{log_event()}} for other 30 | ways of specifying the tag attribute.} 31 | 32 | \item{width}{Width of the image.} 33 | 34 | \item{height}{Height of the image.} 35 | 36 | \item{colorspace}{Valid colorspace values are 37 | \code{1 - grayscale}, 38 | \code{2 - grayscale + alpha}, 39 | \code{3 - RGB}, 40 | \code{4 - RGBA}, 41 | \code{5 - DIGITAL_YUV}, 42 | \code{6 - BGRA}} 43 | } 44 | \value{ 45 | An image summary that can be logged with \code{\link[=log_event]{log_event()}}. 46 | } 47 | \description{ 48 | Creates a image summary 49 | } 50 | \section{Methods (by class)}{ 51 | \itemize{ 52 | \item \code{summary_image(ggplot)}: Cretes an image summary from a ggplot2 graph object. 53 | The \code{...} will be forwarded to \code{\link[grDevices:png]{grDevices::png()}}. 54 | 55 | \item \code{summary_image(array)}: Creates an image from an R array. The array should be 56 | numeric, with values between 0 and 1. Dimensions should be \verb{(batch, height, width, channels)}. 57 | 58 | \item \code{summary_image(blob)}: Creates an image from \code{\link[blob:blob]{blob::blob()}} vctr of PNG encoded images, 59 | (eg using \code{\link[png:writePNG]{png::writePNG()}}). \code{width}, \code{height} and \code{colorspace} are recycled 60 | thus they can be a single scalar or a vector the same size of the images blob. 61 | 62 | \item \code{summary_image(raw)}: Creates an image from a png encoded image. Eg, created 63 | with \code{\link[png:writePNG]{png::writePNG()}}. 
In this case you need to provide \code{width}, \code{height} and 64 | \code{colorspace} arguments. 65 | 66 | }} 67 | \examples{ 68 | tmp <- tempfile() 69 | with_logdir(tmp, { 70 | summary_image(array(runif(100), dim = c(1,10, 10, 1))) 71 | }) 72 | } 73 | \seealso{ 74 | Other summary: 75 | \code{\link{summary_audio}()}, 76 | \code{\link{summary_histogram}()}, 77 | \code{\link{summary_scalar}()}, 78 | \code{\link{summary_text}()} 79 | } 80 | \concept{summary} 81 | -------------------------------------------------------------------------------- /R/histogram.R: -------------------------------------------------------------------------------- 1 | #' Creates an histogram summary 2 | #' 3 | #' Writes an histogram for later analysis in TensorBoard's Histograms and 4 | #' Distributions tab. 5 | #' 6 | #' @param data A Tensor of any shape. The histogram is computed over its elements, 7 | #' which must be castable to float64. 8 | #' @inheritParams summary_scalar 9 | #' @param buckets Optional positive int. The output will have this many buckets, 10 | #' except in two edge cases. If there is no data, then there are no buckets. 11 | #' If there is data but all points have the same value, then all buckets' left 12 | #' and right endpoints are the same and only the last bucket has nonzero count. 13 | #' Defaults to 30 if not specified. 14 | #' 15 | #' @returns 16 | #' An histogram summary that can be logged with [log_event()]. 17 | #' @family summary 18 | #' 19 | #' @examples 20 | #' temp <- tempfile() 21 | #' with_logdir(temp, { 22 | #' for(i in 1:10) { 23 | #' log_event(x = summary_histogram(rnorm(10000))) 24 | #' } 25 | #' }) 26 | #' 27 | #' @export 28 | summary_histogram <- function(data, ..., metadata = NULL, tag = NA) { 29 | UseMethod("summary_histogram") 30 | } 31 | 32 | #' @describeIn summary_histogram Creates an histogram summary for a numeric vector. 
#' @export
summary_histogram.numeric <- function(data, ..., metadata = NULL, tag = NA, buckets = 30) {
  # Mostly a literal translation of TensorBoard's histogram summary code:
  # https://github.com/tensorflow/tensorboard/blob/2cd515880ea26ec66cfa85fbb38ad96cc38f6985/tensorboard/plugins/histogram/summary_v2.py#L41
  # The result is a (buckets x 3) matrix of (left_edge, right_edge, count)
  # rows, logged as a double tensor for the "histograms" plugin.
  if (buckets == 0 || length(data) == 0) {
    # No data (or no buckets requested): emit an all-zero matrix so the
    # shape is still (buckets, 3).
    histogram_buckets <- array(0, dim = c(buckets, 3))
  } else {
    min_ <- min(data)
    max_ <- max(data)
    range_ <- max_ - min_
    if (range_ == 0) {
      # Degenerate case: all points share one value, so every bucket has
      # identical left/right edges and only the last bucket gets the count.
      left_edges <- right_edges <- rep(min_, buckets)
      bucket_counts <- c(rep(0, buckets - 1), length(data))
      histogram_buckets <- cbind(left_edges, right_edges, bucket_counts)
    } else {
      bucket_width <- range_ / buckets
      offsets <- data - min_
      bucket_indices <- floor(offsets / bucket_width)
      # BUGFIX: clamp to the last bucket index (buckets - 1), matching
      # TensorBoard's `tf.minimum(bucket_indices, bucket_count - 1)`.
      # The maximum value has raw index == buckets; the previous clamp to
      # `buckets` left it outside the 0..(buckets-1) counting range, so
      # max(data) was silently dropped from the histogram.
      clamped_indices <- pmin(bucket_indices, buckets - 1)
      # vapply over bucket indices; type-stable (unlike sapply).
      bucket_counts <- vapply(
        seq(0, buckets - 1),
        function(i) sum(clamped_indices == i),
        numeric(1)
      )
      edges <- seq(min_, max_, length.out = buckets + 1)
      left_edges <- utils::head(edges, buckets)
      right_edges <- utils::tail(edges, buckets)
      histogram_buckets <- cbind(left_edges, right_edges, bucket_counts)
    }
  }

  if (is.null(metadata)) {
    metadata <- summary_metadata(plugin_name = "histograms")
  }

  summary_tensor(
    histogram_buckets,
    dtype = "double",
    metadata = metadata,
    tag = tag
  )
}

#' @describeIn summary_histogram Creates an histogram for array data.
73 | #' @export 74 | summary_histogram.array <- function(data, ..., metadata = NULL, tag = NA, buckets = 30) { 75 | summary_histogram( 76 | as.numeric(data), 77 | ..., 78 | metadata = metadata, 79 | tag = tag, 80 | buckets = buckets 81 | ) 82 | } 83 | 84 | -------------------------------------------------------------------------------- /src/proto/plugins/hparams/plugin_data.proto: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Defines protos for storing a hypertuning experiment data inside Summary tags. 17 | // 18 | // A hypertuning-experiment data consists of metadata that's constant 19 | // throughout the experiment and evolving metric data for each training session 20 | // in the experiment. The HParams plugin assumes the following organization of 21 | // this entire data set. Experiment metadata is recorded in the empty run in a 22 | // tag (named by the Python constant) metadata.EXPERIMENT_TAG. Within the 23 | // experiment, for a session named by its metadata is recorded 24 | // in the run in the tags metadata.SESSION_START_INFO and 25 | // metadata.SESSION_END_INFO. 
Finally, the session's metric data for a metric 26 | // with a (, ) name (see MetricName in api.proto), is recorded 27 | // in a Scalar-plugin summary with tag in the run . 28 | 29 | syntax = "proto3"; 30 | 31 | import "plugins/hparams/api.proto"; 32 | import "google/protobuf/struct.proto"; 33 | 34 | package tensorboard.hparams; 35 | 36 | // HParam summaries created by `tensorboard.plugins.hparams.summary` 37 | // module will include `SummaryMetadata` whose `plugin_data` field has 38 | // as `content` a serialized HParamsPluginData message. 39 | message HParamsPluginData { 40 | // The version of the plugin data schema. 41 | int32 version = 1; 42 | oneof data { 43 | Experiment experiment = 2; 44 | SessionStartInfo session_start_info = 3; 45 | SessionEndInfo session_end_info = 4; 46 | } 47 | } 48 | 49 | message SessionStartInfo { 50 | // A map describing the hyperparameter values for the session. 51 | // Maps each hyperparameter name to its value. 52 | // Currently only scalars are supported. 53 | map hparams = 1; 54 | 55 | // A URI for where checkpoints are saved. 56 | string model_uri = 2; 57 | 58 | // An optional URL to a website monitoring the session. 59 | string monitor_url = 3; 60 | 61 | // The name of the session group containing this session. If empty, the 62 | // group name is taken to be the session id (so this session is the only 63 | // member of its group). 64 | string group_name = 4; 65 | 66 | // The time the session started in seconds since epoch. 67 | double start_time_secs = 5; 68 | } 69 | 70 | message SessionEndInfo { 71 | Status status = 1; 72 | 73 | // The time the session ended in seconds since epoch. 
74 | double end_time_secs = 2; 75 | } 76 | -------------------------------------------------------------------------------- /R/tensor.R: -------------------------------------------------------------------------------- 1 | summary_tensor <- function(x, dtype = NA, ..., metadata = NULL, tag = NA) { 2 | new_summary_tensor(x = x, dtype = dtype, metadata = metadata, tag = tag) 3 | } 4 | 5 | new_summary_tensor <- function(x, dtype = NA, ..., metadata = NULL, tag = NA) { 6 | if (is.null(metadata)) { 7 | metadata <- summary_metadata(plugin_name = "tensor") 8 | } 9 | summary_values(metadata = metadata, tensor = as_tensor_proto(x, dtype), 10 | class = "tfevents_summary_tensor", tag = tag) 11 | } 12 | 13 | as_tensor_proto <- function(x, dtype = NA, ...) { 14 | if (rlang::is_na(x)) return(vec_cast(NA, new_tensor_proto())) 15 | UseMethod("as_tensor_proto") 16 | } 17 | 18 | as_tensor_proto.blob <- function(x, dtype = NA, ...) { 19 | if (is.na(dtype)) dtype <- "string" 20 | if (!dtype %in% c("string")) 21 | cli::cli_abort("dtype should be string when converting a blob to tensor proto.") 22 | tensor_proto(list(x), shape = new_tensor_shape(dim = length(x)), dtype = dtype) 23 | } 24 | 25 | as_tensor_proto.character <- function(x, dtype = NA, ...) { 26 | if (is.na(dtype)) dtype <- "string" 27 | if (!dtype %in% c("string")) 28 | cli::cli_abort("dtype should be string when converting a character to tensor proto.") 29 | tensor_proto(list(x), shape = new_tensor_shape(dim = length(x)), dtype = dtype) 30 | } 31 | 32 | as_tensor_proto.array <- function(x, dtype = NA, ...) { 33 | dims <- dim(x) 34 | # proto store tensor data in C ordering, thus we need to reshape values 35 | # here. 36 | x <- aperm(x, rev(seq_along(dims))) 37 | tensor_proto(x, shape = new_tensor_shape(dim = list(dims)), dtype = dtype) 38 | } 39 | 40 | as_tensor_proto.list <- function(x, dtype, ...) 
{ 41 | c(x, dtype) %<-% vec_recycle_common(x, dtype) 42 | results <- lapply(seq_along(x), function(i) { 43 | as_tensor_proto(x[[i]], dtype[[i]], ...) 44 | }) 45 | vec_c(!!!results) 46 | } 47 | 48 | tensor_proto <- function(content, shape, dtype = NA) { 49 | if (!is.list(content)) content <- list(content) 50 | 51 | if ((length(dtype) == 1) && is.na(dtype)) { 52 | dtype <- sapply(content, make_default_dtype) 53 | } 54 | 55 | if (!inherits(shape, "tensor_shape")) { 56 | if (is.list(shape)) 57 | shape <- new_tensor_shape(dim = shape) 58 | } 59 | 60 | 61 | new_tensor_proto( 62 | content = content, 63 | shape = shape, 64 | dtype = dtype 65 | ) 66 | } 67 | 68 | make_default_dtype <- function(x) { 69 | if (is.numeric(x)) { 70 | return("float") 71 | } 72 | if (is.character(x)) { 73 | return("string") 74 | } 75 | } 76 | 77 | new_tensor_proto <- function(content = new_list_of(), shape = new_tensor_shape(), dtype = character()) { 78 | new_rcrd( 79 | fields = list( 80 | content = content, 81 | shape = shape, 82 | dtype = dtype 83 | ), 84 | class = "tensor_proto" 85 | ) 86 | } 87 | 88 | new_tensor_shape <- function(dim = new_list_of(ptype = integer())) { 89 | new_rcrd( 90 | fields = list( 91 | dim = dim 92 | ), 93 | class = "tensor_shape" 94 | ) 95 | } 96 | 97 | #' @export 98 | vec_cast.tensor_proto.tensor_proto <- function(x, to, ...) 
x 99 | 100 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(as_event,character) 4 | S3method(as_event,list) 5 | S3method(as_event,numeric) 6 | S3method(as_event,tfevents_summary_values) 7 | S3method(format,tfevents_event) 8 | S3method(format,tfevents_summary) 9 | S3method(format,tfevents_summary_values) 10 | S3method(summary_audio,array) 11 | S3method(summary_audio,blob) 12 | S3method(summary_audio,raw) 13 | S3method(summary_histogram,array) 14 | S3method(summary_histogram,numeric) 15 | S3method(summary_image,array) 16 | S3method(summary_image,blob) 17 | S3method(summary_image,ggplot) 18 | S3method(summary_image,raw) 19 | S3method(summary_text,character) 20 | S3method(value,tfevents_summary_values) 21 | S3method(value,tfevents_summary_values_audio) 22 | S3method(value,tfevents_summary_values_histograms) 23 | S3method(value,tfevents_summary_values_images) 24 | S3method(value,tfevents_summary_values_scalars) 25 | S3method(value,tfevents_summary_values_text) 26 | S3method(vec_cast,hparams_experiment.hparams_experiment) 27 | S3method(vec_cast,hparams_hparam_info.hparams_hparam_info) 28 | S3method(vec_cast,hparams_interval.hparams_interval) 29 | S3method(vec_cast,hparams_session_start_info.hparams_session_start_info) 30 | S3method(vec_cast,list.hparams_hparams_plugin_data) 31 | S3method(vec_cast,summary_summary_image.summary_summary_image) 32 | S3method(vec_cast,tensor_proto.tensor_proto) 33 | S3method(vec_cast,tfevents_event.tfevents_event) 34 | S3method(vec_cast,tfevents_summary.tfevents_summary_values) 35 | S3method(vec_cast,tfevents_summary_values.tfevents_summary_values) 36 | S3method(vec_ptype2,hparams_experiment.hparams_experiment) 37 | S3method(vec_ptype2,hparams_hparam_info.hparams_hparam_info) 38 | S3method(vec_ptype2,hparams_hparams_plugin_data.list) 39 | 
S3method(vec_ptype2,hparams_interval.hparams_interval) 40 | S3method(vec_ptype2,hparams_session_start_info.hparams_session_start_info) 41 | S3method(vec_ptype2,summary_hparams_config.tfevents_summary_values) 42 | S3method(vec_ptype2,summary_summary_image.summary_summary_image) 43 | S3method(vec_ptype2,tfevents_event.tfevents_event) 44 | S3method(vec_ptype2,tfevents_summary.tfevents_summary_values) 45 | S3method(vec_ptype2,tfevents_summary_scalar.tfevents_summary_values) 46 | S3method(vec_ptype2,tfevents_summary_values.summary_hparams_config) 47 | S3method(vec_ptype2,tfevents_summary_values.tfevents_summary) 48 | S3method(vec_ptype2,tfevents_summary_values.tfevents_summary_scalar) 49 | S3method(vec_ptype2,tfevents_summary_values.tfevents_summary_values) 50 | export(as_event) 51 | export(collect_events) 52 | export(events_logdir) 53 | export(get_default_logdir) 54 | export(get_global_step) 55 | export(hparams_hparam) 56 | export(hparams_metric) 57 | export(local_logdir) 58 | export(log_event) 59 | export(log_hparams) 60 | export(log_hparams_config) 61 | export(set_default_logdir) 62 | export(set_global_step) 63 | export(summary_audio) 64 | export(summary_histogram) 65 | export(summary_hparams) 66 | export(summary_hparams_config) 67 | export(summary_image) 68 | export(summary_metadata) 69 | export(summary_scalar) 70 | export(summary_text) 71 | export(value) 72 | export(with_logdir) 73 | import(vctrs) 74 | importFrom(Rcpp,sourceCpp) 75 | importFrom(blob,blob) 76 | importFrom(utils,tail) 77 | importFrom(zeallot,"%<-%") 78 | useDynLib(tfevents, .registration = TRUE) 79 | -------------------------------------------------------------------------------- /src/RcppExports.cpp: -------------------------------------------------------------------------------- 1 | // Generated by using Rcpp::compileAttributes() -> do not edit by hand 2 | // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 3 | 4 | #include "tfevents_types.h" 5 | #include 6 | 7 | using namespace Rcpp; 8 | 9 | 
#ifdef RCPP_USE_GLOBAL_ROSTREAM 10 | Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); 11 | Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); 12 | #endif 13 | 14 | // create_event_file_iterator 15 | Rcpp::XPtr create_event_file_iterator(const std::string& path, const std::string& run_name); 16 | RcppExport SEXP _tfevents_create_event_file_iterator(SEXP pathSEXP, SEXP run_nameSEXP) { 17 | BEGIN_RCPP 18 | Rcpp::RObject rcpp_result_gen; 19 | Rcpp::RNGScope rcpp_rngScope_gen; 20 | Rcpp::traits::input_parameter< const std::string& >::type path(pathSEXP); 21 | Rcpp::traits::input_parameter< const std::string& >::type run_name(run_nameSEXP); 22 | rcpp_result_gen = Rcpp::wrap(create_event_file_iterator(path, run_name)); 23 | return rcpp_result_gen; 24 | END_RCPP 25 | } 26 | // event_file_iterator_next 27 | SEXP event_file_iterator_next(Rcpp::XPtr iter); 28 | RcppExport SEXP _tfevents_event_file_iterator_next(SEXP iterSEXP) { 29 | BEGIN_RCPP 30 | Rcpp::RObject rcpp_result_gen; 31 | Rcpp::RNGScope rcpp_rngScope_gen; 32 | Rcpp::traits::input_parameter< Rcpp::XPtr >::type iter(iterSEXP); 33 | rcpp_result_gen = Rcpp::wrap(event_file_iterator_next(iter)); 34 | return rcpp_result_gen; 35 | END_RCPP 36 | } 37 | // get_wall_time 38 | long get_wall_time(); 39 | RcppExport SEXP _tfevents_get_wall_time() { 40 | BEGIN_RCPP 41 | Rcpp::RObject rcpp_result_gen; 42 | Rcpp::RNGScope rcpp_rngScope_gen; 43 | rcpp_result_gen = Rcpp::wrap(get_wall_time()); 44 | return rcpp_result_gen; 45 | END_RCPP 46 | } 47 | // event_writer 48 | Rcpp::XPtr event_writer(std::string file); 49 | RcppExport SEXP _tfevents_event_writer(SEXP fileSEXP) { 50 | BEGIN_RCPP 51 | Rcpp::RObject rcpp_result_gen; 52 | Rcpp::RNGScope rcpp_rngScope_gen; 53 | Rcpp::traits::input_parameter< std::string >::type file(fileSEXP); 54 | rcpp_result_gen = Rcpp::wrap(event_writer(file)); 55 | return rcpp_result_gen; 56 | END_RCPP 57 | } 58 | // write_events 59 | bool write_events(std::vector events, Rcpp::List writers); 60 | 
RcppExport SEXP _tfevents_write_events(SEXP eventsSEXP, SEXP writersSEXP) { 61 | BEGIN_RCPP 62 | Rcpp::RObject rcpp_result_gen; 63 | Rcpp::RNGScope rcpp_rngScope_gen; 64 | Rcpp::traits::input_parameter< std::vector >::type events(eventsSEXP); 65 | Rcpp::traits::input_parameter< Rcpp::List >::type writers(writersSEXP); 66 | rcpp_result_gen = Rcpp::wrap(write_events(events, writers)); 67 | return rcpp_result_gen; 68 | END_RCPP 69 | } 70 | 71 | static const R_CallMethodDef CallEntries[] = { 72 | {"_tfevents_create_event_file_iterator", (DL_FUNC) &_tfevents_create_event_file_iterator, 2}, 73 | {"_tfevents_event_file_iterator_next", (DL_FUNC) &_tfevents_event_file_iterator_next, 1}, 74 | {"_tfevents_get_wall_time", (DL_FUNC) &_tfevents_get_wall_time, 0}, 75 | {"_tfevents_event_writer", (DL_FUNC) &_tfevents_event_writer, 1}, 76 | {"_tfevents_write_events", (DL_FUNC) &_tfevents_write_events, 2}, 77 | {NULL, NULL, 0} 78 | }; 79 | 80 | RcppExport void R_init_tfevents(DllInfo *dll) { 81 | R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); 82 | R_useDynamicSymbols(dll, FALSE); 83 | } 84 | -------------------------------------------------------------------------------- /tests/testthat/test-log.R: -------------------------------------------------------------------------------- 1 | test_that("write a few simple scalars", { 2 | temp <- tempfile() 3 | with_logdir(temp, { 4 | for(i in 1:10) { 5 | log_event(hello = i^2) 6 | } 7 | }) 8 | 9 | events <- collect_events(temp) 10 | expect_equal(nrow(events), 10 + 1) 11 | expect_equal(events$run, rep(".", 11)) 12 | 13 | scalars <- collect_events(temp, type="scalar") 14 | expect_equal(nrow(events), 10 + 1) 15 | expect_equal(scalars$value, (1:10)^2) 16 | expect_equal(scalars$step, 0:9) 17 | expect_equal(scalars$tag, rep("hello", 10)) 18 | }) 19 | 20 | test_that("write nested scalar for multiple runs", { 21 | temp <- tempfile() 22 | with_logdir(temp, { 23 | for(i in 1:10) { 24 | log_event( 25 | train = list(loss = i, acc = i^2), 26 | 
valid = list(loss = i+1, acc = (i+1)^2) 27 | ) 28 | } 29 | }) 30 | 31 | events <- collect_events(temp) 32 | expect_equal(nrow(events), 4*10 + 2) 33 | expect_equal(unique(events$run), c("train", "valid")) 34 | 35 | scalars <- collect_events(temp, type = "scalar") 36 | expect_equal(nrow(scalars), 4*10) 37 | expect_equal(unique(scalars$tag), c("loss", "acc")) 38 | expect_true(all(unique(scalars$step) %in% 0:9)) 39 | }) 40 | 41 | test_that("can log manually created scalars directly", { 42 | 43 | temp <- tempfile() 44 | with_logdir(temp, { 45 | for(i in 1:10) { 46 | log_event( 47 | train = list(loss = i, acc = i^2), 48 | valid = list(loss = i+1, acc = (i+1)^2), 49 | test = list(loss2 = summary_scalar(i+2)) 50 | ) 51 | } 52 | }) 53 | 54 | events <- collect_events(temp) 55 | expect_equal(nrow(events), 4*10 + 10 + 3) 56 | 57 | scalars <- collect_events(temp, type = "scalar") 58 | expect_equal(scalars[scalars$tag == "loss2",]$value, 1:10 + 2) 59 | }) 60 | 61 | test_that("can log with a specified step", { 62 | 63 | temp <- tempfile() 64 | with_logdir(temp, { 65 | log_event(hello = 1) 66 | log_event(hello = 1) 67 | log_event(hello = 1) 68 | log_event(hello = 1, step = 100) 69 | log_event(hello = 1) 70 | log_event(bye = 1, step = get_global_step(increment = FALSE)) 71 | }) 72 | 73 | scalars <- collect_events(temp, type = "scalar") 74 | expect_true(100 %in% scalars$step) 75 | expect_true(3 %in% scalars$step) 76 | expect_equal(scalars$step[scalars$tag == "bye"], 3) 77 | }) 78 | 79 | test_that("local_logdir", { 80 | temp1 <- tempfile() 81 | temp2 <- tempfile() 82 | f <- function() { 83 | local_logdir(temp2) 84 | get_default_logdir() 85 | } 86 | 87 | with_logdir(temp1, { 88 | expect_equal(temp1, get_default_logdir()) 89 | expect_equal(temp2, f()) 90 | expect_equal(temp1, get_default_logdir()) 91 | }) 92 | 93 | }) 94 | 95 | test_that("can write tags with the slash instead of nested list", { 96 | temp <- tempfile() 97 | with_logdir(temp, { 98 | log_event( 99 | "train/loss" = 0.1, 
100 | "train/acc" = 0.1, 101 | "valid/loss" = 0.1, 102 | "valid/acc" = 0.1 103 | ) 104 | }) 105 | 106 | expect_equal(nrow(collect_events(temp, type = "summary")), 4) 107 | }) 108 | 109 | test_that("Errors gracefully when a numeric with length >1 is provided", { 110 | 111 | temp <- tempfile() 112 | expect_error({ 113 | with_logdir(temp, { 114 | log_event( 115 | x = c(0, 1) 116 | ) 117 | }) 118 | }, regexp = "Can't log") 119 | 120 | expect_error({ 121 | with_logdir(temp, { 122 | log_event( 123 | x = numeric() 124 | ) 125 | }) 126 | }, regexp = "Can't log") 127 | 128 | }) 129 | -------------------------------------------------------------------------------- /src/proto/tensor.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorboard; 4 | 5 | import "resource_handle.proto"; 6 | import "tensor_shape.proto"; 7 | import "types.proto"; 8 | 9 | option cc_enable_arenas = true; 10 | option java_outer_classname = "TensorProtos"; 11 | option java_multiple_files = true; 12 | option java_package = "org.tensorflow.framework"; 13 | option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_go_proto"; 14 | 15 | // Protocol buffer representing a tensor. 16 | message TensorProto { 17 | DataType dtype = 1; 18 | 19 | // Shape of the tensor. TODO(touts): sort out the 0-rank issues. 20 | TensorShapeProto tensor_shape = 2; 21 | 22 | // Only one of the representations below is set, one of "tensor_contents" and 23 | // the "xxx_val" attributes. We are not using oneof because as oneofs cannot 24 | // contain repeated fields it would require another extra set of messages. 25 | 26 | // Version number. 27 | // 28 | // In version 0, if the "repeated xxx" representations contain only one 29 | // element, that element is repeated to fill the shape. This makes it easy 30 | // to represent a constant Tensor with a single value. 
31 | int32 version_number = 3; 32 | 33 | // Serialized raw tensor content from either Tensor::AsProtoTensorContent or 34 | // memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This representation 35 | // can be used for all tensor types. The purpose of this representation is to 36 | // reduce serialization overhead during RPC call by avoiding serialization of 37 | // many repeated small items. 38 | bytes tensor_content = 4; 39 | 40 | // Type specific representations that make it easy to create tensor protos in 41 | // all languages. Only the representation corresponding to "dtype" can 42 | // be set. The values hold the flattened representation of the tensor in 43 | // row major order. 44 | 45 | // DT_HALF, DT_BFLOAT16. Note that since protobuf has no int16 type, we'll 46 | // have some pointless zero padding for each value here. 47 | repeated int32 half_val = 13 [packed = true]; 48 | 49 | // DT_FLOAT. 50 | repeated float float_val = 5 [packed = true]; 51 | 52 | // DT_DOUBLE. 53 | repeated double double_val = 6 [packed = true]; 54 | 55 | // DT_INT32, DT_INT16, DT_UINT16, DT_INT8, DT_UINT8. 56 | repeated int32 int_val = 7 [packed = true]; 57 | 58 | // DT_STRING 59 | repeated bytes string_val = 8; 60 | 61 | // DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real 62 | // and imaginary parts of i-th single precision complex. 63 | repeated float scomplex_val = 9 [packed = true]; 64 | 65 | // DT_INT64 66 | repeated int64 int64_val = 10 [packed = true]; 67 | 68 | // DT_BOOL 69 | repeated bool bool_val = 11 [packed = true]; 70 | 71 | // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real 72 | // and imaginary parts of i-th double precision complex. 
73 | repeated double dcomplex_val = 12 [packed = true]; 74 | 75 | // DT_RESOURCE 76 | repeated ResourceHandleProto resource_handle_val = 14; 77 | 78 | // DT_VARIANT 79 | repeated VariantTensorDataProto variant_val = 15; 80 | 81 | // DT_UINT32 82 | repeated uint32 uint32_val = 16 [packed = true]; 83 | 84 | // DT_UINT64 85 | repeated uint64 uint64_val = 17 [packed = true]; 86 | } 87 | 88 | // Protocol buffer representing the serialization format of DT_VARIANT tensors. 89 | message VariantTensorDataProto { 90 | // Name of the type of objects being serialized. 91 | string type_name = 1; 92 | // Portions of the object that are not Tensors. 93 | bytes metadata = 2; 94 | // Tensors contained within objects being serialized. 95 | repeated TensorProto tensors = 3; 96 | } 97 | -------------------------------------------------------------------------------- /tests/testthat/test-hparams.R: -------------------------------------------------------------------------------- 1 | skip_if_tbparse_not_available() 2 | 3 | test_that("simple hparams experiment", { 4 | 5 | hparams <- list( 6 | hparams_hparam("dropout", domain = c(min_value = 0.1, max_value = 0.5)), 7 | hparams_hparam("optimizer", domain = c("adam", "sgd")), 8 | hparams_hparam("use_cnn", domain = c(TRUE, FALSE)), 9 | hparams_hparam("num_units", c(8, 12, 16)) 10 | ) 11 | 12 | metrics <- list( 13 | hparams_metric(tag = "loss"), 14 | hparams_metric(tag = "accuracy", group = "valid"), 15 | hparams_metric(tag = "f1", dataset_type = "training"), 16 | hparams_metric(tag = "f12", dataset_type = "validation") 17 | ) 18 | 19 | temp <- tempfile() 20 | with_logdir(temp, { 21 | log_hparams_config(hparams, metrics) 22 | log_hparams( 23 | dropout = 0.1, 24 | optimizer = "adam", 25 | use_cnn = TRUE, 26 | num_units = 8 27 | ) 28 | 29 | for (i in 1:10) { 30 | log_event(loss = runif(1), valid = list(accuracy = runif(1)), 31 | f1 = runif(1), f12 = runif(1)) 32 | } 33 | }) 34 | 35 | reader <- tbparse$SummaryReader(temp) 36 | 37 | 
expect_equal(reader$hparams$tag, c("dropout", "num_units", "optimizer", "use_cnn")) 38 | expect_equal(reader$hparams$value, list(0.1, 8, "adam", TRUE)) 39 | expect_equal(nrow(reader$scalars), 40) 40 | 41 | expect_equal(nrow(collect_events(temp)), 40 + 4) 42 | expect_equal(nrow(collect_events(temp, type="summary")), 40 + 2) 43 | expect_equal(nrow(collect_events(temp, type="scalar")), 40) 44 | }) 45 | 46 | test_that("multiple runs, each in a different logdir", { 47 | 48 | hparams <- list( 49 | hparams_hparam("dropout", domain = c(min_value = 0.1, max_value = 0.5)), 50 | hparams_hparam("optimizer", domain = c("adam", "sgd")), 51 | hparams_hparam("use_cnn", domain = c(TRUE, FALSE)), 52 | hparams_hparam("num_units", c(8, 12, 16)) 53 | ) 54 | 55 | metrics <- list( 56 | hparams_metric(tag = "loss"), 57 | hparams_metric(tag = "accuracy", group = "valid"), 58 | hparams_metric(tag = "f1", dataset_type = "training") 59 | ) 60 | 61 | temp <- tempfile() 62 | 63 | with_logdir(temp, { 64 | log_hparams_config(hparams, metrics) 65 | }) 66 | 67 | for(run in 1:10) { 68 | with_logdir(file.path(temp, paste0("run", run)), { 69 | log_hparams( 70 | dropout = runif(1, min = 0.1, max = 0.5), 71 | optimizer = sample(c("adam", "sgd"), 1), 72 | use_cnn = sample(c(TRUE, FALSE), 1), 73 | num_units = sample(c(8, 12, 16), 1) 74 | ) 75 | 76 | for (i in 1:10) { 77 | log_event(loss = runif(1), valid = list(accuracy = runif(1)), 78 | f1 = runif(1)) 79 | } 80 | }) 81 | } 82 | 83 | reader <- tbparse$SummaryReader(temp) 84 | expect_equal(nrow(reader$hparams), 40) 85 | expect_equal(nrow(collect_events(file.path(temp, "run1"))), 30 + 3) 86 | }) 87 | 88 | test_that("write only hparams without the config", { 89 | 90 | temp <- tempfile() 91 | for(run in 1:10) { 92 | with_logdir(file.path(temp, paste0("run", run)), { 93 | log_hparams( 94 | dropout = runif(1, min = 0.1, max = 0.5), 95 | optimizer = sample(c("adam", "sgd"), 1), 96 | use_cnn = sample(c(TRUE, FALSE), 1), 97 | num_units = sample(c(8, 12, 16), 1) 98 
| ) 99 | 100 | for (i in 1:10) { 101 | log_event(loss = runif(1), valid = list(accuracy = runif(1)), 102 | f1 = runif(1)) 103 | } 104 | }) 105 | } 106 | 107 | reader <- tbparse$SummaryReader(temp) 108 | expect_equal(nrow(reader$hparams), 40) 109 | 110 | }) 111 | 112 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | # tfevents 5 | 6 | 7 | 8 | [![R-CMD-check](https://github.com/mlverse/tfevents/actions/workflows/check.yaml/badge.svg)](https://github.com/mlverse/tfevents/actions/workflows/check.yaml) 9 | [![Codecov test 10 | coverage](https://codecov.io/gh/mlverse/tfevents/branch/main/graph/badge.svg)](https://app.codecov.io/gh/mlverse/tfevents?branch=main) 11 | [![CRAN 12 | status](https://www.r-pkg.org/badges/version/tfevents)](https://CRAN.R-project.org/package=tfevents) 13 | [![](https://cranlogs.r-pkg.org/badges/tfevents)](https://cran.r-project.org/package=tfevents) 14 | 15 | 16 | tfevents allows logging data from machine learning experiments to a file 17 | format that can be later consumed by 18 | [TensorBoard](https://www.tensorflow.org/tensorboard) in order to 19 | generate visualizations. 20 | 21 | ## Installation 22 | 23 | You can install tfevents from CRAN with: 24 | 25 | ``` r 26 | install.packages("tfevents") 27 | ``` 28 | 29 | You can install the development version of tfevents from 30 | [GitHub](https://github.com/) with: 31 | 32 | You need to have `cmake` on your path. 
See installation instructions in 33 | the [cmake install webpage](https://cmake.org/resources/) - or: 34 | 35 | If you use `brew` on MacOS you can run: 36 | 37 | ``` shell 38 | brew install cmake 39 | ``` 40 | 41 | Or on linux install the `cmake` library, for example on Debian systems: 42 | 43 | ``` shell 44 | sudo apt install cmake 45 | ``` 46 | 47 | ``` r 48 | # install.packages("devtools") 49 | devtools::install_github("mlverse/tfevents") 50 | ``` 51 | 52 | ## Example 53 | 54 | The main entrypoint in tfevents API is the `log_event` function. It can 55 | be used to log **summaries** like scalars, images, audio (Coming soon), 56 | histograms (Coming soon) and arbitrary tensors (soon) to a log 57 | directory, which we like to call `logdir`. You can later point 58 | TensorBoard to this `logdir` to visualize the results. 59 | 60 | ``` r 61 | library(tfevents) 62 | ``` 63 | 64 | Summaries are always associated to a step in the TensorBoard API, and 65 | `log_event` automatically increases the **`step`** everytime it’s 66 | called, unless you provide the `step` argument. 67 | 68 | Let’s start by logging some metrics: 69 | 70 | ``` r 71 | epochs <- 10 72 | for (i in seq_len(epochs)) { 73 | # training code would go here 74 | log_event( 75 | train = list(loss = runif(1), acc = runif(1)), 76 | valid = list(loss = runif(1), acc = runif(1)) 77 | ) 78 | } 79 | ``` 80 | 81 | By default this will create a `logs` directory in your working directory 82 | and write metrics to it - you can change the default `logdir` using 83 | context like `with_logdir` or globally with `set_default_logdir()`. 84 | 85 | Since we passed a nested list of metrics, `log_event` will create 86 | subdirectories under `logs` to write metrics for each group. 
87 | 88 | ``` r 89 | fs::dir_tree("logs") 90 | #> logs 91 | #> ├── train 92 | #> │ └── events.out.tfevents.1719410709.v2 93 | #> └── valid 94 | #> └── events.out.tfevents.1719410709.v2 95 | ``` 96 | 97 | You can later point TensorBoard to that logdir using TensorBoard’s 98 | command line interface or tensorflow’s utility function `tensorboard()` 99 | 100 | ``` r 101 | tensorflow::tensorboard(normalizePath("logs"), port = 6060) 102 | #> Started TensorBoard at http://127.0.0.1:6060 103 | ``` 104 | 105 | TensorBoard will display the results in a dashbboard, similar to one you 106 | can see in the screenshot below: 107 | 108 | 109 | 110 | You can learn more in the [tfevents website](#TODO). 111 | -------------------------------------------------------------------------------- /tests/testthat/test-image.R: -------------------------------------------------------------------------------- 1 | test_that("write image", { 2 | temp <- tempfile() 3 | orig_img <- png::readPNG(test_path("resources/img.png")) 4 | img <- array(orig_img, dim = c(1, 28, 28, 1)) 5 | 6 | with_logdir(temp, { 7 | log_event(hello = summary_image(img)) 8 | }) 9 | 10 | events <- collect_events(temp) 11 | expect_equal(nrow(events), 2) 12 | 13 | 14 | skip_if_tbparse_not_available() 15 | reader <- tbparse$SummaryReader(temp) 16 | buf <- reader$tensors$value[[1]][[3]] 17 | # TODO: in theory we don't need tensorflow for this, but couldnt find a way to 18 | # cast the bytestring to a raw vector. 
19 | reloaded <- as.array(tensorflow::tf$image$decode_png(buf)) 20 | 21 | expect_equal(orig_img, reloaded[,,1]/255) 22 | }) 23 | 24 | test_that("can write nested images", { 25 | temp <- tempfile() 26 | orig_img <- png::readPNG(test_path("resources/img.png")) 27 | img <- array(orig_img, dim = c(1, 28, 28, 1)) 28 | 29 | with_logdir(temp, { 30 | log_event( 31 | train = list(im = summary_image(img)), 32 | valid = list(im = summary_image(img)) 33 | ) 34 | }) 35 | 36 | events <- collect_events(temp) 37 | expect_equal(nrow(events), 4) 38 | }) 39 | 40 | test_that("can write a batch of images", { 41 | orig_img <- png::readPNG(test_path("resources/img.png")) 42 | img <- array(orig_img, dim = c(28, 28, 1)) 43 | arr <- array(0, dim = c(10, 28, 28, 1)) 44 | for(i in 1:10) { 45 | arr[i,,,] <- img 46 | } 47 | 48 | temp <- tempfile() 49 | with_logdir(temp, { 50 | log_event(hello = summary_image(arr)) 51 | }) 52 | 53 | skip_if_tbparse_not_available() 54 | reader <- tbparse$SummaryReader(temp) 55 | tags <- reader$tags 56 | expect_equal(as.character(tags$tensors), "hello") 57 | }) 58 | 59 | test_that("can write a ggplot", { 60 | df <- data.frame(x = 1:10, y = 1:10) 61 | gg <- ggplot2::ggplot(df, ggplot2::aes(x, y)) + 62 | ggplot2::geom_point() 63 | temp <- tempfile() 64 | with_logdir(temp, { 65 | log_event(hello = summary_image(gg)) 66 | }) 67 | 68 | events <- collect_events(temp) 69 | expect_equal(nrow(events), 2) 70 | 71 | buf <- field(field(events$summary[[2]], "image"), "buffer") 72 | reloaded <- png::readPNG(as.raw(buf[[1]])) 73 | 74 | expect_equal(length(dim(reloaded)), 3) 75 | }) 76 | 77 | test_that("error when passing an array with wrong dimensions", { 78 | orig_img <- png::readPNG(test_path("resources/img.png")) 79 | img <- array(orig_img, dim = c(28, 28, 1)) 80 | temp <- tempfile() 81 | 82 | expect_error({ 83 | with_logdir(temp, { 84 | log_event(hello = summary_image(img)) 85 | }) 86 | }, regexp = "array with dimensions") 87 | 88 | }) 89 | 90 | test_that("can pass a custom 
display name", { 91 | 92 | temp <- tempfile() 93 | orig_img <- png::readPNG(test_path("resources/img.png")) 94 | img <- array(orig_img, dim = c(1, 28, 28, 1)) 95 | 96 | with_logdir(temp, { 97 | log_event(hello = summary_image( 98 | img, 99 | metadata = summary_metadata( 100 | plugin_name = "images", 101 | display_name = "Hello world", 102 | description = "This is a description" 103 | ) 104 | )) 105 | }) 106 | 107 | s <- collect_events(temp, type="summary") 108 | expect_equal( 109 | field(field(s$summary, "metadata"), "description"), 110 | "This is a description" 111 | ) 112 | expect_equal( 113 | field(field(s$summary, "metadata"), "display_name"), 114 | "Hello world" 115 | ) 116 | }) 117 | 118 | test_that("fails if weong metadata is specified", { 119 | orig_img <- png::readPNG(test_path("resources/img.png")) 120 | img <- array(orig_img, dim = c(1, 28, 28, 1)) 121 | expect_error({ 122 | summary_image(img, metadata = summary_metadata(plugin_name = "tensor")) 123 | }, regexp = "Plugin name should be") 124 | }) 125 | -------------------------------------------------------------------------------- /README.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | output: github_document 3 | --- 4 | 5 | 6 | 7 | ```{r, include = FALSE} 8 | knitr::opts_chunk$set( 9 | collapse = TRUE, 10 | comment = "#>", 11 | fig.path = "man/figures/README-", 12 | out.width = "100%" 13 | ) 14 | temp <- tempfile("logdir") 15 | dir.create(temp) 16 | knitr::opts_knit$set(root.dir = temp) 17 | ``` 18 | 19 | # tfevents 20 | 21 | 22 | [![R-CMD-check](https://github.com/mlverse/tfevents/actions/workflows/check.yaml/badge.svg)](https://github.com/mlverse/tfevents/actions/workflows/check.yaml) 23 | [![Codecov test coverage](https://codecov.io/gh/mlverse/tfevents/branch/main/graph/badge.svg)](https://app.codecov.io/gh/mlverse/tfevents?branch=main) 24 | [![CRAN status](https://www.r-pkg.org/badges/version/tfevents)](https://CRAN.R-project.org/package=tfevents) 
25 | [![](https://cranlogs.r-pkg.org/badges/tfevents)](https://cran.r-project.org/package=tfevents) 26 | 27 | 28 | tfevents allows logging data from machine learning experiments to a file 29 | format that can be later consumed by [TensorBoard](https://www.tensorflow.org/tensorboard) in order to generate visualizations. 30 | 31 | ## Installation 32 | 33 | You can install tfevents from CRAN with: 34 | 35 | ``` r 36 | install.packages("tfevents") 37 | ``` 38 | 39 | You can install the development version of tfevents from [GitHub](https://github.com/) with: 40 | 41 | You need to have `cmake` on your path. See installation instructions in the [cmake install 42 | webpage](https://cmake.org/resources/) - or: 43 | 44 | If you use `brew` on MacOS you can run: 45 | 46 | ``` shell 47 | brew install cmake 48 | ``` 49 | 50 | Or on linux install the `cmake` library, for example on Debian systems: 51 | 52 | ``` shell 53 | sudo apt install cmake 54 | ``` 55 | 56 | ``` r 57 | # install.packages("devtools") 58 | devtools::install_github("mlverse/tfevents") 59 | ``` 60 | 61 | ## Example 62 | 63 | The main entrypoint in tfevents API is the `log_event` function. It can be used 64 | to log **summaries** like scalars, images, audio (Coming soon), histograms (Coming soon) 65 | and arbitrary tensors (soon) to a log directory, which we like to call `logdir`. 66 | You can later point TensorBoard to this `logdir` to visualize the results. 67 | 68 | ```{r example} 69 | library(tfevents) 70 | ``` 71 | 72 | Summaries are always associated to a step in the TensorBoard API, and `log_event` 73 | automatically increases the **`step`** everytime it's called, unless you provide 74 | the `step` argument. 
75 | 76 | Let's start by logging some metrics: 77 | 78 | ```{r cars} 79 | epochs <- 10 80 | for (i in seq_len(epochs)) { 81 | # training code would go here 82 | log_event( 83 | train = list(loss = runif(1), acc = runif(1)), 84 | valid = list(loss = runif(1), acc = runif(1)) 85 | ) 86 | } 87 | ``` 88 | 89 | By default this will create a `logs` directory in your working directory and write 90 | metrics to it - you can change the default `logdir` using context like `with_logdir` 91 | or globally with `set_default_logdir()`. 92 | 93 | Since we passed a nested list of metrics, `log_event` will create 94 | subdirectories under `logs` to write metrics for each group. 95 | 96 | ```{r} 97 | fs::dir_tree("logs") 98 | ``` 99 | 100 | You can later point TensorBoard to that logdir using TensorBoard's command line 101 | interface or tensorflow's utility function `tensorboard()` 102 | 103 | ```{r} 104 | tensorflow::tensorboard(normalizePath("logs"), port = 6060) 105 | ``` 106 | 107 | TensorBoard will display the results in a dashbboard, similar to one you can see 108 | in the screenshot below: 109 | 110 | ```{r tensorboard, echo=FALSE, out.width="100%"} 111 | webshot2::webshot(url = "http://127.0.0.1:6060/?darkMode=false") 112 | ``` 113 | 114 | You can learn more in the [tfevents website](#TODO). 115 | -------------------------------------------------------------------------------- /src/proto/event.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorboard; 4 | 5 | import "summary.proto"; 6 | 7 | option cc_enable_arenas = true; 8 | option java_outer_classname = "EventProtos"; 9 | option java_multiple_files = true; 10 | option java_package = "org.tensorflow.util"; 11 | option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/util/event_go_proto"; 12 | 13 | // Protocol buffer representing an event that happened during 14 | // the execution of a Brain model. 
15 | message Event { 16 | // Timestamp of the event. 17 | double wall_time = 1; 18 | 19 | // Global step of the event. 20 | int64 step = 2; 21 | 22 | oneof what { 23 | // An event file was started, with the specified version. 24 | // This is use to identify the contents of the record IO files 25 | // easily. Current version is "brain.Event:2". All versions 26 | // start with "brain.Event:". 27 | string file_version = 3; 28 | // An encoded version of a GraphDef. 29 | bytes graph_def = 4; 30 | // A summary was generated. 31 | Summary summary = 5; 32 | // The user output a log message. This was theoretically used by the defunct 33 | // tensorboard_logging module, which has since been removed; this field is 34 | // now deprecated and should not be used. 35 | LogMessage log_message = 6 [deprecated = true]; 36 | // The state of the session which can be used for restarting after crashes. 37 | SessionLog session_log = 7; 38 | // The metadata returned by running a session.run() call. 39 | TaggedRunMetadata tagged_run_metadata = 8; 40 | // An encoded version of a MetaGraphDef. 41 | bytes meta_graph_def = 9; 42 | } 43 | } 44 | 45 | // Protocol buffer used for logging messages to the events file. 46 | // 47 | // This was theoretically used by the defunct tensorboard_logging module, which 48 | // has been removed; this message is now deprecated and should not be used. 49 | message LogMessage { 50 | option deprecated = true; 51 | enum Level { 52 | option deprecated = true; 53 | UNKNOWN = 0; 54 | // Note: The logging level 10 cannot be named DEBUG. Some software 55 | // projects compile their C/C++ code with -DDEBUG in debug builds. So the 56 | // C++ code generated from this file should not have an identifier named 57 | // DEBUG. 58 | DEBUGGING = 10; 59 | INFO = 20; 60 | WARN = 30; 61 | ERROR = 40; 62 | FATAL = 50; 63 | } 64 | Level level = 1; 65 | string message = 2; 66 | } 67 | 68 | // Protocol buffer used for logging session state. 
69 | message SessionLog { 70 | enum SessionStatus { 71 | STATUS_UNSPECIFIED = 0; 72 | START = 1; 73 | STOP = 2; 74 | CHECKPOINT = 3; 75 | } 76 | 77 | SessionStatus status = 1; 78 | // This checkpoint_path contains both the path and filename. 79 | string checkpoint_path = 2; 80 | string msg = 3; 81 | } 82 | 83 | // For logging the metadata output for a single session.run() call. 84 | message TaggedRunMetadata { 85 | // Tag name associated with this metadata. 86 | string tag = 1; 87 | // Byte-encoded version of the `RunMetadata` proto in order to allow lazy 88 | // deserialization. 89 | bytes run_metadata = 2; 90 | } 91 | 92 | // Worker heartbeat messages. Support for these operations is currently 93 | // internal and expected to change. 94 | 95 | // Current health status of a worker. 96 | enum WorkerHealth { 97 | OK = 0; // By default a worker is healthy. 98 | RECEIVED_SHUTDOWN_SIGNAL = 1; 99 | INTERNAL_ERROR = 2; 100 | SHUTTING_DOWN = 3; // Worker has been instructed to shutdown after a timeout. 101 | } 102 | 103 | // Indicates the behavior of the worker when an internal error or shutdown 104 | // signal is received. 
105 | enum WorkerShutdownMode { 106 | DEFAULT = 0; 107 | NOT_CONFIGURED = 1; 108 | WAIT_FOR_COORDINATOR = 2; 109 | SHUTDOWN_AFTER_TIMEOUT = 3; 110 | } 111 | 112 | message WatchdogConfig { 113 | int64 timeout_ms = 1; 114 | } 115 | 116 | message RequestedExitCode { 117 | int32 exit_code = 1; 118 | } 119 | 120 | message WorkerHeartbeatRequest { 121 | WorkerShutdownMode shutdown_mode = 1; 122 | WatchdogConfig watchdog_config = 2; 123 | RequestedExitCode exit_code = 3; 124 | } 125 | 126 | message WorkerHeartbeatResponse { 127 | WorkerHealth health_status = 1; 128 | repeated Event worker_log = 2; 129 | string hostname = 3; 130 | } 131 | -------------------------------------------------------------------------------- /vignettes/articles/read.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Reading event records" 3 | --- 4 | 5 | ```{r, include = FALSE} 6 | knitr::opts_chunk$set( 7 | collapse = TRUE, 8 | comment = "#>" 9 | ) 10 | ``` 11 | 12 | ```{r setup} 13 | library(tfevents) 14 | ``` 15 | 16 | tfevents stores events in files inside a **logdir**; there might be many tfevents record files in the log directory, and each file can contain many events. 17 | 18 | tfevents provides functionality to read records written to the log directory and to extract their value in a convenient representation, useful if you want to analyse the results from R. 19 | 20 | Let's write a few records to a temporary directory, then we'll use tfevents functionality to read the data into R. 21 | 22 | ```{r} 23 | temp <- tempfile() 24 | set_default_logdir(temp) 25 | ``` 26 | 27 | ```{r} 28 | log_event(text = "hello world") 29 | for (i in 1:10) { 30 | log_event(hello = i* runif(1)) 31 | } 32 | ``` 33 | 34 | We can **collect** all events from the *logdir* with: 35 | 36 | ```{r} 37 | events <- collect_events(temp) 38 | events 39 | ``` 40 | 41 | `collect_events()` returns a tibble with each row representing an event written to that *logdir*.
Note that the tibble has 12 rows, 1 more than the number of our calls to `log_event`. That's because every event record file includes an event storing some metadata, such as the time it was created. The `run` column indicates the directory within the *logdir* that the events were collected from. Although tfevents only supports writing summary events, tfevents record files can contain other kinds of events like log messages, TensorFlow graphs and metadata information (as we see in this file); in those cases the `summary` column will have a `NA` value. In summary events it will contain additional information on the summary. 42 | 43 | You might want to collect only the summary events, like those that were created with `log_event`; in this case you can pass the `type` argument to `collect_events` specifying the kind of events that you want to collect. 44 | 45 | ```{r} 46 | summaries <- collect_events(temp, type = "summary") 47 | summaries 48 | ``` 49 | Since the above asked for summary events, the returned data frame can include additional information like the name of the plugin that was used to create the summary (e.g. scalars, images, audio, etc.) and the tag name for the summary. This data is already included in the objects in the `summary` column, but is extracted as columns to make analyses easier. 50 | 51 | You can extract the value out of a summary using the `value` function. 52 | 53 | ```{r} 54 | value(summaries$summary[1]) 55 | ``` 56 | 57 | Notice that `value` extracts values of a single summary and errors if you pass 58 | more summary values. To query all values you can pass the `as_list = TRUE` argument. This ensures that `value` will always return a list, making it type-stable no matter the size of the summary_values vector that you pass.
59 | 60 | ```{r} 61 | # we remove the first summary, as it's a text summary 62 | value(summaries$summary[-1], as_list = TRUE) 63 | ``` 64 | 65 | If you are only interested in scalar summaries, you can use `type="scalar"` in `collect_events`: 66 | 67 | ```{r} 68 | scalars <- collect_events(temp, type = "scalar") 69 | scalars 70 | ``` 71 | 72 | Now, values can be expanded in the data frame and you get a ready to use data frame, for example: 73 | 74 | ```{r} 75 | library(ggplot2) 76 | ggplot(scalars, aes(x = step, y = value)) + 77 | geom_line() 78 | ``` 79 | 80 | ## Iterating over a logdir 81 | 82 | Passing a directory path to `collect_events` by default collects all events in that directory, but you might not want to collect them all at once because of memory constraints or even because they are not yet written, in this case you can use `events_logdir` to create an object, similar to a file connection that 83 | will allow you to load events in smaller batches. 84 | 85 | ```{r} 86 | con <- events_logdir(temp) 87 | collect_events(con, n = 1, type = "scalar") 88 | ``` 89 | 90 | Notice that the first call to `collect_events` collected the first scalar summary event in the directory because of `n = 1`. The next call will collect the remaining ones as by default `n = NULL`. 91 | 92 | ```{r} 93 | collect_events(con, type = "scalar") 94 | ``` 95 | 96 | We can now log some more scalars and recollect, you will see that the events that were just written are now collected. 97 | 98 | ```{r} 99 | for (i in 1:3) { 100 | log_event(hello = i* runif(1)) 101 | } 102 | collect_events(con, type = "scalar") 103 | ``` 104 | 105 | This interface allows you to efficiently read tfevents records without having them all on RAM at once, or work in a streaming way in a sense that a process might be writing tfevents (for example in a training loop) and another one is used to display intermediate results - for example in a Shiny app. 
106 | -------------------------------------------------------------------------------- /src/tensor.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include "generated/tensor.pb.h" 5 | #include "tl/optional.hpp" 6 | #include "na.h" 7 | 8 | template<> 9 | std::vector> 10 | Rcpp::as>> (SEXP x) { 11 | const auto r_dtype = Rcpp::as(x); 12 | std::vector> dtype; 13 | for (size_t i = 0; i < r_dtype.size(); i++) { 14 | auto r_dp = r_dtype[i]; 15 | 16 | if (Rcpp::CharacterVector::is_na(r_dp)) { 17 | dtype.push_back(tl::nullopt); 18 | continue; 19 | } 20 | 21 | if (r_dp == "float") { 22 | dtype.push_back(tensorboard::DataType::DT_FLOAT); 23 | } 24 | else if (r_dp == "string") { 25 | dtype.push_back(tensorboard::DataType::DT_STRING); 26 | } 27 | else if (r_dp == "double") { 28 | dtype.push_back(tensorboard::DataType::DT_DOUBLE); 29 | } 30 | else { 31 | Rcpp::stop("Unsupported data type."); 32 | } 33 | } 34 | return dtype; 35 | } 36 | 37 | template<> 38 | std::vector 39 | Rcpp::as> (SEXP x) { 40 | const auto r_dims = Rcpp::as(x); 41 | auto r_dims_name = Rcpp::CharacterVector(0); 42 | const auto no_names = Rf_isNull(r_dims.names()); 43 | if (!no_names) { 44 | r_dims_name = r_dims.names(); 45 | } 46 | std::vector dims; 47 | for (size_t i = 0; i < r_dims.size(); i++) { 48 | tensorboard::TensorShapeProto_Dim dim; 49 | dim.set_size(r_dims[i]); 50 | 51 | if ((!no_names) && (!Rcpp::CharacterVector::is_na(r_dims_name[i]))) { 52 | dim.set_name(r_dims_name[i]); 53 | } 54 | 55 | dims.push_back(dim); 56 | } 57 | 58 | return dims; 59 | } 60 | 61 | template<> 62 | std::vector> 63 | Rcpp::as>> (SEXP x) { 64 | 65 | auto r_shapes = Rcpp::as(x); 66 | auto r_dims = Rcpp::as(r_shapes["dim"]); 67 | 68 | std::vector> shapes; 69 | for (size_t i = 0; i < r_dims.size(); i++) { 70 | if (r_is_na(r_shapes)) { 71 | shapes.push_back(tl::nullopt); 72 | continue; 73 | } 74 | 75 | tensorboard::TensorShapeProto shape; 76 | auto dims = 
Rcpp::as>(r_dims[i]); 77 | for (size_t j = 0; j < dims.size(); j++) { 78 | auto dim = shape.add_dim(); 79 | dim->CopyFrom(dims[j]); 80 | } 81 | shapes.push_back(shape); 82 | } 83 | 84 | return shapes; 85 | } 86 | 87 | 88 | void add_string_values (tensorboard::TensorProto* tensor, SEXP x) { 89 | if (Rf_inherits(x, "blob")) { 90 | // a blob is a list of raw vectors in R 91 | auto values = Rcpp::as(x); 92 | for (auto val : values) { 93 | auto raw = Rcpp::as(val); 94 | tensor->add_string_val(std::string(raw.begin(), raw.end())); 95 | } 96 | return; 97 | } 98 | 99 | for (auto val : Rcpp::as>(x)) { 100 | tensor->add_string_val(val); 101 | } 102 | } 103 | 104 | 105 | template<> 106 | std::vector> 107 | Rcpp::as>> (SEXP x) { 108 | 109 | auto r_tensors = Rcpp::as(x); 110 | 111 | auto r_shape = Rcpp::as>>(r_tensors["shape"]); 112 | auto r_dtype = Rcpp::as>>(r_tensors["dtype"]); 113 | auto r_content = Rcpp::as(r_tensors["content"]); 114 | 115 | std::vector> tensors; 116 | for (size_t i = 0; i < r_shape.size(); i++) { 117 | 118 | if (r_is_na(r_content[i])) { 119 | tensors.push_back(tl::nullopt); 120 | continue; 121 | } 122 | 123 | tensorboard::TensorProto tensor; 124 | 125 | tensor.set_dtype(r_dtype[i].value()); 126 | tensor.mutable_tensor_shape()->CopyFrom(r_shape[i].value()); 127 | SEXP content = r_content[i]; 128 | switch(r_dtype[i].value()) { 129 | case tensorboard::DataType::DT_FLOAT: 130 | for (auto val : Rcpp::as>(content)) { 131 | tensor.add_float_val(val); 132 | } 133 | break; 134 | case tensorboard::DataType::DT_STRING: 135 | add_string_values(&tensor, content); 136 | break; 137 | case tensorboard::DataType::DT_DOUBLE: 138 | for (auto val : Rcpp::as>(content)) { 139 | tensor.add_double_val(val); 140 | } 141 | break; 142 | default: 143 | Rcpp::stop("Unsupported type"); 144 | } 145 | 146 | tensors.push_back(tensor); 147 | } 148 | 149 | return tensors; 150 | } 151 | -------------------------------------------------------------------------------- /configure: 
-------------------------------------------------------------------------------- 1 | # Anticonf (tm) script by Jeroen Ooms (2022) 2 | # This script will query 'pkg-config' for the required cflags and ldflags. 3 | # If pkg-config is unavailable or does not find the library, try setting 4 | # INCLUDE_DIR and LIB_DIR manually via e.g: 5 | # R CMD INSTALL --configure-vars='INCLUDE_DIR=/.../include LIB_DIR=/.../lib' 6 | 7 | # Library settings 8 | PKG_CONFIG_NAME="protobuf" 9 | PKG_DEB_NAME="libprotobuf-dev" 10 | PKG_RPM_NAME="protobuf-devel" 11 | PKG_BREW_NAME="protobuf" 12 | PKG_TEST_HEADER="" 13 | PKG_LIBS="-lprotobuf" 14 | PKG_CFLAGS="" 15 | 16 | # Use pkg-config if available (except for on CRAN) 17 | if [ `command -v pkg-config` ] && [ ! -d "/Volumes/SSD-Data/Builds" ]; then 18 | PKGCONFIG_CFLAGS=`pkg-config --cflags --silence-errors ${PKG_CONFIG_NAME}` 19 | PKGCONFIG_LIBS=`pkg-config --libs ${PKG_CONFIG_NAME}` 20 | PKGCONFIG_MODVERSION=`pkg-config --modversion --silence-errors ${PKG_CONFIG_NAME}` 21 | fi 22 | 23 | # Note that cflags may be empty in case of success 24 | if [ "$INCLUDE_DIR" ] || [ "$LIB_DIR" ]; then 25 | echo "Found INCLUDE_DIR and/or LIB_DIR!" 26 | PKG_CFLAGS="-I$INCLUDE_DIR $PKG_CFLAGS" 27 | PKG_LIBS="-L$LIB_DIR $PKG_LIBS" 28 | elif [ "$PKGCONFIG_CFLAGS" ] || [ "$PKGCONFIG_LIBS" ]; then 29 | echo "Found pkg-config cflags and libs!" 30 | PKG_CFLAGS=${PKGCONFIG_CFLAGS} 31 | PKG_LIBS=${PKGCONFIG_LIBS} 32 | elif [ `uname` = "Darwin" ]; then 33 | test ! "$CI" && brew --version 2>/dev/null 34 | if [ $? -eq 0 ]; then 35 | BREWDIR=`brew --prefix` 36 | PATH="$BREWDIR/bin:$PATH" 37 | PKG_CFLAGS="-I$BREWDIR/include" 38 | PKG_LIBS="-L$BREWDIR/lib $PKG_LIBS" 39 | else 40 | curl -sfL "https://autobrew.github.io/scripts/protobuf" > autobrew 41 | . 
./autobrew 42 | fi 43 | fi 44 | 45 | # For debugging 46 | echo "Using PKG_CFLAGS=$PKG_CFLAGS" 47 | echo "Using PKG_LIBS=$PKG_LIBS" 48 | 49 | # Use CXX17 if available (preferred for recent libprotobuf) 50 | CXX17=`${R_HOME}/bin/R CMD config CXX17` || unset CXX17 51 | 52 | if [ "$CXX17" ]; then 53 | CXX_STD=CXX17 54 | CXX="$CXX17 `${R_HOME}/bin/R CMD config CXX17STD`" 55 | CXXFLAGS=`${R_HOME}/bin/R CMD config CXX17FLAGS` 56 | echo "Using C++17 compiler: $CXX" 57 | else 58 | echo "Using default C++ compiler" 59 | CXX_STD=CXX11 60 | CXX=`${R_HOME}/bin/R CMD config CXX` 61 | CXXFLAGS=`${R_HOME}/bin/R CMD config CXXFLAGS` 62 | fi 63 | 64 | # Test configuration 65 | CPPFLAGS=`${R_HOME}/bin/R CMD config CPPFLAGS` 66 | echo "#include $PKG_TEST_HEADER" | ${CXX} -E ${CPPFLAGS} ${PKG_CFLAGS} ${CXXFLAGS} -xc++ - >/dev/null 2>configure.log 67 | 68 | # Customize the error 69 | if [ $? -ne 0 ]; then 70 | echo "------------------------------[ ANTICONF ]-----------------------------" 71 | echo "Configuration failed to find $PKG_CONFIG_NAME. Try installing:" 72 | echo " * deb: $PKG_DEB_NAME (Debian, Ubuntu, etc)" 73 | echo " * rpm: $PKG_RPM_NAME (Fedora, EPEL)" 74 | echo " * brew: $PKG_BREW_NAME (OSX)" 75 | echo "If $PKG_CONFIG_NAME is already installed, check that 'pkg-config' is in your" 76 | echo "PATH and PKG_CONFIG_PATH contains a $PKG_CONFIG_NAME.pc file. If pkg-config" 77 | echo "is unavailable you can set INCLUDE_DIR and LIB_DIR manually via:" 78 | echo "R CMD INSTALL --configure-vars='INCLUDE_DIR=... 
LIB_DIR=...'" 79 | echo "----------------------------[ ERROR MESSAGE ]----------------------------" 80 | cat configure.log 81 | echo "------------------------------------------------------------------------" 82 | exit 1 83 | fi 84 | 85 | # Fix for deprecated declarations 86 | if pkg-config ${PKG_CONFIG_NAME} --atleast-version 3.6; then 87 | PKG_CFLAGS="$PKG_CFLAGS -DUSENEWAPI" 88 | fi 89 | 90 | # Write to Makevars 91 | sed -e "s|@cflags@|$PKG_CFLAGS|" -e "s|@libs@|$PKG_LIBS|" -e "s|CXX11|${CXX_STD}|" src/Makevars.in > src/Makevars 92 | 93 | # Look for 'protoc' compiler 94 | if [ `command -v protoc` ]; then 95 | PROTOC_VERSION=`protoc --version` 96 | echo "Using ${PROTOC_VERSION} from `command -v protoc`" 97 | else 98 | echo "Failed to run protoc." 99 | echo "Please install the 'protobuf-compiler' package for your system." 100 | exit 1 101 | fi 102 | 103 | cd src 104 | 105 | if [ ! -d "generated" ]; then 106 | mkdir generated 107 | find proto -name *.proto -exec bash -c \ 108 | 'protoc -I "proto" --cpp_out="generated" {}' \; 109 | fi 110 | 111 | # copied from protolite: 112 | # Suppress wanrings about pragmas in the autogenerated protobuf headers. 113 | # Uwe + BDR have said this is OK and there is nothing we can do about this. 114 | find ./ -type f -name "*.pb.h" -exec sed -i.bak "s@ #pragma@/*nowarn*/#pragma@g" {} \; 115 | 116 | PB_SRC=$(echo $(find generated -type f -name "*.pb.cc" -print)) 117 | CPP_SRC=$(echo $(find *.cpp -print)) 118 | CRC_SRC=$(echo $(find . 
-type f -name "*.c" -print)) 119 | sed -e "s|@pbsrc@|$PB_SRC|" -e "s|@cppsrc@|$CPP_SRC|" -e "s|@csrc@|$CRC_SRC|" \ 120 | -e "s|@cflags@|$PKG_CFLAGS|" -e "s|@libs@|$PKG_LIBS|" -e "s|CXX11|${CXX_STD}|" \ 121 | Makevars.in > Makevars 122 | 123 | -------------------------------------------------------------------------------- /src/wrap.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include "generated/event.pb.h" 3 | #include "utils.h" 4 | 5 | static auto pkg = Rcpp::Environment::namespace_env("tfevents"); 6 | static auto r_summary_metadata = Rcpp::Function(pkg["summary_metadata"]); 7 | static auto r_summary_values = Rcpp::Function(pkg["summary_values"]); 8 | static auto r_vec_c_list = Rcpp::Function(pkg["vec_c_list"]); 9 | static auto r_event = Rcpp::Function(pkg["event"]); 10 | static auto r_summary_summary_image = Rcpp::Function(pkg["summary_summary_image"]); 11 | static auto r_tensor_proto = Rcpp::Function(pkg["tensor_proto"]); 12 | 13 | template <> 14 | SEXP Rcpp::wrap(const tensorboard::SummaryMetadata& object) { 15 | return r_summary_metadata( 16 | object.plugin_data().plugin_name(), 17 | object.display_name(), 18 | object.summary_description() 19 | ); 20 | } 21 | 22 | template <> 23 | SEXP Rcpp::wrap(const tensorboard::Summary_Image& object) { 24 | auto img_string = object.encoded_image_string(); 25 | return r_summary_summary_image( 26 | Rcpp::Named("buffer", Rcpp::RawVector(img_string.begin(), img_string.end())), 27 | Rcpp::Named("width", object.width()), 28 | Rcpp::Named("height", object.height()), 29 | Rcpp::Named("colorspace", object.colorspace()) 30 | ); 31 | } 32 | 33 | SEXP tensor_proto_content (const tensorboard::TensorProto& object) { 34 | auto dtype = object.dtype(); 35 | auto list = Rcpp::List(); 36 | if (dtype == tensorboard::DataType::DT_FLOAT) { 37 | Rcpp::NumericVector out; 38 | for (int i =0; i < object.float_val_size(); i++) { 39 | out.push_back(object.float_val(i)); 40 | } 41 | 
list.push_back(out); 42 | } 43 | else if (dtype == tensorboard::DataType::DT_DOUBLE) { 44 | Rcpp::NumericVector out; 45 | for (int i =0; i < object.double_val_size(); i++) { 46 | out.push_back(object.double_val(i)); 47 | } 48 | list.push_back(out); 49 | } 50 | else if (dtype == tensorboard::DataType::DT_STRING) { 51 | Rcpp::List out; 52 | for (int i=0; i < object.string_val_size(); i++) { 53 | auto val = object.string_val(i); 54 | Rcpp::RawVector v(val.size()); 55 | memcpy(&(v[0]), val.c_str(), v.size()); 56 | out.push_back(v); 57 | out.attr("class") = std::vector({"blob", "vctrs_list_of", "vctrs_vctr", "list"}); 58 | } 59 | list.push_back(out); 60 | } 61 | else { 62 | Rcpp::stop("Can't read this object."); 63 | } 64 | return list; 65 | } 66 | 67 | template<> 68 | SEXP Rcpp::wrap(const tensorboard::DataType& object) { 69 | switch (object) { 70 | case tensorboard::DataType::DT_FLOAT: 71 | return Rcpp::wrap("float"); 72 | case tensorboard::DataType::DT_DOUBLE: 73 | return Rcpp::wrap("double"); 74 | case tensorboard::DataType::DT_STRING: 75 | return Rcpp::wrap("string"); 76 | default: 77 | Rcpp::stop("Can't read this type."); 78 | } 79 | } 80 | 81 | template<> 82 | SEXP Rcpp::wrap(const tensorboard::TensorProto& object) { 83 | auto content = tensor_proto_content(object); 84 | auto shape = object.tensor_shape(); 85 | auto shape_out = IntegerVector(); 86 | 87 | for (int i = 0; i < shape.dim_size(); i++) { 88 | shape_out.push_back(shape.dim(i).size()); 89 | } 90 | 91 | auto shape_out2 = Rcpp::List(); 92 | shape_out2.push_back(shape_out); 93 | 94 | return r_tensor_proto( 95 | content, 96 | shape_out2, 97 | Rcpp::wrap(object.dtype()) 98 | ); 99 | } 100 | 101 | template <> 102 | SEXP Rcpp::wrap(const tensorboard::Summary& object) { 103 | auto n_values = object.value_size(); 104 | Rcpp::List summaries; 105 | for (size_t i = 0; i < n_values; i++) { 106 | auto value = object.value(i); 107 | 108 | summaries.push_back(r_summary_values( 109 | value.metadata(), 110 | value.tag(), 
111 | Rcpp::Named("value", value.value_case() == tensorboard::Summary_Value::ValueCase::kSimpleValue ? value.simple_value() : pkg["na"]), 112 | Rcpp::Named("image", value.value_case() == tensorboard::Summary_Value::ValueCase::kImage ? Rcpp::wrap(value.image()) : pkg["na"]), 113 | Rcpp::Named("tensor", value.value_case() == tensorboard::Summary_Value::ValueCase::kTensor ? Rcpp::wrap(value.tensor()): pkg["na"]) 114 | )); 115 | } 116 | 117 | return r_vec_c_list(summaries); 118 | } 119 | 120 | template <> 121 | SEXP Rcpp::wrap(const tensorboard::Event& object) { 122 | return r_event( 123 | Rcpp::Named("run", pkg["na"]), 124 | Rcpp::Named("wall_time", object.wall_time()), 125 | Rcpp::Named("step", object.step()), 126 | Rcpp::Named("summary", object.what_case() == tensorboard::Event::WhatCase::kSummary ? Rcpp::wrap(object.summary()) : pkg["na"]), 127 | Rcpp::Named("file_version", object.what_case() == tensorboard::Event::WhatCase::kFileVersion ? Rcpp::wrap(object.file_version()) : pkg["na"]) 128 | ); 129 | } 130 | 131 | template <> 132 | SEXP Rcpp::wrap(const std::vector& object) { 133 | Rcpp::List events; 134 | for (size_t i = 0; i < object.size(); i++) { 135 | events.push_back(object[i]); 136 | } 137 | return r_vec_c_list(events); 138 | } 139 | -------------------------------------------------------------------------------- /vignettes/articles/hparams.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Hyperparameter tuning" 3 | --- 4 | 5 | ```{r, include = FALSE} 6 | knitr::opts_chunk$set( 7 | collapse = TRUE, 8 | comment = "#>" 9 | ) 10 | ``` 11 | 12 | ```{r setup} 13 | library(tfevents) 14 | ``` 15 | 16 | Hyperparameter tuning is a important step in machine learning problems. 17 | `tfevents` has the ability to log hyperparameter values and configurations so they can be visualized with TensorBoard. 
18 | 19 | In this tutorial we show the recommended steps to correctly log hyperparameter tuning data and visualize it in TensorBoard. We will 20 | abstract away the model we are training and the approach described 21 | here can work with any machine learning framework. 22 | 23 | ## Configuring the experiment 24 | 25 | The first step to use `tfevents` to log hyperparameter tuning experiments is to log a hyperparameter configuration using the 26 | `log_hparams_config()` function. 27 | 28 | In order to do it, we need to first define the set of hyperparameters that we are going to experiment with along with their domains. We also 29 | define the set of metrics that we want to be displayed in the HParams dashboard. 30 | 31 | Suppose that we want to tune a neural network that has the following 32 | hyperparameters: `num_units`, `dropout` and `optimizer`. And that we 33 | want to observe the training and validation loss as well as the accuracy. 34 | 35 | ```{r} 36 | hparams <- list( 37 | hparams_hparam("num_units", domain = c(128, 256, 512)), 38 | hparams_hparam("dropout", domain = c(min_value = 0.1, max_value = 0.4)), 39 | hparams_hparam("optimizer", domain = c("sgd", "adam")) 40 | ) 41 | 42 | metrics <- list( 43 | hparams_metric("loss", group = "train"), 44 | hparams_metric("loss", group = "valid"), 45 | hparams_metric("accuracy", group = "train"), 46 | hparams_metric("accuracy", group = "valid") 47 | ) 48 | ``` 49 | 50 | We can now choose a logdir and log the hyperparameter configurations: 51 | 52 | ```{r} 53 | temp <- tempfile("logdir") 54 | local_logdir(temp) 55 | log_hparams_config(hparams, metrics) 56 | ``` 57 | 58 | We have now logged the experiment configuration, we can now proceed to logging 59 | runs of this experiment. 60 | 61 | ## Logging runs 62 | 63 | We are not actually going to train any model in this tutorial, but we will define 64 | a function that does log some metrics, as if it were training something.
This 65 | function takes the hyperparameter values, and they should be used to configure how 66 | training happens. 67 | 68 | ```{r} 69 | train <- function(num_units, dropout, optimizer) { 70 | # each run will have its own logdir. 71 | # this modifies the logdir during the execution of this function 72 | epochs <- 10 73 | for (i in seq_len(epochs)) { 74 | # training code would go here 75 | log_event( 76 | train = list(loss = runif(1), accuracy = runif(1)), 77 | valid = list(loss = runif(1), accuracy = runif(1)), 78 | ) 79 | } 80 | } 81 | ``` 82 | 83 | We now write a wrapper function that takes the hyperparameter values and: 84 | 85 | 1. Creates a random logdir name for the run 86 | 2. Temporarily modifies the default logdir, so scalars etc. for each run 87 | are separated in the file system. 88 | 3. Logs the set of hyperparameters that is going to be used. 89 | 4. Runs the `train` function that we defined earlier. 90 | 91 | ```{r} 92 | run_train <- function(root_logdir, num_units, dropout, optimizer) { 93 | # create a random logdir name for the run. It should be a child directory 94 | # of root_logdir 95 | logdir <- file.path( 96 | root_logdir, 97 | paste(sample(letters, size = 15, replace = TRUE), collapse = "") 98 | ) 99 | 100 | # modifies the logdir during the execution of run_train 101 | local_logdir(logdir) 102 | 103 | # before running the actual training we log the set of hyperparameters 104 | # that are used. 105 | log_hparams( 106 | num_units = num_units, 107 | dropout = dropout, 108 | optimizer = optimizer 109 | ) 110 | 111 | train(num_units, dropout, optimizer) 112 | } 113 | ``` 114 | 115 | We can now use `run_train` to run training for multiple sets of hyperparameters.
116 | 117 | ```{r} 118 | for (num_units in c(128, 256, 512)) { 119 | for (dropout in c(0.1, 0.25)) { 120 | for (optimizer in c("adam", "sgd")) { 121 | run_train(temp, num_units, dropout, optimizer) 122 | } 123 | } 124 | } 125 | ``` 126 | 127 | You can see that the root logdir that we are using as `temp` will be filled with 128 | event files. 129 | 130 | ```{r} 131 | fs::dir_tree(temp) 132 | ``` 133 | 134 | Finally, we can visualize the experiment results in TensorBoard: 135 | 136 | ```{r} 137 | tensorflow::tensorboard(temp, port = 6060) 138 | ``` 139 | 140 | The screenshot below shows the table view in the HParams dashboard in TensorBoard. 141 | 142 | ```{r echo=FALSE, out.width="100%"} 143 | webshot2::webshot("http://127.0.0.1:6060/?darkMode=false#hparams", vwidth = 1200) 144 | ``` 145 | 146 | TensorBoard also provides visualizing the results in a parallel coordinates plot 147 | and as a scatter plot. 148 | 149 | -------------------------------------------------------------------------------- /vignettes/examples/torch.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Logging torch runs" 3 | --- 4 | 5 | This example show how to use tfevents to log a training run of a DCGAN model. 6 | We use tfevents to log generated images and loss values during training. 7 | 8 | Pointing TensorBoard to the logs directory will open a dashboard like the displayed 9 | below. 
10 | 11 | ![TensorBoard view from the DCGAN experiment](img/torch.png) 12 | 13 | ```{r, eval = FALSE} 14 | library(torch) 15 | library(torchvision) 16 | library(tfevents) 17 | 18 | # Datasets and loaders ---------------------------------------------------- 19 | 20 | dir <- "./mnist" # caching directory 21 | 22 | train_ds <- mnist_dataset( 23 | dir, 24 | download = TRUE, 25 | transform = transform_to_tensor 26 | ) 27 | 28 | test_ds <- mnist_dataset( 29 | dir, 30 | train = FALSE, 31 | transform = transform_to_tensor 32 | ) 33 | 34 | train_dl <- dataloader(train_ds, batch_size = 128, shuffle = TRUE) 35 | test_dl <- dataloader(test_ds, batch_size = 128) 36 | 37 | # Define the network ------------------------------------------------------ 38 | 39 | init_weights <- function(m) { 40 | if (grepl("conv", m$.classes[[1]])) { 41 | nn_init_normal_(m$weight$data(), 0.0, 0.02) 42 | } else if (grepl("batch_norm", m$.classes[[1]])) { 43 | nn_init_normal_(m$weight$data(), 1.0, 0.02) 44 | nn_init_constant_(m$bias$data(), 0) 45 | } 46 | } 47 | 48 | generator <- nn_module( 49 | "generator", 50 | initialize = function(latent_dim, out_channels) { 51 | self$main <- nn_sequential( 52 | nn_conv_transpose2d(latent_dim, 512, kernel_size = 4, 53 | stride = 1, padding = 0, bias = FALSE), 54 | nn_batch_norm2d(512), 55 | nn_relu(), 56 | nn_conv_transpose2d(512, 256, kernel_size = 4, 57 | stride = 2, padding = 1, bias = FALSE), 58 | nn_batch_norm2d(256), 59 | nn_relu(), 60 | nn_conv_transpose2d(256, 128, kernel_size = 4, 61 | stride = 2, padding = 1, bias = FALSE), 62 | nn_batch_norm2d(128), 63 | nn_relu(), 64 | nn_conv_transpose2d(128, out_channels, kernel_size = 4, 65 | stride = 2, padding = 3, bias = FALSE), 66 | nn_tanh() 67 | ) 68 | self$main$apply(init_weights) # custom weight initialization 69 | }, 70 | forward = function(input) { 71 | input <- input$view(c(input$shape, 1, 1)) 72 | self$main(input) 73 | } 74 | ) 75 | 76 | discriminator <- nn_module( 77 | "discriminator", 78 | initialize = 
function(in_channels) { 79 | self$main <- nn_sequential( 80 | nn_conv2d(in_channels, 16, kernel_size = 4, stride = 2, padding = 1, bias = FALSE), 81 | nn_leaky_relu(0.2, inplace = TRUE), 82 | nn_conv2d(16, 32, kernel_size = 4, stride = 2, padding = 1, bias = FALSE), 83 | nn_batch_norm2d(32), 84 | nn_leaky_relu(0.2, inplace = TRUE), 85 | nn_conv2d(32, 64, kernel_size = 4, stride = 2, padding = 1, bias = FALSE), 86 | nn_batch_norm2d(64), 87 | nn_leaky_relu(0.2, inplace = TRUE), 88 | nn_conv2d(64, 128, kernel_size = 4, stride = 2, padding = 1, bias = FALSE), 89 | nn_leaky_relu(0.2, inplace = TRUE) 90 | ) 91 | self$linear <- nn_linear(128, 1) 92 | self$main$apply(init_weights) # custom weight initialization 93 | }, 94 | forward = function(input) { 95 | x <- self$main(input) 96 | x <- torch_flatten(x, start_dim = 2) 97 | x <- self$linear(x) 98 | x[,1] 99 | } 100 | ) 101 | 102 | # Initialize models 103 | 104 | latent_dim <- 100 105 | channels <- 1 106 | 107 | G <- generator(latent_dim = latent_dim, out_channels = channels) 108 | D <- discriminator(in_channels = 1) 109 | 110 | # Set up optimizers 111 | 112 | opt_D <- optim_adam( 113 | D$parameters, 114 | lr = 2*1e-4, betas = c(0.5, 0.999) 115 | ) 116 | 117 | opt_G <- optim_adam( 118 | G$parameters, 119 | lr = 2*1e-4, betas = c(0.5, 0.999) 120 | ) 121 | 122 | 123 | bce <- nn_bce_with_logits_loss() 124 | 125 | # Training loop ---------- 126 | 127 | fixed_noise <- torch_randn(10, latent_dim = latent_dim) 128 | 129 | for (epoch in 1:10) { 130 | coro::loop(for (batch in train_dl) { 131 | input <- batch[[1]] 132 | 133 | batch_size <- input$shape[1] 134 | 135 | noise <- torch_randn(batch_size, latent_dim) 136 | 137 | fake <- G(noise) 138 | 139 | # create response vectors 140 | y_real <- torch_ones(batch_size) 141 | y_fake <- torch_zeros(batch_size) 142 | 143 | opt_D$zero_grad() 144 | loss_D <- bce(D(input), y_real) + bce(D(fake$detach()), y_fake) 145 | loss_D$backward() 146 | opt_D$step() 147 | 148 | opt_G$zero_grad() 149 | 
loss_G <- bce(D(fake), y_real) 150 | loss_G$backward() 151 | opt_G$step() 152 | 153 | log_event( 154 | train = list( 155 | "loss/D" = loss_D$item(), 156 | "loss/G" = loss_G$item() 157 | ) 158 | ) 159 | }) 160 | with_no_grad({ 161 | img <- G(fixed_noise) 162 | img <- as_array((img[,1,,,newaxis] + 1)/2) 163 | log_event( 164 | generated = summary_image(img), 165 | step = epoch 166 | ) 167 | }) 168 | } 169 | ``` 170 | 171 | -------------------------------------------------------------------------------- /R/log.R: -------------------------------------------------------------------------------- 1 | #' Log event 2 | #' 3 | #' @param ... Named values that you want to log. They can be possibly nested, 4 | #' in this case, the enclosing names are considered 'run' names by TensorBoard. 5 | #' @param step The step associated the logs. If `NULL`, a managed step counter 6 | #' will be used, and the global step is increased in every call to [log_event()]. 7 | #' 8 | #' @note [log_event()] writes events to the default `logdir`. You can query the 9 | #' default `logdir` with [get_default_logdir()] and modify it with 10 | #' [set_default_logdir()]. You can also use the [with_logdir()] context switcher 11 | #' to temporarily modify the logdir. 12 | #' 13 | #' @return Invisibly returns the logged data. 
14 | #' 15 | #' @examples 16 | #' temp <- tempfile() 17 | #' with_logdir(temp, { 18 | #' log_event( 19 | #' train = list(loss = runif(1), acc = runif(1)), 20 | #' valid = list(loss = runif(1), acc = runif(1)) 21 | #' ) 22 | #' }) 23 | #' @export 24 | log_event <- function(..., step = get_global_step(increment = TRUE)) { 25 | data <- rlang::dots_list(..., .named = FALSE, .homonyms = "error") 26 | write_event(as_event(data, step = step, wall_time = get_wall_time())) 27 | invisible(data) 28 | } 29 | 30 | write_event <- function(event) { 31 | writers <- lapply(fs::path(get_default_logdir(), field(event, "run")), get_writer) 32 | write_events(event, writers) 33 | } 34 | 35 | .tfevents <- new.env() 36 | 37 | #' Query and modify the logdir 38 | #' 39 | #' [log_event()] has a notion of default logdir, so you don't need to specify it 40 | #' at every call. These functions allow you to query and the current logdir. 41 | #' 42 | #' @param logdir The `logdir` that you want to set as default. 43 | #' @param code Expressions that will be evaluated in a context with the `new` 44 | #' `logdir` as the default `logdir`. 45 | #' @param .env Environment that controls scope of changes. For expert use only. 46 | #' 47 | #' @returns The `logdir` for `get_default_logdir()` otherwise invisibly returns 48 | #' `NULL` 49 | #' 50 | #' @examples 51 | #' temp <- tempfile() 52 | #' get_default_logdir() 53 | #' with_logdir(temp, { 54 | #' print(get_default_logdir()) 55 | #' }) 56 | #' @export 57 | get_default_logdir <- function() { 58 | if (is.null(.tfevents$logdir)) 59 | set_default_logdir() 60 | rlang::env_get(.tfevents, "logdir") 61 | } 62 | #' @describeIn get_default_logdir Modifies the default `logdir`. 
63 | #' @export 64 | set_default_logdir <- function(logdir = "logs") { 65 | rlang::env_bind(.tfevents, logdir = path.expand(logdir)) 66 | } 67 | 68 | with_logdir_impl <- function() { 69 | with_logdir_ <- withr::with_( 70 | set = set_default_logdir, 71 | get = function(logdir) { 72 | get_default_logdir() 73 | } 74 | ) 75 | function(logdir, code) { 76 | with_logdir_(logdir, code) 77 | } 78 | } 79 | 80 | #' @describeIn get_default_logdir Temporarily modify the default `logdir`. 81 | #' @export 82 | with_logdir <- with_logdir_impl() 83 | 84 | #' @describeIn get_default_logdir Temporarily modify the default `logdir`. 85 | #' @export 86 | local_logdir <- function(logdir, .env = parent.frame()) { 87 | old <- get_default_logdir() 88 | set_default_logdir(logdir) 89 | withr::defer(set_default_logdir(old), envir = .env) 90 | } 91 | 92 | #' Global step counters 93 | #' 94 | #' @param step New value for `step`. 95 | #' @param increment Whether to increment the `step` when getting it. 96 | #' 97 | #' @details `tfevents` tracks and automatically increases the step counter whenever 98 | #' [log_event()] is called. Note that it maintains a separate step counter for 99 | #' each root `logdir`, thus if you change the `logdir` using [set_default_logdir()] 100 | #' or [with_logdir()], a different step counter will be used. 101 | #' 102 | #' @returns The global step value for the default logdir, when `get_global_step`, 103 | #' otherwise returns `NULL` invisibly. 104 | #' 105 | #' @examples 106 | #' temp <- tempfile() 107 | #' with_logdir(temp, { 108 | #' print(get_global_step()) 109 | #' set_global_step(100) 110 | #' print(get_global_step()) 111 | #' }) 112 | #' print(get_global_step()) 113 | #' @export 114 | get_global_step <- function(increment = TRUE) { 115 | # a separate global step count is tracked for each root logdir. 116 | # the global step is queried once when calling `log_event`.
117 | logdir <- get_default_logdir() 118 | if (!rlang::env_has(.steps, logdir)) 119 | set_global_step(-1) 120 | cur_step <- rlang::env_get(.steps, logdir) + as.integer(increment) 121 | set_global_step(cur_step) 122 | cur_step 123 | } 124 | #' @describeIn get_global_step Set the global step. 125 | #' @export 126 | set_global_step <- function(step) { 127 | rlang::env_poke(.steps, get_default_logdir(), as.integer(step), create = TRUE) 128 | } 129 | .steps <- new.env() 130 | 131 | .writers <- new.env() 132 | get_writer <- function(logdir = get_default_logdir()) { 133 | logdir <- fs::path_norm(logdir) 134 | writer <- rlang::env_get(.writers, nm = logdir, default = NULL) 135 | if (!is.null(writer)) 136 | return(writer) 137 | 138 | writer <- create_event_writer(logdir) 139 | rlang::env_poke( 140 | .writers, 141 | logdir, 142 | writer, 143 | create = TRUE 144 | ) 145 | writer 146 | } 147 | 148 | -------------------------------------------------------------------------------- /tests/testthat/test-read.R: -------------------------------------------------------------------------------- 1 | test_that("collect float tensors", { 2 | temp <- tempfile() 3 | with_logdir(temp, { 4 | log_event(hello = summary_histogram(runif(1000))) 5 | }) 6 | x <- collect_events(temp) 7 | 8 | expect_equal(nrow(x), 2) 9 | }) 10 | 11 | test_that("can use the plugin function", { 12 | 13 | temp <- tempfile() 14 | with_logdir(temp, { 15 | log_event(hello = summary_histogram(runif(1000))) 16 | log_event(bye = 1) 17 | }) 18 | 19 | summaries <- collect_events(temp, type = "summary") 20 | expect_equal(plugin(summaries$summary), c("histograms", "scalars")) 21 | expect_equal(summaries$plugin, c("histograms", "scalars")) 22 | 23 | }) 24 | 25 | test_that("can iterate over events", { 26 | # tests low level reading functionality. 
27 | 28 | get_scalar_value <- function(value) { 29 | field(field(value, "summary")[[1]], "value") 30 | } 31 | 32 | temp <- tempfile() 33 | with_logdir(temp, { 34 | log_event(hello = 1) 35 | }) 36 | 37 | con <- events_logdir(temp) 38 | 39 | # reads the file definition event 40 | value <- collect_events(con, n = 1) 41 | expect_true(inherits(value, "tbl")) 42 | expect_true(inherits(value$event, "tfevents_event")) 43 | 44 | # reads the scalar 45 | value <- collect_events(con, n = 1) 46 | expect_true(inherits(value$event, "tfevents_event")) 47 | expect_equal(get_scalar_value(value$event), 1) 48 | 49 | # no more events in the file, so exhausted is returned 50 | value <- collect_events(con, n = 1) 51 | expect_true(nrow(value) == 0) 52 | 53 | with_logdir(temp, { 54 | log_event(hello = 2) 55 | }) 56 | 57 | # new events are written to the same file, 58 | # read them 59 | value <- collect_events(con, n = 1) 60 | expect_true(inherits(value$event, "tfevents_event")) 61 | expect_equal(get_scalar_value(value$event), 2) 62 | 63 | # new events are written to a different file in the 64 | # directory and they are read. 65 | with_logdir(file.path(temp, "hello"), { 66 | log_event(hello = 3) 67 | }) 68 | 69 | # first file event, is always a dummy event containg 70 | # timestamps and etc. 
71 | value <- collect_events(con, n = 1) 72 | expect_true(inherits(value$event, "tfevents_event")) 73 | 74 | value <- collect_events(con, n = 1) 75 | expect_true(inherits(value$event, "tfevents_event")) 76 | expect_equal(get_scalar_value(value$event), 3) 77 | 78 | # no more events available, returns exhausted 79 | value <- collect_events(con, n = 1) 80 | expect_true(nrow(value) == 0) 81 | }) 82 | 83 | test_that("can extract value", { 84 | 85 | temp <- tempfile() 86 | 87 | orig_img <- png::readPNG(test_path("resources/img.png")) 88 | img <- array(orig_img, dim = c(1, 28, 28, 1)) 89 | 90 | f <- wav::read_wav(test_path("resources/test-audio.wav")) 91 | audio <- array(t(f), dim = c(1, rev(dim(f)))) 92 | 93 | with_logdir(temp, { 94 | log_event(hello = 1) 95 | log_event(hello = 2) 96 | log_event(hist = summary_histogram(rnorm(1000))) 97 | log_event(img = summary_image(img)) 98 | log_event(audio = summary_audio(audio)) 99 | log_event(text = summary_text("hello world")) 100 | }) 101 | 102 | summaries <- collect_events(temp, type = "summary") 103 | expect_equal(value(summaries$summary[1]), 1) 104 | expect_error(value(summaries$summary), regexp = "single summary_value") 105 | 106 | histo <- value(summaries$summary[3]) 107 | expect_true(is.data.frame(histo)) 108 | expect_true(nrow(histo) == 30) 109 | expect_equal(names(histo), c("lower", "upper", "count")) 110 | 111 | im <- value(summaries$summary[4]) 112 | expect_equal(class(im), c("matrix", "array")) 113 | expect_equal(dim(im), c(28, 28)) 114 | 115 | aud <- value(summaries$summary[5]) 116 | expect_equal(class(aud), c("matrix", "array")) 117 | expect_equal(dim(aud), c(2, 352800)) 118 | expect_equal(attr(aud, "sample_rate"), 44100) 119 | expect_equal(attr(aud, "bit_depth"), 32) 120 | 121 | tx <- value(summaries$summary[6]) 122 | expect_equal(tx, "hello world") 123 | 124 | }) 125 | 126 | test_that("collect_event with type fails", { 127 | 128 | temp <- tempfile() 129 | with_logdir(temp, { 130 | log_event(hello = 
summary_histogram(runif(1000))) 131 | log_event(bye = 1) 132 | }) 133 | 134 | expect_error( 135 | collect_events(temp, type = "xx"), 136 | regexp = "must be one of" 137 | ) 138 | 139 | expect_error( 140 | collect_events(temp, n =-102), 141 | regexp = "must be positive" 142 | ) 143 | 144 | }) 145 | 146 | test_that("can format a data frame containing summaries", { 147 | 148 | 149 | temp <- tempfile() 150 | with_logdir(temp, { 151 | log_event(hello = summary_histogram(runif(1000))) 152 | log_event(bye = 1) 153 | }) 154 | 155 | x <- collect_events(temp) 156 | expect_error(as.data.frame(x), regexp = NA) 157 | expect_error(format(as.data.frame(x)), regexp = NA) 158 | 159 | }) 160 | 161 | test_that("can as list summary values", { 162 | 163 | temp <- tempfile() 164 | with_logdir(temp, { 165 | log_event(hello = runif(1)) 166 | log_event(bye = 1) 167 | }) 168 | 169 | expect_error( 170 | value(collect_events(temp, type = "summary")$summary), 171 | "as_list = TRUE" 172 | ) 173 | 174 | x <- value(collect_events(temp, type = "summary")$summary, as_list = TRUE) 175 | expect_true(is.list(x)) 176 | expect_length(x, 2) 177 | 178 | }) 179 | -------------------------------------------------------------------------------- /src/as.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include "generated/event.pb.h" 3 | #include "utils.h" 4 | 5 | template <> 6 | std::vector Rcpp::as> (SEXP x) { 7 | auto r_summary_metadata = Rcpp::as(x); 8 | 9 | auto r_plugin_name = Rcpp::as>(r_summary_metadata["plugin_name"]); 10 | auto r_display_name = Rcpp::as(r_summary_metadata["display_name"]); 11 | auto r_description = Rcpp::as(r_summary_metadata["description"]); 12 | auto r_plugin_content = Rcpp::as(r_summary_metadata["plugin_content"]); 13 | 14 | std::vector metadata; 15 | for (size_t i = 0; i < r_plugin_name.size(); i++) { 16 | tensorboard::SummaryMetadata meta; 17 | meta.mutable_plugin_data()->CopyFrom(make_plugin_data(r_plugin_name[i], 
r_plugin_content[i])); 18 | if (!Rcpp::CharacterVector::is_na(r_display_name[i])) { 19 | meta.set_display_name(r_display_name[i]); 20 | } 21 | if (!Rcpp::CharacterVector::is_na(r_description[i])) { 22 | meta.set_summary_description(r_description[i]); 23 | } 24 | metadata.push_back(meta); 25 | } 26 | 27 | return metadata; 28 | } 29 | 30 | template <> 31 | std::vector Rcpp::as> (SEXP x) { 32 | auto r_images = Rcpp::as(x); 33 | 34 | auto r_buffer = Rcpp::as(r_images["buffer"]); 35 | auto r_width = Rcpp::as(r_images["width"]); 36 | auto r_height = Rcpp::as(r_images["height"]); 37 | auto r_colorspace = Rcpp::as(r_images["colorspace"]); 38 | 39 | std::vector images; 40 | for (size_t i =0; i < r_buffer.size(); i++) { 41 | tensorboard::Summary_Image img; 42 | // we abuse by setting height to -1 to indicate that image is NA 43 | img.set_height(-1); 44 | if (!(Rcpp::IntegerVector::is_na(r_width[i]) || Rcpp::IntegerVector::is_na(r_height[i]))) { 45 | img.set_height(r_height[i]); 46 | img.set_width(r_width[i]); 47 | img.set_colorspace(r_colorspace[i]); 48 | 49 | // buffer is a blob object, that itself is a list of raw vectors. 50 | // here it should be list of a sigle element. 
51 | auto buf = Rcpp::as(r_buffer[i]); 52 | 53 | img.set_encoded_image_string(std::string(buf.begin(), buf.end())); 54 | } 55 | images.push_back(img); 56 | } 57 | return images; 58 | } 59 | 60 | template <> 61 | tensorboard::Summary Rcpp::as (SEXP x) { 62 | auto r_summary = Rcpp::as(x); 63 | 64 | const auto r_tag = Rcpp::as>(r_summary["tag"]); 65 | const auto r_metadata = Rcpp::as>(r_summary["metadata"]); 66 | const auto r_value = Rcpp::as(r_summary["value"]); // Use a numeric vector to allow NA's 67 | const auto r_image = Rcpp::as>(r_summary["image"]); 68 | const auto r_tensor = Rcpp::as>>(r_summary["tensor"]); 69 | 70 | tensorboard::Summary summary; 71 | for (size_t i = 0; i < r_tag.size(); i++) { 72 | auto value = summary.add_value(); 73 | value->set_tag(r_tag[i]); 74 | auto metadata = r_metadata[i]; 75 | value->mutable_metadata()->CopyFrom(metadata); 76 | 77 | // If the value is NA, we don't save it 78 | if (!Rcpp::NumericVector::is_na(r_value[i])) { 79 | value->set_simple_value((float)r_value[i]); 80 | } 81 | 82 | // images can be NA, in this case we make their height -1 when creating 83 | // the pb message 84 | auto image = r_image[i]; 85 | if (image.height() > 0) { 86 | // // See also https://github.com/tensorflow/tensorboard/blob/a74c10dd197e7b2a07219855a61bc62651e80065/tensorboard/plugins/image/summary_v2.py#L104 87 | // value->set_tag("image_summary/" + r_tag[i] + "/image" + (r_tag.size() > 1 ? 
("/" + std::to_string(i)) : "")); 88 | value->mutable_image()->CopyFrom(image); 89 | } 90 | 91 | if (r_tensor[i].has_value()) { 92 | value->mutable_tensor()->CopyFrom(r_tensor[i].value()); 93 | } 94 | } 95 | return summary; 96 | } 97 | 98 | template <> 99 | std::vector Rcpp::as> (SEXP x) { 100 | auto r_summary = Rcpp::as(x); 101 | std::vector summary; 102 | for (size_t i = 0; i < r_summary.size(); i++) { 103 | summary.push_back(Rcpp::as(r_summary[i])); 104 | } 105 | return summary; 106 | } 107 | 108 | template <> 109 | std::vector Rcpp::as> (SEXP x) { 110 | auto r_event = Rcpp::as(x); 111 | 112 | auto r_wall_time = Rcpp::as>(r_event["wall_time"]); 113 | auto r_step = Rcpp::as>(r_event["step"]); 114 | auto r_summary = Rcpp::as>(r_event["summary"]); 115 | std::vector event; 116 | for (size_t i = 0; i < r_wall_time.size(); i++) { 117 | tensorboard::Event e; 118 | e.set_wall_time(r_wall_time[i]); 119 | e.set_step(r_step[i]); 120 | e.mutable_summary()->CopyFrom(r_summary[i]); 121 | event.push_back(e); 122 | } 123 | return event; 124 | } 125 | -------------------------------------------------------------------------------- /vignettes/articles/images.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Writing images" 3 | --- 4 | 5 | ```{r, include = FALSE} 6 | knitr::opts_chunk$set( 7 | collapse = TRUE, 8 | comment = "#>" 9 | ) 10 | 11 | # use a temp directory so we can write logs as we wish 12 | temp <- tempfile("logdir") 13 | dir.create(temp) 14 | knitr::opts_knit$set(root.dir = temp) 15 | ``` 16 | 17 | ```{r setup} 18 | library(tfevents) 19 | ``` 20 | 21 | tfevents has the ability to write log images so they can be 22 | displayed in the TensorBoard dashboard. This guide describes 23 | different ways you can log images with tfevents and how to 24 | extend this functionality to other kinds of R objects that 25 | you might want to log as images in the dashboard. 
26 | 27 | ## From arrays 28 | 29 | The most common way to write images is when you have a batch of image data in 30 | an R array. For example, suppose we want to write the first 5 training samples 31 | available in the Fashion MNIST dataset built into Keras. 32 | 33 | ```{r} 34 | fashion_mnist <- keras::dataset_fashion_mnist() 35 | train_imgs <- fashion_mnist$train$x[1:5,,]/255 36 | str(train_imgs) 37 | ``` 38 | 39 | tfevents supports writing a batch of images from an array as long as the array has 40 | 4 dimensions: `batch_size`, `height`, `width` and `channels`. Thus for the images 41 | in the Fashion MNIST dataset we need to add an extra dimension denoting that those 42 | images have only one channel, i.e. they are grayscale images. 43 | 44 | ```{r} 45 | dim(train_imgs) <- c(dim(train_imgs), 1) 46 | ``` 47 | 48 | We can now log them to the default log directory (`logs`) with: 49 | 50 | ```{r} 51 | log_event("Training samples" = summary_image(train_imgs)) 52 | ``` 53 | 54 | If we open the TensorBoard UI in the logdir we will see something like the below: 55 | 56 | ```{r} 57 | tensorflow::tensorboard(normalizePath("logs"), port = 6060) 58 | ``` 59 | 60 | Notice that images are also associated with a global step counter. If you log 61 | images at multiple steps you will be able to visualize how they change using 62 | the slider in TensorBoard. 63 | 64 | ```{r batch, echo = FALSE, out.width="100%"} 65 | webshot2::webshot("http://127.0.0.1:6060/?darkMode=false", vwidth = 1200) 66 | ``` 67 | 68 | ## From ggplot objects 69 | 70 | You can also write images from ggplot2 plots, for example: 71 | 72 | ```{r} 73 | library(ggplot2) 74 | p <- ggplot(mtcars, aes(x = hp, y = mpg)) + 75 | geom_point() 76 | ``` 77 | 78 | You can log plots using `log_event` and `summary_image` and they will be displayed 79 | in TensorBoard.
The default size is 480x480 pixels but you can change the default by 80 | passing `width` and `height` to `summary_image`: 81 | 82 | ```{r} 83 | temp <- tempfile("logdir") # use another logdir 84 | set_default_logdir(temp) 85 | 86 | log_event("Ggplot2 plot" = summary_image(p)) 87 | ``` 88 | 89 | We now launch TensorBoard and see: 90 | 91 | ```{r} 92 | tensorflow::tensorboard(temp, port = 6061) 93 | ``` 94 | 95 | ```{r ggpl, echo = FALSE, out.width="100%"} 96 | webshot2::webshot("http://127.0.0.1:6061/?darkMode=false", vwidth = 1200) 97 | ``` 98 | 99 | ## Extending 100 | 101 | Currently those are the only two built-in supported objects for writing images 102 | with tfevents. However, we provide lower level interfaces that allow you to extend 103 | support for your own data type. 104 | 105 | The `summary_image` function is an S3 generic, so you can implement it for your 106 | own type. For example, we could implement support for logging a raster image by 107 | implementing a method for it. We also provide methods for lower level representations 108 | of images; for example, `summary_image.raw` can write a `raw()` vector containing 109 | a PNG encoded image. 110 | 111 | Let's implement a method for writing raster images, created with `as.raster()`. 112 | For example, we can use the first image in the Fashion MNIST dataset that we used 113 | earlier. 114 | 115 | ```{r} 116 | img <- as.raster(train_imgs[1,,,]) 117 | class(img) 118 | ``` 119 | 120 | Now let's define the `summary_image` method for the `raster` class. We can get a PNG encoded image by saving it to a file using `grDevices::png` and then reading it back 121 | to R. 122 | 123 | Note that it's important to have `metadata` and `tag` arguments in the signature 124 | so your users can customize how the image is displayed in TensorBoard.
125 | 126 | ```{r} 127 | summary_image.raster <- function(img, ..., width = 480, height = 480, metadata = NULL, tag = NA) { 128 | temp <- tempfile(fileext = ".png") 129 | on.exit({unlink(temp)}, add = TRUE) 130 | 131 | grDevices::png(filename = temp, width = width, height = height, ...) 132 | plot(img) 133 | dev.off() 134 | 135 | sze <- fs::file_info(temp)$size 136 | raw <- readBin(temp, n = sze, what = "raw") 137 | 138 | summary_image( 139 | raw, 140 | width = width, 141 | height = height, 142 | colorspace = 4, # number of channels in the IMG. since PNG - 4 143 | metadata = metadata, 144 | tag = tag 145 | ) 146 | } 147 | ``` 148 | 149 | We can now save the raster image with: 150 | 151 | ```{r} 152 | temp <- tempfile("logdir") # use another logdir 153 | set_default_logdir(temp) 154 | 155 | log_event("Raster plot" = summary_image(img)) 156 | ``` 157 | 158 | It will be displayed in the dashboard: 159 | 160 | ```{r raster, echo = FALSE, out.width="100%"} 161 | tensorflow::tensorboard(temp, port = 6062) 162 | webshot2::webshot("http://127.0.0.1:6062/?darkMode=false", vwidth = 1200) 163 | ``` 164 | 165 | 166 | 167 | -------------------------------------------------------------------------------- /R/image.R: -------------------------------------------------------------------------------- 1 | #' Creates a image summary 2 | #' 3 | #' @param img An object that can be converted to an image. 4 | #' @param ... Currently unused. 5 | #' @inheritParams summary_scalar 6 | #' @param width Width of the image. 7 | #' @param height Height of the image. 8 | #' @param colorspace Valid colorspace values are 9 | #' `1 - grayscale`, 10 | #' `2 - grayscale + alpha`, 11 | #' `3 - RGB`, 12 | #' `4 - RGBA`, 13 | #' `5 - DIGITAL_YUV`, 14 | #' `6 - BGRA` 15 | #' @family summary 16 | #' @returns An image summary that can be logged with [log_event()]. 
#' @examples
#' tmp <- tempfile()
#' with_logdir(tmp, {
#'   summary_image(array(runif(100), dim = c(1,10, 10, 1)))
#' })
#' @export
summary_image <- function(img, ..., metadata = NULL, tag = NA) {
  UseMethod("summary_image")
}

#' @describeIn summary_image Creates an image summary from a ggplot2 graph object.
#'   The `...` will be forwarded to [grDevices::png()].
#' @export
summary_image.ggplot <- function(img, ..., width = 480, height = 480, metadata = NULL, tag = NA) {
  # Render the plot into a temporary PNG file, then read the encoded bytes
  # back so they can be logged as a raw PNG image summary.
  temp <- tempfile(fileext = ".png")
  on.exit({unlink(temp)}, add = TRUE)

  grDevices::png(filename = temp, width = width, height = height, ...)
  plot(img)
  grDevices::dev.off()

  sze <- fs::file_info(temp)$size
  raw <- readBin(temp, n = sze, what = "raw")

  # colorspace = 4 is RGBA (see the colorspace enumeration in summary.proto);
  # the png device produces RGBA output.
  summary_image(
    raw,
    width = width,
    height = height,
    colorspace = 4,
    metadata = metadata,
    tag = tag
  )
}

#' @describeIn summary_image Creates an image from an R array. The array should be
#'   numeric, with values between 0 and 1. Dimensions should be `(batch, height, width, channels)`.
#' @export
summary_image.array <- function(img, ..., metadata = NULL, tag = NA) {
  # A batch dimension is required: 4 dims (batch, height, width, channels).
  if (length(dim(img)) <= 3) {
    cli::cli_abort(c(
      "Expected an array with dimensions {.code (batch, height, width, channels)}",
      i = "Got an array with dimensions {.code ({paste(dim(img), collapse=', ')})}."
    ))
  }

  if (is.null(metadata)) {
    metadata <- summary_metadata(plugin_name = "images")
  }

  # Only the TensorBoard "images" plugin understands this layout.
  if (!all(field(metadata, "plugin_name") == "images")) {
    cli::cli_abort(c(
      "Plugin name should be 'images'",
      x = "Got {.val {unique(field(metadata, 'plugin_name'))}}"
    ))
  }

  # See https://github.com/tensorflow/tensorboard/blob/a74c10dd197e7b2a07219855a61bc62651e80065/tensorboard/plugins/image/summary_v2.py#L111
  # for the implementation.
  # The images are converted to a character vector, the first 2 elements being the
  # dimensions, and the others containing the images encoded as PNG.
  png_images <- apply(img, 1, function(x) {
    png::writePNG(x)
  }, simplify = FALSE)
  blob_images <- blob::new_blob(png_images)

  dims <- dim(img)
  # dims[3] is width, dims[2] is height (matching TensorBoard's width-first
  # convention). NOTE(review): each dimension is serialized as a single byte
  # via as.raw(), which presumably assumes width/height < 256 — confirm
  # against the reader in R/read.R (which also reads size = 1).
  dims <- blob::blob(as.raw(dims[3]), as.raw(dims[2]))

  blobs <- c(dims, blob_images)

  summary_tensor(
    blobs,
    dtype = "string",
    metadata = metadata,
    tag = tag
  )
}

#' @describeIn summary_image Creates an image from [blob::blob()] vctr of PNG encoded images,
#'   (eg using [png::writePNG()]). `width`, `height` and `colorspace` are recycled
#'   thus they can be a single scalar or a vector the same size of the images blob.
#' @export
summary_image.blob <- function(img, ..., width, height, colorspace, metadata = NULL, tag = NA) {
  # Recycle all fields to a common length so scalars broadcast over the blob.
  c(img, width, height, colorspace) %<-% vec_recycle_common(img, width, height, colorspace)

  image <- summary_summary_image(
    buffer = img,
    width = width,
    height = height,
    colorspace = colorspace
  )
  new_summary_image(image, metadata = metadata, tag = tag)
}

#' @describeIn summary_image Creates an image from a png encoded image. Eg, created
#'   with [png::writePNG()]. In this case you need to provide `width`, `height` and
#'   `colorspace` arguments.
#' @export
summary_image.raw <- function(img, ..., width, height, colorspace, metadata = NULL, tag = NA) {
  # Wrap the single raw vector into a blob and reuse the blob method.
  summary_image(
    img = blob::blob(img),
    width = width,
    height = height,
    colorspace = colorspace,
    metadata = metadata,
    tag = tag
  )
}

# Internal constructor: wraps a summary_summary_image record into a
# tfevents_summary_values vctr, defaulting the metadata to the images plugin.
new_summary_image <- function(img = new_summary_summary_image(), ..., metadata = NULL, tag = character()) {
  if (is.null(metadata)) {
    metadata <- summary_metadata(plugin_name = "images")
  }
  summary_values(metadata = metadata, image = img, class = "tfevents_summary_image", tag = tag)
}

# Validating helper: casts the buffer to a blob before building the record.
summary_summary_image <- function(buffer, width, height, colorspace) {
  new_summary_summary_image(
    buffer = vec_cast(buffer, blob()),
    width = width,
    height = height,
    colorspace = colorspace
  )
}

# Low-level record constructor mirroring the Summary.Image protobuf message
# (encoded bytes + width + height + colorspace).
#' @importFrom blob blob
new_summary_summary_image <- function(buffer = blob(), width = integer(), height = integer(), colorspace = integer()) {
  buffer <- vec_cast(buffer, blob())
  vctrs::new_rcrd(
    fields = list(
      buffer = buffer,
      width = width,
      height = height,
      colorspace = colorspace
    ),
    class = "summary_summary_image"
  )
}

# Self ptype2/cast methods so summary_summary_image vctrs can be combined.
#' @export
vec_ptype2.summary_summary_image.summary_summary_image <- function(x, y, ...) {
  new_summary_summary_image()
}
#' @export
vec_cast.summary_summary_image.summary_summary_image <- function(x, to, ...)
{ 162 | x 163 | } 164 | -------------------------------------------------------------------------------- /src/proto/summary.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorboard; 4 | 5 | import "tensor.proto"; 6 | 7 | option cc_enable_arenas = true; 8 | option java_outer_classname = "SummaryProtos"; 9 | option java_multiple_files = true; 10 | option java_package = "org.tensorflow.framework"; 11 | option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/summary_go_proto"; 12 | 13 | // Metadata associated with a series of Summary data 14 | message SummaryDescription { 15 | // Hint on how plugins should process the data in this series. 16 | // Supported values include "scalar", "histogram", "image", "audio" 17 | string type_hint = 1; 18 | } 19 | 20 | // Serialization format for histogram module in 21 | // core/lib/histogram/histogram.h 22 | message HistogramProto { 23 | double min = 1; 24 | double max = 2; 25 | double num = 3; 26 | double sum = 4; 27 | double sum_squares = 5; 28 | 29 | // Parallel arrays encoding the bucket boundaries and the bucket values. 30 | // bucket(i) is the count for the bucket i. The range for 31 | // a bucket is: 32 | // i == 0: -DBL_MAX .. bucket_limit(0) 33 | // i != 0: bucket_limit(i-1) .. bucket_limit(i) 34 | repeated double bucket_limit = 6 [packed = true]; 35 | repeated double bucket = 7 [packed = true]; 36 | } 37 | 38 | // A SummaryMetadata encapsulates information on which plugins are able to make 39 | // use of a certain summary value. 40 | message SummaryMetadata { 41 | message PluginData { 42 | // The name of the plugin this data pertains to. 43 | string plugin_name = 1; 44 | 45 | // The content to store for the plugin. The best practice is for this to be 46 | // a binary serialized protocol buffer. 47 | bytes content = 2; 48 | } 49 | 50 | // Data that associates a summary with a certain plugin. 
51 | PluginData plugin_data = 1; 52 | 53 | // Display name for viewing in TensorBoard. 54 | string display_name = 2; 55 | 56 | // Longform readable description of the summary sequence. Markdown supported. 57 | string summary_description = 3; 58 | 59 | // Class of data stored in this time series. Required for compatibility with 60 | // TensorBoard's generic data facilities (`DataProvider`, et al.). This value 61 | // imposes constraints on the dtype and shape of the corresponding tensor 62 | // values. See `DataClass` docs for details. 63 | DataClass data_class = 4; 64 | } 65 | 66 | enum DataClass { 67 | // Unknown data class, used (implicitly) for legacy data. Will not be 68 | // processed by data ingestion pipelines. 69 | DATA_CLASS_UNKNOWN = 0; 70 | // Scalar time series. Each `Value` for the corresponding tag must have 71 | // `tensor` set to a rank-0 tensor of type `DT_FLOAT` (float32). 72 | DATA_CLASS_SCALAR = 1; 73 | // Tensor time series. Each `Value` for the corresponding tag must have 74 | // `tensor` set. The tensor value is arbitrary, but should be small to 75 | // accommodate direct storage in database backends: an upper bound of a few 76 | // kilobytes is a reasonable rule of thumb. 77 | DATA_CLASS_TENSOR = 2; 78 | // Blob sequence time series. Each `Value` for the corresponding tag must 79 | // have `tensor` set to a rank-1 tensor of bytestring dtype. 80 | DATA_CLASS_BLOB_SEQUENCE = 3; 81 | } 82 | 83 | // A Summary is a set of named values to be displayed by the 84 | // visualizer. 85 | // 86 | // Summaries are produced regularly during training, as controlled by 87 | // the "summary_interval_secs" attribute of the training operation. 88 | // Summaries are also produced at the end of an evaluation. 89 | message Summary { 90 | message Image { 91 | // Dimensions of the image. 
92 | int32 height = 1; 93 | int32 width = 2; 94 | // Valid colorspace values are 95 | // 1 - grayscale 96 | // 2 - grayscale + alpha 97 | // 3 - RGB 98 | // 4 - RGBA 99 | // 5 - DIGITAL_YUV 100 | // 6 - BGRA 101 | int32 colorspace = 3; 102 | // Image data in encoded format. All image formats supported by 103 | // image_codec::CoderUtil can be stored here. 104 | bytes encoded_image_string = 4; 105 | } 106 | 107 | message Audio { 108 | // Sample rate of the audio in Hz. 109 | float sample_rate = 1; 110 | // Number of channels of audio. 111 | int64 num_channels = 2; 112 | // Length of the audio in frames (samples per channel). 113 | int64 length_frames = 3; 114 | // Encoded audio data and its associated RFC 2045 content type (e.g. 115 | // "audio/wav"). 116 | bytes encoded_audio_string = 4; 117 | string content_type = 5; 118 | } 119 | 120 | message Value { 121 | // This field is deprecated and will not be set. 122 | string node_name = 7; 123 | 124 | // Tag name for the data. Used by TensorBoard plugins to organize data. Tags 125 | // are often organized by scope (which contains slashes to convey 126 | // hierarchy). For example: foo/bar/0 127 | string tag = 1; 128 | 129 | // Contains metadata on the summary value such as which plugins may use it. 130 | // Take note that many summary values may lack a metadata field. This is 131 | // because the FileWriter only keeps a metadata object on the first summary 132 | // value with a certain tag for each tag. TensorBoard then remembers which 133 | // tags are associated with which plugins. This saves space. 134 | SummaryMetadata metadata = 9; 135 | 136 | // Value associated with the tag. 137 | oneof value { 138 | float simple_value = 2; 139 | bytes obsolete_old_style_histogram = 3; 140 | Image image = 4; 141 | HistogramProto histo = 5; 142 | Audio audio = 6; 143 | TensorProto tensor = 8; 144 | } 145 | } 146 | 147 | // Set of values for the summary. 
#' Coerce an object to a event.
#'
#' @param x Object that will be coerced to an event.
#' @param step The step that will be used when the event is logged. This is used
#'   by TensorBoard when showing data.
#' @param wall_time The wall time that will be attached to the event. This field
#'   is used by TensorBoard when displaying information based on actual time.
#' @param ... currently unused.
#'
#' @returns An event vctr with class `tfevents_event`.
#'
#' @section Extending `as_event`:
#'
#' `as_event` is an S3 generic and you can implement method for your own class.
#' We don't export the `event` constructor though, so you should implement it
#' in terms of other `as_event` methods.
#'
#' @examples
#' as_event(list(hello = 1), step = 1, wall_time = 1)
#'
#' @export
as_event <- function(x, step, wall_time, ...) {
  UseMethod("as_event")
}

#' @export
as_event.list <- function(x, step, wall_time, ..., name = ".") {
  # map2 is a project helper; presumably it iterates over elements and their
  # names — TODO confirm against R/utils.R. Each element is converted
  # recursively, accumulating the name path used to build the run/tag.
  ev <- map2(
    x,
    function(obj, nm) {
      as_event(obj, step = step, wall_time = wall_time, name = c(name, nm))
    }
  )
  vec_c(!!!squash_if(unname(ev), vec_is_list))
}

# Flattens (via unlist) only those elements of `x` for which `predicate`
# is TRUE; all other elements pass through unchanged.
squash_if <- function(x, predicate) {
  lapply(x, function(obj) {
    if (predicate(obj)) {
      unlist(obj)
    } else {
      obj
    }
  })
}

#' @export
as_event.numeric <- function(x, step, wall_time, ..., name) {
  # Only scalar values can be logged as scalar summaries.
  if (!rlang::is_scalar_atomic(x)) {
    cli::cli_abort(c(
      "Can't log a numeric vector with length != 1.",
      i = "Expected a single value but got a vector with length {.val {length(x)}}."
    ))
  }
  x <- summary_scalar(x)
  as_event(x, step = step, wall_time = wall_time, name = name)
}

#' @export
as_event.character <- function(x, step, wall_time, ..., name) {
  # Character values are logged as text summaries.
  x <- summary_text(x)
  as_event(x, step = step, wall_time = wall_time, name = name)
}

#' @importFrom utils tail
#' @export
as_event.tfevents_summary_values <- function(x, step, wall_time, ..., name) {
  # The last element of the name path becomes the tag; the remaining
  # elements form the run directory.
  field(x, "tag") <- make_tag(field(x, "tag"), tail(name, 1))
  event(
    run = paste0(name[-length(name)], collapse = "/"),
    wall_time = wall_time,
    step = step,
    summary = summary(list(x))
  )
}

# Resolves the final tag for each summary value: a summary may carry its own
# tag (non-NA) or receive one from the name path, but never both.
make_tag <- function(cur_tag, name) {
  # Recycle the candidate name so it lines up with every existing tag.
  name <- rep(name, length(cur_tag))

  # A summary that already has a tag must not also receive a non-empty name.
  if (any(name[!is.na(cur_tag)] != "")) {
    cli::cli_abort(c(
      x = "Two tags were provided for the same summary.",
      i = "You can only specify a tag once for each summary."
    ))
  }

  # Fill in missing tags from the name path.
  cur_tag[is.na(cur_tag)] <- name[is.na(cur_tag)]

  if (any(cur_tag == "")) {
    cli::cli_abort(c(
      x = "All summaries must have a tag, but found at least one without one.",
      i = "See {.help log_event} to find out how to specify tags for summaries."
    ))
  }

  cur_tag
}

#' Creates events
#'
#' We try to match events as closely as possible to the protobuf messages.
#' The hierarchy looks like:
#' ```
#' event (tfevents_event):
#'  - run (character)
#'  - wall_time (integer)
#'  - step (integer)
#'  - summary (tfevents_summary, aka a list_of summary values):
#'    - values (list):
#'      - (tfevents_summary_values):
#'        - metadata (tfevents_summary_metadata)
#'        - tag (character)
#'        - value (numeric)
#'        - image (summary_summary_image):
#'          - buffer (blob)
#'          - width (integer)
#'          - height (integer)
#'          - colorspace (integer)
#' ```
#'
#' @keywords internal
#' @import vctrs
event <- function(run, wall_time, step, ..., summary = NA, file_version = NA) {
  # NOTE(review): as.integer() of an epoch-seconds wall time will overflow to
  # NA after 2038 (> .Machine$integer.max) — confirm intended storage type.
  new_event(
    run = vec_cast(run, character()),
    wall_time = as.integer(wall_time),
    step = as.integer(step),
    summary = vec_cast(summary, new_summary()),
    file_version = vec_cast(file_version, character())
  )
}

# Bare record constructor for tfevents_event; no validation or casting.
new_event <- function(run = character(),
                      wall_time = integer(),
                      step = integer(),
                      ...,
                      summary = new_summary(),
                      file_version = character()) {
  new_rcrd(list(
    run = run,
    wall_time = wall_time,
    step = step,
    summary = summary,
    file_version = file_version
  ), class = "tfevents_event")
}


#' @export
format.tfevents_event <- function(x, ...) {
  # Compact "<run/step>" display for printing event vctrs.
  paste0("<", field(x, "run"), "/", format(field(x, "step")), ">")
}

# Self ptype2/cast methods so tfevents_event vctrs can be combined.
#' @export
vec_ptype2.tfevents_event.tfevents_event <- function(x, y, ...) {
  x
}

#' @export
vec_cast.tfevents_event.tfevents_event <- function(x, to, ...) {
  x
}
{
  x
}

# Validating constructor for summary values: casts each field to its
# prototype, then recycles everything to a common size before building
# the record.
summary_values <- function(metadata, tag = NA, ..., value = NA, image = NA, tensor = NA, class = NULL) {
  value <- vec_cast(value, numeric())
  image <- vec_cast(image, new_summary_summary_image())
  tag <- vec_cast(tag, character())
  tensor <- vec_cast(tensor, new_tensor_proto())

  c(metadata, tag, value, image, tensor) %<-% vec_recycle_common(metadata, tag, value, image, tensor)

  new_summary_values(metadata = metadata, tag = tag, value = value, image = image,
                     tensor = tensor, class = class)
}

# Bare record constructor mirroring the Summary.Value protobuf message.
# `class` allows subclassing (e.g. "tfevents_summary_image").
new_summary_values <- function(metadata = new_summary_metadata(), tag = character(), ...,
                               value = numeric(), image = new_summary_summary_image(),
                               tensor = new_tensor_proto(), class = NULL) {
  vctrs::new_rcrd(
    fields = list(metadata = metadata, tag = tag, value = value, image = image,
                  tensor = tensor),
    class = c(class, "tfevents_summary_values")
  )
}

# A summary is a list_of summary values (mirrors `repeated Value value`
# in the Summary protobuf message).
summary <- function(values) {
  new_summary(values)
}

new_summary <- function(values = list(new_summary_values())) {
  new_list_of(values, ptype = new_summary_values(), class = "tfevents_summary")
}

# ptype2 methods: combining summary values with each other, or with a
# summary list, resolves to the appropriate prototype.
#' @export
vec_ptype2.tfevents_summary_values.tfevents_summary_values <- function(x, y, ...) {
  new_summary_values()
}

#' @export
vec_ptype2.tfevents_summary_values.tfevents_summary <- function(x, y, ...) {
  new_summary()
}
#' @export
vec_ptype2.tfevents_summary.tfevents_summary_values <- function(x, y, ...) {
  new_summary()
}

# vec_cast.vctrs_percent.double <- function(x, to, ...) percent(x)
# vec_cast.double.vctrs_percent <- function(x, to, ...) vec_data(x)

#' @export
vec_cast.tfevents_summary_values.tfevents_summary_values <- function(x, to, ...) {
  x
}
#' @export
vec_cast.tfevents_summary.tfevents_summary_values <- function(x, to, ...) {
  # NOTE(review): `if (is.na(x))` assumes `x` has size 1 here; a longer input
  # would make `is.na()` return a vector and `if ()` error — confirm callers
  # only reach this cast with scalar summary values.
  if (is.na(x)) return(vec_cast(NA, new_summary()))
  new_summary(list(x))
}


#' Summary metadata
#'
#' Creates a summary metadata that can be passed to multiple `summary_` functions.
#'
#' @param plugin_name The name of the TensorBoard plugin that might use the summary.
#' @param display_name Display name for the summary.
#' @param description A description of the summary.
#' @param plugin_content An optional plugin content. Note that it will only be
#'   used if the C++ function `make_plugin_data` is aware of `plugin_content`
#'   for the specified plugin name. For advanced use only.
#' @param ... Currently unused. For future expansion.
#'
#' @returns A `summary_metadata` object.
#'
#' @examples
#' summary <- summary_scalar(1, metadata = summary_metadata("scalars"))
#'
#' @export
summary_metadata <- function(
    plugin_name,
    display_name = NA_character_,
    description = NA_character_, ...,
    plugin_content = NA) {
  rlang::check_dots_empty()
  plugin_content <- vec_cast(plugin_content, list())
  new_summary_metadata(plugin_name = plugin_name, display_name = display_name,
                       description = description,
                       plugin_content = plugin_content)
}

# Bare record constructor mirroring the SummaryMetadata protobuf message.
new_summary_metadata <- function(plugin_name = character(), display_name = character(),
                                 description = character(), plugin_content = list()) {
  vctrs::new_rcrd(
    fields = list(
      plugin_name = plugin_name,
      display_name = display_name,
      description = description,
      plugin_content = plugin_content
    ),
    class = "tfevents_summary_metadata"
  )
}

#' @export
format.tfevents_summary <- function(x, ...) {
  # Format each summary value in the list individually.
  sapply(x, format)
}

#' @export
format.tfevents_summary_values <- function(x, ...) {
  # Compact "<tag>" display for printing.
  paste0("<", field(x, "tag"), ">")
}

# These values are used from the C++ code
vec_c_list <- function(x) {
  vec_c(!!!x)
}
na <- NA
# NA check that works for vctrs records, NULL, and plain scalars alike.
is_na <- function(x) {
  if (inherits(x, "vctrs_vctr"))
    vec_any_missing(x)
  else if (is.null(x))
    TRUE
  else
    rlang::is_na(x)
}
-------------------------------------------------------------------------------- /R/read.R: --------------------------------------------------------------------------------
#' Collect data from tfevents records
#'
#' Collects all events of a kind in a single data.frame ready for analysis.
#'
#' @param logdir The log directory that you want to query events from. Either a
#'   file path or a connection created with [events_logdir()].
#' @param n The maximum number of events to read from the connection. If `NULL`
#'   then all events are read, the default is `NULL`.
#' @param type The kind of events that are to be read. By default all events are
#'   read. If a different type is specified, then the result can include other
#'   columns as well as more lines.
#'
#' @returns
#' A `tibble` with the collected events.
#'
#' @examples
#' temp <- tempfile()
#' with_logdir(temp, {
#'   for(i in 1:5) {
#'     log_event(my_log = runif(1))
#'   }
#' })
#' # collect all events in files, including file description events
#' collect_events(temp)
#' # collect summaries in the logdir
#' collect_events(temp, type = "summary")
#' # collect only scalar events
#' collect_events(temp, type = "scalar")
#'
#' @export
collect_events <- function(logdir = get_default_logdir(), n = NULL,
                           type = c("any", "summary", "scalar")) {
  logdir <- events_logdir(logdir)
  type <- rlang::arg_match(type)

  if (!is.null(n) && n <= 0) {
    cli::cli_abort(c(
      "{.arg n} must be positive or `NULL`",
      "{.val {n}} is <=0 and not allowed."
    ))
  }

  # The number of events is unknown upfront, so accumulate one tibble per
  # event in a list and bind once at the end.
  events <- list()
  repeat {
    event <- read_next(logdir, type = type)
    if (is_exhausted(event)) {
      break
    } else {
      events[[length(events) + 1]] <- event
      if (!is.null(n) && length(events) == n)
        break
    }
  }
  vec_rbind(!!!events)
}

#' @describeIn collect_events Creates a connection to a logdir that can be reused
#'   to read further events later.
#' @export
events_logdir <- function(logdir = get_default_logdir()) {
  # Idempotent: passing an existing connection returns it unchanged.
  if (inherits(logdir, "tfevents_logdir_connection"))
    return(logdir)
  new_events_logdir_connection(logdir)
}

# Builds a stateful connection object (an environment) holding one iterator
# per tfevents file found under `logdir`.
new_events_logdir_connection <- function(logdir) {
  con <- structure(
    new.env(parent = emptyenv()),
    class = "tfevents_logdir_connection"
  )

  files <- fs::dir_ls(logdir, type = "file", regexp = ".*tfevents.*", recurse = TRUE)
  iterators <- create_iterators(files, logdir)

  con$logdir <- logdir
  con$iterators <- iterators
  con$files <- files

  con
}

# Possibly includes new files that might have been created in the logdir, and
# could yield new values.
refresh_events_logdir_connection <- function(con) {
  # if exhausted, maybe there's a new file in the directory that we were not
  # tracking yet, so we try it before returning the exhausted flag.
  # check if new files were added to the directory
  new_files <- fs::dir_ls(con$logdir, type = "file", regexp = ".*tfevents.*", recurse = TRUE)
  new_files <- new_files[!new_files %in% con$files]

  # append to files and iterators
  con$files <- c(con$files, new_files)
  con$iterators <- append(con$iterators, create_iterators(new_files, con$logdir))
  invisible(NULL)
}

# Dispatches to the reader matching the requested event type.
read_next <- function(con, type) {
  if (type == "any") {
    read_next_event(con)
  } else if (type == "summary") {
    read_next_summary(con)
  } else if (type == "scalar") {
    read_next_scalar(con)
  }
}

read_next_event <- function(con) {
  out <- try_iterators(con$iterators)
  if (is_exhausted(out)) {
    # The logdir may have gained new files since we last looked; refresh the
    # connection and retry once before reporting exhaustion.
    refresh_events_logdir_connection(con)
    out <- try_iterators(con$iterators)
  }
  out
}

# Reads events until one carrying a summary is found (or the connection is
# exhausted). Iterative rather than recursive so that logdirs containing many
# non-summary events cannot exhaust the call stack.
read_next_summary <- function(con) {
  repeat {
    out <- read_next_event(con)
    if (is_exhausted(out)) {
      return(out)
    }
    if (!is.na(out$summary)) {
      out <- tidyr::unnest(out, summary)
      out$tag <- field(out$summary, "tag")
      out$plugin <- plugin(out$summary)
      return(out)
    }
  }
}

# Reads summaries until one from the "scalars" plugin is found (or the
# connection is exhausted). Iterative for the same stack-safety reason as
# read_next_summary().
read_next_scalar <- function(con) {
  repeat {
    out <- read_next_summary(con)
    if (is_exhausted(out)) {
      return(out)
    }
    if (out$plugin == "scalars") {
      out$value <- value(out$summary)
      return(out)
    }
  }
}

# Tries each file iterator in turn and returns the first event obtained,
# wrapped in a one-row tibble; returns the exhausted sentinel when every
# iterator is spent.
try_iterators <- function(iterators) {
  rlang::check_installed("tibble")
  for (iterator in iterators) {
    event <- try(event_file_iterator_next(iterator), silent = TRUE)
    if (!inherits(event, "try-error")) {
      events <- tibble::tibble(event = event)
      events$run <- field(events$event, "run")
      events$step <- field(events$event, "step")
      events$summary <- field(events$event, "summary")
      return(events)
    }
  }
  exhausted()
}

#' Extracts the value of a summary value
#'
#' Summaries are complicated objects because they reflect the Protobuf object
#' structure that are serialized in the tfevents records files. This function
#' allows one to easily query values from summaries and will dispatch to the
#' correct way to extract images, audio, text, etc from summary values.
#'
#' @param x A `tfevents_summary_values` object.
#' @param ... Currently unused. To allow future extension.
#' @returns
#' Depending on the type of the summary it returns an image, audio, text or
#' scalar.
#'
#' @examples
#' temp <- tempfile()
#' with_logdir(temp, {
#'   for(i in 1:5) {
#'     log_event(my_log = runif(1))
#'   }
#' })
#'
#' # iterate over all events
#' summary <- collect_events(temp, n = 1, type = "summary")
#' value(summary$summary)
#'
#' @export
value <- function(x, ...) {
  UseMethod("value")
}

#' @describeIn value Access values from `summary_values`.
#' @param as_list A boolean indicating if the results should be returned in a list.
#'   The default is to return a single value. If you need to extract values from
#'   multiple summaries use `as_list = TRUE`.
#' @export
value.tfevents_summary_values <- function(x, ..., as_list = FALSE) {
  if (!vec_size(x) == 1 && !as_list) {
    cli::cli_abort(c(
      "You must pass a single summary_value to get its value.",
      i = "Got size {.val {vec_size(x)}}",
      i = "Use {.val as_list = TRUE} to cast many values at once."
    ))
  }
  if (as_list) {
    lapply(x, value)
  } else {
    # Prepend a plugin-specific class so dispatch reaches the right
    # value.tfevents_summary_values_* method below.
    class(x) <- c(paste0("tfevents_summary_values_", plugin(x)), class(x))
    value(x)
  }
}

#' @export
value.tfevents_summary_values_scalars <- function(x, ...) {
  field(x, "value")
}

#' @export
value.tfevents_summary_values_histograms <- function(x, ...) {
  tensor <- field(x, "tensor")

  values <- field(tensor, "content")[[1]]
  dims <- field(field(tensor, "shape"), "dim")[[1]]

  # values in the tensor are in C ordering, so we need to move to fortran ordering.
  arr <- aperm(array(values, dim = rev(dims)), perm = rev(seq_along(dims)))

  df <- as.data.frame(arr)
  colnames(df) <- c("lower", "upper", "count")

  df
}

#' @export
value.tfevents_summary_values_audio <- function(x, ...) {
  # audio is stored in the tensor field.
  tensor <- field(x, "tensor")

  # content is serialized as a wave encoded tensor.
  content <- field(tensor, "content")[[1]]

  # in order to read, we first write the bytes to a file, then read with wav::read_wav
  tmp <- tempfile(fileext = ".wav")
  on.exit({file.remove(tmp)}, add = TRUE)
  writeBin(object = content[[1]], con = tmp)

  wav::read_wav(tmp)
}

#' @export
value.tfevents_summary_values_images <- function(x, ...) {
  # images are stored as tensors in the summary proto
  image <- field(x, "tensor")

  # the stored tensor is a 1d vector with 3 elements.
  # first 2 describe the dimensions of the image (width first, then height,
  # each as a single byte — matching the writer in R/image.R), the third
  # contains a PNG encoded image.
  buffer <- field(image, "content")[[1]]
  width <- readBin(buffer[[1]], what = integer(), size = 1)
  height <- readBin(buffer[[2]], what = integer(), size = 1)
  img <- png::readPNG(buffer[[3]])

  # png::readPNG() returns an array with dim (height, width[, channels]), so
  # compare in (height, width) order. The previous check compared against
  # (width, height), which only passed for square images.
  if (!identical(dim(img)[1:2], c(height, width))) {
    cli::cli_abort("An error occurred. Please report an issue in the tfevents repository.")
  }

  img
}

#' @export
value.tfevents_summary_values_text <- function(x, ...) {
  # Text is stored as a single bytestring inside the tensor content.
  tensor <- field(x, "tensor")
  rawToChar(field(tensor, "content")[[1]][[1]])
}

# Returns the plugin name recorded in a summary's metadata; errors on any
# other input class.
plugin <- function(summary) {
  if (!inherits(summary, "tfevents_summary_values")) {
    cli::cli_abort(c(
      "{.arg summary} must be {.cls tfevents_summary_values}",
      i = "Got object with {.cls {class(summary)}}."
    ))
  }

  field(field(summary, "metadata"), "plugin_name")
}

# Sentinel returned when every iterator has been fully consumed. A symbol is
# used because it cannot collide with any value read from an events file.
exhausted <- function() {
  as.symbol(".__exhausted__.")
}

is_exhausted <- function(x) {
  identical(x, exhausted())
}

# Overwrites the run field of an event vctr with the given run name.
fill_run_field <- function(event, run) {
  field(event, "run") <- rep(run, vec_size(event))
  event
}

# Creates one C++-backed file iterator per events file; the run name is the
# file's directory relative to the logdir root.
create_iterators <- function(files, logdir) {
  lapply(files, function(name) {
    create_event_file_iterator(
      name,
      fs::path_dir(fs::path_rel(name, logdir))
    )
  })
}