├── .gitignore
├── .Rbuildignore
├── vignettes
│   └── rTensor_Vignette.pdf
├── rTensor.Rproj
├── man
│   ├── rs_fold.Rd
│   ├── cs_fold.Rd
│   ├── dim-methods.Rd
│   ├── show-methods.Rd
│   ├── head-methods.Rd
│   ├── print-methods.Rd
│   ├── tail-methods.Rd
│   ├── fnorm-methods.Rd
│   ├── cs_unfold-methods.Rd
│   ├── rs_unfold-methods.Rd
│   ├── t_svd_reconstruct.Rd
│   ├── initialize-methods.Rd
│   ├── rand_tensor.Rd
│   ├── khatri_rao.Rd
│   ├── innerProd-methods.Rd
│   ├── kronecker_list.Rd
│   ├── vec-methods.Rd
│   ├── hamadard_list.Rd
│   ├── tperm-methods.Rd
│   ├── as.tensor.Rd
│   ├── khatri_rao_list.Rd
│   ├── t-methods.Rd
│   ├── modeSum-methods.Rd
│   ├── modeMean-methods.Rd
│   ├── unmatvec.Rd
│   ├── matvec-methods.Rd
│   ├── t_mult.Rd
│   ├── k_fold.Rd
│   ├── k_unfold-methods.Rd
│   ├── extract-methods.Rd
│   ├── Ops-methods.Rd
│   ├── ttm.Rd
│   ├── t_svd.Rd
│   ├── fold.Rd
│   ├── unfold-methods.Rd
│   ├── hosvd.Rd
│   ├── ttl.Rd
│   ├── pvd.Rd
│   ├── cp.Rd
│   ├── mpca.Rd
│   ├── tucker.Rd
│   ├── rTensor-package.Rd
│   ├── tucker.nonneg.Rd
│   └── Tensor-class.Rd
├── .travis.yml
├── NEWS
├── README.md
├── NAMESPACE
├── DESCRIPTION
├── tests
│   └── test_tucker_nonneg.R
└── R
    ├── rTensor_Misc.R
    ├── rTensor_Class.R
    └── rTensor_Decomp.R
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .RData 4 | -------------------------------------------------------------------------------- /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^\.travis\.yml$ 4 | ^\.lintr$ 5 | -------------------------------------------------------------------------------- /vignettes/rTensor_Vignette.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jamesyili/rTensor/HEAD/vignettes/rTensor_Vignette.pdf -------------------------------------------------------------------------------- /rTensor.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: knitr 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source 21 | PackageRoxygenize: rd,collate,namespace 22 | -------------------------------------------------------------------------------- /man/rs_fold.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{rs_fold} 4 | \alias{rs_fold} 5 | \title{Row Space Folding of Matrix} 6 | \usage{ 7 | rs_fold(mat, m = NULL, modes = NULL) 8 | } 9 | \arguments{ 10 | \item{mat}{matrix to be folded} 11 | 12 | \item{m}{the mode corresponding to rs_unfold} 13 | 14 | \item{modes}{the original modes of the tensor} 15 | } 16 | \description{ 17 | DEPRECATED. Please see \code{\link{k_fold}}.
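A hedged migration sketch (illustrative only; it assumes, per the deprecation note above, that k_fold is the drop-in replacement):

tnsr <- rand_tensor(c(3,4,5))
mat <- rs_unfold(tnsr, m = 2)
# folding back with k_fold should recover the original tensor, just as rs_fold did
identical(k_fold(mat, m = 2, modes = c(3,4,5)), tnsr)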
18 | } 19 | 20 | -------------------------------------------------------------------------------- /man/cs_fold.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{cs_fold} 4 | \alias{cs_fold} 5 | \title{Column Space Folding of Matrix} 6 | \usage{ 7 | cs_fold(mat, m = NULL, modes = NULL) 8 | } 9 | \arguments{ 10 | \item{mat}{matrix to be folded} 11 | 12 | \item{m}{the mode corresponding to cs_unfold} 13 | 14 | \item{modes}{the original modes of the tensor} 15 | } 16 | \description{ 17 | DEPRECATED. Please see \code{\link{unmatvec}}. 18 | } 19 | 20 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: r 2 | # see also https://docs.travis-ci.com/user/languages/r 3 | 4 | # Using the package cache to store R package dependencies can significantly 5 | # speed up build times and is recommended for most builds. 6 | cache: 7 | - apt 8 | - packages 9 | 10 | # You will need to set sudo: false in order to use the container based builds 11 | # and package caching. 12 | sudo: false 13 | 14 | warnings_are_errors: true 15 | 16 | notifications: 17 | email: 18 | on_success: change 19 | on_failure: change 20 | -------------------------------------------------------------------------------- /man/dim-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{dim-methods} 5 | \alias{dim,Tensor-method} 6 | \alias{dim-methods} 7 | \title{Mode Getter for Tensor} 8 | \usage{ 9 | \S4method{dim}{Tensor}(x) 10 | } 11 | \arguments{ 12 | \item{x}{the Tensor instance} 13 | } 14 | \value{ 15 | an integer vector of the modes associated with \code{x} 16 | } 17 | \description{ 18 | Return the vector of modes from a tensor 19 | } 20 | \details{ 21 | \code{dim(x)} 22 | } 23 | \examples{ 24 | tnsr <- rand_tensor() 25 | dim(tnsr) 26 | } 27 | 28 | -------------------------------------------------------------------------------- /NEWS: -------------------------------------------------------------------------------- 1 | What's New? 2 | 3 | Version 1.2: 4 | Major Fixes: 5 | - updated the svd() usage inside hosvd, tucker, mpca, etc.
to give the right number of eigenvectors for modes that were unbalanced 6 | - Revamped the naming scheme of the unfolding operations (see JSS paper draft/Vignette) 7 | 8 | Minor Fixes: 9 | - updated the percentage calculations that were given in the outputs (previously reported as decimals) 10 | 11 | Additions: 12 | - JSS draft/Vignette 13 | 14 | 15 | Version 1.1: 16 | Removed getters (getData, getModes, getNumModes), sweep methods 17 | Fixed [major] pvd, tucker, mpca 18 | Fixed [minor] t_svd, cp 19 | Added tperm, image, [<- methods 20 | -------------------------------------------------------------------------------- /man/show-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{show-methods} 5 | \alias{show,Tensor-method} 6 | \alias{show-methods} 7 | \title{Show for Tensor} 8 | \usage{ 9 | \S4method{show}{Tensor}(object) 10 | } 11 | \arguments{ 12 | \item{object}{the Tensor instance} 13 | 14 | \item{...}{additional parameters to be passed into show()} 15 | } 16 | \description{ 17 | Extend show for Tensor 18 | } 19 | \details{ 20 | \code{show(object)} 21 | } 22 | \examples{ 23 | tnsr <- rand_tensor() 24 | tnsr 25 | } 26 | \seealso{ 27 | \code{\link{print}} 28 | } 29 | 30 | -------------------------------------------------------------------------------- /man/head-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{head-methods} 5 | \alias{head,Tensor-method} 6 | \alias{head-methods} 7 | \title{Head for Tensor} 8 | \usage{ 9 | \S4method{head}{Tensor}(x, ...) 10 | } 11 | \arguments{ 12 | \item{x}{the Tensor instance} 13 | 14 | \item{...}{additional parameters to be passed into head()} 15 | } 16 | \description{ 17 | Extend head for Tensor 18 | } 19 | \details{ 20 | \code{head(x,...)} 21 | } 22 | \examples{ 23 | tnsr <- rand_tensor() 24 | head(tnsr) 25 | } 26 | \seealso{ 27 | \code{\link{tail-methods}} 28 | } 29 | 30 | -------------------------------------------------------------------------------- /man/print-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{print-methods} 5 | \alias{print,Tensor-method} 6 | \alias{print-methods} 7 | \title{Print for Tensor} 8 | \usage{ 9 | \S4method{print}{Tensor}(x, ...) 10 | } 11 | \arguments{ 12 | \item{x}{the Tensor instance} 13 | 14 | \item{...}{additional parameters to be passed into print()} 15 | } 16 | \description{ 17 | Extend print for Tensor 18 | } 19 | \details{ 20 | \code{print(x,...)} 21 | } 22 | \examples{ 23 | tnsr <- rand_tensor() 24 | print(tnsr) 25 | } 26 | \seealso{ 27 | \code{\link{show}} 28 | } 29 | 30 | -------------------------------------------------------------------------------- /man/tail-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{tail-methods} 5 | \alias{tail,Tensor-method} 6 | \alias{tail-methods} 7 | \title{Tail for Tensor} 8 | \usage{ 9 | \S4method{tail}{Tensor}(x, ...)
10 | } 11 | \arguments{ 12 | \item{x}{the Tensor instance} 13 | 14 | \item{...}{additional parameters to be passed into tail()} 15 | } 16 | \description{ 17 | Extend tail for Tensor 18 | } 19 | \details{ 20 | \code{tail(x,...)} 21 | } 22 | \examples{ 23 | tnsr <- rand_tensor() 24 | tail(tnsr) 25 | } 26 | \seealso{ 27 | \code{\link{head-methods}} 28 | } 29 | 30 | -------------------------------------------------------------------------------- /man/fnorm-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{fnorm-methods} 5 | \alias{fnorm} 6 | \alias{fnorm,Tensor-method} 7 | \alias{fnorm-methods} 8 | \title{Tensor Frobenius Norm} 9 | \usage{ 10 | fnorm(tnsr) 11 | 12 | \S4method{fnorm}{Tensor}(tnsr) 13 | } 14 | \arguments{ 15 | \item{tnsr}{the Tensor instance} 16 | } 17 | \value{ 18 | numeric Frobenius norm of \code{tnsr} 19 | } 20 | \description{ 21 | Returns the Frobenius norm of the Tensor instance. 22 | } 23 | \details{ 24 | \code{fnorm(tnsr)} 25 | } 26 | \examples{ 27 | tnsr <- rand_tensor() 28 | fnorm(tnsr) 29 | } 30 | 31 | -------------------------------------------------------------------------------- /man/cs_unfold-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{cs_unfold-methods} 5 | \alias{cs_unfold} 6 | \alias{cs_unfold,Tensor-method} 7 | \alias{cs_unfold-methods} 8 | \title{Tensor Column Space Unfolding} 9 | \usage{ 10 | cs_unfold(tnsr, m) 11 | 12 | \S4method{cs_unfold}{Tensor}(tnsr, m = NULL) 13 | } 14 | \arguments{ 15 | \item{tnsr}{Tensor instance} 16 | 17 | \item{m}{mode to be unfolded on} 18 | } 19 | \description{ 20 | DEPRECATED. Please see \code{\link{matvec-methods}} and \code{\link{unfold-methods}}. 21 | } 22 | \details{ 23 | \code{cs_unfold(tnsr,m=NULL)} 24 | } 25 | 26 | -------------------------------------------------------------------------------- /man/rs_unfold-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{rs_unfold-methods} 5 | \alias{rs_unfold} 6 | \alias{rs_unfold,Tensor-method} 7 | \alias{rs_unfold-methods} 8 | \title{Tensor Row Space Unfolding} 9 | \usage{ 10 | rs_unfold(tnsr, m) 11 | 12 | \S4method{rs_unfold}{Tensor}(tnsr, m = NULL) 13 | } 14 | \arguments{ 15 | \item{tnsr}{Tensor instance} 16 | 17 | \item{m}{mode to be unfolded on} 18 | } 19 | \description{ 20 | DEPRECATED. Please see \code{\link{k_unfold-methods}} and \code{\link{unfold-methods}}.
21 | } 22 | \details{ 23 | \code{rs_unfold(tnsr,m=NULL)} 24 | } 25 | 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | rTensor 2 | ======= 3 | 4 | rTensor v1.3: See NEWS for changelog 5 | 6 | [![Build Status](https://travis-ci.org/vsimko/rTensor.svg)](https://travis-ci.org/vsimko/rTensor) 7 | [![codecov.io](https://codecov.io/github/vsimko/rTensor/coverage.svg?branch=master)](https://codecov.io/github/vsimko/rTensor?branch=master) 8 | [![CRAN_Status_Badge](http://www.r-pkg.org/badges/version/rTensor)](http://cran.r-project.org/package=rTensor) 9 | [![Issue Stats](http://issuestats.com/github/vsimko/rTensor/badge/pr)](http://issuestats.com/github/vsimko/rTensor) 10 | [![Issue Stats](http://issuestats.com/github/vsimko/rTensor/badge/issue)](http://issuestats.com/github/vsimko/rTensor) 11 | -------------------------------------------------------------------------------- /man/t_svd_reconstruct.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Decomp.R 3 | \name{t_svd_reconstruct} 4 | \alias{t_svd_reconstruct} 5 | \title{Reconstruct Tensor From TSVD} 6 | \usage{ 7 | t_svd_reconstruct(L) 8 | } 9 | \arguments{ 10 | \item{L}{list that is an output from \code{\link{t_svd}}} 11 | } 12 | \value{ 13 | a 3-Tensor 14 | } 15 | \description{ 16 | Reconstruct the original 3-Tensor after it has been decomposed into \code{U, S, V} via \code{\link{t_svd}}. 17 | } 18 | \examples{ 19 | tnsr <- rand_tensor(c(10,10,10)) 20 | tsvdD <- t_svd(tnsr) 21 | 1 - fnorm(t_svd_reconstruct(tsvdD)-tnsr)/fnorm(tnsr) 22 | } 23 | \seealso{ 24 | \code{\link{t_svd}} 25 | } 26 | 27 | -------------------------------------------------------------------------------- /man/initialize-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{initialize-methods} 5 | \alias{initialize,Tensor-method} 6 | \alias{initialize-methods} 7 | \title{Initializes a Tensor instance} 8 | \usage{ 9 | \S4method{initialize}{Tensor}(.Object, num_modes = NULL, modes = NULL, 10 | data = NULL) 11 | } 12 | \arguments{ 13 | \item{.Object}{the tensor object} 14 | 15 | \item{num_modes}{number of modes of the tensor} 16 | 17 | \item{modes}{modes of the tensor} 18 | 19 | \item{data}{can be vector, matrix, or array} 20 | } 21 | \description{ 22 | Not designed to be called by the user. Use \code{as.tensor} instead. 23 | } 24 | \seealso{ 25 | \code{as.tensor} 26 | } 27 | 28 | -------------------------------------------------------------------------------- /man/rand_tensor.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{rand_tensor} 4 | \alias{rand_tensor} 5 | \title{Tensor with Random Entries} 6 | \usage{ 7 | rand_tensor(modes = c(3, 4, 5), drop = FALSE) 8 | } 9 | \arguments{ 10 | \item{modes}{the modes of the output Tensor} 11 | 12 | \item{drop}{whether or not modes equal to 1 should be dropped} 13 | } 14 | \value{ 15 | a Tensor object with modes given by \code{modes} 16 | } 17 | \description{ 18 | Generate a Tensor with specified modes with iid normal(0,1) entries. 
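For intuition, a minimal sketch of what this amounts to (illustrative; not necessarily the internal code):

modes <- c(3,4,5)
# fill an array with iid N(0,1) draws and wrap it as a Tensor
tnsr <- as.tensor(array(rnorm(prod(modes)), dim = modes))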
19 | } 20 | \note{ 21 | Default \code{rand_tensor()} generates a 3-Tensor with modes \code{c(3,4,5)}. 22 | } 23 | \examples{ 24 | rand_tensor() 25 | rand_tensor(c(4,4,4)) 26 | rand_tensor(c(10,2,1),TRUE) 27 | } 28 | 29 | -------------------------------------------------------------------------------- /man/khatri_rao.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{khatri_rao} 4 | \alias{khatri_rao} 5 | \title{Khatri-Rao Product} 6 | \usage{ 7 | khatri_rao(x, y) 8 | } 9 | \arguments{ 10 | \item{x}{first matrix} 11 | 12 | \item{y}{second matrix} 13 | } 14 | \value{ 15 | matrix that is the Khatri-Rao product 16 | } 17 | \description{ 18 | Returns the Khatri-Rao (column-wise Kronecker) product of two matrices. If the inputs are vectors then this is the same as the Kronecker product. 19 | } 20 | \note{ 21 | The number of columns must match in the two inputs. 22 | } 23 | \examples{ 24 | dim(khatri_rao(matrix(runif(12),ncol=4),matrix(runif(12),ncol=4))) 25 | } 26 | \seealso{ 27 | \code{\link{kronecker}}, \code{\link{khatri_rao_list}} 28 | } 29 | 30 | -------------------------------------------------------------------------------- /man/innerProd-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{innerProd-methods} 5 | \alias{innerProd} 6 | \alias{innerProd,Tensor,Tensor-method} 7 | \alias{innerProd-methods} 8 | \title{Tensors Inner Product} 9 | \usage{ 10 | innerProd(tnsr1, tnsr2) 11 | 12 | \S4method{innerProd}{Tensor,Tensor}(tnsr1, tnsr2) 13 | } 14 | \arguments{ 15 | \item{tnsr1}{first Tensor instance} 16 | 17 | \item{tnsr2}{second Tensor instance} 18 | } 19 | \value{ 20 | inner product between \code{tnsr1} and \code{tnsr2} 21 | } 22 | \description{ 23 | Returns the inner product between two Tensors 24 | } 25 | \details{ 26 | \code{innerProd(tnsr1,tnsr2)} 27 | } 28 | \examples{ 29 | tnsr1 <- rand_tensor() 30 | tnsr2 <- rand_tensor() 31 | innerProd(tnsr1,tnsr2) 32 | } 33 | 34 | -------------------------------------------------------------------------------- /man/kronecker_list.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{kronecker_list} 4 | \alias{kronecker_list} 5 | \title{List Kronecker Product} 6 | \usage{ 7 | kronecker_list(L) 8 | } 9 | \arguments{ 10 | \item{L}{list of matrices or vectors} 11 | } 12 | \value{ 13 | matrix that is the Kronecker product 14 | } 15 | \description{ 16 | Returns the Kronecker product from a list of matrices or vectors. Commonly used for n-mode products and various Tensor decompositions.
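As an illustrative check (assuming the list version amounts to folding base kronecker over the list):

A <- matrix(runif(6), ncol = 2)
B <- matrix(runif(6), ncol = 3)
all.equal(kronecker_list(list(A, B)), kronecker(A, B))  # expected TRUE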
17 | } 18 | \examples{ 19 | smalllizt <- list('mat1' = matrix(runif(12),ncol=4), 20 | 'mat2' = matrix(runif(12),ncol=4), 21 | 'mat3' = matrix(runif(12),ncol=4)) 22 | dim(kronecker_list(smalllizt)) 23 | } 24 | \seealso{ 25 | \code{\link{hamadard_list}}, \code{\link{khatri_rao_list}}, \code{\link{kronecker}} 26 | } 27 | 28 | -------------------------------------------------------------------------------- /man/vec-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{vec-methods} 5 | \alias{vec} 6 | \alias{vec,Tensor-method} 7 | \alias{vec-methods} 8 | \title{Tensor Vec} 9 | \usage{ 10 | vec(tnsr) 11 | 12 | \S4method{vec}{Tensor}(tnsr) 13 | } 14 | \arguments{ 15 | \item{tnsr}{the Tensor instance} 16 | } 17 | \value{ 18 | vector with length \code{prod(tnsr@modes)} 19 | } 20 | \description{ 21 | Turns the tensor into a single vector, following the convention that earlier indices vary faster than later indices (i.e., column-major order). 22 | } 23 | \details{ 24 | \code{vec(tnsr)} 25 | } 26 | \examples{ 27 | tnsr <- rand_tensor(c(4,5,6,7)) 28 | vec(tnsr) 29 | } 30 | \references{ 31 | T. Kolda, B. Bader, "Tensor decomposition and applications". SIAM Applied Mathematics and Applications 2009. 32 | } 33 | 34 | -------------------------------------------------------------------------------- /man/hamadard_list.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{hamadard_list} 4 | \alias{hamadard_list} 5 | \title{List Hamadard Product} 6 | \usage{ 7 | hamadard_list(L) 8 | } 9 | \arguments{ 10 | \item{L}{list of matrices or vectors} 11 | } 12 | \value{ 13 | matrix that is the Hamadard product 14 | } 15 | \description{ 16 | Returns the Hamadard (element-wise) product from a list of matrices or vectors. Commonly used for n-mode products and various Tensor decompositions. 17 | } 18 | \note{ 19 | The modes/dimensions of each element in the list must match. 20 | } 21 | \examples{ 22 | lizt <- list('mat1' = matrix(runif(40),ncol=4), 23 | 'mat2' = matrix(runif(40),ncol=4), 24 | 'mat3' = matrix(runif(40),ncol=4)) 25 | dim(hamadard_list(lizt)) 26 | } 27 | \seealso{ 28 | \code{\link{kronecker_list}}, \code{\link{khatri_rao_list}} 29 | } 30 | 31 | -------------------------------------------------------------------------------- /man/tperm-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{tperm-methods} 5 | \alias{tperm} 6 | \alias{tperm,Tensor-method} 7 | \alias{tperm-methods} 8 | \title{Mode Permutation for Tensor} 9 | \usage{ 10 | tperm(tnsr, perm, ...) 11 | 12 | \S4method{tperm}{Tensor}(tnsr, perm, ...) 13 | } 14 | \arguments{ 15 | \item{tnsr}{the Tensor instance} 16 | 17 | \item{perm}{the new permutation of the current modes} 18 | 19 | \item{...}{additional parameters to be passed into \code{aperm}} 20 | } 21 | \description{ 22 | Overloads \code{aperm} for Tensor class for convenience.
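A quick illustrative check of the aperm correspondence:

tnsr <- rand_tensor(c(3,4,5))
# permuting the modes of the Tensor permutes the underlying array the same way
identical(tperm(tnsr, perm = c(2,1,3))@data, aperm(tnsr@data, c(2,1,3)))  # expected TRUE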
23 | } 24 | \details{ 25 | \code{tperm(tnsr,perm=NULL,...)} 26 | } 27 | \examples{ 28 | tnsr <- rand_tensor(c(3,4,5)) 29 | dim(tperm(tnsr,perm=c(2,1,3))) 30 | dim(tperm(tnsr,perm=c(1,3,2))) 31 | } 32 | \seealso{ 33 | \code{\link{aperm}} 34 | } 35 | 36 | -------------------------------------------------------------------------------- /man/as.tensor.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \name{as.tensor} 4 | \alias{as.tensor} 5 | \title{Tensor Conversion} 6 | \usage{ 7 | as.tensor(x, drop = FALSE) 8 | } 9 | \arguments{ 10 | \item{x}{an instance of \code{array}, \code{matrix}, or \code{vector}} 11 | 12 | \item{drop}{whether or not modes of 1 should be dropped} 13 | } 14 | \value{ 15 | a \code{\link{Tensor-class}} object 16 | } 17 | \description{ 18 | Create a \code{\link{Tensor-class}} object from an \code{array}, \code{matrix}, or \code{vector}. 19 | } 20 | \examples{ 21 | #From vector 22 | vec <- runif(100); vecT <- as.tensor(vec); vecT 23 | #From matrix 24 | mat <- matrix(runif(1000),nrow=100,ncol=10) 25 | matT <- as.tensor(mat); matT 26 | #From array 27 | indices <- c(10,20,30,40) 28 | arr <- array(runif(prod(indices)), dim = indices) 29 | arrT <- as.tensor(arr); arrT 30 | } 31 | 32 | -------------------------------------------------------------------------------- /man/khatri_rao_list.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{khatri_rao_list} 4 | \alias{khatri_rao_list} 5 | \title{List Khatri-Rao Product} 6 | \usage{ 7 | khatri_rao_list(L, reverse = FALSE) 8 | } 9 | \arguments{ 10 | \item{L}{list of matrices or vectors} 11 | 12 | \item{reverse}{whether or not to reverse the order} 13 | } 14 | \value{ 15 | matrix that is the Khatri-Rao product 16 | } 17 | \description{ 18 | Returns the Khatri-Rao product from a list of matrices or vectors. Commonly used for n-mode products and various Tensor decompositions. 19 | } 20 | \note{ 21 | The number of columns must match in every element of the input list. 
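Since the Khatri-Rao product is associative, the list version should agree with chaining the pairwise khatri_rao (illustrative check):

A <- matrix(runif(12), ncol = 4)
B <- matrix(runif(8), ncol = 4)
C <- matrix(runif(16), ncol = 4)
all.equal(khatri_rao_list(list(A, B, C)), khatri_rao(khatri_rao(A, B), C))  # expected TRUE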
22 | } 23 | \examples{ 24 | smalllizt <- list('mat1' = matrix(runif(12),ncol=4), 25 | 'mat2' = matrix(runif(12),ncol=4), 26 | 'mat3' = matrix(runif(12),ncol=4)) 27 | dim(khatri_rao_list(smalllizt)) 28 | } 29 | \seealso{ 30 | \code{\link{khatri_rao}} 31 | } 32 | 33 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | export(as.tensor) 4 | export(cp) 5 | export(cs_fold) 6 | export(cs_unfold) 7 | export(fnorm) 8 | export(fold) 9 | export(hamadard_list) 10 | export(hosvd) 11 | export(innerProd) 12 | export(k_fold) 13 | export(k_unfold) 14 | export(khatri_rao) 15 | export(khatri_rao_list) 16 | export(kronecker_list) 17 | export(matvec) 18 | export(modeMean) 19 | export(modeSum) 20 | export(mpca) 21 | export(pvd) 22 | export(rand_tensor) 23 | export(rs_fold) 24 | export(rs_unfold) 25 | export(t_mult) 26 | export(t_svd) 27 | export(t_svd_reconstruct) 28 | export(tperm) 29 | export(ttl) 30 | export(ttm) 31 | export(tucker) 32 | export(tucker.nonneg) 33 | export(unfold) 34 | export(unmatvec) 35 | export(vec) 36 | exportClasses(Tensor) 37 | exportMethods("[") 38 | exportMethods(Ops) 39 | exportMethods(dim) 40 | exportMethods(head) 41 | exportMethods(print) 42 | exportMethods(show) 43 | exportMethods(t) 44 | exportMethods(tail) 45 | -------------------------------------------------------------------------------- /man/t-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{t-methods} 5 | \alias{t,Tensor-method} 6 | \alias{t-methods} 7 | \title{Tensor Transpose} 8 | \usage{ 9 | \S4method{t}{Tensor}(x) 10 | } 11 | \arguments{ 12 | \item{x}{a 3-tensor} 13 | } 14 | \value{ 15 | tensor transpose of \code{x} 16 | } 17 | \description{ 18 | Implements the tensor transpose based on block circulant matrices (Kilmer et al. 2013) for 3-tensors. 19 | } 20 | \details{ 21 | \code{t(x)} 22 | } 23 | \examples{ 24 | tnsr <- rand_tensor() 25 | identical(t(tnsr)@data[,,1],t(tnsr@data[,,1])) 26 | identical(t(tnsr)@data[,,2],t(tnsr@data[,,5])) 27 | identical(t(t(tnsr)),tnsr) 28 | } 29 | \references{ 30 | M. Kilmer, K. Braman, N. Hao, and R. Hoover, "Third-order tensors as operators on matrices: a theoretical and computational framework with applications in imaging". SIAM Journal on Matrix Analysis and Applications 2013. 
31 | } 32 | 33 | -------------------------------------------------------------------------------- /man/modeSum-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{modeSum-methods} 5 | \alias{modeSum} 6 | \alias{modeSum,Tensor-method} 7 | \alias{modeSum-methods} 8 | \title{Tensor Sum Across Single Mode} 9 | \usage{ 10 | modeSum(tnsr, m, drop) 11 | 12 | \S4method{modeSum}{Tensor}(tnsr, m = NULL, drop = FALSE) 13 | } 14 | \arguments{ 15 | \item{tnsr}{the Tensor instance} 16 | 17 | \item{m}{the index of the mode to sum across} 18 | 19 | \item{drop}{whether or not mode m should be dropped} 20 | } 21 | \value{ 22 | K-1 or K tensor, where \code{K = tnsr@num_modes} 23 | } 24 | \description{ 25 | Given a mode for a K-tensor, this returns the K-1 tensor resulting from summing across that particular mode. 26 | } 27 | \details{ 28 | \code{modeSum(tnsr,m=NULL,drop=FALSE)} 29 | } 30 | \examples{ 31 | tnsr <- rand_tensor() 32 | modeSum(tnsr,3,drop=TRUE) 33 | } 34 | \seealso{ 35 | \code{\link{modeMean}} 36 | } 37 | 38 | -------------------------------------------------------------------------------- /man/modeMean-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{modeMean-methods} 5 | \alias{modeMean} 6 | \alias{modeMean,Tensor-method} 7 | \alias{modeMean-methods} 8 | \title{Tensor Mean Across Single Mode} 9 | \usage{ 10 | modeMean(tnsr, m, drop) 11 | 12 | \S4method{modeMean}{Tensor}(tnsr, m = NULL, drop = FALSE) 13 | } 14 | \arguments{ 15 | \item{tnsr}{the Tensor instance} 16 | 17 | \item{m}{the index of the mode to average across} 18 | 19 | \item{drop}{whether or not mode m should be dropped} 20 | } 21 | \value{ 22 | K-1 or K Tensor, where \code{K = tnsr@num_modes} 23 | } 24 | \description{ 25 | Given a mode for a K-tensor, this returns the K-1 tensor resulting from taking the mean across that particular mode. 26 | } 27 | \details{ 28 | \code{modeMean(tnsr,m=NULL,drop=FALSE)} 29 | } 30 | \examples{ 31 | tnsr <- rand_tensor() 32 | modeMean(tnsr,1,drop=TRUE) 33 | } 34 | \seealso{ 35 | \code{\link{modeSum}} 36 | } 37 | 38 | -------------------------------------------------------------------------------- /man/unmatvec.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{unmatvec} 4 | \alias{unmatvec} 5 | \title{Unmatvec Folding of Matrix} 6 | \usage{ 7 | unmatvec(mat, modes = NULL) 8 | } 9 | \arguments{ 10 | \item{mat}{matrix to be folded into a Tensor} 11 | 12 | \item{modes}{the modes of the output Tensor} 13 | } 14 | \value{ 15 | Tensor object with modes given by \code{modes} 16 | } 17 | \description{ 18 | The inverse operation to \code{\link{matvec-methods}}, turning a matrix into a Tensor. For a full account of matrix folding/unfolding operations, consult Kolda and Bader (2009). 19 | } 20 | \examples{ 21 | tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 22 | matT1<-matvec(tnsr) 23 | identical(unmatvec(matT1,modes=c(3,4,5)),tnsr) 24 | } 25 | \references{ 26 | T. Kolda, B. Bader, "Tensor decomposition and applications". SIAM Applied Mathematics and Applications 2009.
27 | } 28 | \seealso{ 29 | \code{\link{matvec-methods}}, \code{\link{fold}}, \code{\link{k_fold}} 30 | } 31 | 32 | -------------------------------------------------------------------------------- /man/matvec-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{matvec-methods} 5 | \alias{matvec} 6 | \alias{matvec,Tensor-method} 7 | \alias{matvec-methods} 8 | \title{Tensor Matvec Unfolding} 9 | \usage{ 10 | matvec(tnsr) 11 | 12 | \S4method{matvec}{Tensor}(tnsr) 13 | } 14 | \arguments{ 15 | \item{tnsr}{the Tensor instance} 16 | } 17 | \value{ 18 | matrix with \code{tnsr@modes[1]*tnsr@modes[3]} rows and \code{tnsr@modes[2]} columns 19 | } 20 | \description{ 21 | For 3-tensors only. Stacks the frontal slices along the third mode. This is the prevalent unfolding for T-SVD and T-MULT based on block circulant matrices. 22 | } 23 | \details{ 24 | \code{matvec(tnsr)} 25 | } 26 | \examples{ 27 | tnsr <- rand_tensor(c(2,3,4)) 28 | matT1<- matvec(tnsr) 29 | } 30 | \references{ 31 | M. Kilmer, K. Braman, N. Hao, and R. Hoover, "Third-order tensors as operators on matrices: a theoretical and computational framework with applications in imaging". SIAM Journal on Matrix Analysis and Applications 2013. 32 | } 33 | \seealso{ 34 | \code{\link{k_unfold-methods}} and \code{\link{unfold-methods}} 35 | } 36 | 37 | -------------------------------------------------------------------------------- /man/t_mult.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{t_mult} 4 | \alias{t_mult} 5 | \title{Tensor Multiplication (T-MULT)} 6 | \usage{ 7 | t_mult(x, y) 8 | } 9 | \arguments{ 10 | \item{x}{a 3-tensor} 11 | 12 | \item{y}{another 3-tensor} 13 | } 14 | \value{ 15 | tensor product between \code{x} and \code{y} 16 | } 17 | \description{ 18 | Implements T-MULT based on block circulant matrices (Kilmer et al. 2013) for 3-tensors. 19 | } 20 | \details{ 21 | Uses the Fast Fourier Transform (FFT) speed-up suggested by Kilmer et al. 2013 instead of explicitly constructing the block circulant matrix. For the mathematical details of T-MULT, see Kilmer et al. (2013). 22 | } 23 | \note{ 24 | This only works (so far) between 3-Tensors. 25 | } 26 | \examples{ 27 | tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 28 | tnsr2 <- new("Tensor",3L,c(4L,3L,5L),data=runif(60)) 29 | t_mult(tnsr, tnsr2) 30 | } 31 | \references{ 32 | M. Kilmer, K. Braman, N. Hao, and R. Hoover, "Third-order tensors as operators on matrices: a theoretical and computational framework with applications in imaging". SIAM Journal on Matrix Analysis and Applications 2013.
33 | } 34 | 35 | -------------------------------------------------------------------------------- /man/k_fold.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{k_fold} 4 | \alias{k_fold} 5 | \title{k-mode Folding of Matrix} 6 | \usage{ 7 | k_fold(mat, m = NULL, modes = NULL) 8 | } 9 | \arguments{ 10 | \item{mat}{matrix to be folded into a Tensor} 11 | 12 | \item{m}{the index of the mode that is mapped onto the row indices} 13 | 14 | \item{modes}{the modes of the output Tensor} 15 | } 16 | \value{ 17 | Tensor object with modes given by \code{modes} 18 | } 19 | \description{ 20 | k-mode folding of a matrix into a Tensor. This is the inverse function to \code{k_unfold} in the m mode. In particular, \code{k_fold(k_unfold(tnsr, m),m,tnsr@modes)} will result in the original Tensor. 21 | } 22 | \details{ 23 | This is a wrapper function to \code{\link{fold}}. 24 | } 25 | \examples{ 26 | tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 27 | matT2<-k_unfold(tnsr,m=2) 28 | identical(k_fold(matT2,m=2,modes=c(3,4,5)),tnsr) 29 | } 30 | \references{ 31 | T. Kolda, B. Bader, "Tensor decomposition and applications". SIAM Applied Mathematics and Applications 2009. 32 | } 33 | \seealso{ 34 | \code{\link{k_unfold-methods}}, \code{\link{fold}}, \code{\link{unmatvec}} 35 | } 36 | 37 | -------------------------------------------------------------------------------- /man/k_unfold-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{k_unfold-methods} 5 | \alias{k_unfold} 6 | \alias{k_unfold,Tensor-method} 7 | \alias{k_unfold-methods} 8 | \title{Tensor k-mode Unfolding} 9 | \usage{ 10 | k_unfold(tnsr, m) 11 | 12 | \S4method{k_unfold}{Tensor}(tnsr, m = NULL) 13 | } 14 | \arguments{ 15 | \item{tnsr}{the Tensor instance} 16 | 17 | \item{m}{the index of the mode to unfold on} 18 | } 19 | \value{ 20 | matrix with \code{tnsr@modes[m]} rows and \code{prod(tnsr@modes[-m])} columns 21 | } 22 | \description{ 23 | Unfolding of a tensor by mapping the kth mode (specified through parameter \code{m}) onto the row space, and all other modes onto the column space. This is the most common type of unfolding operation for the Tucker decomposition and its variants. Also known as k-mode matricization. 24 | } 25 | \details{ 26 | \code{k_unfold(tnsr,m=NULL)} 27 | } 28 | \examples{ 29 | tnsr <- rand_tensor() 30 | matT2<-k_unfold(tnsr,m=2) 31 | } 32 | \references{ 33 | T. Kolda and B. Bader, "Tensor decomposition and applications". SIAM Applied Mathematics and Applications 2009. 34 | } 35 | \seealso{ 36 | \code{\link{matvec-methods}} and \code{\link{unfold-methods}} 37 | } 38 | 39 | -------------------------------------------------------------------------------- /man/extract-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{[-methods} 5 | \alias{[,Tensor-method} 6 | \alias{[-methods} 7 | \alias{[<-,Tensor-method} 8 | \alias{extract,Tensor-method} 9 | \title{Extract or Replace Subtensors} 10 | \usage{ 11 | \S4method{[}{Tensor}(x, i, j, ..., drop = TRUE) 12 | 13 | \S4method{[}{Tensor}(x, i, j, ...)
<- value 14 | } 15 | \arguments{ 16 | \item{x}{Tensor to be subset} 17 | 18 | \item{i, j, ...}{indices that specify the extents of the sub-tensor} 19 | 20 | \item{drop}{whether or not to drop the modes that are equal to 1 after subsetting} 21 | 22 | \item{value}{either vector, matrix, or array that will replace the subtensor} 23 | } 24 | \value{ 25 | an object of class Tensor 26 | } 27 | \description{ 28 | Extends '[' and '[<-' from the base array class for the Tensor class. Works exactly as it would for the base 'array' class. 29 | } 30 | \details{ 31 | \code{x[i,j,...,drop=TRUE]} 32 | } 33 | \examples{ 34 | tnsr <- rand_tensor() 35 | tnsr[1,2,3] 36 | tnsr[3,1,] 37 | tnsr[,,5] 38 | tnsr[,,5,drop=FALSE] 39 | 40 | tnsr[1,2,3] <- 3; tnsr[1,2,3] 41 | tnsr[3,1,] <- rep(0,5); tnsr[3,1,] 42 | tnsr[,2,] <- matrix(0,nrow=3,ncol=5); tnsr[,2,] 43 | } 44 | 45 | -------------------------------------------------------------------------------- /man/Ops-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{Ops-methods} 5 | \alias{Ops,Tensor,Tensor-method} 6 | \alias{Ops,Tensor,array-method} 7 | \alias{Ops,Tensor,numeric-method} 8 | \alias{Ops,array,Tensor-method} 9 | \alias{Ops,numeric,Tensor-method} 10 | \alias{Ops-methods} 11 | \title{Conformable elementwise operators for Tensor} 12 | \usage{ 13 | \S4method{Ops}{Tensor,Tensor}(e1, e2) 14 | } 15 | \arguments{ 16 | \item{e1}{left-hand object} 17 | 18 | \item{e2}{right-hand object} 19 | } 20 | \description{ 21 | Overloads elementwise operators for tensors, arrays, and vectors that are conformable (have the same modes). 22 | } 23 | \examples{ 24 | tnsr <- rand_tensor(c(3,4,5)) 25 | tnsr2 <- rand_tensor(c(3,4,5)) 26 | tnsrsum <- tnsr + tnsr2 27 | tnsrdiff <- tnsr - tnsr2 28 | tnsrelemprod <- tnsr * tnsr2 29 | tnsrelemquot <- tnsr / tnsr2 30 | for (i in 1:3L){ 31 | for (j in 1:4L){ 32 | for (k in 1:5L){ 33 | stopifnot(tnsrsum@data[i,j,k]==tnsr@data[i,j,k]+tnsr2@data[i,j,k]) 34 | stopifnot(tnsrdiff@data[i,j,k]==(tnsr@data[i,j,k]-tnsr2@data[i,j,k])) 35 | stopifnot(tnsrelemprod@data[i,j,k]==tnsr@data[i,j,k]*tnsr2@data[i,j,k]) 36 | stopifnot(tnsrelemquot@data[i,j,k]==tnsr@data[i,j,k]/tnsr2@data[i,j,k]) 37 | } 38 | } 39 | } 40 | } 41 | 42 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: rTensor 2 | Type: Package 3 | Title: Tools for tensor analysis and decomposition 4 | Version: 1.3 5 | Author: James Li and Jacob Bien and Martin Wells 6 | Maintainer: James Li 7 | Description: rTensor is a set of tools for creation, manipulation, and modeling 8 | of tensors with an arbitrary number of modes. A tensor in the context of data 9 | analysis is a multidimensional array. rTensor does this by providing an S4 10 | class 'Tensor' that wraps around the base 'array' class. rTensor also 11 | provides common tensor operations as methods, including matrix unfolding, 12 | summing/averaging across modes, calculating the Frobenius norm, and taking 13 | the inner product between two tensors. Familiar array operations are 14 | overloaded, such as index subsetting via '[' and element-wise operations. 15 | rTensor also implements various tensor decompositions, including CP, GLRAM, 16 | MPCA, PVD, and Tucker.
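As a hedged quick-start sketch of the interface just described (usage illustrative only; all functions shown are exported by the package):

library(rTensor)
tnsr <- as.tensor(array(rnorm(60), dim = c(3,4,5)))  # wrap an array as a Tensor
fnorm(tnsr)                                          # Frobenius norm
matT <- k_unfold(tnsr, m = 2)                        # matrix unfolding on mode 2
tk <- tucker(tnsr, ranks = c(2,2,2))                 # Tucker decomposition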
For tensors with 3 modes, rTensor also implements 17 | transpose, product, and SVD, as defined in Kilmer et al. (2013). Some 18 | auxiliary functions include the Khatri-Rao product, Kronecker product, and 19 | the Hamadard product for a list of matrices. Development of rTensor has 20 | been generously supported by Cornell's Department of Statistical Science. 21 | License: GPL (>= 2) 22 | Depends: 23 | methods 24 | Date: 2014-11-05 25 | URL: http://jamesyili.github.io/rTensor 26 | RoxygenNote: 5.0.1 27 | -------------------------------------------------------------------------------- /man/ttm.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{ttm} 4 | \alias{ttm} 5 | \title{Tensor Times Matrix (m-Mode Product)} 6 | \usage{ 7 | ttm(tnsr, mat, m = NULL, transpose = FALSE) 8 | } 9 | \arguments{ 10 | \item{tnsr}{Tensor object with K modes} 11 | 12 | \item{mat}{input matrix with the same number of columns as the \code{m}th mode of \code{tnsr}} 13 | 14 | \item{m}{the mode to contract on} 15 | 16 | \item{transpose}{if mat should be transposed before multiplication} 17 | } 18 | \value{ 19 | a Tensor object with K modes 20 | } 21 | \description{ 22 | Contracted (m-Mode) product between a Tensor of arbitrary number of modes and a matrix. The result is folded back into a Tensor. 23 | } 24 | \details{ 25 | By definition, \code{rs_unfold(ttm(tnsr,mat),m) = mat\%*\%rs_unfold(tnsr,m)}, so the number of columns in \code{mat} must match the \code{m}th mode of \code{tnsr}. For the math on the m-Mode Product, see Kolda and Bader (2009). 26 | } 27 | \note{ 28 | The \code{m}th mode of \code{tnsr} must match the number of columns in \code{mat}. By default, the returned Tensor does not drop any modes equal to 1. 29 | } 30 | \examples{ 31 | tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 32 | mat <- matrix(runif(50),ncol=5) 33 | ttm(tnsr,mat,m=3) 34 | } 35 | \references{ 36 | T. Kolda, B. Bader, "Tensor decomposition and applications". SIAM Applied Mathematics and Applications 2009. 37 | } 38 | \seealso{ 39 | \code{\link{ttl}}, \code{\link{rs_unfold-methods}} 40 | } 41 | 42 | -------------------------------------------------------------------------------- /man/t_svd.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Decomp.R 3 | \name{t_svd} 4 | \alias{t_svd} 5 | \title{Tensor Singular Value Decomposition} 6 | \usage{ 7 | t_svd(tnsr) 8 | } 9 | \arguments{ 10 | \item{tnsr}{3-Tensor to decompose via TSVD} 11 | } 12 | \value{ 13 | a list containing the following:\describe{ 14 | \item{\code{U}}{the left orthogonal 3-Tensor} 15 | \item{\code{V}}{the right orthogonal 3-Tensor} 16 | \item{\code{S}}{the middle 3-Tensor consisting of face-wise diagonal matrices} 17 | } 18 | } 19 | \description{ 20 | TSVD for a 3-Tensor. Constructs 3-Tensors \code{U, S, V} such that \code{tnsr = t_mult(t_mult(U,S),t(V))}. \code{U} and \code{V} are orthogonal 3-Tensors with orthogonality defined in Kilmer et al. (2013), and \code{S} is a 3-Tensor consisting of facewise diagonal matrices. For more details on the TSVD, consult Kilmer et al. (2013). 21 | } 22 | \note{ 23 | Computation involves complex values, but if the inputs are real, then the outputs are also real. Some loss of precision occurs in the truncation of the imaginary components during the FFT and inverse FFT.
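An illustrative numerical check of this note (real input, real reconstruction up to FFT rounding):

tnsr <- rand_tensor(c(4,4,4))
tsvdD <- t_svd(tnsr)
# relative reconstruction error; expected to be near machine precision
fnorm(t_svd_reconstruct(tsvdD) - tnsr) / fnorm(tnsr)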
24 | } 25 | \examples{ 26 | tnsr <- rand_tensor() 27 | tsvdD <- t_svd(tnsr) 28 | } 29 | \references{ 30 | M. Kilmer, K. Braman, N. Hao, and R. Hoover, "Third-order tensors as operators on matrices: a theoretical and computational framework with applications in imaging". SIAM Journal on Matrix Analysis and Applications 2013. 31 | } 32 | \seealso{ 33 | \code{\link{t_mult}}, \code{\link{t_svd_reconstruct}} 34 | } 35 | 36 | -------------------------------------------------------------------------------- /man/fold.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{fold} 4 | \alias{fold} 5 | \title{General Folding of Matrix} 6 | \usage{ 7 | fold(mat, row_idx = NULL, col_idx = NULL, modes = NULL) 8 | } 9 | \arguments{ 10 | \item{mat}{matrix to be folded into a Tensor} 11 | 12 | \item{row_idx}{the indices of the modes that are mapped onto the row space} 13 | 14 | \item{col_idx}{the indices of the modes that are mapped onto the column space} 15 | 16 | \item{modes}{the modes of the output Tensor} 17 | } 18 | \value{ 19 | Tensor object with modes given by \code{modes} 20 | } 21 | \description{ 22 | General folding of a matrix into a Tensor. This is designed to be the inverse function to \code{\link{unfold-methods}}, with the same ordering of the indices. This amounts to the following: if we were to unfold a Tensor using a set of \code{row_idx} and \code{col_idx}, then we can fold the resulting matrix back into the original Tensor using the same \code{row_idx} and \code{col_idx}. 23 | } 24 | \details{ 25 | This function uses \code{aperm} as the primary workhorse. 26 | } 27 | \examples{ 28 | tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 29 | matT3<-unfold(tnsr,row_idx=2,col_idx=c(3,1)) 30 | identical(fold(matT3,row_idx=2,col_idx=c(3,1),modes=c(3,4,5)),tnsr) 31 | } 32 | \references{ 33 | T. Kolda, B. Bader, "Tensor decomposition and applications". SIAM Applied Mathematics and Applications 2009. 34 | } 35 | \seealso{ 36 | \code{\link{unfold-methods}}, \code{\link{k_fold}}, \code{\link{unmatvec}} 37 | } 38 | 39 | -------------------------------------------------------------------------------- /man/unfold-methods.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{methods} 4 | \name{unfold-methods} 5 | \alias{unfold} 6 | \alias{unfold,Tensor-method} 7 | \alias{unfold-methods} 8 | \title{Tensor Unfolding} 9 | \usage{ 10 | unfold(tnsr, row_idx, col_idx) 11 | 12 | \S4method{unfold}{Tensor}(tnsr, row_idx = NULL, col_idx = NULL) 13 | } 14 | \arguments{ 15 | \item{tnsr}{the Tensor instance} 16 | 17 | \item{row_idx}{the indices of the modes to map onto the row space} 18 | 19 | \item{col_idx}{the indices of the modes to map onto the column space} 20 | } 21 | \value{ 22 | matrix with \code{prod(tnsr@modes[row_idx])} rows and \code{prod(tnsr@modes[col_idx])} columns 23 | } 24 | \description{ 25 | Unfolds the tensor into a matrix, with the modes in \code{row_idx} mapped onto the rows and the modes in \code{col_idx} onto the columns. Note that \code{c(row_idx,col_idx)} must contain every mode index of \code{tnsr} exactly once (order doesn't matter). Within the rows and columns, the order of the unfolding is determined by the order of the modes. This convention is consistent with Kolda and Bader (2009).
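The resulting dimensions follow directly from this convention, e.g.:

tnsr <- rand_tensor(c(3,4,5))
matT <- unfold(tnsr, row_idx = 2, col_idx = c(3,1))
dim(matT)  # 4 by 15, i.e. prod(modes[row_idx]) by prod(modes[col_idx])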
26 | } 27 | \details{ 28 | For Row Space Unfolding or m-mode Unfolding, see \code{\link{rs_unfold-methods}}. For Column Space Unfolding or matvec, see \code{\link{cs_unfold-methods}}. 29 | 30 | \code{\link{vec-methods}} returns the vectorization of the tensor. 31 | 32 | \code{unfold(tnsr,row_idx=NULL,col_idx=NULL)} 33 | } 34 | \examples{ 35 | tnsr <- rand_tensor() 36 | matT3<-unfold(tnsr,row_idx=2,col_idx=c(3,1)) 37 | } 38 | \references{ 39 | T. Kolda, B. Bader, "Tensor decomposition and applications". SIAM Applied Mathematics and Applications 2009. 40 | } 41 | \seealso{ 42 | \code{\link{k_unfold-methods}} and \code{\link{matvec-methods}} 43 | } 44 | 45 | -------------------------------------------------------------------------------- /man/hosvd.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Decomp.R 3 | \name{hosvd} 4 | \alias{hosvd} 5 | \title{(Truncated-)Higher-order SVD} 6 | \usage{ 7 | hosvd(tnsr, ranks = NULL) 8 | } 9 | \arguments{ 10 | \item{tnsr}{Tensor with K modes} 11 | 12 | \item{ranks}{a vector of desired modes in the output core tensor, default is \code{tnsr@modes}} 13 | } 14 | \value{ 15 | a list containing the following:\describe{ 16 | \item{\code{Z}}{core tensor with modes specified by \code{ranks}} 17 | \item{\code{U}}{a list of orthogonal matrices, one for each mode} 18 | \item{\code{est}}{estimate of \code{tnsr} after compression} 19 | \item{\code{fnorm_resid}}{the Frobenius norm of the error \code{fnorm(est-tnsr)} - if there was no truncation, then this is O(mach_eps) } 20 | } 21 | } 22 | \description{ 23 | Higher-order SVD of a K-Tensor. Writes the K-Tensor as an (m-mode) product of a core Tensor (with possibly smaller modes) and K orthogonal factor matrices. Truncations can be specified via \code{ranks} (making them smaller than the original modes of the K-Tensor will result in a truncation). For the mathematical details on HOSVD, consult Lathauwer et al. (2000). 24 | } 25 | \details{ 26 | The factor matrices are obtained directly from the SVDs of the mode-k unfoldings, so no iteration is required. A progress bar is included to help monitor operations on large tensors. 27 | } 28 | \note{ 29 | The length of \code{ranks} must match \code{tnsr@num_modes}. 30 | } 31 | \examples{ 32 | tnsr <- rand_tensor(c(6,7,8)) 33 | hosvdD <-hosvd(tnsr) 34 | hosvdD$fnorm_resid 35 | hosvdD2 <-hosvd(tnsr,ranks=c(3,3,4)) 36 | hosvdD2$fnorm_resid 37 | } 38 | \references{ 39 | L. De Lathauwer, B. De Moor, J. Vandewalle, "A multilinear singular value decomposition". SIAM Journal on Matrix Analysis and Applications 2000.
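For intuition, a hedged base-R sketch of how a single (truncated) factor matrix arises (illustrative only; hosvd is the supported interface):

arr <- array(rnorm(6*7*8), dim = c(6,7,8))
unfold1 <- matrix(arr, nrow = 6)  # mode-1 unfolding (column-major)
U1 <- svd(unfold1)$u[, 1:3]       # truncated mode-1 factor matrix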
40 | } 41 | \seealso{ 42 | \code{\link{tucker}} 43 | } 44 | 45 | -------------------------------------------------------------------------------- /man/ttl.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Misc.R 3 | \name{ttl} 4 | \alias{ttl} 5 | \title{Tensor Times List} 6 | \usage{ 7 | ttl(tnsr, list_mat, ms = NULL, transpose = FALSE) 8 | } 9 | \arguments{ 10 | \item{tnsr}{Tensor object with K modes} 11 | 12 | \item{list_mat}{a list of matrices} 13 | 14 | \item{ms}{a vector of modes to contract on (order should match the order of \code{list_mat})} 15 | 16 | \item{transpose}{if matrices should be transposed before multiplication} 17 | } 18 | \value{ 19 | Tensor object with K modes 20 | } 21 | \description{ 22 | Contracted (m-Mode) product between a Tensor of arbitrary number of modes and a list of matrices. The result is folded back into a Tensor. 23 | } 24 | \details{ 25 | Performs \code{ttm} repeatedly for a single Tensor and a list of matrices on multiple modes. For instance, suppose we want to multiply a Tensor object \code{tnsr} with three matrices \code{mat1}, \code{mat2}, \code{mat3} on modes 1, 2, and 3. We could do \code{ttm(ttm(ttm(tnsr,mat1,1),mat2,2),mat3,3)}, or we could do \code{ttl(tnsr,list(mat1,mat2,mat3),c(1,2,3))}. The order of the matrices in the list should match the order of the modes. This is a common operation for various Tensor decompositions such as CP and Tucker. For the math on the m-Mode Product, see Kolda and Bader (2009). 26 | } 27 | \note{ 28 | The returned Tensor does not drop any modes equal to 1. 29 | } 30 | \examples{ 31 | tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 32 | lizt <- list('mat1' = matrix(runif(30),ncol=3), 33 | 'mat2' = matrix(runif(40),ncol=4), 34 | 'mat3' = matrix(runif(50),ncol=5)) 35 | ttl(tnsr,lizt,ms=c(1,2,3)) 36 | } 37 | \references{ 38 | T. Kolda, B. Bader, "Tensor decomposition and applications". SIAM Applied Mathematics and Applications 2009.
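An illustrative check of the ttm/ttl equivalence described in Details:

tnsr <- rand_tensor(c(3,4,5))
mat1 <- matrix(runif(6), ncol = 3)
mat2 <- matrix(runif(8), ncol = 4)
mat3 <- matrix(runif(10), ncol = 5)
out1 <- ttm(ttm(ttm(tnsr, mat1, 1), mat2, 2), mat3, 3)
out2 <- ttl(tnsr, list(mat1, mat2, mat3), ms = c(1,2,3))
fnorm(out1 - out2)  # expected ~0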
39 | } 40 | \seealso{ 41 | \code{\link{ttm}} 42 | } 43 | 44 | -------------------------------------------------------------------------------- /tests/test_tucker_nonneg.R: -------------------------------------------------------------------------------- 1 | # An example of nonnegative and semi-nonnegative Tucker decomposition 2 | 3 | require( rTensor ) 4 | ## Generate nonnegative synthetic third-order tensor 5 | 6 | T.dim <- c( 40, 50, 60 ) 7 | # randomly generate core tensor 8 | Z.dim <- c( 4, 5, 6 ) 9 | Z <- rand_tensor( Z.dim ) 10 | Z@data <- pmax( Z@data, 0 ) 11 | 12 | # randomly generate factor matrices 13 | U <- lapply( seq_along(T.dim), function( mode_ix ) { 14 | matrix( pmax( 0, rnorm( T.dim[[mode_ix]]*Z.dim[[mode_ix]] ) ), 15 | ncol = Z.dim[[mode_ix]] ) 16 | } ) 17 | 18 | # generate tensor 19 | T.orig <- ttl( Z, U, seq_along(Z.dim) ) 20 | Z <- Z/max(T.orig@data) 21 | T.orig@data <- T.orig@data / max(T.orig@data) 22 | 23 | sn.ratio <- 0.6 24 | # -- add noise -- 25 | T.noise <- rand_tensor( T.dim ) 26 | T <- T.orig + 10^(-sn.ratio/0.2)*fnorm(T.orig)/fnorm(T.noise)*T.noise; 27 | 28 | # Solve the problem 29 | T.tucker_nonneg <- tucker.nonneg( T, Z.dim, tol = 1E-4, hosvd = TRUE, 30 | max_iter = 1000, verbose = TRUE, lambda = rep.int(0.1,4) ) 31 | 32 | # Reporting 33 | rel_err <- fnorm(T.tucker_nonneg$est - T.orig)/fnorm(T.orig) 34 | message('Relative error of decomposition = ', rel_err) 35 | 36 | # test semi-nonnegative Tucker decomposition 37 | 38 | Z.SNTD <- rand_tensor( Z.dim ) 39 | 40 | # generate tensor 41 | T.SNTD_orig <- ttl( Z.SNTD, U, seq_along(Z.dim) ) 42 | T.SNTD_orig@data <- T.SNTD_orig@data / max(T.SNTD_orig@data) 43 | 44 | # -- add noise -- 45 | T.SNTD <- T.SNTD_orig + 10^(-sn.ratio/0.2)*fnorm(T.SNTD_orig)/fnorm(T.noise)*T.noise; 46 | 47 | # Solve the problem 48 | T.SNTD_tucker_nonneg <- tucker.nonneg( T.SNTD, Z.dim, tol = 1E-4, hosvd = TRUE, core_nonneg = FALSE, 49 | max_iter = 1000, verbose = TRUE, lambda = rep.int(0.5,4) ) 50 | 51 | # Reporting 52 | rel_err.SNTD <- fnorm(T.SNTD_tucker_nonneg$est - T.SNTD_orig)/fnorm(T.SNTD_orig) 53 | message('Relative error of decomposition = ', rel_err.SNTD) 54 | -------------------------------------------------------------------------------- /man/pvd.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Decomp.R 3 | \name{pvd} 4 | \alias{pvd} 5 | \title{Population Value Decomposition} 6 | \usage{ 7 | pvd(tnsr, uranks = NULL, wranks = NULL, a = NULL, b = NULL) 8 | } 9 | \arguments{ 10 | \item{tnsr}{3-Tensor with the third mode being the measurement mode} 11 | 12 | \item{uranks}{ranks of the U matrices} 13 | 14 | \item{wranks}{ranks of the W matrices} 15 | 16 | \item{a}{rank of \code{P = U\%*\%t(U)}} 17 | 18 | \item{b}{rank of \code{D = W\%*\%t(W)}} 19 | } 20 | \value{ 21 | a list containing the following:\describe{ 22 | \item{\code{P}}{population-level matrix \code{P = U\%*\%t(U)}, where U is constructed by stacking the truncated left eigenvectors of slicewise PCA along the third mode} 23 | \item{\code{V}}{a list of image-level core matrices} 24 | \item{\code{D}}{population-level matrix \code{D = W\%*\%t(W)}, where W is constructed by stacking the truncated right eigenvectors of slicewise PCA along the third mode} 25 | \item{\code{est}}{estimate of \code{tnsr} after compression} 26 | \item{\code{norm_percent}}{the percent of Frobenius norm explained by the approximation} 27 | \item{\code{fnorm_resid}}{the Frobenius
norm of the error \code{fnorm(est-tnsr)}} 28 | } 29 | } 30 | \description{ 31 | The default Population Value Decomposition (PVD) of a series of 2D images. Constructs population-level matrices P, V, and D to account for variances within as well as across the images. Structurally similar to Tucker (\code{\link{tucker}}) and GLRAM (\code{\link{mpca}}), but retains crucial differences. Requires \code{2*n3 + 2} parameters to specify the final ranks of P, V, and D, where n3 is the third mode (how many images are in the set). Consult Crainiceanu et al. (2013) for the construction and rationale behind the PVD model. 32 | } 33 | \details{ 34 | The PVD is not an iterative method, but instead relies on \code{n3 + 2} separate PCA decompositions. The third mode is for how many images are in the set. 35 | } 36 | \examples{ 37 | tnsr <- rand_tensor(c(10,5,100)) 38 | pvdD<-pvd(tnsr,uranks=rep(8,100),wranks=rep(4,100),a=8,b=4) 39 | } 40 | \references{ 41 | C. Crainiceanu, B. Caffo, S. Luo, V. Zipunnikov, N. Punjabi, "Population value decomposition: a framework for the analysis of image populations". Journal of the American Statistical Association, 2013. 42 | } 43 | 44 | -------------------------------------------------------------------------------- /man/cp.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Decomp.R 3 | \name{cp} 4 | \alias{cp} 5 | \title{Canonical Polyadic Decomposition} 6 | \usage{ 7 | cp(tnsr, num_components = NULL, max_iter = 25, tol = 1e-05) 8 | } 9 | \arguments{ 10 | \item{tnsr}{Tensor with K modes} 11 | 12 | \item{num_components}{the number of rank-1 K-Tensors to use in approximation} 13 | 14 | \item{max_iter}{maximum number of iterations if error stays above \code{tol}} 15 | 16 | \item{tol}{relative Frobenius norm error tolerance} 17 | } 18 | \value{ 19 | a list containing the following \describe{ 20 | \item{\code{lambdas}}{a vector of normalizing constants, one for each component} 21 | \item{\code{U}}{a list of matrices - one for each mode - each matrix with \code{num_components} columns} 22 | \item{\code{conv}}{whether or not \code{resid} < \code{tol} by the last iteration} 23 | \item{\code{norm_percent}}{the percent of Frobenius norm explained by the approximation} 24 | \item{\code{est}}{estimate of \code{tnsr} after compression} 25 | \item{\code{fnorm_resid}}{the Frobenius norm of the error \code{fnorm(est-tnsr)}} 26 | \item{\code{all_resids}}{vector containing the Frobenius norm of error for all the iterations} 27 | } 28 | } 29 | \description{ 30 | Canonical Polyadic (CP) decomposition of a tensor, also known as CANDECOMP/PARAFAC. Approximates a K-Tensor using a sum of \code{num_components} rank-1 K-Tensors. A rank-1 K-Tensor can be written as an outer product of K vectors. There are a total of \code{num_components * tnsr@num_modes} vectors in the output, stored in \code{tnsr@num_modes} matrices, each with \code{num_components} columns. This is an iterative algorithm, with two possible stopping conditions: either relative error in Frobenius norm has gotten below \code{tol}, or the \code{max_iter} number of iterations has been reached. For more details on CP decomposition, consult Kolda and Bader (2009). 31 | } 32 | \details{ 33 | Uses the Alternating Least Squares (ALS) estimation procedure. A progress bar is included to help monitor operations on large tensors.
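For intuition, a hedged sketch of rebuilding the estimate from the returned pieces (this is just the CP model written out, not the package's internal code):

tnsr <- rand_tensor(c(4,5,6))
cpD <- cp(tnsr, num_components = 2)
# sum of lambda_r * (u_r outer v_r outer w_r) over the components
rebuilt <- Reduce(`+`, lapply(1:2, function(r)
  cpD$lambdas[r] * as.tensor(outer(outer(cpD$U[[1]][, r], cpD$U[[2]][, r]), cpD$U[[3]][, r]))))
fnorm(rebuilt - cpD$est)  # expected ~0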
34 | } 35 | \examples{ 36 | tnsr <- rand_tensor(c(6,7,8)) 37 | cpD <- cp(tnsr,num_components=5) 38 | cpD$conv 39 | cpD$norm_percent 40 | plot(cpD$all_resids) 41 | } 42 | \references{ 43 | T. Kolda, B. Bader, "Tensor Decompositions and Applications". SIAM Review, 2009. 44 | } 45 | \seealso{ 46 | \code{\link{tucker}} 47 | } 48 | 49 | -------------------------------------------------------------------------------- /man/mpca.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Decomp.R 3 | \name{mpca} 4 | \alias{mpca} 5 | \title{Multilinear Principal Components Analysis} 6 | \usage{ 7 | mpca(tnsr, ranks = NULL, max_iter = 25, tol = 1e-05) 8 | } 9 | \arguments{ 10 | \item{tnsr}{Tensor with K modes} 11 | 12 | \item{ranks}{a vector of the compressed modes of the output core Tensor; this has length K-1} 13 | 14 | \item{max_iter}{maximum number of iterations if error stays above \code{tol}} 15 | 16 | \item{tol}{relative Frobenius norm error tolerance} 17 | } 18 | \value{ 19 | a list containing the following:\describe{ 20 | \item{\code{Z_ext}}{the extended core tensor, with the first K-1 modes given by \code{ranks}} 21 | \item{\code{U}}{a list of K-1 orthogonal factor matrices - one for each compressed mode, with the number of columns of the matrices given by \code{ranks}} 22 | \item{\code{conv}}{whether or not \code{resid} < \code{tol} by the last iteration} 23 | \item{\code{est}}{estimate of \code{tnsr} after compression} 24 | \item{\code{norm_percent}}{the percent of Frobenius norm explained by the approximation} 25 | \item{\code{fnorm_resid}}{the Frobenius norm of the error \code{fnorm(est-tnsr)}} 26 | \item{\code{all_resids}}{vector containing the Frobenius norm of error for all the iterations} 27 | } 28 | } 29 | \description{ 30 | This is basically the Tucker decomposition of a K-Tensor, \code{\link{tucker}}, with one of the modes uncompressed. If K = 3, then this is also known as the Generalized Low Rank Approximation of Matrices (GLRAM). This implementation assumes that the last mode is the measurement mode and hence uncompressed. This is an iterative algorithm, with two possible stopping conditions: either the relative error in Frobenius norm has gotten below \code{tol}, or the \code{max_iter} number of iterations has been reached. For more details on the MPCA of tensors, consult Lu et al. (2008). 31 | } 32 | \details{ 33 | Uses the Alternating Least Squares (ALS) estimation procedure. A progress bar is included to help monitor operations on large tensors. 34 | } 35 | \note{ 36 | The length of \code{ranks} must match \code{tnsr@num_modes-1}. 37 | } 38 | \examples{ 39 | tnsr <-rand_tensor(c(100,10,10)) 40 | mpcaD <- mpca(tnsr,ranks=c(30,5)) 41 | mpcaD$conv 42 | mpcaD$norm_percent 43 | plot(mpcaD$all_resids) 44 | } 45 | \references{ 46 | H. Lu, K. Plataniotis, A. Venetsanopoulos, "MPCA: Multilinear Principal Component Analysis of Tensor Objects". IEEE Transactions on Neural Networks, 2008.
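To make the "uncompressed last mode" concrete, a quick shape check (an illustrative sketch based only on the documented return values):

    tnsr <- rand_tensor(c(100,10,10))
    mpcaD <- mpca(tnsr, ranks = c(30,5))
    dim(mpcaD$Z_ext)    # 30 5 10: the measurement mode stays at full size
    dim(mpcaD$U[[1]])   # 100 30: factor matrix for the first compressed mode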
47 | } 48 | \seealso{ 49 | \code{\link{tucker}}, \code{\link{hosvd}} 50 | } 51 | 52 | -------------------------------------------------------------------------------- /man/tucker.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Decomp.R 3 | \name{tucker} 4 | \alias{tucker} 5 | \title{Tucker Decomposition} 6 | \usage{ 7 | tucker(tnsr, ranks = NULL, max_iter = 25, tol = 1e-05) 8 | } 9 | \arguments{ 10 | \item{tnsr}{Tensor with K modes} 11 | 12 | \item{ranks}{a vector of the modes of the output core Tensor} 13 | 14 | \item{max_iter}{maximum number of iterations if error stays above \code{tol}} 15 | 16 | \item{tol}{relative Frobenius norm error tolerance} 17 | } 18 | \value{ 19 | a list containing the following:\describe{ 20 | \item{\code{Z}}{the core tensor, with modes specified by \code{ranks}} 21 | \item{\code{U}}{a list of orthogonal factor matrices - one for each mode, with the number of columns of the matrices given by \code{ranks}} 22 | \item{\code{conv}}{whether or not \code{resid} < \code{tol} by the last iteration} 23 | \item{\code{est}}{estimate of \code{tnsr} after compression} 24 | \item{\code{norm_percent}}{the percent of Frobenius norm explained by the approximation} 25 | \item{\code{fnorm_resid}}{the Frobenius norm of the error \code{fnorm(est-tnsr)}} 26 | \item{\code{all_resids}}{vector containing the Frobenius norm of error for all the iterations} 27 | } 28 | } 29 | \description{ 30 | The Tucker decomposition of a tensor. Approximates a K-Tensor using an n-mode product of a core tensor (with modes specified by \code{ranks}) with orthogonal factor matrices. If there is no truncation in one of the modes, then this is the same as the MPCA, \code{\link{mpca}}. If there is no truncation in any of the modes (i.e. \code{ranks = tnsr@modes}), then this is the same as the HOSVD, \code{\link{hosvd}}. This is an iterative algorithm, with two possible stopping conditions: either the relative error in Frobenius norm has gotten below \code{tol}, or the \code{max_iter} number of iterations has been reached. For more details on the Tucker decomposition, consult Kolda and Bader (2009). 31 | } 32 | \details{ 33 | Uses the Alternating Least Squares (ALS) estimation procedure, also known as Higher-Order Orthogonal Iteration (HOOI). Initialized using a (Truncated-)HOSVD. A progress bar is included to help monitor operations on large tensors. 34 | } 35 | \note{ 36 | The length of \code{ranks} must match \code{tnsr@num_modes}. 37 | } 38 | \examples{ 39 | tnsr <- rand_tensor(c(6,7,8)) 40 | tuckerD <- tucker(tnsr,ranks=c(3,3,4)) 41 | tuckerD$conv 42 | tuckerD$norm_percent 43 | plot(tuckerD$all_resids) 44 | } 45 | \references{ 46 | T. Kolda, B. Bader, "Tensor Decompositions and Applications". SIAM Review, 2009. 47 | } 48 | \seealso{ 49 | \code{\link{hosvd}}, \code{\link{mpca}} 50 | } 51 | 52 | -------------------------------------------------------------------------------- /man/rTensor-package.Rd: -------------------------------------------------------------------------------- 1 | \docType{package} 2 | \name{rTensor-package} 3 | \alias{rTensor} 4 | \alias{rTensor-package} 5 | \title{Tools for tensor analysis and decomposition} 6 | \description{ 7 | This package is centered around the \code{\link{Tensor-class}}, which defines an S4 class for tensors with an arbitrary number of modes.
A vignette and/or a possible paper will be included in a future release of this package. 8 | } 9 | \details{ 10 | This page summarizes the full functionality of this package. Note that since all the methods associated with the S4 class \code{\link{Tensor-class}} are documented there, we do not duplicate them here. 11 | 12 | The remaining functions can be split into two groups: the first is a set of tensor decompositions, and the second is a set of helper functions that are useful in tensor manipulation. 13 | 14 | rTensor implements the following tensor decompositions: \describe{ 15 | \item{\code{\link{cp}}}{Canonical Polyadic (CP) decomposition} 16 | \item{\code{\link{tucker}}}{General Tucker decomposition} 17 | \item{\code{\link{mpca}}}{Multilinear Principal Component Analysis; note that for 3-Tensors this is also known as Generalized Low Rank Approximation of Matrices (GLRAM)} 18 | \item{\code{\link{hosvd}}}{(Truncated-)Higher-order singular value decomposition} 19 | \item{\code{\link{t_svd}}}{Tensor singular value decomposition; 3-Tensors only; also note that there is an associated reconstruction function \code{\link{t_svd_reconstruct}}} 20 | \item{\code{\link{pvd}}}{Population value decomposition of images; 3-Tensors only} 21 | } 22 | 23 | rTensor also provides a set of functions for tensor multiplication: \describe{ 24 | \item{\code{\link{ttm}}}{Tensor times matrix, aka m-mode product} 25 | \item{\code{\link{ttl}}}{Tensor times list (of matrices)} 26 | \item{\code{\link{t_mult}}}{Tensor product based on block circulant unfolding; only implemented for a pair of 3-Tensors} 27 | } 28 | 29 | ...as well as for matrices: \describe{ 30 | \item{\code{\link{hamadard_list}}}{Computes the Hadamard (element-wise) product of a list of matrices} 31 | \item{\code{\link{kronecker_list}}}{Computes the Kronecker product of a list of matrices} 32 | \item{\code{\link{khatri_rao}}}{Computes the Khatri-Rao product of two matrices} 33 | \item{\code{\link{khatri_rao_list}}}{Computes the Khatri-Rao product of a list of matrices} 34 | \item{\code{\link{fold}}}{General folding of a matrix into a tensor} 35 | \item{\code{\link{k_fold}}}{Inverse operation for \code{\link{k_unfold}}} 36 | \item{\code{\link{unmatvec}}}{Inverse operation for \code{\link{matvec}}} 37 | } 38 | 39 | For more information on any of the functions, please consult the individual man pages. 40 | } 41 | \author{ 42 | James Li \email{jamesyili@gmail.com}, Jacob Bien, and Martin T.
Wells 43 | } 44 | -------------------------------------------------------------------------------- /man/tucker.nonneg.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Decomp.R 3 | \name{tucker.nonneg} 4 | \alias{tucker.nonneg} 5 | \title{Sparse (Semi-)Nonnegative Tucker Decomposition} 6 | \usage{ 7 | tucker.nonneg(tnsr, ranks, core_nonneg = TRUE, tol = 1e-04, hosvd = FALSE, 8 | max_iter = 500, max_time = 0, lambda = rep.int(0, length(ranks) + 1), 9 | L_min = 1, rw = 0.9999, bound = Inf, U0 = NULL, Z0 = NULL, 10 | verbose = FALSE, unfold_tnsr = length(dim(tnsr)) * prod(dim(tnsr)) < 11 | 4000^2) 12 | } 13 | \arguments{ 14 | \item{tnsr}{nonnegative tensor with \code{K} modes} 15 | 16 | \item{ranks}{an integer vector of length \code{K} specifying the mode sizes for the output core tensor \code{Z}} 17 | 18 | \item{core_nonneg}{constrain the core tensor \code{Z} to be nonnegative} 19 | 20 | \item{tol}{relative Frobenius norm error tolerance} 21 | 22 | \item{hosvd}{if TRUE, apply the higher-order SVD (HOSVD) to improve the initial \code{U} and \code{Z}} 23 | 24 | \item{max_iter}{maximum number of iterations if the error stays above \code{tol}} 25 | 26 | \item{max_time}{maximum running time} 27 | 28 | \item{lambda}{a \code{K+1} vector of sparsity regularizer coefficients for the factor matrices and the core tensor} 29 | 30 | \item{L_min}{lower bound for the Lipschitz constants of the gradients of the residual error \eqn{l(Z,U) = fnorm(tnsr - ttl(Z, U))} with respect to \code{Z} and each \code{U}} 31 | 32 | \item{rw}{controls the extrapolation weight} 33 | 34 | \item{bound}{upper bound for the elements of \code{Z} and \code{U[[n]]} (those that have a zero regularization coefficient \code{lambda})} 35 | 36 | \item{U0}{initial factor matrices; defaults to nonnegative Gaussian random matrices} 37 | 38 | \item{Z0}{initial core tensor \code{Z}; defaults to a nonnegative Gaussian random tensor} 39 | 40 | \item{verbose}{print more information about the algorithm's progress} 41 | 42 | \item{unfold_tnsr}{precalculate the matrix unfoldings of \code{tnsr} along every mode (speeds up the calculation, but may require a lot of memory)} 43 | } 44 | \value{ 45 | a list:\describe{ 46 | \item{\code{U}}{nonnegative factor matrices} 47 | \item{\code{Z}}{nonnegative core tensor} 48 | \item{\code{est}}{estimate \eqn{Z \times_1 U_1 \ldots \times_K U_K}} 49 | \item{\code{conv}}{method convergence indicator} 50 | \item{\code{resid}}{the Frobenius norm of the residual error \code{l(Z,U)} plus the regularization penalty (if any)} 51 | \item{\code{n_iter}}{number of iterations} 52 | \item{\code{n_redo}}{number of times \code{Z} and \code{U} were recalculated to avoid an increase in the objective function} 53 | \item{\code{diag}}{convergence info for each iteration\describe{ 54 | \item{\code{all_resids}}{residuals} 55 | \item{\code{all_rel_resid_deltas}}{residual deltas relative to the current residual} 56 | \item{\code{all_rel_resids}}{residuals relative to \code{sqrt(||tnsr||)}} 57 | }}} 58 | } 59 | \description{ 60 | Decomposes a nonnegative tensor \code{tnsr} into a core tensor \code{Z} (optionally constrained to be nonnegative) and sparse nonnegative factor matrices \code{U[n]}.
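This man page ships without an examples section; the following is a minimal usage sketch mirroring tests/test_tucker_nonneg.R (the modes and ranks here are arbitrary):

    tnsr <- rand_tensor(c(20,20,20))
    tnsr@data <- pmax(tnsr@data, 0)       # make the input nonnegative
    fit <- tucker.nonneg(tnsr, ranks = c(3,3,3), tol = 1e-4, max_iter = 100)
    fnorm(fit$est - tnsr) / fnorm(tnsr)   # relative residual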
61 | } 62 | \details{ 63 | The function uses the alternating proximal gradient method to solve the following optimization problem: 64 | \deqn{\min 0.5 \|tnsr - Z \times_1 U_1 \ldots \times_K U_K \|_F^2 + 65 | \sum_{n=1}^{K} \lambda_n \|U_n\|_1 + \lambda_{K+1} \|Z\|_1, \;\textit{where}\; Z \geq 0, \, U_i \geq 0.} 66 | If \code{core_nonneg} is \code{FALSE}, the core tensor \code{Z} is allowed to have negative 67 | elements and the \eqn{z_{i,j}=\max(0,z_{i,j}-\lambda_{K+1}/L_{K+1})} rule is replaced by \eqn{z_{i,j}=\mathrm{sign}(z_{i,j})\max(0,|z_{i,j}|-\lambda_{K+1}/L_{K+1})}. 68 | The method stops if either the relative improvement of the error is below the tolerance \code{tol} for 3 consecutive iterations, or 69 | both the relative error improvement and the relative error (w.r.t. the \code{tnsr} norm) are below the tolerance. 70 | Otherwise it stops when the maximum number of iterations or the time limit is reached. 71 | } 72 | \note{ 73 | The implementation is based on the ntds() MATLAB code by Yangyang Xu and Wotao Yin. 74 | } 75 | \references{ 76 | Y. Xu, "Alternating proximal gradient method for sparse nonnegative Tucker decomposition", Math. Prog. Comp., 7, 39-70, 2015. 77 | } 78 | \seealso{ 79 | \code{\link{tucker}} 80 | 81 | \url{http://www.caam.rice.edu/~optimization/bcu/} 82 | } 83 | 84 | -------------------------------------------------------------------------------- /man/Tensor-class.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rTensor_Class.R 3 | \docType{class} 4 | \name{Tensor-class} 5 | \alias{Tensor} 6 | \alias{Tensor-class} 7 | \title{S4 Class for a Tensor} 8 | \description{ 9 | An S4 class for a tensor with an arbitrary number of modes. The Tensor class extends the base 'array' class to include additional tensor manipulation (folding, unfolding, reshaping, subsetting) as well as a formal class definition that enables more explicit tensor algebra. 10 | } 11 | \details{ 12 | {This can be seen as a wrapper class to the base \code{array} class. While it is possible to create an instance using \code{new}, it is also possible to do so by passing the data into \code{\link{as.tensor}}. 13 | 14 | Each slot of a Tensor instance can be obtained using \code{@}. 15 | 16 | The following methods are overloaded for the Tensor class: \code{\link{dim-methods}}, \code{\link{head-methods}}, \code{\link{tail-methods}}, \code{\link{print-methods}}, \code{\link{show-methods}}, element-wise array operations, array subsetting (extract via `['), array subset replacing (replace via `[<-'), and \code{\link{tperm-methods}}, which is a wrapper around the base \code{aperm} method. 17 | 18 | To sum across any one mode of a tensor, use the function \code{\link{modeSum-methods}}. To compute the mean across any one mode, use \code{\link{modeMean-methods}}. 19 | 20 | You can always unfold any Tensor into a matrix, and the \code{\link{unfold-methods}}, \code{\link{k_unfold-methods}}, and \code{\link{matvec-methods}} methods are for that purpose. The output can be kept as a Tensor with 2 modes or a \code{matrix} object. The vectorization function is also provided as \code{vec}. See the attached vignette for a visualization of the different unfoldings. 21 | 22 | Conversion from \code{array}/\code{matrix} to Tensor is facilitated via \code{\link{as.tensor}}. To convert from a Tensor instance, simply invoke \code{@data}.
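The round trip is exact (illustrative check):

    arr <- array(rnorm(24), dim = c(2,3,4))
    tnsr <- as.tensor(arr)
    identical(tnsr@data, arr)   # TRUE: @data recovers the underlying array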
23 | 24 | The Frobenius norm of the Tensor is given by \code{\link{fnorm-methods}}, while the inner product between two Tensors (of equal modes) is given by \code{\link{innerProd-methods}}. You can also sum through any one mode to obtain the K-1 Tensor sum using \code{\link{modeSum-methods}}. \code{\link{modeMean-methods}} provides similar functionality to obtain the K-1 Tensor mean. These are primarily meant to be used internally but may be useful in doing statistics with Tensors. 25 | 26 | For Tensors with 3 modes, we also overloaded \code{t} (transpose) as defined by Kilmer et al. (2013). See \code{\link{t-methods}}. 27 | 28 | To create a Tensor with i.i.d. random normal(0, 1) entries, see \code{\link{rand_tensor}}. 29 | } 30 | } 31 | \note{ 32 | All of the decompositions and regression models in this package require a Tensor input. 33 | } 34 | \section{Slots}{ 35 | 36 | \describe{ 37 | \item{num_modes}{number of modes (integer)} 38 | \item{modes}{vector of modes (integer), aka sizes/extents/dimensions} 39 | \item{data}{actual data of the tensor, which can be 'array' or 'vector'} 40 | } 41 | } 42 | 43 | \section{Methods}{ 44 | 45 | \describe{ 46 | \item{[}{\code{signature(tnsr = "Tensor")}: ... } 47 | \item{[<-}{\code{signature(tnsr = "Tensor")}: ... } 48 | \item{matvec}{\code{signature(tnsr = "Tensor")}: ... } 49 | \item{dim}{\code{signature(tnsr = "Tensor")}: ... } 50 | \item{fnorm}{\code{signature(tnsr = "Tensor")}: ... } 51 | \item{head}{\code{signature(tnsr = "Tensor")}: ... } 52 | \item{initialize}{\code{signature(.Object = "Tensor")}: ... } 53 | \item{innerProd}{\code{signature(tnsr1 = "Tensor", tnsr2 = "Tensor")}: ... } 54 | \item{modeMean}{\code{signature(tnsr = "Tensor")}: ... } 55 | \item{modeSum}{\code{signature(tnsr = "Tensor")}: ... } 56 | \item{Ops}{\code{signature(e1 = "array", e2 = "Tensor")}: ... } 57 | \item{Ops}{\code{signature(e1 = "numeric", e2 = "Tensor")}: ... } 58 | \item{Ops}{\code{signature(e1 = "Tensor", e2 = "array")}: ... } 59 | \item{Ops}{\code{signature(e1 = "Tensor", e2 = "numeric")}: ... } 60 | \item{Ops}{\code{signature(e1 = "Tensor", e2 = "Tensor")}: ... } 61 | \item{print}{\code{signature(tnsr = "Tensor")}: ... } 62 | \item{k_unfold}{\code{signature(tnsr = "Tensor")}: ... } 63 | \item{show}{\code{signature(tnsr = "Tensor")}: ... } 64 | \item{t}{\code{signature(tnsr = "Tensor")}: ... } 65 | \item{tail}{\code{signature(tnsr = "Tensor")}: ... } 66 | \item{unfold}{\code{signature(tnsr = "Tensor")}: ... } 67 | \item{tperm}{\code{signature(tnsr = "Tensor")}: ...} 68 | \item{image}{\code{signature(tnsr = "Tensor")}: ...} 69 | } 70 | } 71 | \examples{ 72 | tnsr <- rand_tensor() 73 | class(tnsr) 74 | tnsr 75 | print(tnsr) 76 | dim(tnsr) 77 | tnsr@num_modes 78 | tnsr@data 79 | } 80 | \author{ 81 | James Li \email{jamesyili@gmail.com} 82 | } 83 | \references{ 84 | M. Kilmer, K. Braman, N. Hao, and R. Hoover, "Third-order tensors as operators on matrices: a theoretical and computational framework with applications in imaging". SIAM Journal on Matrix Analysis and Applications 2013. 85 | } 86 | \seealso{ 87 | \code{\link{as.tensor}} 88 | } 89 | 90 | -------------------------------------------------------------------------------- /R/rTensor_Misc.R: -------------------------------------------------------------------------------- 1 | ###Functions that operate on Matrices and Arrays 2 | 3 | #'List Hadamard Product 4 | #' 5 | #'Returns the Hadamard (element-wise) product from a list of matrices or vectors.
Commonly used for n-mode products and various Tensor decompositions. 6 | #'@name hamadard_list 7 | #'@rdname hamadard_list 8 | #'@aliases hamadard_list 9 | #'@export 10 | #'@param L list of matrices or vectors 11 | #'@return matrix that is the Hadamard product 12 | #'@seealso \code{\link{kronecker_list}}, \code{\link{khatri_rao_list}} 13 | #'@note The modes/dimensions of each element in the list must match. 14 | #'@examples 15 | #'lizt <- list('mat1' = matrix(runif(40),ncol=4), 16 | #' 'mat2' = matrix(runif(40),ncol=4), 17 | #' 'mat3' = matrix(runif(40),ncol=4)) 18 | #'dim(hamadard_list(lizt)) 19 | hamadard_list <- function(L){ 20 | isvecORmat <- function(x){is.matrix(x) || is.vector(x)} 21 | stopifnot(all(unlist(lapply(L,isvecORmat)))) 22 | retmat <- L[[1]] 23 | for (i in seq_along(L)[-1]){ #seq_along guards against a length-1 list 24 | retmat <- retmat*L[[i]] 25 | } 26 | retmat 27 | } 28 | 29 | #'List Kronecker Product 30 | #' 31 | #'Returns the Kronecker product from a list of matrices or vectors. Commonly used for n-mode products and various Tensor decompositions. 32 | #'@name kronecker_list 33 | #'@rdname kronecker_list 34 | #'@aliases kronecker_list 35 | #'@export 36 | #'@param L list of matrices or vectors 37 | #'@return matrix that is the Kronecker product 38 | #'@seealso \code{\link{hamadard_list}}, \code{\link{khatri_rao_list}}, \code{\link{kronecker}} 39 | #'@examples 40 | #'smalllizt <- list('mat1' = matrix(runif(12),ncol=4), 41 | #' 'mat2' = matrix(runif(12),ncol=4), 42 | #' 'mat3' = matrix(runif(12),ncol=4)) 43 | #'dim(kronecker_list(smalllizt)) 44 | kronecker_list <- function(L){ 45 | isvecORmat <- function(x){is.matrix(x) || is.vector(x)} 46 | stopifnot(all(unlist(lapply(L,isvecORmat)))) 47 | retmat <- L[[1]] 48 | for(i in seq_along(L)[-1]){ #seq_along guards against a length-1 list 49 | retmat <- kronecker(retmat,L[[i]]) 50 | } 51 | retmat 52 | } 53 | 54 | #'Khatri-Rao Product 55 | #' 56 | #'Returns the Khatri-Rao (column-wise Kronecker) product of two matrices. If the inputs are vectors then this is the same as the Kronecker product. 57 | #'@name khatri_rao 58 | #'@rdname khatri_rao 59 | #'@aliases khatri_rao 60 | #'@export 61 | #'@param x first matrix 62 | #'@param y second matrix 63 | #'@return matrix that is the Khatri-Rao product 64 | #'@seealso \code{\link{kronecker}}, \code{\link{khatri_rao_list}} 65 | #'@note The number of columns must match in the two inputs. 66 | #'@examples 67 | #'dim(khatri_rao(matrix(runif(12),ncol=4),matrix(runif(12),ncol=4))) 68 | khatri_rao <- function(x,y){ 69 | if (!(is.matrix(x)&&is.matrix(y))) stop("Arguments must be matrices.") 70 | if (dim(x)[2]!=dim(y)[2]) stop("Arguments must have same number of columns.") 71 | retmat <- matrix(0,nrow=dim(x)[1]*dim(y)[1],ncol=dim(x)[2]) 72 | for (j in 1:ncol(retmat)) retmat[,j] <- kronecker(x[,j],y[,j]) 73 | retmat 74 | } 75 | 76 | #'List Khatri-Rao Product 77 | #' 78 | #'Returns the Khatri-Rao product from a list of matrices. Commonly used for n-mode products and various Tensor decompositions. 79 | #'@name khatri_rao_list 80 | #'@rdname khatri_rao_list 81 | #'@aliases khatri_rao_list 82 | #'@export 83 | #'@param L list of matrices 84 | #'@param reverse whether or not to reverse the order 85 | #'@return matrix that is the Khatri-Rao product 86 | #'@seealso \code{\link{khatri_rao}} 87 | #'@note The number of columns must match in every element of the input list.
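#'As a quick illustration (editorial sketch, not shipped code), the list version
#'agrees with chaining pairwise products, by associativity of the Kronecker product:
#'A <- matrix(runif(12),ncol=4); B <- matrix(runif(8),ncol=4); C <- matrix(runif(16),ncol=4)
#'all.equal(khatri_rao_list(list(A,B,C)), khatri_rao(khatri_rao(A,B),C)) #TRUE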
88 | #'@examples 89 | #'smalllizt <- list('mat1' = matrix(runif(12),ncol=4), 90 | #' 'mat2' = matrix(runif(12),ncol=4), 91 | #' 'mat3' = matrix(runif(12),ncol=4)) 92 | #'dim(khatri_rao_list(smalllizt)) 93 | khatri_rao_list <- function(L,reverse=FALSE){ 94 | stopifnot(all(unlist(lapply(L,is.matrix)))) 95 | ncols <- unlist(lapply(L,ncol)) 96 | stopifnot(length(unique(ncols))==1) 97 | ncols <- ncols[1] 98 | nrows <- unlist(lapply(L,nrow)) 99 | retmat <- matrix(0,nrow=prod(nrows),ncol=ncols) 100 | if (reverse) L <- rev(L) 101 | for(j in 1:ncols){ 102 | Lj <- lapply(L,function(x) x[,j]) 103 | retmat[,j] <- kronecker_list(Lj) 104 | } 105 | retmat 106 | } 107 | 108 | #'Tensor Times Matrix (m-Mode Product) 109 | #' 110 | #'Contracted (m-Mode) product between a Tensor of an arbitrary number of modes and a matrix. The result is folded back into a Tensor. 111 | #'@name ttm 112 | #'@rdname ttm 113 | #'@aliases ttm 114 | #'@details By definition, \code{rs_unfold(ttm(tnsr,mat),m) = mat\%*\%rs_unfold(tnsr,m)}, so the number of columns in \code{mat} must match the \code{m}th mode of \code{tnsr}. For the math on the m-Mode Product, see Kolda and Bader (2009). 115 | #'@export 116 | #'@param tnsr Tensor object with K modes 117 | #'@param mat input matrix with the same number of columns as the \code{m}th mode of \code{tnsr} 118 | #'@param m the mode to contract on 119 | #'@param transpose whether \code{mat} should be transposed before multiplication 120 | #'@return a Tensor object with K modes 121 | #'@seealso \code{\link{ttl}}, \code{\link{rs_unfold-methods}} 122 | #'@note The \code{m}th mode of \code{tnsr} must match the number of columns in \code{mat}. By default, the returned Tensor does not drop any modes equal to 1. 123 | #'@references T. Kolda, B. Bader, "Tensor Decompositions and Applications". SIAM Review, 2009. 124 | #'@examples 125 | #'tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 126 | #'mat <- matrix(runif(50),ncol=5) 127 | #'ttm(tnsr,mat,m=3) 128 | ttm<-function(tnsr,mat,m=NULL,transpose=FALSE){ 129 | stopifnot(is.matrix(mat)) 130 | if(is.null(m)) stop("m must be specified") 131 | mat_dims <- dim(mat) 132 | if ( transpose ) mat_dims <- rev(mat_dims) 133 | modes_in <- tnsr@modes 134 | stopifnot(modes_in[m]==mat_dims[2]) 135 | modes_out <- modes_in 136 | modes_out[m] <- mat_dims[1] 137 | tnsr_m <- rs_unfold(tnsr,m=m)@data 138 | retarr_m <- if (transpose) crossprod(mat,tnsr_m) else mat%*%tnsr_m 139 | rs_fold(retarr_m,m=m,modes=modes_out) 140 | } 141 | 142 | #'Tensor Times List 143 | #' 144 | #'Contracted (m-Mode) product between a Tensor of an arbitrary number of modes and a list of matrices. The result is folded back into a Tensor. 145 | #'@name ttl 146 | #'@rdname ttl 147 | #'@aliases ttl 148 | #'@details Performs \code{ttm} repeatedly for a single Tensor and a list of matrices on multiple modes. For instance, suppose we want to multiply a Tensor object \code{tnsr} with three matrices \code{mat1}, \code{mat2}, \code{mat3} on modes 1, 2, and 3. We could do \code{ttm(ttm(ttm(tnsr,mat1,1),mat2,2),mat3,3)}, or we could do \code{ttl(tnsr,list(mat1,mat2,mat3),c(1,2,3))}, as the sketch below verifies. The order of the matrices in the list should match the order of the modes. This is a common operation for various Tensor decompositions such as CP and Tucker. For the math on the m-Mode Product, see Kolda and Bader (2009).
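#'The equivalence is easy to verify (illustrative sketch, not shipped code):
#'tnsr <- rand_tensor(c(3,4,5))
#'mat1 <- matrix(runif(6),ncol=3); mat2 <- matrix(runif(8),ncol=4); mat3 <- matrix(runif(10),ncol=5)
#'out1 <- ttm(ttm(ttm(tnsr,mat1,1),mat2,2),mat3,3)
#'out2 <- ttl(tnsr,list(mat1,mat2,mat3),ms=c(1,2,3))
#'all.equal(out1@@data, out2@@data) #TRUE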
149 | #'@export 150 | #'@param tnsr Tensor object with K modes 151 | #'@param list_mat a list of matrices 152 | #'@param ms a vector of modes to contract on (order should match the order of \code{list_mat}) 153 | #'@param transpose whether the matrices should be transposed before multiplication 154 | #'@return Tensor object with K modes 155 | #'@seealso \code{\link{ttm}} 156 | #'@note The returned Tensor does not drop any modes equal to 1. 157 | #'@references T. Kolda, B. Bader, "Tensor Decompositions and Applications". SIAM Review, 2009. 158 | #'@examples 159 | #'tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 160 | #'lizt <- list('mat1' = matrix(runif(30),ncol=3), 161 | #' 'mat2' = matrix(runif(40),ncol=4), 162 | #' 'mat3' = matrix(runif(50),ncol=5)) 163 | #'ttl(tnsr,lizt,ms=c(1,2,3)) 164 | ttl<-function(tnsr,list_mat,ms=NULL,transpose=FALSE){ 165 | if(is.null(ms)||!is.vector(ms)) stop("m modes must be specified as a vector") 166 | if(length(ms)!=length(list_mat)) stop("m modes length does not match list_mat length") 167 | num_mats <- length(list_mat) 168 | if(length(unique(ms))!=num_mats) warning("consider pre-multiplying matrices for the same m for speed") 169 | mat_nrows <- vector("list", num_mats) 170 | mat_ncols <- vector("list", num_mats) 171 | for(i in 1:num_mats){ 172 | mat <- list_mat[[i]] 173 | m <- ms[i] 174 | mat_dims <- dim(mat) 175 | if (transpose) mat_dims <- rev(mat_dims) 176 | modes_in <- tnsr@modes 177 | stopifnot(modes_in[m]==mat_dims[2]) 178 | modes_out <- modes_in 179 | modes_out[m] <- mat_dims[1] 180 | tnsr_m <- rs_unfold(tnsr,m=m)@data 181 | retarr_m <- if (transpose) crossprod(mat, tnsr_m) else mat%*%tnsr_m 182 | tnsr <- rs_fold(retarr_m,m=m,modes=modes_out) 183 | } 184 | tnsr 185 | } 186 | 187 | #'Tensor Multiplication (T-MULT) 188 | #' 189 | #'Implements T-MULT based on block circulant matrices (Kilmer et al. 2013) for 3-tensors. 190 | #' 191 | #'@details Uses the Fast Fourier Transform (FFT) speed up suggested by Kilmer et al. 2013 instead of explicitly constructing the block circulant matrix. For the mathematical details of T-MULT, see Kilmer et al. (2013). 192 | #'@export 193 | #'@name t_mult 194 | #'@rdname t_mult 195 | #'@aliases t_mult 196 | #'@param x a 3-tensor 197 | #'@param y another 3-tensor 198 | #'@return tensor product between \code{x} and \code{y} 199 | #'@note This only works (so far) between 3-Tensors. 200 | #'@references M. Kilmer, K. Braman, N. Hao, and R. Hoover, "Third-order tensors as operators on matrices: a theoretical and computational framework with applications in imaging". SIAM Journal on Matrix Analysis and Applications 2013. 201 | #'@examples 202 | #'tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 203 | #'tnsr2 <- new("Tensor",3L,c(4L,3L,5L),data=runif(60)) 204 | #'t_mult(tnsr, tnsr2) 205 | t_mult <- function(x,y){ 206 | if((x@num_modes>3)||(y@num_modes>3)) stop("Tensor Multiplication currently only implemented for 3-Tensors") 207 | modes_x <- x@modes 208 | modes_y <- y@modes 209 | if(modes_x[2]!=modes_y[1]) stop("Mode 2 of x and Mode 1 of y must match") 210 | n3 <- modes_x[3] 211 | if(n3!=modes_y[3]) stop("Modes 3 of x and y must match") 212 | #fft's for x and y 213 | fft_x <- aperm(apply(x@data,MARGIN=1:2,fft),c(2,3,1)) 214 | fft_y <- aperm(apply(y@data,MARGIN=1:2,fft),c(2,3,1)) 215 | #multiply the faces in the Fourier domain (TODO: find a way to avoid this explicit loop)
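  # Editorial note (comments only, assumptions follow Kilmer et al. 2013):
  # the DFT along the third mode block-diagonalizes the block circulant
  # unfolding, so the t-product reduces to independent matrix products of the
  # Fourier-domain frontal slices, followed by an inverse FFT along the same mode.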
216 | fft_ret <- array(0,dim=c(modes_x[1],modes_y[2],n3)) 217 | for(i in 1:n3){ 218 | first <- fft_x[,,i,drop=FALSE] 219 | second <- fft_y[,,i,drop=FALSE] 220 | fft_ret[,,i]<-matrix(first,nrow=dim(first)[1])%*%matrix(second,nrow=dim(second)[1]) 221 | } 222 | #ifft and return as Tensor 223 | ifft <- function(x){suppressWarnings(as.numeric(fft(x,inverse=TRUE))/length(x))} 224 | as.tensor(aperm(apply(fft_ret,MARGIN=1:2,ifft),c(2,3,1)),drop=FALSE) 225 | } 226 | 227 | #####Special Tensors 228 | 229 | #'Tensor with Random Entries 230 | #' 231 | #'Generate a Tensor with specified modes with i.i.d. normal(0,1) entries. 232 | #'@export 233 | #'@name rand_tensor 234 | #'@rdname rand_tensor 235 | #'@aliases rand_tensor 236 | #'@param modes the modes of the output Tensor 237 | #'@param drop whether or not modes equal to 1 should be dropped 238 | #'@return a Tensor object with modes given by \code{modes} 239 | #'@note Default \code{rand_tensor()} generates a 3-Tensor with modes \code{c(3,4,5)}. 240 | #'@examples 241 | #'rand_tensor() 242 | #'rand_tensor(c(4,4,4)) 243 | #'rand_tensor(c(10,2,1),TRUE) 244 | rand_tensor <- function(modes=c(3,4,5),drop=FALSE){ 245 | as.tensor(array(rnorm(prod(modes)), dim=modes),drop=drop) 246 | } 247 | 248 | ###Matrix Foldings 249 | 250 | #'General Folding of Matrix 251 | #' 252 | #'General folding of a matrix into a Tensor. This is designed to be the inverse function to \code{\link{unfold-methods}}, with the same ordering of the indices. This amounts to the following: if we were to unfold a Tensor using a set of \code{row_idx} and \code{col_idx}, then we can fold the resulting matrix back into the original Tensor using the same \code{row_idx} and \code{col_idx}. 253 | #'@export 254 | #'@details This function uses \code{aperm} as the primary workhorse. 255 | #'@name fold 256 | #'@rdname fold 257 | #'@aliases fold 258 | #'@param mat matrix to be folded into a Tensor 259 | #'@param row_idx the indices of the modes that are mapped onto the row space 260 | #'@param col_idx the indices of the modes that are mapped onto the column space 261 | #'@param modes the modes of the output Tensor 262 | #'@return Tensor object with modes given by \code{modes} 263 | #'@seealso \code{\link{unfold-methods}}, \code{\link{k_fold}}, \code{\link{unmatvec}} 264 | #'@references T. Kolda, B. Bader, "Tensor Decompositions and Applications". SIAM Review, 2009.
265 | #'@examples 266 | #'tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 267 | #'matT3<-unfold(tnsr,row_idx=2,col_idx=c(3,1)) 268 | #'identical(fold(matT3,row_idx=2,col_idx=c(3,1),modes=c(3,4,5)),tnsr) 269 | fold <- function(mat, row_idx = NULL, col_idx = NULL, modes=NULL){ 270 | #checks 271 | rs <- row_idx 272 | cs <- col_idx 273 | if(is.null(rs)||is.null(cs)) stop("row space and col space indices must be specified") 274 | if(is.null(modes)) stop("Tensor modes must be specified") 275 | if(!is(mat,"Tensor")){ 276 | if(!is.matrix(mat)) stop("mat must be of class 'matrix'") 277 | }else{ 278 | stopifnot(mat@num_modes==2) 279 | mat <- mat@data 280 | } 281 | num_modes <- length(modes) 282 | stopifnot(num_modes==length(rs)+length(cs)) 283 | mat_modes <- dim(mat) 284 | if((mat_modes[1]!=prod(modes[rs])) || (mat_modes[2]!=prod(modes[cs]))) stop("matrix nrow/ncol does not match Tensor modes") 285 | #rearranges into array 286 | iperm <- match(1:num_modes,c(rs,cs)) 287 | as.tensor(aperm(array(mat,dim=c(modes[rs],modes[cs])),iperm)) 288 | } 289 | 290 | #'k-mode Folding of Matrix 291 | #' 292 | #'k-mode folding of a matrix into a Tensor. This is the inverse function to \code{k_unfold} along mode \code{m}. In particular, \code{k_fold(k_unfold(tnsr, m),m,dim(tnsr))} will result in the original Tensor. 293 | #'@export 294 | #'@details This is a wrapper function to \code{\link{fold}}. 295 | #'@name k_fold 296 | #'@rdname k_fold 297 | #'@aliases k_fold 298 | #'@param mat matrix to be folded into a Tensor 299 | #'@param m the index of the mode that is mapped onto the row indices 300 | #'@param modes the modes of the output Tensor 301 | #'@return Tensor object with modes given by \code{modes} 302 | #'@references T. Kolda, B. Bader, "Tensor Decompositions and Applications". SIAM Review, 2009. 303 | #'@seealso \code{\link{k_unfold-methods}}, \code{\link{fold}}, \code{\link{unmatvec}} 304 | #'@examples 305 | #'tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 306 | #'matT2<-k_unfold(tnsr,m=2) 307 | #'identical(k_fold(matT2,m=2,modes=c(3,4,5)),tnsr) 308 | k_fold <- function(mat,m=NULL,modes=NULL){ 309 | if(is.null(m)) stop("mode m must be specified") 310 | if(is.null(modes)) stop("Tensor modes must be specified") 311 | num_modes <- length(modes) 312 | rs <- m 313 | cs <- (1:num_modes)[-m] 314 | fold(mat,row_idx=rs,col_idx=cs,modes=modes) 315 | } 316 | 317 | #'Unmatvec Folding of Matrix 318 | #' 319 | #'The inverse operation to \code{\link{matvec-methods}}, turning a matrix into a Tensor. For a full account of matrix folding/unfolding operations, consult Kolda and Bader (2009). 320 | #'@export 321 | #'@name unmatvec 322 | #'@rdname unmatvec 323 | #'@aliases unmatvec 324 | #'@param mat matrix to be folded into a Tensor 325 | #'@param modes the modes of the output Tensor 326 | #'@return Tensor object with modes given by \code{modes} 327 | #'@references T. Kolda, B. Bader, "Tensor Decompositions and Applications". SIAM Review, 2009.
328 | #'@seealso \code{\link{matvec-methods}}, \code{\link{fold}}, \code{\link{k_fold}} 329 | #'@examples 330 | #'tnsr <- new("Tensor",3L,c(3L,4L,5L),data=runif(60)) 331 | #'matT1<-matvec(tnsr) 332 | #'identical(unmatvec(matT1,modes=c(3,4,5)),tnsr) 333 | unmatvec <- function(mat,modes=NULL){ 334 | if(is.null(modes)) stop("Tensor modes must be specified") 335 | num_modes <- length(modes) 336 | cs <- 2 337 | rs <- (1:num_modes)[-2] 338 | fold(mat,row_idx=rs,col_idx=cs,modes=modes) 339 | } 340 | 341 | #'Row Space Folding of Matrix 342 | #' 343 | #'DEPRECATED. Please see \code{\link{k_fold}}. 344 | #'@export 345 | #'@param mat matrix to be folded 346 | #'@param m the mode corresponding to rs_unfold 347 | #'@param modes the original modes of the tensor 348 | #'@name rs_fold 349 | #'@rdname rs_fold 350 | #'@aliases rs_fold 351 | rs_fold <- function(mat,m=NULL,modes=NULL){ 352 | if(is.null(m)) stop("mode m must be specified") 353 | if(is.null(modes)) stop("Tensor modes must be specified") 354 | num_modes <- length(modes) 355 | rs <- m 356 | cs <- (1:num_modes)[-m] 357 | fold(mat,row_idx=rs,col_idx=cs,modes=modes) 358 | } 359 | 360 | 361 | #'Column Space Folding of Matrix 362 | #' 363 | #'DEPRECATED. Please see \code{\link{unmatvec}} 364 | #'@export 365 | #'@param mat matrix to be folded 366 | #'@param m the mode corresponding to cs_unfold 367 | #'@param modes the original modes of the tensor 368 | #'@name cs_fold 369 | #'@rdname cs_fold 370 | #'@aliases cs_fold 371 | cs_fold <- function(mat,m=NULL,modes=NULL){ 372 | if(is.null(m)) stop("mode m must be specified") 373 | if(is.null(modes)) stop("Tensor modes must be specified") 374 | num_modes <- length(modes) 375 | cs <- m 376 | rs <- (1:num_modes)[-m] 377 | fold(mat,row_idx=rs,col_idx=cs,modes=modes) 378 | } 379 | 380 | ###Invisible Functions (undocumented) 381 | #Creates a superdiagonal tensor 382 | .superdiagonal_tensor <- function(num_modes,len,elements=1L){ 383 | modes <- rep(len,num_modes) 384 | arr <- array(0, dim = modes) 385 | if(length(elements)==1) elements <- rep(elements,len) 386 | for (i in 1:len){ 387 | txt <- paste("arr[",paste(rep("i", num_modes),collapse=","),"] <- ", elements[i],sep="") 388 | eval(parse(text=txt)) 389 | } 390 | as.tensor(arr) 391 | } 392 | #3-Tensor Kilmer et al. (2013) identity 393 | .identity_tensor3d <- function(modes){ 394 | if(length(modes)!=3L) stop("identity tensor only implemented for 3d so far") 395 | n <- modes[1] 396 | stopifnot(n==modes[2]) 397 | arr <- array(0,dim=modes) 398 | arr[,,1] <- diag(1,n,n) 399 | as.tensor(arr) 400 | } 401 | #Simple timing functions 402 | .tic <- function (gcFirst = TRUE,overwrite=TRUE) { 403 | if(gcFirst) gc(FALSE) 404 | tic <- proc.time() 405 | ticExists <- ".tic"%in%ls(all.names=TRUE,envir=baseenv()) 406 | if(overwrite||!ticExists){ 407 | assign(".tic", tic, envir=baseenv()) 408 | } 409 | else{ 410 | stop("Another timing function running") 411 | } 412 | invisible(tic) 413 | } 414 | .toc <- function (pr=FALSE) { 415 | toc <- proc.time() 416 | tic <- get(".tic", envir=baseenv()) 417 | if(pr) print(toc - tic) 418 | invisible(toc - tic) 419 | } -------------------------------------------------------------------------------- /R/rTensor_Class.R: -------------------------------------------------------------------------------- 1 | ###Class Definition 2 | 3 | #'S4 Class for a Tensor 4 | #' 5 | #'An S4 class for a tensor with an arbitrary number of modes.
The Tensor class extends the base 'array' class to include additional tensor manipulation (folding, unfolding, reshaping, subsetting) as well as a formal class definition that enables more explicit tensor algebra. 6 | #' 7 | #'@section Slots: 8 | #' \describe{ 9 | #' \item{num_modes}{number of modes (integer)} 10 | #' \item{modes}{vector of modes (integer), aka sizes/extents/dimensions} 11 | #' \item{data}{actual data of the tensor, which can be 'array' or 'vector'} 12 | #' } 13 | #'@name Tensor-class 14 | #'@rdname Tensor-class 15 | #'@aliases Tensor Tensor-class 16 | #'@docType class 17 | #'@exportClass Tensor 18 | #'@section Methods: 19 | #' \describe{ 20 | #' \item{[}{\code{signature(tnsr = "Tensor")}: ... } 21 | #' \item{[<-}{\code{signature(tnsr = "Tensor")}: ... } 22 | #' \item{matvec}{\code{signature(tnsr = "Tensor")}: ... } 23 | #' \item{dim}{\code{signature(tnsr = "Tensor")}: ... } 24 | #' \item{fnorm}{\code{signature(tnsr = "Tensor")}: ... } 25 | #' \item{head}{\code{signature(tnsr = "Tensor")}: ... } 26 | #' \item{initialize}{\code{signature(.Object = "Tensor")}: ... } 27 | #' \item{innerProd}{\code{signature(tnsr1 = "Tensor", tnsr2 = "Tensor")}: ... } 28 | #' \item{modeMean}{\code{signature(tnsr = "Tensor")}: ... } 29 | #' \item{modeSum}{\code{signature(tnsr = "Tensor")}: ... } 30 | #' \item{Ops}{\code{signature(e1 = "array", e2 = "Tensor")}: ... } 31 | #' \item{Ops}{\code{signature(e1 = "numeric", e2 = "Tensor")}: ... } 32 | #' \item{Ops}{\code{signature(e1 = "Tensor", e2 = "array")}: ... } 33 | #' \item{Ops}{\code{signature(e1 = "Tensor", e2 = "numeric")}: ... } 34 | #' \item{Ops}{\code{signature(e1 = "Tensor", e2 = "Tensor")}: ... } 35 | #' \item{print}{\code{signature(tnsr = "Tensor")}: ... } 36 | #' \item{k_unfold}{\code{signature(tnsr = "Tensor")}: ... } 37 | #' \item{show}{\code{signature(tnsr = "Tensor")}: ... } 38 | #' \item{t}{\code{signature(tnsr = "Tensor")}: ... } 39 | #' \item{tail}{\code{signature(tnsr = "Tensor")}: ... } 40 | #' \item{unfold}{\code{signature(tnsr = "Tensor")}: ... } 41 | #' \item{tperm}{\code{signature(tnsr = "Tensor")}: ...} 42 | #' \item{image}{\code{signature(tnsr = "Tensor")}: ...} 43 | #' } 44 | #'@author James Li \email{jamesyili@@gmail.com} 45 | #'@details {This can be seen as a wrapper class to the base \code{array} class. While it is possible to create an instance using \code{new}, it is also possible to do so by passing the data into \code{\link{as.tensor}}. 46 | #' 47 | #'Each slot of a Tensor instance can be obtained using \code{@@}. 48 | #' 49 | #'The following methods are overloaded for the Tensor class: \code{\link{dim-methods}}, \code{\link{head-methods}}, \code{\link{tail-methods}}, \code{\link{print-methods}}, \code{\link{show-methods}}, element-wise array operations, array subsetting (extract via `['), array subset replacing (replace via `[<-'), and \code{\link{tperm-methods}}, which is a wrapper around the base \code{aperm} method. 50 | #' 51 | #'To sum across any one mode of a tensor, use the function \code{\link{modeSum-methods}}. To compute the mean across any one mode, use \code{\link{modeMean-methods}}. 52 | #' 53 | #'You can always unfold any Tensor into a matrix, and the \code{\link{unfold-methods}}, \code{\link{k_unfold-methods}}, and \code{\link{matvec-methods}} methods are for that purpose. The output can be kept as a Tensor with 2 modes or a \code{matrix} object. The vectorization function is also provided as \code{vec}. See the attached vignette for a visualization of the different unfoldings.
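#'As a quick orientation (illustrative sketch, not shipped code), the three
#'unfoldings of a 2 x 3 x 4 tensor have these shapes:
#'tnsr <- rand_tensor(c(2,3,4))
#'dim(k_unfold(tnsr,m=2)) #3 8
#'dim(matvec(tnsr)) #8 3
#'dim(unfold(tnsr,row_idx=c(1,2),col_idx=3)) #6 4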
54 | #' 55 | #'Conversion from \code{array}/\code{matrix} to Tensor is facilitated via \code{\link{as.tensor}}. To convert from a Tensor instance, simply invoke \code{@@data}. 56 | #' 57 | #'The Frobenius norm of the Tensor is given by \code{\link{fnorm-methods}}, while the inner product between two Tensors (of equal modes) is given by \code{\link{innerProd-methods}}. You can also sum through any one mode to obtain the K-1 Tensor sum using \code{\link{modeSum-methods}}. \code{\link{modeMean-methods}} provides similar functionality to obtain the K-1 Tensor mean. These are primarily meant to be used internally but may be useful in doing statistics with Tensors. 58 | #' 59 | #'For Tensors with 3 modes, we also overloaded \code{t} (transpose) as defined by Kilmer et al. (2013). See \code{\link{t-methods}}. 60 | #' 61 | #'To create a Tensor with i.i.d. random normal(0, 1) entries, see \code{\link{rand_tensor}}. 62 | #'} 63 | #'@note All of the decompositions and regression models in this package require a Tensor input. 64 | #'@references M. Kilmer, K. Braman, N. Hao, and R. Hoover, "Third-order tensors as operators on matrices: a theoretical and computational framework with applications in imaging". SIAM Journal on Matrix Analysis and Applications 2013. 65 | #'@seealso \code{\link{as.tensor}} 66 | #'@examples 67 | #'tnsr <- rand_tensor() 68 | #'class(tnsr) 69 | #'tnsr 70 | #'print(tnsr) 71 | #'dim(tnsr) 72 | #'tnsr@@num_modes 73 | #'tnsr@@data 74 | setClass("Tensor", 75 | representation(num_modes = "integer", modes = "integer", data="array"), 76 | validity = function(object){ 77 | num_modes <- object@num_modes 78 | modes <- object@modes 79 | errors <- character() 80 | if (any(modes <= 0)){ 81 | msg <- "'modes' must contain strictly positive values; if any mode is 1, consider a smaller num_modes" 82 | errors <- c(errors, msg) 83 | } 84 | if(length(errors)==0) TRUE else errors 85 | }) 86 | 87 | ###Generic Definitions 88 | 89 | #'Tensor Unfolding 90 | #' 91 | #'Unfolds the tensor into a matrix, with the modes in \code{row_idx} mapped onto the rows and the modes in \code{col_idx} onto the columns. Note that \code{c(row_idx,col_idx)} must be a permutation of the mode indices \code{1:tnsr@@num_modes} (order doesn't matter). Within the rows and columns, the order of the unfolding is determined by the order of the modes. This convention is consistent with Kolda and Bader (2009). 92 | #' 93 | #'For Row Space Unfolding or m-mode Unfolding, see \code{\link{rs_unfold-methods}}. For Column Space Unfolding or matvec, see \code{\link{cs_unfold-methods}}. 94 | #' 95 | #'\code{\link{vec-methods}} returns the vectorization of the tensor. 96 | #' 97 | #'@details \code{unfold(tnsr,row_idx=NULL,col_idx=NULL)} 98 | #'@export 99 | #'@docType methods 100 | #'@name unfold-methods 101 | #'@rdname unfold-methods 102 | #'@aliases unfold unfold,Tensor-method 103 | #'@references T. Kolda, B. Bader, "Tensor Decompositions and Applications". SIAM Review, 2009.
104 | #'@param tnsr the Tensor instance 105 | #'@param row_idx the indices of the modes to map onto the row space 106 | #'@param col_idx the indices of the modes to map onto the column space 107 | #'@return matrix with \code{prod(tnsr@@modes[row_idx])} rows and \code{prod(tnsr@@modes[col_idx])} columns 108 | #'@seealso \code{\link{k_unfold-methods}} and \code{\link{matvec-methods}} 109 | #'@examples 110 | #'tnsr <- rand_tensor() 111 | #'matT3<-unfold(tnsr,row_idx=2,col_idx=c(3,1)) 112 | setGeneric(name="unfold", 113 | def=function(tnsr,row_idx,col_idx){standardGeneric("unfold")}) 114 | 115 | #'Tensor k-mode Unfolding 116 | #' 117 | #'Unfolding of a tensor by mapping the kth mode (specified through parameter \code{m}) onto the rows and all other modes onto the column space. This is the most common type of unfolding operation for Tucker decompositions and its variants. Also known as k-mode matricization. 118 | #' 119 | #'@docType methods 120 | #'@name k_unfold-methods 121 | #'@details \code{k_unfold(tnsr,m=NULL)} 122 | #'@export 123 | #'@rdname k_unfold-methods 124 | #'@aliases k_unfold k_unfold,Tensor-method 125 | ####aliases k_unfold,ANY-method 126 | #'@references T. Kolda, B. Bader, "Tensor Decompositions and Applications". SIAM Review, 2009. 127 | #'@param tnsr the Tensor instance 128 | #'@param m the index of the mode to unfold on 129 | #'@return matrix with \code{tnsr@@modes[m]} rows and \code{prod(tnsr@@modes[-m])} columns 130 | #'@seealso \code{\link{matvec-methods}} and \code{\link{unfold-methods}} 131 | #'@examples 132 | #'tnsr <- rand_tensor() 133 | #'matT2<-k_unfold(tnsr,m=2) 134 | setGeneric(name="k_unfold", 135 | def=function(tnsr,m){standardGeneric("k_unfold")}) 136 | 137 | #'Tensor Matvec Unfolding 138 | #' 139 | #'For 3-tensors only. Stacks the slices along the third mode. This is the prevalent unfolding for T-SVD and T-MULT based on block circulant matrices. 140 | #'@docType methods 141 | #'@name matvec-methods 142 | #'@details \code{matvec(tnsr)} 143 | #'@export 144 | #'@rdname matvec-methods 145 | #'@aliases matvec matvec,Tensor-method 146 | #'@references M. Kilmer, K. Braman, N. Hao, and R. Hoover, "Third-order tensors as operators on matrices: a theoretical and computational framework with applications in imaging". SIAM Journal on Matrix Analysis and Applications 2013. 147 | #'@param tnsr the Tensor instance 148 | #'@return matrix with \code{tnsr@@modes[1]*tnsr@@modes[3]} rows and \code{tnsr@@modes[2]} columns 149 | #'@seealso \code{\link{k_unfold-methods}} and \code{\link{unfold-methods}} 150 | #'@examples 151 | #'tnsr <- rand_tensor(c(2,3,4)) 152 | #'matT1<- matvec(tnsr) 153 | setGeneric(name="matvec", 154 | def=function(tnsr){standardGeneric("matvec")}) 155 | 156 | #'Tensor Row Space Unfolding 157 | #' 158 | #'DEPRECATED. Please see \code{\link{k_unfold-methods}} and \code{\link{unfold-methods}}. 159 | #' 160 | #'@docType methods 161 | #'@name rs_unfold-methods 162 | #'@details \code{rs_unfold(tnsr,m=NULL)} 163 | #'@param tnsr Tensor instance 164 | #'@param m mode to be unfolded on 165 | #'@export 166 | #'@rdname rs_unfold-methods 167 | #'@aliases rs_unfold rs_unfold,Tensor-method 168 | ####aliases rs_unfold,ANY-method 169 | setGeneric(name="rs_unfold", 170 | def=function(tnsr,m){standardGeneric("rs_unfold")}) 171 | 172 | #'Tensor Column Space Unfolding 173 | #' 174 | #'DEPRECATED. Please see \code{\link{matvec-methods}} and \code{\link{unfold-methods}}.
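#'For migration, the correspondence is (a sketch, inferred from the fold
#'counterparts \code{cs_fold} and \code{unmatvec}): on a 3-tensor,
#'\code{cs_unfold(tnsr,m)} matches \code{unfold(tnsr,row_idx=(1:3)[-m],col_idx=m)},
#'and \code{matvec(tnsr)} is the \code{m=2} case:
#'tnsr <- rand_tensor(c(2,3,4))
#'all.equal(matvec(tnsr)@@data, unfold(tnsr,row_idx=c(1,3),col_idx=2)@@data) #TRUE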
175 | #' 176 | #'@docType methods 177 | #'@name cs_unfold-methods 178 | #'@details \code{cs_unfold(tnsr,m=NULL)} 179 | #'@param tnsr Tensor instance 180 | #'@param m mode to be unfolded on 181 | #'@export 182 | #'@rdname cs_unfold-methods 183 | #'@aliases cs_unfold cs_unfold,Tensor-method 184 | setGeneric(name="cs_unfold", 185 | def=function(tnsr,m){standardGeneric("cs_unfold")}) 186 | 187 | #'Tensor Sum Across Single Mode 188 | #' 189 | #'Given a mode for a K-tensor, this returns the K-1 tensor resulting from summing across that particular mode. 190 | #' 191 | #'@docType methods 192 | #'@name modeSum-methods 193 | #'@details \code{modeSum(tnsr,m=NULL,drop=FALSE)} 194 | #'@export 195 | #'@rdname modeSum-methods 196 | #'@aliases modeSum modeSum,Tensor-method 197 | #'@param tnsr the Tensor instance 198 | #'@param m the index of the mode to sum across 199 | #'@param drop whether or not mode m should be dropped 200 | #'@return K-1 or K tensor, where \code{K = tnsr@@num_modes} 201 | #'@seealso \code{\link{modeMean}} 202 | #'@examples 203 | #'tnsr <- rand_tensor() 204 | #'modeSum(tnsr,3,drop=TRUE) 205 | setGeneric(name="modeSum", 206 | def=function(tnsr,m,drop){standardGeneric("modeSum")}) 207 | 208 | #'Tensor Mean Across Single Mode 209 | #' 210 | #'Given a mode for a K-tensor, this returns the K-1 tensor resulting from taking the mean across that particular mode. 211 | #' 212 | #'@docType methods 213 | #'@name modeMean-methods 214 | #'@details \code{modeMean(tnsr,m=NULL,drop=FALSE)} 215 | #'@export 216 | #'@rdname modeMean-methods 217 | #'@aliases modeMean modeMean,Tensor-method 218 | #'@param tnsr the Tensor instance 219 | #'@param m the index of the mode to average across 220 | #'@param drop whether or not mode m should be dropped 221 | #'@return K-1 or K Tensor, where \code{K = tnsr@@num_modes} 222 | #'@seealso \code{\link{modeSum}} 223 | #'@examples 224 | #'tnsr <- rand_tensor() 225 | #'modeMean(tnsr,1,drop=TRUE) 226 | setGeneric(name="modeMean", 227 | def=function(tnsr,m,drop){standardGeneric("modeMean")}) 228 | 229 | #'Tensor Frobenius Norm 230 | #' 231 | #'Returns the Frobenius norm of the Tensor instance. 232 | #' 233 | #'@docType methods 234 | #'@name fnorm-methods 235 | #'@details \code{fnorm(tnsr)} 236 | #'@export 237 | #'@rdname fnorm-methods 238 | #'@aliases fnorm fnorm,Tensor-method 239 | #'@param tnsr the Tensor instance 240 | #'@return numeric Frobenius norm of \code{tnsr} 241 | #'@examples 242 | #'tnsr <- rand_tensor() 243 | #'fnorm(tnsr) 244 | setGeneric(name="fnorm", 245 | def=function(tnsr){standardGeneric("fnorm")}) 246 | 247 | #'Tensor Inner Product 248 | #' 249 | #'Returns the inner product between two Tensors. 250 | #' 251 | #'@docType methods 252 | #'@name innerProd-methods 253 | #'@details \code{innerProd(tnsr1,tnsr2)} 254 | #'@export 255 | #'@rdname innerProd-methods 256 | #'@aliases innerProd innerProd,Tensor,Tensor-method 257 | #'@param tnsr1 first Tensor instance 258 | #'@param tnsr2 second Tensor instance 259 | #'@return inner product between \code{tnsr1} and \code{tnsr2} 260 | #'@examples 261 | #'tnsr1 <- rand_tensor() 262 | #'tnsr2 <- rand_tensor() 263 | #'innerProd(tnsr1,tnsr2) 264 | setGeneric(name="innerProd", 265 | def=function(tnsr1,tnsr2){standardGeneric("innerProd")}) 266 | 267 | #'Initializes a Tensor instance 268 | #' 269 | #'Not designed to be called by the user. Use \code{as.tensor} instead.
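#'Both construction routes used throughout these docs are equivalent up to the
#'random data (illustrative sketch only):
#'new("Tensor",3L,c(3L,4L,5L),data=runif(60)) #direct, as in several examples
#'as.tensor(array(runif(60),dim=c(3,4,5))) #preferred wrapper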
270 | #' 271 | #'@docType methods 272 | #'@name initialize-methods 273 | #'@rdname initialize-methods 274 | #'@param .Object the tensor object 275 | #'@param num_modes number of modes of the tensor 276 | #'@param modes modes of the tensor 277 | #'@param data can be vector, matrix, or array 278 | #'@aliases initialize,Tensor-method 279 | #'@seealso \code{as.tensor} 280 | setMethod(f="initialize", 281 | signature="Tensor", 282 | definition = function(.Object, num_modes=NULL, modes=NULL, data=NULL){ 283 | if(is.null(num_modes)){ 284 | if (is.vector(data)) num_modes <- 1L 285 | else{num_modes <- length(dim(data))} 286 | } 287 | if(is.null(modes)){ 288 | if (is.vector(data)) modes <- length(data) 289 | else{modes <- dim(data)} 290 | } 291 | .Object@num_modes <- num_modes 292 | .Object@modes <- modes 293 | .Object@data <- array(data,dim=modes) 294 | validObject(.Object) 295 | .Object 296 | }) 297 | 298 | ###Method Definitions 299 | options(warn=-1) 300 | 301 | #'Mode Getter for Tensor 302 | #' 303 | #'Return the vector of modes from a tensor 304 | #' 305 | #'@name dim-methods 306 | #'@details \code{dim(x)} 307 | #'@export 308 | #'@aliases dim,Tensor-method 309 | #'@docType methods 310 | #'@rdname dim-methods 311 | #'@param x the Tensor instance 312 | #'@return an integer vector of the modes associated with \code{x} 313 | #'@examples 314 | #'tnsr <- rand_tensor() 315 | #'dim(tnsr) 316 | setMethod(f="dim", 317 | signature="Tensor", 318 | definition=function(x){ 319 | x@modes 320 | }) 321 | 322 | #'Show for Tensor 323 | #' 324 | #'Extend show for Tensor 325 | #' 326 | #'@name show-methods 327 | #'@details \code{show(object)} 328 | #'@export 329 | #'@aliases show,Tensor-method 330 | #'@docType methods 331 | #'@rdname show-methods 332 | #'@param object the Tensor instance 333 | #'@param ... additional parameters to be passed into show() 334 | #'@seealso \code{\link{print}} 335 | #'@examples 336 | #'tnsr <- rand_tensor() 337 | #'tnsr 338 | setMethod(f="show", 339 | signature="Tensor", 340 | definition=function(object){ 341 | cat("Numeric Tensor of", object@num_modes, "Modes\n", sep=" ") 342 | cat("Modes: ", object@modes, "\n", sep=" ") 343 | cat("Data: \n") 344 | print(head(object@data)) 345 | }) 346 | 347 | #'Print for Tensor 348 | #' 349 | #'Extend print for Tensor 350 | #' 351 | #'@name print-methods 352 | #'@details \code{print(x,...)} 353 | #'@export 354 | #'@aliases print,Tensor-method 355 | #'@docType methods 356 | #'@rdname print-methods 357 | #'@param x the Tensor instance 358 | #'@param ... additional parameters to be passed into print() 359 | #'@seealso \code{\link{show}} 360 | #'@examples 361 | #'tnsr <- rand_tensor() 362 | #'print(tnsr) 363 | setMethod(f="print", 364 | signature="Tensor", 365 | definition=function(x,...){ 366 | show(x) 367 | }) 368 | 369 | #'Head for Tensor 370 | #' 371 | #'Extend head for Tensor 372 | #' 373 | #'@name head-methods 374 | #'@details \code{head(x,...)} 375 | #'@export 376 | #'@aliases head,Tensor-method 377 | #'@docType methods 378 | #'@rdname head-methods 379 | #'@param x the Tensor instance 380 | #'@param ... additional parameters to be passed into head() 381 | #'@seealso \code{\link{tail-methods}} 382 | #'@examples 383 | #'tnsr <- rand_tensor() 384 | #'head(tnsr) 385 | setMethod(f="head", 386 | signature="Tensor", 387 | definition=function(x,...){ 388 | head(x@data,...) 
389 | }) 390 | 391 | #'Tail for Tensor 392 | #' 393 | #'Extend tail for Tensor 394 | #' 395 | #'@name tail-methods 396 | #'@details \code{tail(x,...)} 397 | #'@export 398 | #'@aliases tail,Tensor-method 399 | #'@docType methods 400 | #'@rdname tail-methods 401 | #'@param x the Tensor instance 402 | #'@param ... additional parameters to be passed into tail() 403 | #'@seealso \code{\link{head-methods}} 404 | #'@examples 405 | #'tnsr <- rand_tensor() 406 | #'tail(tnsr) 407 | setMethod(f="tail", 408 | signature="Tensor", 409 | definition=function(x,...){ 410 | tail(x@data,...) 411 | }) 412 | 413 | #'Extract or Replace Subtensors 414 | #' 415 | #'Extends '[' and '[<-' from the base array class for the Tensor class. Works exactly as it would for the base 'array' class. 416 | #' 417 | #'@name [-methods 418 | #'@details \code{x[i,j,...,drop=TRUE]} 419 | #'@export 420 | #'@aliases [,Tensor-method extract,Tensor-method [<-,Tensor-method 421 | #'@docType methods 422 | #'@rdname extract-methods 423 | #'@param x Tensor to be subset 424 | #'@param i,j,... indices that specify the extents of the sub-tensor 425 | #'@param drop whether or not to reduce the number of modes to exclude those that have '1' as the mode 426 | #'@param value either vector, matrix, or array that will replace the subtensor 427 | #'@return an object of class Tensor 428 | #'@examples 429 | #'tnsr <- rand_tensor() 430 | #'tnsr[1,2,3] 431 | #'tnsr[3,1,] 432 | #'tnsr[,,5] 433 | #'tnsr[,,5,drop=FALSE] 434 | #' 435 | #'tnsr[1,2,3] <- 3; tnsr[1,2,3] 436 | #'tnsr[3,1,] <- rep(0,5); tnsr[3,1,] 437 | #'tnsr[,2,] <- matrix(0,nrow=3,ncol=5); tnsr[,2,] 438 | setMethod("[", signature="Tensor", 439 | definition=function(x,i,j,...,drop=TRUE){ 440 | if(!drop) as.tensor(`[`(x@data,i,j,drop=FALSE,...),drop=drop) 441 | else as.tensor(`[`(x@data,i,j,...)) 442 | }) 443 | 444 | #'@aliases [,Tensor-method extract,Tensor-method [<-,Tensor-method 445 | #'@rdname extract-methods 446 | setMethod("[<-", signature="Tensor", 447 | definition=function(x,i,j,...,value){ 448 | as.tensor(`[<-`(x@data,i,j,...,value=value)) 449 | }) 450 | 451 | #'Tensor Transpose 452 | #' 453 | #'Implements the tensor transpose based on block circulant matrices (Kilmer et al. 2013) for 3-tensors. 454 | #' 455 | #'@docType methods 456 | #'@name t-methods 457 | #'@rdname t-methods 458 | #'@details \code{t(x)} 459 | #'@export 460 | #'@aliases t,Tensor-method 461 | #'@param x a 3-tensor 462 | #'@return tensor transpose of \code{x} 463 | #'@references M. Kilmer, K. Braman, N. Hao, and R. Hoover, "Third-order tensors as operators on matrices: a theoretical and computational framework with applications in imaging". SIAM Journal on Matrix Analysis and Applications 2013. 464 | #'@examples 465 | #'tnsr <- rand_tensor() 466 | #'identical(t(tnsr)@@data[,,1],t(tnsr@@data[,,1])) 467 | #'identical(t(tnsr)@@data[,,2],t(tnsr@@data[,,5])) 468 | #'identical(t(t(tnsr)),tnsr) 469 | setMethod("t",signature="Tensor", 470 | definition=function(x){ 471 | tnsr <- x 472 | if(tnsr@num_modes!=3) stop("Tensor Transpose currently only implemented for 3d Tensors") 473 | modes <- tnsr@modes 474 | new_arr <- array(apply(tnsr@data[,,c(1L,modes[3]:2L),drop=FALSE],MARGIN=3,FUN=t),dim=modes[c(2,1,3)]) 475 | as.tensor(new_arr) 476 | }) 477 | 478 | #'Conformable elementwise operators for Tensor 479 | #' 480 | #'Overloads elementwise operators for tensors, arrays, and vectors that are conformable (have the same modes). 
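#'Scalar and array operands combine elementwise just as with base arrays
#'(illustrative sketch, not shipped code):
#'tnsr <- rand_tensor(c(2,2,2))
#'all.equal((2*tnsr + 1)@@data, 2*tnsr@@data + 1) #TRUE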
481 | #' 482 | #'@export 483 | #'@name Ops-methods 484 | #'@docType methods 485 | #'@aliases Ops-methods Ops,Tensor,Tensor-method Ops,Tensor,array-method Ops,Tensor,numeric-method Ops,array,Tensor-method Ops,numeric,Tensor-method 486 | #'@param e1 left-hand object 487 | #'@param e2 right-hand object 488 | #'@examples 489 | #'tnsr <- rand_tensor(c(3,4,5)) 490 | #'tnsr2 <- rand_tensor(c(3,4,5)) 491 | #'tnsrsum <- tnsr + tnsr2 492 | #'tnsrdiff <- tnsr - tnsr2 493 | #'tnsrelemprod <- tnsr * tnsr2 494 | #'tnsrelemquot <- tnsr / tnsr2 495 | #'for (i in 1:3L){ 496 | #' for (j in 1:4L){ 497 | #' for (k in 1:5L){ 498 | #' stopifnot(tnsrsum@@data[i,j,k]==tnsr@@data[i,j,k]+tnsr2@@data[i,j,k]) 499 | #' stopifnot(tnsrdiff@@data[i,j,k]==(tnsr@@data[i,j,k]-tnsr2@@data[i,j,k])) 500 | #' stopifnot(tnsrelemprod@@data[i,j,k]==tnsr@@data[i,j,k]*tnsr2@@data[i,j,k]) 501 | #' stopifnot(tnsrelemquot@@data[i,j,k]==tnsr@@data[i,j,k]/tnsr2@@data[i,j,k]) 502 | #'} 503 | #'} 504 | #'} 505 | setMethod("Ops", signature(e1="Tensor", e2="Tensor"), 506 | definition=function(e1,e2){ 507 | e1@data<-callGeneric(e1@data, e2@data) 508 | validObject(e1) 509 | e1 510 | }) 511 | setMethod("Ops", signature(e1="Tensor", e2="array"), 512 | definition=function(e1,e2){ 513 | e1@data<-callGeneric(e1@data,e2) 514 | validObject(e1) 515 | e1 516 | }) 517 | setMethod("Ops", signature(e1="array", e2="Tensor"), 518 | definition=function(e1,e2){ 519 | e2@data<-callGeneric(e1,e2@data) 520 | validObject(e2) 521 | e2 522 | }) 523 | setMethod("Ops", signature(e1="Tensor", e2="numeric"), 524 | definition=function(e1,e2){ 525 | e1@data<-callGeneric(e1@data,e2) 526 | validObject(e1) 527 | e1 528 | }) 529 | setMethod("Ops", signature(e1="numeric", e2="Tensor"), 530 | definition=function(e1,e2){ 531 | e2@data<-callGeneric(e1,e2@data) 532 | validObject(e2) 533 | e2 534 | }) 535 | 536 | #'@rdname modeSum-methods 537 | #'@aliases modeSum,Tensor-method 538 | setMethod("modeSum",signature="Tensor", 539 | definition=function(tnsr,m=NULL,drop=FALSE){ 540 | if(is.null(m)) stop("must specify mode m") 541 | num_modes <- tnsr@num_modes 542 | if(m<1||m>num_modes) stop("m out of bounds") 543 | perm <- c(m,(1L:num_modes)[-m]) 544 | modes <- tnsr@modes 545 | newmodes <- modes; newmodes[m]<-1 546 | arr <- array(colSums(aperm(tnsr@data,perm),dims=1L),dim=newmodes) 547 | as.tensor(arr,drop=drop) 548 | }) 549 | 550 | #'@rdname modeMean-methods 551 | #'@aliases modeMean,Tensor-method 552 | setMethod("modeMean",signature="Tensor", 553 | definition=function(tnsr,m=NULL,drop=FALSE){ 554 | if(is.null(m)) stop("must specify mode m") 555 | num_modes <- tnsr@num_modes 556 | if(m<1||m>num_modes) stop("m out of bounds") 557 | perm <- c(m,(1L:num_modes)[-m]) 558 | modes <- tnsr@modes 559 | newmodes <- modes; newmodes[m]<-1 560 | arr <- array(colSums(aperm(tnsr@data,perm),dims=1L),dim=newmodes) 561 | as.tensor(arr/modes[m],drop=drop) 562 | }) 563 | 564 | #'@rdname fnorm-methods 565 | #'@aliases fnorm,Tensor-method 566 | setMethod("fnorm",signature="Tensor", 567 | definition=function(tnsr){ 568 | arr<-tnsr@data 569 | sqrt(sum(arr*arr)) 570 | }) 571 | 572 | #'@rdname innerProd-methods 573 | #'@aliases innerProd,Tensor,Tensor-method 574 | setMethod("innerProd",signature=c(tnsr1="Tensor", tnsr2="Tensor"), 575 | definition=function(tnsr1,tnsr2){ 576 | stopifnot(tnsr1@modes==tnsr2@modes) 577 | arr1 <- tnsr1@data 578 | arr2 <- tnsr2@data 579 | sum(as.numeric(arr1*arr2)) 580 | }) 581 | 582 | ###Tensor Unfoldings 583 | 584 | #'@rdname unfold-methods 585 | #'@aliases unfold,Tensor-method 586 
| setMethod("unfold", signature="Tensor", 587 | definition=function(tnsr,row_idx=NULL,col_idx=NULL){ 588 | #checks 589 | rs <- row_idx 590 | cs <- col_idx 591 | if(is.null(rs)||is.null(cs)) stop("row and column indices must be specified") 592 | num_modes <- tnsr@num_modes 593 | if (length(rs) + length(cs) != num_modes) stop("incorrect number of indices") 594 | if(any(rs<1) || any(rs>num_modes) || any(cs < 1) || any(cs>num_modes)) stop("illegal indices specified") 595 | perm <- c(rs,cs) 596 | if (any(sort(perm,decreasing=TRUE) != num_modes:1)) stop("missing and/or repeated indices") 597 | modes <- tnsr@modes 598 | mat <- tnsr@data 599 | new_modes <- c(prod(modes[rs]),prod(modes[cs])) 600 | #rearranges into a matrix 601 | mat <- aperm(mat,perm) 602 | dim(mat) <- new_modes 603 | as.tensor(mat) 604 | }) 605 | 606 | #'@rdname k_unfold-methods 607 | #'@aliases k_unfold,Tensor-method 608 | setMethod("k_unfold", signature="Tensor", 609 | definition=function(tnsr,m=NULL){ 610 | if(is.null(m)) stop("mode m must be specified") 611 | num_modes <- tnsr@num_modes 612 | rs <- m 613 | cs <- (1:num_modes)[-m] 614 | unfold(tnsr,row_idx=rs,col_idx=cs) 615 | }) 616 | 617 | 618 | #'@rdname matvec-methods 619 | #'@aliases matvec,Tensor-method matvec,Tensor-method 620 | setMethod('matvec',signature="Tensor", 621 | definition=function(tnsr){ 622 | if(tnsr@num_modes!=3) stop("Matvec currently only implemented for 3d Tensors") 623 | num_modes <- tnsr@num_modes 624 | stopifnot(num_modes==3) 625 | unfold(tnsr,row_idx=c(1,3),col_idx=2) 626 | }) 627 | 628 | #'@rdname rs_unfold-methods 629 | #'@aliases rs_unfold,Tensor-method 630 | setMethod("rs_unfold", signature="Tensor", 631 | definition=function(tnsr,m=NULL){ 632 | if(is.null(m)) stop("mode m must be specified") 633 | num_modes <- tnsr@num_modes 634 | rs <- m 635 | cs <- (1:num_modes)[-m] 636 | unfold(tnsr,row_idx=rs,col_idx=cs) 637 | }) 638 | 639 | #'@rdname cs_unfold-methods 640 | #'@aliases cs_unfold,Tensor-method 641 | setMethod("cs_unfold", signature="Tensor", 642 | definition=function(tnsr,m=NULL){ 643 | if(is.null(m)) stop("mode m must be specified") 644 | num_modes <- tnsr@num_modes 645 | rs <- (1:num_modes)[-m] 646 | cs <- m 647 | unfold(tnsr,row_idx=rs,col_idx=cs) 648 | }) 649 | options(warn=1) 650 | 651 | ###Creation of Tensor from an array/matrix/vector 652 | 653 | #'Tensor Conversion 654 | #' 655 | #'Create a \code{\link{Tensor-class}} object from an \code{array}, \code{matrix}, or \code{vector}. 
656 | #'@export
657 | #'@name as.tensor
658 | #'@rdname as.tensor
659 | #'@aliases as.tensor
660 | #'@param x an instance of \code{array}, \code{matrix}, or \code{vector}
661 | #'@param drop whether or not modes of 1 should be dropped
662 | #'@return a \code{\link{Tensor-class}} object
663 | #'@examples
664 | #'#From vector
665 | #'vec <- runif(100); vecT <- as.tensor(vec); vecT
666 | #'#From matrix
667 | #'mat <- matrix(runif(1000),nrow=100,ncol=10)
668 | #'matT <- as.tensor(mat); matT
669 | #'#From array
670 | #'indices <- c(10,20,30,40)
671 | #'arr <- array(runif(prod(indices)), dim = indices)
672 | #'arrT <- as.tensor(arr); arrT
673 | as.tensor <- function(x,drop=FALSE){
674 |   stopifnot(is.array(x)||is.vector(x))
675 |   if (is.vector(x)){
676 |     modes <- c(length(x))
677 |     num_modes <- 1L
678 |   }else{
679 |     modes <- dim(x)
680 |     num_modes <- length(modes)
681 |     dim1s <- which(modes==1)
682 |     if(drop && (length(dim1s)>0)){
683 |       modes <- modes[-dim1s]
684 |       num_modes <- num_modes-length(dim1s)
685 |     }
686 |   }
687 |   new("Tensor",num_modes,modes,data=array(x,dim=modes))
688 | }
689 | 
690 | #'Mode Permutation for Tensor
691 | #'
692 | #'Overloads \code{aperm} for the Tensor class for convenience.
693 | #'
694 | #'@docType methods
695 | #'@name tperm-methods
696 | #'@rdname tperm-methods
697 | #'@aliases tperm tperm-methods tperm,Tensor-method
698 | #'@details \code{tperm(tnsr,perm=NULL,...)}
699 | #'@export
700 | #'@param tnsr the Tensor instance
701 | #'@param perm the new permutation of the current modes
702 | #'@param ... additional parameters to be passed into \code{aperm}
703 | #'@examples
704 | #'tnsr <- rand_tensor(c(3,4,5))
705 | #'dim(tperm(tnsr,perm=c(2,1,3)))
706 | #'dim(tperm(tnsr,perm=c(1,3,2)))
707 | setGeneric(name="tperm",
708 |   def=function(tnsr,perm,...){standardGeneric("tperm")})
709 | 
710 | #'@seealso \code{\link{aperm}}
711 | #'@rdname tperm-methods
712 | #'@aliases tperm-methods tperm,Tensor-method
713 | setMethod("tperm",signature="Tensor",
714 |   definition=function(tnsr,...){
715 |     as.tensor(aperm(tnsr@data,...))
716 | })
717 | 
718 | 
719 | #'Tensor Vec
720 | #'
721 | #'Turns the tensor into a single vector, following the column-major convention that earlier indices vary faster than later indices.
722 | #'@docType methods
723 | #'@name vec-methods
724 | #'@details \code{vec(tnsr)}
725 | #'@export
726 | #'@rdname vec-methods
727 | #'@aliases vec vec,Tensor-method
728 | #'@references T. Kolda, B. Bader, "Tensor decompositions and applications". SIAM Review 2009.
729 | #'@param tnsr the Tensor instance
730 | #'@return vector with length \code{prod(tnsr@@modes)}
731 | #'@examples
732 | #'tnsr <- rand_tensor(c(4,5,6,7))
733 | #'vec(tnsr)
734 | #'@rdname vec-methods
735 | #'@aliases vec,Tensor-method
736 | setGeneric(name="vec",def=function(tnsr){standardGeneric("vec")})
737 | 
738 | #'@rdname vec-methods
739 | #'@aliases vec,Tensor-method
740 | setMethod("vec",signature="Tensor",
741 |   definition=function(tnsr){
742 |     as.vector(tnsr@data)
743 | })
744 | 
745 | 
746 | 
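###vec sanity check (Not Supported): an illustrative, non-exported sketch added
###for exposition; the helper name .check_vec is not part of the package API.
###Because vec() is just as.vector() on the underlying array, it agrees with
###reading the mode-1 unfolding column by column.
.check_vec <- function(tnsr = rand_tensor(c(3,4,5))){
  #mode-1 unfolding: rows index mode 1, columns run over the remaining modes
  m1 <- k_unfold(tnsr, m = 1)@data
  stopifnot(identical(vec(tnsr), as.vector(m1)))
  invisible(TRUE)
}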
--------------------------------------------------------------------------------
/R/rTensor_Decomp.R:
--------------------------------------------------------------------------------
1 | ###Tensor Decompositions
2 | 
3 | #'(Truncated-)Higher-order SVD
4 | #'
5 | #'Higher-order SVD of a K-Tensor. Writes the K-Tensor as an (m-mode) product of a core Tensor (possibly with smaller modes) and K orthogonal factor matrices. Truncations can be specified via \code{ranks}: entries smaller than the original modes of the K-Tensor result in a truncated HOSVD. For the mathematical details on HOSVD, consult De Lathauwer et al. (2000).
6 | #'@export
7 | #'@details Computes one SVD for each mode-m matricization of the input tensor; unlike \code{\link{tucker}}, this is not an iterative procedure. A progress bar is included to help monitor operations on large tensors.
8 | #'@name hosvd
9 | #'@rdname hosvd
10 | #'@aliases hosvd
11 | #'@param tnsr Tensor with K modes
12 | #'@param ranks a vector of desired modes in the output core tensor, default is \code{tnsr@@modes}
13 | #'@return a list containing the following:\describe{
14 | #'\item{\code{Z}}{core tensor with modes specified by \code{ranks}}
15 | #'\item{\code{U}}{a list of orthogonal matrices, one for each mode}
16 | #'\item{\code{est}}{estimate of \code{tnsr} after compression}
17 | #'\item{\code{fnorm_resid}}{the Frobenius norm of the error \code{fnorm(est-tnsr)} - if there was no truncation, then this is O(mach_eps)}
18 | #'}
19 | #'@seealso \code{\link{tucker}}
20 | #'@references L. De Lathauwer, B. De Moor, J. Vandewalle, "A multilinear singular value decomposition". SIAM Journal on Matrix Analysis and Applications 2000.
21 | #'@note The length of \code{ranks} must match \code{tnsr@@num_modes}.
22 | #'@examples
23 | #'tnsr <- rand_tensor(c(6,7,8))
24 | #'hosvdD <- hosvd(tnsr)
25 | #'hosvdD$fnorm_resid
26 | #'hosvdD2 <- hosvd(tnsr,ranks=c(3,3,4))
27 | #'hosvdD2$fnorm_resid
28 | hosvd <- function(tnsr,ranks=NULL){
29 |   #stopifnot(is(tnsr,"Tensor"))
30 |   num_modes <- tnsr@num_modes
31 |   #no truncation if ranks not provided
32 |   if(is.null(ranks)){
33 |     ranks <- tnsr@modes
34 |   }
35 |   #progress bar
36 |   pb <- txtProgressBar(min=0,max=num_modes,style=3)
37 |   #loops through and performs SVD on mode-m matricization of tnsr
38 |   U_list <- vector("list",num_modes)
39 |   for(m in 1:num_modes){
40 |     temp_mat <- rs_unfold(tnsr,m=m)@data
41 |     U_list[[m]] <- svd(temp_mat,nu=ranks[m])$u
42 |     setTxtProgressBar(pb,m)
43 |   }
44 |   close(pb)
45 |   #computes the core tensor
46 |   Z <- ttl(tnsr,lapply(U_list,t),ms=1:num_modes)
47 |   est <- ttl(Z,U_list,ms=1:num_modes)
48 |   resid <- fnorm(est-tnsr)
49 |   #put together the return list, and returns
50 |   list(Z=Z,U=U_list,est=est,fnorm_resid=resid)
51 | }
52 | 
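###hosvd sanity check (Not Supported): an illustrative, non-exported sketch;
###the helper name .check_hosvd is not part of the package API. Without
###truncation, every factor matrix is orthonormal and the reconstruction
###error is on the order of machine epsilon.
.check_hosvd <- function(tnsr = rand_tensor(c(6,7,8))){
  decomp <- hosvd(tnsr)
  for (U in decomp$U){
    #each factor matrix U satisfies t(U) %*% U = I
    stopifnot(max(abs(crossprod(U) - diag(ncol(U)))) < 1e-10)
  }
  #no truncation, so the residual is numerically zero
  stopifnot(decomp$fnorm_resid < 1e-8)
  invisible(decomp)
}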
53 | #'Canonical Polyadic Decomposition
54 | #'
55 | #'Canonical Polyadic (CP) decomposition of a tensor, also known as CANDECOMP/PARAFAC. Approximates a K-Tensor using a sum of \code{num_components} rank-1 K-Tensors. A rank-1 K-Tensor can be written as an outer product of K vectors. There are a total of \code{num_components*tnsr@@num_modes} vectors in the output, stored in \code{tnsr@@num_modes} matrices, each with \code{num_components} columns. This is an iterative algorithm, with two possible stopping conditions: either the relative error in Frobenius norm has dropped below \code{tol}, or the \code{max_iter} number of iterations has been reached. For more details on CP decomposition, consult Kolda and Bader (2009).
56 | #'@export
57 | #'@details Uses the Alternating Least Squares (ALS) estimation procedure. A progress bar is included to help monitor operations on large tensors.
58 | #'@name cp
59 | #'@rdname cp
60 | #'@aliases cp
61 | #'@param tnsr Tensor with K modes
62 | #'@param num_components the number of rank-1 K-Tensors to use in approximation
63 | #'@param max_iter maximum number of iterations if error stays above \code{tol}
64 | #'@param tol relative Frobenius norm error tolerance
65 | #'@return a list containing the following:\describe{
66 | #'\item{\code{lambdas}}{a vector of normalizing constants, one for each component}
67 | #'\item{\code{U}}{a list of matrices - one for each mode - each matrix with \code{num_components} columns}
68 | #'\item{\code{conv}}{whether or not \code{resid} < \code{tol} by the last iteration}
69 | #'\item{\code{norm_percent}}{the percent of Frobenius norm explained by the approximation}
70 | #'\item{\code{est}}{estimate of \code{tnsr} after compression}
71 | #'\item{\code{fnorm_resid}}{the Frobenius norm of the error \code{fnorm(est-tnsr)}}
72 | #'\item{\code{all_resids}}{vector containing the Frobenius norm of error for all the iterations}
73 | #'}
74 | #'@seealso \code{\link{tucker}}
75 | #'@references T. Kolda, B. Bader, "Tensor decompositions and applications". SIAM Review 2009.
76 | #'@examples
77 | #'tnsr <- rand_tensor(c(6,7,8))
78 | #'cpD <- cp(tnsr,num_components=5)
79 | #'cpD$conv
80 | #'cpD$norm_percent
81 | #'plot(cpD$all_resids)
82 | cp <- function(tnsr, num_components=NULL, max_iter=25, tol=1e-5){
83 |   if(is.null(num_components)) stop("num_components must be specified")
84 |   stopifnot(is(tnsr,"Tensor"))
85 |   #initialization via random factor matrices
86 |   num_modes <- tnsr@num_modes
87 |   modes <- tnsr@modes
88 |   U_list <- vector("list",num_modes)
89 |   unfolded_mat <- vector("list",num_modes)
90 |   tnsr_norm <- fnorm(tnsr)
91 |   for(m in 1:num_modes){
92 |     unfolded_mat[[m]] <- rs_unfold(tnsr,m=m)@data
93 |     U_list[[m]] <- matrix(rnorm(modes[m]*num_components), nrow=modes[m], ncol=num_components)
94 |   }
95 |   est <- tnsr
96 |   curr_iter <- 1
97 |   converged <- FALSE
98 |   #set up convergence check
99 |   fnorm_resid <- rep(0, max_iter)
100 |   CHECK_CONV <- function(est){
101 |     curr_resid <- fnorm(est - tnsr)
102 |     fnorm_resid[curr_iter] <<- curr_resid
103 |     if (curr_iter==1) return(FALSE)
104 |     if (abs(curr_resid-fnorm_resid[curr_iter-1])/tnsr_norm < tol) return(TRUE)
105 |     else{ return(FALSE)}
106 |   }
107 |   #progress bar
108 |   pb <- txtProgressBar(min=0,max=max_iter,style=3)
109 |   #main loop (until convergence or max_iter)
110 |   norm_vec <- function(vec){
111 |     norm(as.matrix(vec))
112 |   }
113 |   while((curr_iter < max_iter) && (!converged)){
114 |     setTxtProgressBar(pb,curr_iter)
115 |     for(m in 1:num_modes){
116 |       V <- hamadard_list(lapply(U_list[-m],function(x) {t(x)%*%x}))
117 |       V_inv <- solve(V)
118 |       tmp <- unfolded_mat[[m]]%*%khatri_rao_list(U_list[-m],reverse=TRUE)%*%V_inv
119 |       lambdas <- apply(tmp,2,norm_vec)
120 |       U_list[[m]] <- sweep(tmp,2,lambdas,"/")
121 |       Z <- .superdiagonal_tensor(num_modes=num_modes,len=num_components,elements=lambdas)
122 |       est <- ttl(Z,U_list,ms=1:num_modes)
123 |     }
124 |     #checks convergence
125 |     if(CHECK_CONV(est)){
126 |       converged <- TRUE
127 |       setTxtProgressBar(pb,max_iter)
128 |     }else{
129 |       curr_iter <- curr_iter + 1
130 |     }
131 |   }
132 |   if(!converged){setTxtProgressBar(pb,max_iter)}
133 |   close(pb)
134 |   #end of main loop
135 |   #put together return list, and returns
136 |   fnorm_resid <- fnorm_resid[fnorm_resid!=0]
137 |   norm_percent <- (1-(tail(fnorm_resid,1)/tnsr_norm))*100
138 |   invisible(list(lambdas=lambdas, U=U_list, conv=converged, est=est, norm_percent=norm_percent, fnorm_resid=tail(fnorm_resid,1), all_resids=fnorm_resid))
139 | }
140 | 
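###cp sanity check (Not Supported): an illustrative, non-exported sketch; the
###helper name .check_cp is not part of the package API. The returned estimate
###is, by construction, the superdiagonal core rescaled by lambdas and
###multiplied by the factor matrices in every mode.
.check_cp <- function(tnsr = rand_tensor(c(6,7,8)), num_components = 3){
  decomp <- cp(tnsr, num_components = num_components)
  #rebuild the estimate from the returned pieces, exactly as cp() does
  Z <- .superdiagonal_tensor(num_modes = tnsr@num_modes,
      len = num_components, elements = decomp$lambdas)
  recon <- ttl(Z, decomp$U, ms = 1:tnsr@num_modes)
  stopifnot(fnorm(recon - decomp$est) < 1e-8)
  invisible(decomp)
}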
141 | #'Tucker Decomposition
142 | #'
143 | #'The Tucker decomposition of a tensor. Approximates a K-Tensor using an n-mode product of a core tensor (with modes specified by \code{ranks}) with orthogonal factor matrices. If there is no truncation in one of the modes, then this is the same as the MPCA, \code{\link{mpca}}. If there is no truncation in all the modes (i.e. \code{ranks = tnsr@@modes}), then this is the same as the HOSVD, \code{\link{hosvd}}. This is an iterative algorithm, with two possible stopping conditions: either the relative error in Frobenius norm has dropped below \code{tol}, or the \code{max_iter} number of iterations has been reached. For more details on the Tucker decomposition, consult Kolda and Bader (2009).
144 | #'@export
145 | #'@details Uses the Alternating Least Squares (ALS) estimation procedure, also known as Higher-Order Orthogonal Iteration (HOOI). Initialized using a (Truncated-)HOSVD. A progress bar is included to help monitor operations on large tensors.
146 | #'@name tucker
147 | #'@rdname tucker
148 | #'@aliases tucker
149 | #'@param tnsr Tensor with K modes
150 | #'@param ranks a vector of the modes of the output core Tensor
151 | #'@param max_iter maximum number of iterations if error stays above \code{tol}
152 | #'@param tol relative Frobenius norm error tolerance
153 | #'@return a list containing the following:\describe{
154 | #'\item{\code{Z}}{the core tensor, with modes specified by \code{ranks}}
155 | #'\item{\code{U}}{a list of orthogonal factor matrices - one for each mode, with the number of columns of the matrices given by \code{ranks}}
156 | #'\item{\code{conv}}{whether or not \code{resid} < \code{tol} by the last iteration}
157 | #'\item{\code{est}}{estimate of \code{tnsr} after compression}
158 | #'\item{\code{norm_percent}}{the percent of Frobenius norm explained by the approximation}
159 | #'\item{\code{fnorm_resid}}{the Frobenius norm of the error \code{fnorm(est-tnsr)}}
160 | #'\item{\code{all_resids}}{vector containing the Frobenius norm of error for all the iterations}
161 | #'}
162 | #'@seealso \code{\link{hosvd}}, \code{\link{mpca}}
163 | #'@references T. Kolda, B. Bader, "Tensor decompositions and applications". SIAM Review 2009.
164 | #'@note The length of \code{ranks} must match \code{tnsr@@num_modes}.
165 | #'@examples
166 | #'tnsr <- rand_tensor(c(6,7,8))
167 | #'tuckerD <- tucker(tnsr,ranks=c(3,3,4))
168 | #'tuckerD$conv
169 | #'tuckerD$norm_percent
170 | #'plot(tuckerD$all_resids)
171 | tucker <- function(tnsr,ranks=NULL,max_iter=25,tol=1e-5){
172 |   stopifnot(is(tnsr,"Tensor"))
173 |   if(is.null(ranks)) stop("ranks must be specified")
174 |   #initialization via truncated hosvd
175 |   num_modes <- tnsr@num_modes
176 |   U_list <- vector("list",num_modes)
177 |   for(m in 1:num_modes){
178 |     temp_mat <- rs_unfold(tnsr,m=m)@data
179 |     U_list[[m]] <- svd(temp_mat,nu=ranks[m])$u
180 |   }
181 |   tnsr_norm <- fnorm(tnsr)
182 |   curr_iter <- 1
183 |   converged <- FALSE
184 |   #set up convergence check
185 |   fnorm_resid <- rep(0, max_iter)
186 |   CHECK_CONV <- function(Z,U_list){
187 |     est <- ttl(Z,U_list,ms=1:num_modes)
188 |     curr_resid <- fnorm(tnsr - est)
189 |     fnorm_resid[curr_iter] <<- curr_resid
190 |     if (curr_iter==1) return(FALSE)
191 |     if (abs(curr_resid-fnorm_resid[curr_iter-1])/tnsr_norm < tol) return(TRUE)
192 |     else{return(FALSE)}
193 |   }
194 |   #progress bar
195 |   pb <- txtProgressBar(min=0,max=max_iter,style=3)
196 |   #main loop (until convergence or max_iter)
197 |   while((curr_iter < max_iter) && (!converged)){
198 |     setTxtProgressBar(pb,curr_iter)
199 |     modes <- tnsr@modes
200 |     modes_seq <- 1:num_modes
201 |     for(m in modes_seq){
202 |       #core Z minus mode m
203 |       X <- ttl(tnsr,lapply(U_list[-m],t),ms=modes_seq[-m])
204 |       #truncated SVD of X
205 |       #U_list[[m]] <- (svd(rs_unfold(X,m=m)@data,nu=ranks[m],nv=prod(modes[-m]))$u)[,1:ranks[m]]
206 |       U_list[[m]] <- svd(rs_unfold(X,m=m)@data,nu=ranks[m])$u
207 |     }
208 |     #compute core tensor Z
209 |     Z <- ttm(X,mat=t(U_list[[num_modes]]),m=num_modes)
210 | 
211 |     #checks convergence
212 |     if(CHECK_CONV(Z, U_list)){
213 |       converged <- TRUE
214 |       setTxtProgressBar(pb,max_iter)
215 |     }else{
216 |       curr_iter <- curr_iter + 1
217 |     }
218 |   }
219 |   close(pb)
220 |   #end of main loop
221 |   #put together return list, and returns
222 |   fnorm_resid <- fnorm_resid[fnorm_resid!=0]
223 |   norm_percent <- (1-(tail(fnorm_resid,1)/tnsr_norm))*100
224 |   est <- ttl(Z,U_list,ms=1:num_modes)
225 |   invisible(list(Z=Z, U=U_list, conv=converged, est=est, norm_percent = norm_percent, fnorm_resid=tail(fnorm_resid,1), all_resids=fnorm_resid))
226 | }
227 | 
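###tucker sanity check (Not Supported): an illustrative, non-exported sketch;
###the helper name .check_tucker is not part of the package API. The factor
###matrices returned by HOOI have orthonormal columns, and est is the core
###multiplied back by the factors.
.check_tucker <- function(tnsr = rand_tensor(c(6,7,8)), ranks = c(3,3,4)){
  decomp <- tucker(tnsr, ranks = ranks)
  for (U in decomp$U){
    #each factor matrix U satisfies t(U) %*% U = I
    stopifnot(max(abs(crossprod(U) - diag(ncol(U)))) < 1e-10)
  }
  recon <- ttl(decomp$Z, decomp$U, ms = 1:tnsr@num_modes)
  stopifnot(fnorm(recon - decomp$est) < 1e-8)
  invisible(decomp)
}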
228 | #'Multilinear Principal Components Analysis
229 | #'
230 | #'This is essentially the Tucker decomposition of a K-Tensor, \code{\link{tucker}}, with one of the modes uncompressed. If K = 3, then this is also known as the Generalized Low Rank Approximation of Matrices (GLRAM). This implementation assumes that the last mode is the measurement mode and hence uncompressed. This is an iterative algorithm, with two possible stopping conditions: either the relative error in Frobenius norm has dropped below \code{tol}, or the \code{max_iter} number of iterations has been reached. For more details on the MPCA of tensors, consult Lu et al. (2008).
231 | #'@export
232 | #'@details Uses the Alternating Least Squares (ALS) estimation procedure. A progress bar is included to help monitor operations on large tensors.
233 | #'@name mpca
234 | #'@rdname mpca
235 | #'@aliases mpca
236 | #'@param tnsr Tensor with K modes
237 | #'@param ranks a vector of the compressed modes of the output core Tensor, this has length K-1
238 | #'@param max_iter maximum number of iterations if error stays above \code{tol}
239 | #'@param tol relative Frobenius norm error tolerance
240 | #'@return a list containing the following:\describe{
241 | #'\item{\code{Z_ext}}{the extended core tensor, with the first K-1 modes given by \code{ranks}}
242 | #'\item{\code{U}}{a list of K-1 orthogonal factor matrices - one for each compressed mode, with the number of columns of the matrices given by \code{ranks}}
243 | #'\item{\code{conv}}{whether or not \code{resid} < \code{tol} by the last iteration}
244 | #'\item{\code{est}}{estimate of \code{tnsr} after compression}
245 | #'\item{\code{norm_percent}}{the percent of Frobenius norm explained by the approximation}
246 | #'\item{\code{fnorm_resid}}{the Frobenius norm of the error \code{fnorm(est-tnsr)}}
247 | #'\item{\code{all_resids}}{vector containing the Frobenius norm of error for all the iterations}
248 | #'}
249 | #'@seealso \code{\link{tucker}}, \code{\link{hosvd}}
250 | #'@references H. Lu, K. Plataniotis, A. Venetsanopoulos, "MPCA: Multilinear principal component analysis of tensor objects". IEEE Transactions on Neural Networks, 2008.
251 | #'@note The length of \code{ranks} must match \code{tnsr@@num_modes-1}.
252 | #'@examples
253 | #'tnsr <- rand_tensor(c(100,10,10))
254 | #'mpcaD <- mpca(tnsr,ranks=c(30,5))
255 | #'mpcaD$conv
256 | #'mpcaD$norm_percent
257 | #'plot(mpcaD$all_resids)
258 | mpca <- function(tnsr, ranks = NULL, max_iter = 25, tol=1e-5){
259 |   if(is.null(ranks)) stop("ranks must be specified")
260 |   stopifnot(is(tnsr,"Tensor"))
261 |   #initialization via hosvd of the first K-1 modes
262 |   num_modes <- tnsr@num_modes
263 |   stopifnot(length(ranks)==(num_modes-1))
264 |   ranks <- c(ranks,1)
265 |   modes <- tnsr@modes
266 |   U_list <- vector("list",num_modes)
267 |   unfolded_mat <- vector("list",num_modes)
268 |   for(m in 1:(num_modes-1)){
269 |     unfolded_mat <- rs_unfold(tnsr,m=m)@data
270 |     mode_m_cov <- unfolded_mat%*%t(unfolded_mat)
271 |     U_list[[m]] <- svd(mode_m_cov, nu=ranks[m])$u
272 |   }
273 |   Z_ext <- ttl(tnsr,lapply(U_list[-num_modes],t),ms=1:(num_modes-1))
274 |   tnsr_norm <- fnorm(tnsr)
275 |   curr_iter <- 1
276 |   converged <- FALSE
277 |   #set up convergence check
278 |   fnorm_resid <- rep(0, max_iter)
279 |   CHECK_CONV <- function(Z_ext,U_list){
280 |     est <- ttl(Z_ext,U_list[-num_modes],ms=1:(num_modes-1))
281 |     curr_resid <- fnorm(tnsr - est)
282 |     fnorm_resid[curr_iter] <<- curr_resid
283 |     if (curr_iter==1) return(FALSE)
284 |     if (abs(curr_resid-fnorm_resid[curr_iter-1])/tnsr_norm < tol) return(TRUE)
285 |     else{return(FALSE)}
286 |   }
287 |   #progress bar
288 |   pb <- txtProgressBar(min=0,max=max_iter,style=3)
289 |   #main loop (until convergence or max_iter)
290 |   while((curr_iter < max_iter) && (!converged)){
291 |     setTxtProgressBar(pb,curr_iter)
292 |     modes <- tnsr@modes
293 |     modes_seq <- 1:(num_modes-1)
294 |     for(m in modes_seq){
295 |       #extended core Z minus mode m
296 |       X <- ttl(tnsr,lapply(U_list[-c(m,num_modes)],t),ms=modes_seq[-m])
297 |       #truncated SVD of X
298 |       U_list[[m]] <- svd(rs_unfold(X,m=m)@data,nu=ranks[m])$u
299 |     }
300 |     #compute core tensor Z_ext
301 |     Z_ext <- ttm(X,mat=t(U_list[[num_modes-1]]),m=num_modes-1)
302 |     #checks convergence
303 |     if(CHECK_CONV(Z_ext, U_list)){
304 |       converged <- TRUE
305 |       setTxtProgressBar(pb,max_iter)
306 |     }else{
307 |       curr_iter <- curr_iter + 1
308 |     }
309 |   }
310 |   close(pb)
311 |   #end of main loop
312 |   #put together return list, and returns
313 |   est <- ttl(Z_ext,U_list[-num_modes],ms=1:(num_modes-1))
314 |   fnorm_resid <- fnorm_resid[fnorm_resid!=0]
315 |   norm_percent <- (1-(tail(fnorm_resid,1)/tnsr_norm))*100
316 |   invisible(list(Z_ext=Z_ext, U=U_list, conv=converged, est=est, norm_percent = norm_percent, fnorm_resid=tail(fnorm_resid,1), all_resids=fnorm_resid))
317 | }
318 | 
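###mpca sanity check (Not Supported): an illustrative, non-exported sketch;
###the helper name .check_mpca is not part of the package API. The last mode
###is the measurement mode, so it stays uncompressed in the extended core.
.check_mpca <- function(tnsr = rand_tensor(c(20,10,10)), ranks = c(5,5)){
  decomp <- mpca(tnsr, ranks = ranks)
  K <- tnsr@num_modes
  #the measurement mode keeps its original size
  stopifnot(decomp$Z_ext@modes[K] == tnsr@modes[K])
  #the first K-1 modes are compressed down to ranks
  stopifnot(all(decomp$Z_ext@modes[-K] == ranks))
  invisible(decomp)
}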
319 | #'Population Value Decomposition
320 | #'
321 | #'The default Population Value Decomposition (PVD) of a series of 2D images. Constructs population-level matrices P, V, and D to account for variances within as well as across the images. Structurally similar to Tucker (\code{\link{tucker}}) and GLRAM (\code{\link{mpca}}), but retains crucial differences. Requires \code{2*n3 + 2} parameters to specify the final ranks of P, V, and D, where n3 is the third mode (how many images are in the set). Consult Crainiceanu et al. (2013) for the construction and rationale behind the PVD model.
322 | #'@export
323 | #'@details The PVD is not an iterative method, but instead relies on \code{n3 + 2} separate PCA decompositions. The third mode indexes the images in the set.
324 | #'@name pvd
325 | #'@rdname pvd
326 | #'@aliases pvd
327 | #'@param tnsr 3-Tensor with the third mode being the measurement mode
328 | #'@param uranks ranks of the U matrices
329 | #'@param wranks ranks of the W matrices
330 | #'@param a rank of \code{P = U\%*\%t(U)}
331 | #'@param b rank of \code{D = W\%*\%t(W)}
332 | #'@return a list containing the following:\describe{
333 | #'\item{\code{P}}{population-level matrix \code{P = U\%*\%t(U)}, where U is constructed by stacking the truncated left eigenvectors of slicewise PCA along the third mode}
334 | #'\item{\code{V}}{a list of image-level core matrices}
335 | #'\item{\code{D}}{population-level matrix \code{D = W\%*\%t(W)}, where W is constructed by stacking the truncated right eigenvectors of slicewise PCA along the third mode}
336 | #'\item{\code{est}}{estimate of \code{tnsr} after compression}
337 | #'\item{\code{norm_percent}}{the percent of Frobenius norm explained by the approximation}
338 | #'\item{\code{fnorm_resid}}{the Frobenius norm of the error \code{fnorm(est-tnsr)}}
339 | #'}
340 | #'@references C. Crainiceanu, B. Caffo, S. Luo, V. Zipunnikov, N. Punjabi, "Population value decomposition: a framework for the analysis of image populations". Journal of the American Statistical Association, 2013.
341 | #'@examples
342 | #'tnsr <- rand_tensor(c(10,5,100))
343 | #'pvdD <- pvd(tnsr,uranks=rep(8,100),wranks=rep(4,100),a=8,b=4)
344 | pvd <- function(tnsr,uranks=NULL,wranks=NULL,a=NULL,b=NULL){
345 |   if(tnsr@num_modes!=3) stop("PVD only for 3D")
346 |   if(is.null(uranks)||is.null(wranks)) stop("U and V ranks must be specified")
347 |   if(is.null(a)||is.null(b)) stop("a and b must be specified")
348 |   modes <- tnsr@modes
349 |   n <- modes[3]
350 |   if(length(uranks)!=n||length(wranks)!=n) stop("ranks must be of length n3")
351 |   pb <- txtProgressBar(min=0,max=(n+3),style=3)
352 |   x <- tnsr@data
353 |   Us <- vector('list',n)
354 |   Vs <- vector('list',n)
355 |   S <- vector('list',n)
356 |   for(i in 1:n){
357 |     svdz <- svd(x[,,i],nu=uranks[i],nv=wranks[i])
358 |     Us[[i]] <- svdz$u
359 |     Vs[[i]] <- svdz$v
360 |     S[[i]] <- svdz$d[1:min(uranks[i],wranks[i])]
361 |     setTxtProgressBar(pb,i)
362 |   }
363 |   U <- matrix(unlist(Us),nrow=modes[1],ncol=sum(uranks)*n)
364 |   #eigenU <- eigen(U%*%t(U))
365 |   P <- eigen(U%*%t(U))$vectors[,1:a] #E-vecs of UU^T
366 |   setTxtProgressBar(pb,n+1)
367 |   V <- matrix(unlist(Vs),nrow=modes[2],ncol=sum(wranks)*n)
368 |   #eigenV <- eigen(V%*%t(V))
369 |   Dt <- eigen(V%*%t(V))$vectors[,1:b] #E-vecs of VV^T
370 |   D <- t(Dt)
371 |   setTxtProgressBar(pb,n+2)
372 |   V2 <- vector('list',n)
373 |   est <- array(0,dim=modes)
374 |   for(i in 1:n){
375 |     V2[[i]] <- (t(P)%*%Us[[i]])%*%diag(S[[i]],nrow=uranks[i],ncol=wranks[i])%*%(t(Vs[[i]])%*%Dt)
376 |     est[,,i] <- P%*%V2[[i]]%*%D
377 |   }
378 |   est <- as.tensor(est)
379 |   fnorm_resid <- fnorm(est-tnsr)
380 |   setTxtProgressBar(pb,n+3)
381 |   norm_percent <- (1-(fnorm_resid/fnorm(tnsr)))*100
382 |   invisible(list(P=P,D=D,V=V2,est=est,norm_percent=norm_percent,fnorm_resid=fnorm_resid))
383 | }
384 | 
385 | #'Tensor Singular Value Decomposition
386 | #'
387 | #'TSVD for a 3-Tensor. Constructs 3-Tensors \code{U, S, V} such that \code{tnsr = t_mult(t_mult(U,S),t(V))}. \code{U} and \code{V} are orthogonal 3-Tensors with orthogonality defined in Kilmer et al. (2013), and \code{S} is a 3-Tensor consisting of facewise diagonal matrices. For more details on the TSVD, consult Kilmer et al. (2013).
388 | #'@export
389 | #'@name t_svd
390 | #'@rdname t_svd
391 | #'@aliases t_svd
392 | #'@param tnsr 3-Tensor to decompose via TSVD
393 | #'@return a list containing the following:\describe{
394 | #'\item{\code{U}}{the left orthogonal 3-Tensor}
395 | #'\item{\code{V}}{the right orthogonal 3-Tensor}
396 | #'\item{\code{S}}{the middle 3-Tensor consisting of face-wise diagonal matrices}
397 | #'}
398 | #'@seealso \code{\link{t_mult}}, \code{\link{t_svd_reconstruct}}
399 | #'@references M. Kilmer, K. Braman, N. Hao, and R. Hoover, "Third-order tensors as operators on matrices: a theoretical and computational framework with applications in imaging". SIAM Journal on Matrix Analysis and Applications 2013.
400 | #'@note Computation involves complex values, but if the inputs are real, then the outputs are also real. Some loss of precision occurs in the truncation of the imaginary components during the FFT and inverse FFT.
401 | #'@examples 402 | #'tnsr <- rand_tensor() 403 | #'tsvdD <- t_svd(tnsr) 404 | t_svd<-function(tnsr){ 405 | if(tnsr@num_modes!=3) stop("T-SVD only implemented for 3d so far") 406 | modes <- tnsr@modes 407 | n1 <- modes[1] 408 | n2 <- modes[2] 409 | n3 <- modes[3] 410 | #progress bar 411 | pb <- txtProgressBar(min=0,max=n3,style=3) 412 | #define ifft 413 | ifft <- function(x){suppressWarnings(as.numeric(fft(x,inverse=TRUE))/length(x))} 414 | #fft for each of the n1n2 vectors (of length n3) along mode 3 415 | fftz <- aperm(apply(tnsr@data,MARGIN=1:2,fft),c(2,3,1)) 416 | #svd for each face (svdz is a list of the results) 417 | U_arr <- array(0,dim=c(n1,n1,n3)) 418 | V_arr <- array(0,dim=c(n2,n2,n3)) 419 | m <- min(n1,n2) 420 | S_arr <- array(0,dim=c(n1,n2,n3)) 421 | #Think of a way to avoid a loop in the beginning 422 | #Problem is that svd returns a list but ideally we want 3 arrays 423 | #Even with unlist this doesn't seem possible 424 | for (j in 1:n3){ 425 | setTxtProgressBar(pb,j) 426 | decomp <- svd(fftz[,,j],nu=n1,nv=n2) 427 | U_arr[,,j] <- decomp$u 428 | V_arr[,,j] <- decomp$v 429 | S_arr[,,j] <- diag(decomp$d,nrow=n1,ncol=n2) #length is min(n1,n2) 430 | } 431 | close(pb) 432 | #for each svd result, we want to apply ifft 433 | U <- as.tensor(aperm(apply(U_arr,MARGIN=1:2,ifft),c(2,3,1))) 434 | V <- as.tensor(aperm(apply(V_arr,MARGIN=1:2,ifft),c(2,3,1))) 435 | S <- as.tensor(aperm(apply(S_arr,MARGIN=1:2,ifft),c(2,3,1))) 436 | invisible(list(U=U,V=V,S=S)) 437 | } 438 | 439 | #'Reconstruct Tensor From TSVD 440 | #' 441 | #'Reconstruct the original 3-Tensor after it has been decomposed into \code{U, S, V} via \code{\link{t_svd}}. 442 | #'@export 443 | #'@name t_svd_reconstruct 444 | #'@rdname t_svd_reconstruct 445 | #'@aliases t_svd_reconstruct 446 | #'@param L list that is an output from \code{\link{t_svd}} 447 | #'@return a 3-Tensor 448 | #'@seealso \code{\link{t_svd}} 449 | #'@examples 450 | #'tnsr <- rand_tensor(c(10,10,10)) 451 | #'tsvdD <- t_svd(tnsr) 452 | #'1 - fnorm(t_svd_reconstruct(tsvdD)-tnsr)/fnorm(tnsr) 453 | t_svd_reconstruct <- function(L){ 454 | t_mult(t_mult(L$U,L$S),t(L$V)) 455 | } 456 | 457 | ###t-compress (Not Supported) 458 | .t_compress <- function(tnsr,k){ 459 | modes <- tnsr@modes 460 | n1 <- modes[1] 461 | n2 <- modes[2] 462 | n3 <- modes[3] 463 | #progress bar 464 | pb <- txtProgressBar(min=0,max=n3,style=3) 465 | #define ifft 466 | ifft <- function(x){suppressWarnings(as.numeric(fft(x,inverse=TRUE))/length(x))} 467 | #fft for each of the n1n2 vectors (of length n3) along mode 3 468 | fftz <- aperm(apply(tnsr@data,MARGIN=1:2,fft),c(2,3,1)) 469 | #svd for each face (svdz is a list of the results) 470 | U_arr <- array(0,dim=c(n1,n1,n3)) 471 | V_arr <- array(0,dim=c(n2,n2,n3)) 472 | m <- min(n1,n2) 473 | S_arr <- array(0,dim=c(n1,n2,n3)) 474 | #Think of a way to avoid a loop in the beginning 475 | #Problem is that svd returns a list but ideally we want 3 arrays 476 | #Even with unlist this doesn't seem possible 477 | for (j in 1:n3){ 478 | setTxtProgressBar(pb,j) 479 | decomp <- svd(fftz[,,j],nu=n1,nv=n2) 480 | U_arr[,,j] <- decomp$u 481 | V_arr[,,j] <- decomp$v 482 | S_arr[,,j] <- diag(decomp$d,nrow=n1,ncol=n2) #length is min(n1,n2) 483 | } 484 | close(pb) 485 | #for each svd result, we want to apply ifft 486 | U <- as.tensor(aperm(apply(U_arr,MARGIN=1:2,ifft),c(2,3,1))) 487 | V <- as.tensor(aperm(apply(V_arr,MARGIN=1:2,ifft),c(2,3,1))) 488 | S <- as.tensor(aperm(apply(S_arr,MARGIN=1:2,ifft),c(2,3,1))) 489 | 490 | est <- as.tensor(array(0,dim=modes)) 491 | for 
(i in 1:k){
492 |     est <- est + t_mult(t_mult(U[,i,,drop=FALSE],S[i,i,,drop=FALSE]),t(V[,i,,drop=FALSE]))
493 |   }
494 |   resid <- fnorm(est-tnsr)
495 |   invisible(list(est=est, fnorm_resid = resid, norm_percent = (1-resid/fnorm(tnsr))*100))
496 | }
497 | 
498 | ###t-compress2 (Not Supported)
499 | .t_compress2 <- function(tnsr,k1,k2){
500 |   A <- modeSum(tnsr,m=3,drop=TRUE)
501 |   svdz <- svd(A@data,nu=k1,nv=k2)
502 |   Util <- svdz$u
503 |   Vtil <- svdz$v
504 |   modes <- tnsr@modes
505 |   n3 <- modes[3]
506 |   core <- array(0,dim=c(k1,k2,n3))
507 |   for(i in 1:n3){
508 |     core[,,i] <- t(Util)%*%tnsr[,,i]@data%*%Vtil
509 |   }
510 |   est <- array(0,dim=modes)
511 |   for(i in 1:k1){
512 |     for (j in 1:k2){
513 |       est <- est + Util[,i] %o% Vtil[,j] %o% core[i,j,]
514 |     }
515 |   }
516 |   resid <- fnorm(tnsr - est)
517 |   invisible(list(core = as.tensor(core), est=est, fnorm_resid = resid, norm_percent = (1-resid/fnorm(tnsr))*100))
518 | }
519 | 
520 | #'Sparse (Semi-)Nonnegative Tucker Decomposition
521 | #'
522 | #'Decomposes a nonnegative tensor \code{tnsr} into a core tensor \code{Z} (optionally constrained to be nonnegative) and sparse nonnegative factor matrices \code{U[n]}.
523 | #'@export
524 | #'@param tnsr nonnegative tensor with \code{K} modes
525 | #'@param ranks an integer vector of length \code{K} specifying the mode sizes for the output core tensor \code{Z}
526 | #'@param core_nonneg constrain the core tensor \code{Z} to be nonnegative
527 | #'@param tol relative Frobenius norm error tolerance
528 | #'@param hosvd if TRUE, apply the higher-order SVD (\code{\link{hosvd}}) to improve the initial \code{U} and \code{Z}
529 | #'@param max_iter maximum number of iterations if error stays above \code{tol}
530 | #'@param max_time maximum running time in seconds; 0 (the default) means no time limit
531 | #'@param lambda \code{K+1} vector of sparsity regularizer coefficients for the factor matrices and the core tensor
532 | #'@param L_min lower bound for the Lipschitz constants of the gradients of the residual error \eqn{l(Z,U) = fnorm(tnsr - ttl(Z, U))} with respect to \code{Z} and each \code{U}
533 | #'@param rw controls the extrapolation weight
534 | #'@param bound upper bound for the elements of \code{Z} and \code{U[[n]]} (the ones that have zero regularization coefficient \code{lambda})
535 | #'@param U0 initial factor matrices, defaults to nonnegative Gaussian random matrices
536 | #'@param Z0 initial core tensor \code{Z}, defaults to a nonnegative Gaussian random tensor
537 | #'@param verbose print more information on the algorithm progress
538 | #'@param unfold_tnsr precompute the matrix unfoldings of \code{tnsr} along every mode (speeds up calculation, but may require lots of memory)
539 | #'@return a list:\describe{
540 | #'\item{\code{U}}{nonnegative factor matrices}
541 | #'\item{\code{Z}}{nonnegative core tensor}
542 | #'\item{\code{est}}{estimate \eqn{Z \times_1 U_1 \ldots \times_K U_K}}
543 | #'\item{\code{conv}}{method convergence indicator}
544 | #'\item{\code{resid}}{the Frobenius norm of the residual error \code{l(Z,U)} plus the regularization penalty (if any)}
545 | #'\item{\code{n_iter}}{number of iterations}
546 | #'\item{\code{n_redo}}{number of times \code{Z} and \code{U} were recalculated to avoid an increase in the objective function}
547 | #'\item{\code{diag}}{convergence info for each iteration\describe{
548 | #'\item{\code{all_resids}}{residual error at each iteration}
549 | #'\item{\code{all_rel_resid_deltas}}{residual error delta relative to the previous residual}
550 | #'\item{\code{all_rel_resids}}{residual norm relative to the norm of \code{tnsr}}
551 | #'}}}
552 | #'
553 | #'@details The function uses the alternating proximal gradient method to solve the following optimization problem:
554 | #' \deqn{\min 0.5 \|tnsr - Z \times_1 U_1 \ldots \times_K U_K \|_F^2 +
555 | #' \sum_{n=1}^{K} \lambda_n \|U_n\|_1 + \lambda_{K+1} \|Z\|_1, \;\textit{where}\; Z \geq 0, \, U_i \geq 0.}
556 | #' If \code{core_nonneg} is \code{FALSE}, the core tensor \code{Z} is allowed to have negative
557 | #' elements and the \eqn{z_{i,j}=\max(0,z_{i,j}-\lambda_{K+1}/L_{K+1})} rule is replaced by \eqn{z_{i,j}=\mathrm{sign}(z_{i,j})\max(0,|z_{i,j}|-\lambda_{K+1}/L_{K+1})}.
558 | #' The method stops if either the relative improvement of the error is below the tolerance \code{tol} for 3 consecutive iterations or
559 | #' both the relative error improvement and the relative error (with respect to the \code{tnsr} norm) are below the tolerance.
560 | #' Otherwise it stops when the maximal number of iterations or the time limit is reached.
561 | #'
562 | #'@note The implementation is based on the ntds() MATLAB code by Yangyang Xu and Wotao Yin.
563 | #'@references Y. Xu, "Alternating proximal gradient method for sparse nonnegative Tucker decomposition", Math. Prog. Comp., 7, 39-70, 2013.
564 | #'@seealso \code{\link{tucker}}
565 | #'@seealso \url{http://www.caam.rice.edu/~optimization/bcu/}
566 | tucker.nonneg <- function( tnsr, ranks, core_nonneg=TRUE,
567 |                            tol=1e-4, hosvd=FALSE,
568 |                            max_iter = 500, max_time=0,
569 |                            lambda = rep.int( 0, length(ranks)+1 ), L_min = 1, rw=0.9999,
570 |                            bound = Inf,
571 |                            U0=NULL, Z0=NULL, verbose=FALSE,
572 |                            unfold_tnsr=length(dim(tnsr))*prod(dim(tnsr)) < 4000^2 )
573 | {
574 |   #progress bar
575 |   start_time <- proc.time()
576 |   pb <- txtProgressBar(min=0,max=max_iter,style=3)
577 | 
578 |   make_nonneg.tnsr <- function( tnsr )
579 |   {
580 |     tnsr@data[ tnsr@data < 0 ] <- 0
581 |     return ( tnsr )
582 |   }
583 |   make_nonneg.mtx <- function( mtx )
584 |   {
585 |     mtx[ mtx < 0 ] <- 0
586 |     return ( mtx )
587 |   }
588 |   # proximal gradient update of the core tensor Z (in place, via <<-)
589 |   # returns nothing; the updated residual is computed in makeUnStep()
590 |   makeZStep <- function( curZ )
591 |   {
592 |     gradZ <- ttl(curZ, Usq, seq_len(K)) - TtU
593 |     # gradient step on the core tensor
594 |     Z <<- curZ - gradZ/L[[K+1]]
595 |     if ( lambda[[K+1]] > 0 ) {
596 |       if ( core_nonneg ) {
597 |         Z <<- Z - lambda[[K+1]]/L[[K+1]]
598 |       } else {
599 |         Z@data <<- sign(Z@data) * pmax( 0, abs(Z@data) - lambda[[K+1]]/L[[K+1]] )
600 |       }
601 |     }
602 |     if ( core_nonneg ) Z <<- make_nonneg.tnsr( Z )
603 |     # do projection
604 |     if ( doproj[[K+1]] ) {
605 |       mask <- abs(Z@data) > bound
606 |       Z@data[mask] <<- sign(Z@data[mask]) * bound
607 |     }
608 |     return ( invisible() )
609 |   }
610 | 
611 |   # update n-th factor matrix (U[[n]])
612 |   # return new residual error
613 |   makeUnStep <- function( curU, n )
614 |   {
615 |     if ( !is.null(Tmtx) ) {
616 |       B <- unfold( ttl( Z, U[-n], seq_len(K)[-n] ), n, seq_len(K)[-n] )
617 |       Bsq <- tcrossprod(B@data)
618 |       TB <- tcrossprod(Tmtx[[n]], B@data)
619 |     } else {
620 |       B <- unfold( ttl( Z, Usq[-n], seq_len(K)[-n] ), n, seq_len(K)[-n] )
621 |       TB <- unfold( ttl( tnsr, U[-n], seq_len(K)[-n], transpose=TRUE ), n, seq_len(K)[-n] )
622 |       Zn <- unfold( Z, n, seq_len(K)[-n] )
623 | 
624 |       Bsq <- tcrossprod( B@data, Zn@data )
625 |       TB <- tcrossprod( TB@data, Zn@data )
626 |     }
627 |     # compute the gradient
628 |     gradU <- curU %*% Bsq - TB
629 |     # update Lipschitz constant
630 |     L0[[n]] <<- L[[n]]
631 |     L[[n]] <<- max( L_min, norm(Bsq, '2') )
632 |     # update n-th factor matrix
633 |     newU <- make_nonneg.mtx( curU - (gradU+lambda[[n]])/L[[n]] )
634 |     if ( doproj[[n]] ) newU[ newU > bound ] <- bound
635 | 
636 |     # update U[[n]]
637 |     U[[n]] <<- newU
638 |     Usq[[n]] <<- crossprod( U[[n]] )
639 |     nrmUsq[[n]] <<- norm( Usq[[n]], '2' )
640 | 
641 |     # 
--- diagnostics, reporting, stopping checks --- 642 | newResid <- 0.5*(sum(Usq[[n]]*Bsq)-2*sum(U[[n]]*TB)+Tnrm^2) 643 | if (sparse.reg) { 644 | newResid <- newResid + lambda %*% c( sapply( U, sum ), sum(abs(Z@data)) ) 645 | } 646 | return ( newResid ) 647 | } 648 | 649 | Kway <- dim(tnsr) # dimension of tnsr 650 | K <- length(Kway) # tnsr is an K-way tensor 651 | 652 | if ( is.null(U0) ) { 653 | if ( verbose ) message( 'Generating random initial factor matrices estimates...' ) 654 | U0 <- lapply( seq_len(K), function(n) make_nonneg.mtx( matrix( rnorm( Kway[[n]]*ranks[[n]] ), ncol = ranks[[n]] ) ) ) 655 | } 656 | if ( is.null(Z0) ) { 657 | if ( verbose ) message( 'Generating random initial core tensor estimate...' ) 658 | Z0 <- rand_tensor( modes = ranks, drop = FALSE) 659 | } 660 | if ( core_nonneg ) Z0 <- make_nonneg.tnsr(Z0) 661 | 662 | # pre-process the starting point 663 | if (hosvd) { 664 | if ( verbose ) message( 'Applying High Order SVD to improve initial U and Z...' ) 665 | # "solve" Z = tnsr x_1 U_1' ... x_K U_K' 666 | U0 <- lapply( seq_len(K), function(n) { 667 | U0n_tilde <- unfold( ttl(tnsr, U0[-n], seq_len(K)[-n], transpose=TRUE ), 668 | row_idx = n, col_idx = seq_len(K)[-n] )@data 669 | U0n_vecs <- svd( U0n_tilde, nu = ranks[[n]], nv = 0 )$u 670 | U0n <- matrix( unlist( lapply( U0n_vecs, function( Uvec ) { 671 | # make the largest absolute element positive 672 | i <- which.max( abs(Uvec) ) 673 | if ( Uvec[[i]] < 0 ) Uvec <- -Uvec 674 | # project to > 0 675 | Uvec <- pmax( .Machine$double.eps, Uvec ) 676 | } ) ), ncol=ranks[[n]] ) 677 | return ( U0n/sum(U0n) ) 678 | } ) 679 | Z0 <- ttl( tnsr, U0, seq_len(K), transpose=TRUE ) 680 | } 681 | # check the existence of sparseness regularizer 682 | sparse.reg <- any(lambda>0) 683 | # add bound constraint for well-definedness 684 | doproj <- lambda == 0 & is.finite(bound) 685 | 686 | Tnrm <- fnorm(tnsr) 687 | 688 | # rescale the initial point according to the number of elements 689 | Knum <- Kway * ranks 690 | totalNum <- prod(ranks) + sum(Knum) 691 | U0 <- lapply( seq_along(U0), function(n) U0[[n]]/norm(U0[[n]],"F")*Tnrm^(Knum[[n]]/totalNum) ) 692 | Usq0 <- lapply( U0, crossprod ) 693 | nrmUsq <- sapply( Usq0, norm, '2' ) 694 | Z0 <- Z0/fnorm(Z0)*Tnrm^(prod(ranks)/totalNum) 695 | 696 | resid0 <- 0.5*fnorm( tnsr-ttl(Z0,U0,seq_len(K)) )^2 697 | if (sparse.reg) resid0 <- resid0 + lambda %*% c( sapply( U0, sum ), sum(abs(Z0@data)) ) 698 | resid <- resid0 699 | 700 | # Precompute matrix unfoldings of input tensor to save computing time if it is not too large 701 | if (unfold_tnsr) { 702 | if ( verbose ) message( 'Precomputing input tensor unfoldings...' 
) 703 | Tmtx <- lapply( seq_len(K), function(n) unfold( tnsr, row_idx = n, col_idx = seq_len(K)[-n] )@data ) 704 | } else { 705 | if ( verbose ) message( 'No precomputing of tensor unfoldings' ) 706 | Tmtx <- NULL 707 | } 708 | 709 | # Iterations of block-coordinate update 710 | # iteratively updated variables: 711 | # GradU: gradients with respect to each component matrix of U 712 | # GradZ: gradient with respect to Z 713 | # U,Z: new updates 714 | # U0,Z0: old updates 715 | # Um,Zm: extrapolations of U 716 | # L, L0: current and previous Lipschitz bounds 717 | # resid, resid0: current and previous residual error 718 | U <- U0 719 | Um <- U0 720 | Usq <- Usq0 721 | Z <- Z0 722 | Zm <- Z0 723 | 724 | t0 <- rep.int( 1, K+1 ) 725 | t <- t0 726 | wU <- rep.int( 0, K+1 ) 727 | L0 <- rep.int( 1, K+1 ) 728 | L <- L0 729 | 730 | all_resids <- numeric(0) 731 | all_rel_resid_deltas <- numeric(0) 732 | all_rel_resids <- numeric(0) 733 | n_stall <- 0 734 | n_redo <- 0 735 | conv <- FALSE 736 | 737 | # do the iterations 738 | if ( verbose ) message( 'Starting iterations...' ) 739 | for (n_iter in seq_len(max_iter)) { 740 | setTxtProgressBar(pb, n_iter) 741 | 742 | residn0 <- resid 743 | TtU0 <- list( ttm( tnsr, U0[[1]], 1, transpose=TRUE ) ) 744 | for (n in 2:K) { 745 | TtU0[[n]] <- ttm( TtU0[[n-1]], U0[[n]], n, transpose=TRUE ) 746 | } 747 | 748 | for (n in seq.int(from=K,to=1) ) { 749 | # -- update the core tensor Z -- 750 | L0[[K+1]] <- L[[K+1]] 751 | L[[K+1]] <- pmax( L_min, prod( nrmUsq ) ) 752 | 753 | # try to make a step using extrapolated decompositon (Zm,Um) 754 | TtU <- if ( n < K ) ttl( TtU0[[n]], U[(n+1):K], (n+1):K, transpose=TRUE ) else TtU0[[K]] 755 | makeZStep( Zm ) 756 | residn <- makeUnStep( Um[[n]], n ) 757 | if ( residn>residn0 ) { 758 | # extrapolated Zm,Um decomposition lead to residual norm increase, 759 | # revert extrapolation and make a step using Z0,U0 to ensure 760 | # objective function is decreased 761 | n_redo <- n_redo + 1 762 | # re-update to make objective nonincreasing 763 | Usq[[n]] <- Usq0[[n]] # Z update needs it 764 | makeZStep( Z0 ) 765 | residn <- makeUnStep( U0[[n]], n ) 766 | if ( residn>residn0 ) warning( n_iter, ': residue increase at redo step' ) 767 | } 768 | # --- correction and extrapolation --- 769 | t[[n]] <- (1+sqrt(1+4*t0[[n]]^2))/2 770 | # choose smaller weight of U[[n]] for convergence 771 | wU[[n]] <- min( (t0[[n]]-1)/t[[n]], rw*sqrt(L0[[n]]/L[[n]]) ) 772 | Um[[n]] <- U[[n]] + wU[[n]]*( U[[n]]-U0[[n]] ) 773 | t[[K+1]] <- (1+sqrt(1+4*t0[[K+1]]^2))/2 774 | # choose smaller weight of Z for convergence 775 | wU[[K+1]] <- min( (t0[[K+1]]-1)/t[[K+1]], rw*sqrt(L0[[K+1]]/L[[K+1]]) ) 776 | Zm <- Z + wU[K+1]*(Z-Z0) 777 | 778 | # store the current update 779 | Z0 <- Z 780 | U0[[n]] <- U[[n]] 781 | Usq0[[n]] <- Usq[[n]] 782 | t0[c(n,K+1)] <- t[c(n,K+1)] 783 | residn0 <- residn 784 | } 785 | 786 | # --- diagnostics, reporting, stopping checks --- 787 | resid0 <- resid 788 | resid <- max( 0, residn ) 789 | 790 | rel_resid_delta <- abs(resid-resid0)/(resid0+1) 791 | rel_resid <- sqrt(2*resid)/Tnrm 792 | 793 | # reporting 794 | all_resids <- append( all_resids, resid ) 795 | all_rel_resid_deltas <- append( all_rel_resid_deltas, rel_resid_delta ) 796 | all_rel_resids <- append( all_rel_resids, rel_resid ) 797 | 798 | # check stopping criterion 799 | crit <- rel_resid_delta < tol 800 | n_stall <- ifelse( crit, n_stall+1, 0 ) 801 | if ( n_stall >= 3 || rel_resid < tol ) { 802 | if ( verbose ) { 803 | if ( rel_resid == 0 ) message( 'Residue is zero. 
Exact decomposition was found' )
804 |         if ( n_stall >= 3 ) message( 'Residue relative delta below ', tol, ' ', n_stall, ' times in a row' )
805 |         if ( rel_resid < tol ) message( 'Relative residue ', rel_resid, ' is below the tolerance' )
806 |         message( 'tucker.nonneg() converged in ', n_iter, ' iteration(s), ', n_redo, ' redo steps' )
807 |       }
808 |       conv <- TRUE
809 |       break
810 |     }
811 |     if ( max_time > 0 && ( proc.time() - start_time )[[3]] > max_time ) {
812 |       warning( "Maximal time exceeded, the solution might not be optimal" )
813 |       break
814 |     }
815 |   }
816 |   setTxtProgressBar(pb,max_iter)
817 |   close(pb)
818 |   if ( !conv && n_iter == max_iter ) {
819 |     warning( "Maximal number of iterations reached, the solution might not be optimal" )
820 |   }
821 | 
822 |   return ( invisible( list(U=U, Z=Z, est=ttl(Z,U,seq_len(K)),
823 |                            n_iter = n_iter,
824 |                            n_redo = n_redo,
825 |                            conv = conv,
826 |                            resid = resid,
827 |                            diag = list( all_resids = all_resids,
828 |                                         all_rel_resid_deltas = all_rel_resid_deltas,
829 |                                         all_rel_resids = all_rel_resids ) ) ) )
830 | }
831 | 
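###tucker.nonneg sanity check (Not Supported): an illustrative, non-exported
###sketch of typical usage; the helper name .check_tucker_nonneg is not part
###of the package API, and the sizes, ranks, and tolerance are arbitrary.
.check_tucker_nonneg <- function(){
  tnsr <- rand_tensor(c(10,10,10))
  tnsr@data <- abs(tnsr@data)  #make the input nonnegative
  decomp <- tucker.nonneg(tnsr, ranks = c(3,3,3), tol = 1e-3)
  #the core and all the factor matrices respect the nonnegativity constraints
  stopifnot(all(decomp$Z@data >= 0))
  stopifnot(all(sapply(decomp$U, function(U) all(U >= 0))))
  invisible(decomp)
}
--------------------------------------------------------------------------------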