├── DESCRIPTION
├── MD5
├── NAMESPACE
├── R
│   ├── dbn_dnn_train.R
│   ├── dbn_train.R
│   ├── load_mnist.R
│   ├── nn_predict.R
│   ├── nn_train.R
│   ├── rbm_train.R
│   ├── sae_dnn_train.R
│   ├── sae_train.R
│   └── sigm.R
└── man
    ├── dbn.dnn.train.Rd
    ├── load.mnist.Rd
    ├── nn.predict.Rd
    ├── nn.test.Rd
    ├── nn.train.Rd
    ├── rbm.down.Rd
    ├── rbm.train.Rd
    ├── rbm.up.Rd
    └── sae.dnn.train.Rd

/DESCRIPTION:
--------------------------------------------------------------------------------
Package: deepnet
Type: Package
Title: Deep Learning Toolkit in R
Version: 0.2.1
Date: 2014-03-20
Author: Xiao Rong
Maintainer: Xiao Rong
Description: Implements some deep learning architectures and neural network
        algorithms, including BP, RBM, DBN, deep autoencoders and so on.
License: GPL
Packaged: 2022-06-24 12:10:21 UTC; hornik
NeedsCompilation: no
Repository: CRAN
Date/Publication: 2022-06-24 12:29:27 UTC
--------------------------------------------------------------------------------
/MD5:
--------------------------------------------------------------------------------
e85570803253722bb5ae3f98cf92a909 *DESCRIPTION
22ac7b5f35e19598c995817ce9c94dc4 *NAMESPACE
94de9b8fdb88addceab762d029422434 *R/dbn_dnn_train.R
322102ab3dff32139813dc7f64f82e25 *R/dbn_train.R
edb1672f0cedad2fb3dfce53527b6ee3 *R/load_mnist.R
ce9a3cdc72c7af5ab1d98b22a00760b4 *R/nn_predict.R
de9d52a596f6b662f6792f8dd42c14a0 *R/nn_train.R
835c1bd192212d37f5164609461583b3 *R/rbm_train.R
15dedb2cf455a8549f66986033bc9e6d *R/sae_dnn_train.R
54c9b1c08214146f6c29f798264495e7 *R/sae_train.R
40584cbe9b87e055ac2bd97ab14331e9 *R/sigm.R
2b6e47563d1b8baa2ced3beee263b318 *man/dbn.dnn.train.Rd
7ab6a22b74679ff072d7875478265605 *man/load.mnist.Rd
621730e7d6268dd4b86550c60114c0a1 *man/nn.predict.Rd
cbce25a4fa1add3ae8efd2f2e4955b16 *man/nn.test.Rd
fbd172f68b22f70ab60e139d1c631a14 *man/nn.train.Rd
40fe5f99305c4361d2054ca05355ec3e *man/rbm.down.Rd
01e3c59c0c41429a09d53afa5df57be8 *man/rbm.train.Rd
fd33a563ac39802729a501222cad9ada *man/rbm.up.Rd
fecb025d7d8c9f109b4338aefed14284 *man/sae.dnn.train.Rd
--------------------------------------------------------------------------------
/NAMESPACE:
--------------------------------------------------------------------------------
export(dbn.dnn.train)
export(load.mnist)
export(nn.predict)
export(nn.test)
export(nn.train)
export(rbm.down)
export(rbm.train)
export(rbm.up)
export(sae.dnn.train)

importFrom("grDevices", "gray")
importFrom("graphics", "image")
importFrom("stats", "runif")
--------------------------------------------------------------------------------
/R/dbn_dnn_train.R:
--------------------------------------------------------------------------------
##' Train a deep neural network with weights initialized by a DBN
##'
##' Train a deep neural network whose weights are initialized by a deep
##' belief network (DBN) pretrained with stacked RBMs.
##' @param x matrix of x values for examples
##' @param y vector or matrix of target values for examples
##' @param hidden vector giving the number of units in each hidden layer. Default is c(1).
##' @param activationfun activation function of the hidden units. Can be "sigm", "linear" or "tanh". Default is "sigm" for the logistic function.
##' @param learningrate learning rate for gradient descent. Default is 0.8.
##' @param momentum momentum for gradient descent. Default is 0.5.
##' @param learningrate_scale the learning rate is multiplied by this scale after every iteration. Default is 1.
##' @param numepochs number of training epochs over the samples. Default is 3.
##' @param batchsize size of mini-batch. Default is 100.
##' @param output function of the output units; can be "sigm", "linear" or "softmax". Default is "sigm".
##' @param hidden_dropout dropout fraction for the hidden layers. Default is 0.
##' @param visible_dropout dropout fraction for the input layer. Default is 0.
##' @param cd number of Gibbs sampling iterations in the contrastive divergence (CD) algorithm. Default is 1.
##' @author Xiao Rong
##' @examples
##' Var1 <- c(rnorm(50,1,0.5),rnorm(50,-0.6,0.2))
##' Var2 <- c(rnorm(50,-0.8,0.2),rnorm(50,2,1))
##' x <- matrix(c(Var1,Var2),nrow=100,ncol=2)
##' y <- c(rep(1,50),rep(0,50))
##' dnn <- dbn.dnn.train(x,y,hidden=c(5,5))
##' ## predict by dnn
##' test_Var1 <- c(rnorm(50,1,0.5),rnorm(50,-0.6,0.2))
##' test_Var2 <- c(rnorm(50,-0.8,0.2),rnorm(50,2,1))
##' test_x <- matrix(c(test_Var1,test_Var2),nrow=100,ncol=2)
##' nn.test(dnn,test_x,y)
##' @export
dbn.dnn.train <- function(x,y,hidden=c(1),
                          activationfun="sigm",
                          learningrate=0.8,
                          momentum=0.5,
                          learningrate_scale=1,
                          output="sigm",
                          numepochs=3,batchsize=100,
                          hidden_dropout=0,visible_dropout=0,cd=1){
    output_dim <- 0
    if(is.vector(y)){
        output_dim <- 1
    }else if(is.matrix(y)){
        output_dim <- ncol(y)
    }
    if (output_dim == 0)
        stop("y must be a vector or matrix!")
    message("begin to train dbn ......")
    dbn <- dbn.train(x,hidden=hidden,
                     numepochs=numepochs,batchsize=batchsize,
                     learningrate=learningrate,learningrate_scale=learningrate_scale,
                     momentum=momentum,cd=cd)
    message("dbn has been trained.")
    # seed the deep net with the pretrained RBM weights and hidden biases
    initW <- list()
    initB <- list()
    for(i in 1:(length(dbn$size) - 1)){
        initW[[i]] <- dbn$rbm[[i]]$W
        initB[[i]] <- dbn$rbm[[i]]$C
    }
    # randomly initialize the weights between the last hidden layer and the output layer
    last_hidden <- dbn$size[length(dbn$size)]
    initW[[length(dbn$size)]] <- matrix(runif(output_dim*last_hidden,min=-0.1,max=0.1),
                                        nrow=output_dim,ncol=last_hidden)
    initB[[length(dbn$size)]] <- runif(output_dim,min=-0.1,max=0.1)
    message("begin to train deep nn ......")
    # pass the user-supplied dropout fractions through (the original hard-coded 0)
    dnn <- nn.train(x,y,initW=initW,initB=initB,hidden=hidden,
                    activationfun=activationfun,
                    learningrate=learningrate,
                    momentum=momentum,
                    learningrate_scale=learningrate_scale,
                    output=output,
                    numepochs=numepochs,batchsize=batchsize,
                    hidden_dropout=hidden_dropout,visible_dropout=visible_dropout)
    message("deep nn has been trained.")
    dnn
}
--------------------------------------------------------------------------------
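The roxygen example above only exercises a binary 0/1 target vector. As a hedged sketch (not taken from the package documentation), the same entry point also accepts a one-of-c label matrix together with a softmax output layer; the data and sizes below are synthetic and purely illustrative:

## illustrative multi-class sketch: y is a one-of-c (one-hot) matrix,
## so output="softmax" makes nn.predict return per-class probabilities
library(deepnet)
x <- matrix(rnorm(300 * 4), nrow = 300, ncol = 4)
labels <- sample(0:2, 300, replace = TRUE)
y <- diag(3)[labels + 1, ]                  # 300 x 3 one-of-c matrix
dnn <- dbn.dnn.train(x, y, hidden = c(8, 8), output = "softmax",
                     numepochs = 5, batchsize = 30)
probs <- nn.predict(dnn, x)                 # one row of probabilities per sample
pred <- max.col(probs) - 1                  # back to the 0-based labels
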
/R/dbn_train.R:
--------------------------------------------------------------------------------
# Greedy layer-wise pretraining of a deep belief network: each hidden layer
# is trained as an RBM on the up-propagated activations of the layer below.
dbn.train <- function(x,hidden=c(10,10),
                      numepochs=3,batchsize=100,
                      learningrate=0.8,learningrate_scale=1,momentum=0.5,
                      visible_type="bin",hidden_type="bin",cd=1){
    if (!is.matrix(x))
        stop("x must be a matrix!")
    input_dim <- ncol(x)
    dbn <- list(
        size = c(input_dim, hidden)
    )
    train_x <- x
    message("training layer 1 rbm ...")
    dbn$rbm[[1]] <- rbm.train(train_x,hidden[1],
                              numepochs=numepochs,batchsize=batchsize,
                              learningrate=learningrate,learningrate_scale=learningrate_scale,
                              momentum=momentum,
                              visible_type=visible_type,hidden_type=hidden_type,cd=cd)
    if(length(dbn$size) > 2){
        for(i in 2:(length(dbn$size) - 1)){
            # use the previous RBM's hidden activations as this layer's input
            train_x <- rbm.up(dbn$rbm[[i-1]], train_x)
            message(sprintf("training layer %d rbm ...",i))
            dbn$rbm[[i]] <- rbm.train(train_x,hidden[i],
                                      numepochs=numepochs,batchsize=batchsize,
                                      learningrate=learningrate,learningrate_scale=learningrate_scale,
                                      momentum=momentum,
                                      visible_type=visible_type,hidden_type=hidden_type,cd=cd)
        }
    }
    dbn
}

# Generate a visible-layer sample from a top-level hidden state: run Gibbs
# sampling at the top RBM, then propagate the result down the stack.
dbn.down <- function(dbn,h,round=10){
    hi <- h
    i <- length(dbn$size) - 1  # top rbm
    for(j in 1:round){
        vi <- rbm.down(dbn$rbm[[i]],hi)
        hi <- rbm.up(dbn$rbm[[i]],vi)
    }
    if(length(dbn$size) > 2){
        hi <- vi
        for(i in (length(dbn$size) - 2):1){
            vi <- rbm.down(dbn$rbm[[i]],hi)
            hi <- vi
        }
    }
    vi
}
--------------------------------------------------------------------------------
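dbn.train and dbn.down are internal helpers, but the greedy layer-wise scheme they implement can be reproduced with the exported rbm.train, rbm.up and rbm.down. A minimal sketch, assuming binary inputs; names and layer sizes are illustrative:

## train two stacked RBMs by hand, mirroring what dbn.train does
library(deepnet)
x  <- matrix(round(runif(200 * 10)), nrow = 200, ncol = 10)  # 0/1 inputs
r1 <- rbm.train(x, 6, numepochs = 3, batchsize = 20)         # layer-1 RBM
h1 <- rbm.up(r1, x)                                          # propagate activations up
r2 <- rbm.train(h1, 4, numepochs = 3, batchsize = 20)        # layer-2 RBM on h1
## one up-down reconstruction through the stack, roughly what dbn.down
## computes with round = 1
v  <- rbm.down(r1, rbm.down(r2, rbm.up(r2, h1)))
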
/R/load_mnist.R:
--------------------------------------------------------------------------------
##' Load the MNIST Dataset
##'
##' Load the MNIST dataset from the uncompressed IDX files.
##' @param dir directory containing the MNIST dataset files
##' @return the MNIST dataset as a list:
##' train$n   number of training samples
##' train$x   pixel values of each training image
##' train$y   label of each training image
##' train$yy  one-of-c encoding of each training label
##' test$n    number of test samples
##' test$x    pixel values of each test image
##' test$y    label of each test image
##' test$yy   one-of-c encoding of each test label
##' @author Xiao Rong
##' @export
load.mnist <- function(dir) {
    load.image.file <- function(filename) {
        ret <- list()
        f <- file(filename,'rb')
        readBin(f,'integer',n=1,size=4,endian='big')   # skip the magic number
        ret$n <- readBin(f,'integer',n=1,size=4,endian='big')
        nrow <- readBin(f,'integer',n=1,size=4,endian='big')
        ncol <- readBin(f,'integer',n=1,size=4,endian='big')
        x <- readBin(f,'integer',n=ret$n*nrow*ncol,size=1,signed=FALSE)
        ret$x <- matrix(x, ncol=nrow*ncol, byrow=TRUE)   # one image per row
        close(f)
        ret
    }
    load.label.file <- function(filename) {
        f <- file(filename,'rb')
        readBin(f,'integer',n=1,size=4,endian='big')   # skip the magic number
        n <- readBin(f,'integer',n=1,size=4,endian='big')
        y <- readBin(f,'integer',n=n,size=1,signed=FALSE)
        close(f)
        y
    }
    mnist <- list()
    mnist$train <- load.image.file(paste(dir,'/train-images-idx3-ubyte',sep=""))
    mnist$test <- load.image.file(paste(dir,'/t10k-images-idx3-ubyte',sep=""))

    mnist$train$y <- load.label.file(paste(dir,'/train-labels-idx1-ubyte',sep=""))
    # build the one-of-c (one-hot) label matrices
    n <- length(mnist$train$y)
    mnist$train$yy <- matrix(rep(0,n*10),nrow=n,ncol=10)
    for (i in 1:n){
        mnist$train$yy[i,mnist$train$y[i] + 1] <- 1
    }
    mnist$test$y <- load.label.file(paste(dir,'/t10k-labels-idx1-ubyte',sep=""))
    m <- length(mnist$test$y)
    mnist$test$yy <- matrix(rep(0,m*10),nrow=m,ncol=10)
    for (j in 1:m){
        mnist$test$yy[j,mnist$test$y[j] + 1] <- 1
    }
    mnist
}


# Render one 784-pixel image row as a 28x28 grayscale digit (not exported).
show.digit <- function(arr784, col=gray(12:1/12), ...) {
    image(matrix(arr784, nrow=28)[,28:1], col=col, ...)
}
--------------------------------------------------------------------------------
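A hedged usage sketch; the directory path is illustrative and must contain the four uncompressed IDX files named in load.mnist above:

library(deepnet)
mnist <- load.mnist("~/data/mnist")   # illustrative path
mnist$train$n                         # number of training samples (60000)
dim(mnist$train$x)                    # 60000 x 784 pixel matrix, one image per row
mnist$train$y[1]                      # integer label of the first image
mnist$train$yy[1, ]                   # its one-of-c encoding
## show.digit is not exported; from outside the package use deepnet:::show.digit
show.digit(mnist$train$x[1, ])
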
/R/nn_predict.R:
--------------------------------------------------------------------------------
##' Predict new samples with a trained NN
##'
##' Predict new samples with a neural network trained by nn.train.
##' @param nn neural network trained by the function nn.train
##' @param x new samples to predict
##' @return the raw output values of the neural network. For a classification task, these are the class probabilities.
##' @examples
##' Var1 <- c(rnorm(50,1,0.5),rnorm(50,-0.6,0.2))
##' Var2 <- c(rnorm(50,-0.8,0.2),rnorm(50,2,1))
##' x <- matrix(c(Var1,Var2),nrow=100,ncol=2)
##' y <- c(rep(1,50),rep(0,50))
##' nn <- nn.train(x,y,hidden=c(5))
##' ## predict by nn
##' test_Var1 <- c(rnorm(50,1,0.5),rnorm(50,-0.6,0.2))
##' test_Var2 <- c(rnorm(50,-0.8,0.2),rnorm(50,2,1))
##' test_x <- matrix(c(test_Var1,test_Var2),nrow=100,ncol=2)
##' yy <- nn.predict(nn,test_x)
##'
##' @author Xiao Rong
##' @export
nn.predict <- function(nn,x){
    post <- x
    # hidden layers
    for(i in 2:(length(nn$size) - 1)){
        pre <- t( nn$W[[i-1]] %*% t(post) + nn$B[[i-1]] )
        if(nn$activationfun == "sigm"){
            post <- sigm( pre )
        }else if(nn$activationfun == "tanh"){
            post <- tanh(pre)
        }else{
            stop(sprintf("unsupported activation function '%s'", nn$activationfun))
        }
        # scale activations to compensate for dropout used during training
        post <- post * (1 - nn$hidden_dropout)
    }
    # output layer
    i <- length(nn$size)
    pre <- t( nn$W[[i-1]] %*% t(post) + nn$B[[i-1]] )
    if(nn$output == "sigm"){
        post <- sigm( pre )
    }else if(nn$output == "linear"){
        post <- pre
    }else if(nn$output == "softmax"){
        post <- exp(pre)
        post <- post / rowSums(post)
    }else{
        stop("unsupported output function!")
    }
    post
}

##' Test new samples with a trained NN
##'
##' Test new samples with a neural network trained by nn.train and return the error rate for classification.
##' @param nn neural network trained by the function nn.train
##' @param x new samples to predict
##' @param y labels of the new samples
##' @param t threshold for classification. If the nn.predict value >= t, the label is 1; otherwise it is 0.
##' @return error rate
##' @examples
##' Var1 <- c(rnorm(50,1,0.5),rnorm(50,-0.6,0.2))
##' Var2 <- c(rnorm(50,-0.8,0.2),rnorm(50,2,1))
##' x <- matrix(c(Var1,Var2),nrow=100,ncol=2)
##' y <- c(rep(1,50),rep(0,50))
##' nn <- nn.train(x,y,hidden=c(5))
##' test_Var1 <- c(rnorm(50,1,0.5),rnorm(50,-0.6,0.2))
##' test_Var2 <- c(rnorm(50,-0.8,0.2),rnorm(50,2,1))
##' test_x <- matrix(c(test_Var1,test_Var2),nrow=100,ncol=2)
##' err <- nn.test(nn,test_x,y)
##'
##' @author Xiao Rong
##' @export
nn.test <- function (nn,x,y,t=0.5){
    y_p <- nn.predict(nn,x)
    m <- nrow(x)
    y_p[y_p >= t] <- 1
    y_p[y_p < t] <- 0
    # each misclassified one-of-c row differs from y in two positions,
    # hence the division by 2 when counting errors
    error_count <- sum(abs(y_p - y)) / 2
    error_count / m
}
--------------------------------------------------------------------------------
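nn.test thresholds every output at t, which suits binary targets; for a softmax/one-of-c model, an argmax comparison is the natural check. A minimal sketch, assuming a trained network dnn, test inputs test_x, and a one-of-c label matrix test_yy (for example from load.mnist):

probs <- nn.predict(dnn, test_x)   # one row of class probabilities per sample
pred  <- max.col(probs)            # predicted class index per row
truth <- max.col(test_yy)          # true class index per row
mean(pred != truth)                # misclassification rate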