├── inst
│   ├── extdata
│   │   └── empty.txt
│   └── CITATION
├── .gitignore
├── index.html
├── docs
│   ├── figures
│   │   └── coreANTsXNetTools.png
│   ├── pkgdown.yml
│   ├── reference
│   │   ├── drawRectangles-1.png
│   │   ├── drawRectangles-2.png
│   │   ├── applySuperResolutionModel-1.png
│   │   ├── applySuperResolutionModel-2.png
│   │   └── applySuperResolutionModel-3.png
│   ├── link.svg
│   ├── bootstrap-toc.css
│   └── docsearch.js
├── tests
│   ├── testthat.R
│   └── testthat
│       ├── test-pretrained.R
│       ├── test-alexNetModel.R
│       └── test-alexNetModel3D.R
├── requirements.txt
├── codecov.yml
├── R
│   ├── utils-pipe.R
│   ├── RcppExports.R
│   ├── mriSuperResolution.R
│   ├── deepBackProjectionUtilities.R
│   ├── customActivationLayers.R
│   └── createAutoencoderModel.R
├── .Rbuildignore
├── man
│   ├── pipe.Rd
│   ├── layer_activation_log_softmax.Rd
│   ├── maximum_mean_discrepancy.Rd
│   ├── GMSD.Rd
│   ├── AttentionLayer2D.Rd
│   ├── AttentionLayer3D.Rd
│   ├── PSNR.Rd
│   ├── MSE.Rd
│   ├── MAE.Rd
│   ├── padOrCropImageToSize.Rd
│   ├── sampleFromCategoricalDistribution.Rd
│   ├── cropImageCenter.Rd
│   ├── decodeUnet.Rd
│   ├── LogSoftmaxLayer.Rd
│   ├── getMixtureDensityLossFunction.Rd
│   ├── splitMixtureParameters.Rd
│   ├── getMixtureDensitySamplingFunction.Rd
│   ├── encodeUnet.Rd
│   ├── mixture_density_network_softmax.Rd
│   ├── getMixtureDensityMseAccuracyFunction.Rd
│   ├── padImageByFactor.Rd
│   ├── ClusteringLayer.Rd
│   ├── layer_l2_normalization_2d.Rd
│   ├── EfficientAttentionLayer2D.Rd
│   ├── EfficientAttentionLayer3D.Rd
│   ├── createNoBrainerUnetModel3D.Rd
│   ├── SpatialTransformerLayer3D.Rd
│   ├── AttentionAugmentationLayer2D.Rd
│   ├── InpaintingDeepFillModel.Rd
│   ├── SSIM.Rd
│   ├── mriSuperResolution.Rd
│   ├── ResampleTensorLayer2D.Rd
│   ├── peak_signal_to_noise_ratio.Rd
│   ├── ResampleTensorToTargetTensorLayer3D.Rd
│   ├── ResampleTensorLayer3D.Rd
│   ├── linMatchIntensity.Rd
│   ├── pearson_correlation_coefficient.Rd
│   ├── jaccardSimilarity.Rd
│   ├── SpatialTransformerLayer2D.Rd
│   ├── weighted_categorical_crossentropy.Rd
│   ├── ResampleTensorToTargetTensorLayer2D.Rd
│   ├── createSysuMediaUnetModel2D.Rd
│   ├── CycleGanModel.Rd
│   ├── sampleFromOutput.Rd
│   ├── createAutoencoderModel.Rd
│   ├── lungExtraction.Rd
│   ├── layer_attention_augmentation_2d.Rd
│   ├── ScaleLayer.Rd
│   ├── corticalThickness.Rd
│   ├── layer_mixture_density.Rd
│   ├── layer_resample_tensor_to_target_tensor_3d.Rd
│   ├── layer_resample_tensor_to_target_tensor_2d.Rd
│   ├── ContextualAttentionLayer3D.Rd
│   ├── layer_resample_tensor_3d.Rd
│   ├── SuperResolutionGanModel.Rd
│   ├── layer_resample_tensor_2d.Rd
│   ├── ContextualAttentionLayer2D.Rd
│   ├── L2NormalizationLayer2D.Rd
│   ├── basisWarp.Rd
│   ├── regressionMatchImage.Rd
│   ├── L2NormalizationLayer3D.Rd
│   ├── MixtureDensityNetworkLayer.Rd
│   ├── drawRectangles.Rd
│   ├── elBicho.Rd
│   ├── convertCoordinates.Rd
│   ├── createHippMapp3rUnetModel3D.Rd
│   ├── layer_anchor_box_2d.Rd
│   ├── categorical_focal_loss.Rd
│   ├── getANTsXNetData.Rd
│   ├── createConvolutionalAutoencoderModel2D.Rd
│   ├── createConvolutionalAutoencoderModel3D.Rd
│   ├── categorical_focal_gain.Rd
│   ├── createSimpleFullyConvolutionalNeuralNetworkModel3D.Rd
│   ├── uvaSeg.Rd
│   ├── VanillaGanModel.Rd
│   ├── WassersteinGanModel.Rd
│   ├── createDenoisingAutoEncoderSuperResolutionModel2D.Rd
│   ├── layer_attention_2d.Rd
│   ├── layer_attention_3d.Rd
│   ├── layer_spatial_transformer_2d.Rd
│   ├── createDenoisingAutoEncoderSuperResolutionModel3D.Rd
│   ├── reconstructImageFromPatches.Rd
│   ├── createResNetSuperResolutionModel2D.Rd
│   ├── DeepConvolutionalGanModel.Rd
│   ├── createResNetSuperResolutionModel3D.Rd
│   ├── createExpandedSuperResolutionModel2D.Rd
│   ├── histogramWarpImageIntensities.Rd
│   ├── uvaSegTrain.Rd
│   ├── createDeepDenoiseSuperResolutionModel2D.Rd
│   ├── createExpandedSuperResolutionModel3D.Rd
│   ├── createDeepDenoiseSuperResolutionModel3D.Rd
│   ├── ImprovedWassersteinGanModel.Rd
│   ├── layer_efficient_attention_2d.Rd
│   ├── layer_efficient_attention_3d.Rd
│   ├── LossSSD.Rd
│   ├── applySuperResolutionModelToImage.Rd
│   ├── createImageSuperResolutionModel2D.Rd
│   ├── applyDeepBackProjectionModel.Rd
│   ├── createImageSuperResolutionModel3D.Rd
│   ├── layer_attention_augmented_convolution_block_2d.Rd
│   ├── layer_contextual_attention_2d.Rd
│   ├── applySuperResolutionModel.Rd
│   ├── AnchorBoxLayer2D.Rd
│   ├── deepAtropos.Rd
│   ├── AnchorBoxLayer3D.Rd
│   ├── multilabel_surface_loss.Rd
│   ├── hippMapp3rSegmentation.Rd
│   ├── layer_instance_normalization.Rd
│   ├── InstanceNormalizationLayer.Rd
│   ├── longitudinalCorticalThickness.Rd
│   ├── brainAge.Rd
│   ├── createFullyConvolutionalVggModel2D.Rd
│   ├── sysuMediaWmhSegmentation.Rd
│   ├── multilabel_dice_coefficient.Rd
│   ├── createFullyConvolutionalVggModel3D.Rd
│   ├── decodeSsd2D.Rd
│   ├── ewDavid.Rd
│   ├── decodeSsd3D.Rd
│   ├── extractImagePatches.Rd
│   ├── brainExtraction.Rd
│   ├── createEnhancedDeepSuperResolutionModel2D.Rd
│   ├── applySuperResolutionModelPatch.Rd
│   ├── extractImagePatchCoordinates.Rd
│   ├── createSimpleClassificationWithSpatialTransformerNetworkModel2D.Rd
│   ├── createSimpleClassificationWithSpatialTransformerNetworkModel3D.Rd
│   └── getPretrainedNetwork.Rd
├── ANTsRNet.Rproj
├── .github
│   └── ISSUE_TEMPLATE
│       └── bug_report.md
├── appveyor.yml
├── DESCRIPTION
└── .travis.yml
/inst/extdata/empty.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .RData
3 | .Rhistory
4 | .Rproj.user
5 |
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/docs/figures/coreANTsXNetTools.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/ANTsRNet/master/docs/figures/coreANTsXNetTools.png
--------------------------------------------------------------------------------
/docs/pkgdown.yml:
--------------------------------------------------------------------------------
1 | pandoc: '2.11'
2 | pkgdown: 1.5.1
3 | pkgdown_sha: ~
4 | articles: []
5 | last_built: 2021-03-26T18:27Z
6 |
7 |
--------------------------------------------------------------------------------
/docs/reference/drawRectangles-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/ANTsRNet/master/docs/reference/drawRectangles-1.png
--------------------------------------------------------------------------------
/docs/reference/drawRectangles-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/ANTsRNet/master/docs/reference/drawRectangles-2.png
--------------------------------------------------------------------------------
/tests/testthat.R:
--------------------------------------------------------------------------------
1 | library(testthat)
2 | library(ANTsRNet)
3 |
4 | have_keras = keras::is_keras_available()
5 | test_check("ANTsRNet")
6 |
--------------------------------------------------------------------------------
/docs/reference/applySuperResolutionModel-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/ANTsRNet/master/docs/reference/applySuperResolutionModel-1.png
--------------------------------------------------------------------------------
/docs/reference/applySuperResolutionModel-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/ANTsRNet/master/docs/reference/applySuperResolutionModel-2.png
--------------------------------------------------------------------------------
/docs/reference/applySuperResolutionModel-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ashishpatel26/ANTsRNet/master/docs/reference/applySuperResolutionModel-3.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | keras
2 | h5py
3 | pyyaml
4 | requests
5 | Pillow
6 | scipy
7 | tensorflow>=2.0.1
8 | setuptools
9 | wheel
10 | theano
11 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | comment: false
2 |
3 | coverage:
4 | status:
5 | project:
6 | default:
7 | target: auto
8 | threshold: 1%
9 | patch:
10 | default:
11 | target: auto
12 | threshold: 1%
13 |
--------------------------------------------------------------------------------
/R/utils-pipe.R:
--------------------------------------------------------------------------------
1 | #' Pipe operator
2 | #'
3 | #' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
4 | #'
5 | #' @name %>%
6 | #' @rdname pipe
7 | #' @keywords internal
8 | #' @export
9 | #' @importFrom magrittr %>%
10 | #' @usage lhs \%>\% rhs
11 | NULL
12 |
--------------------------------------------------------------------------------
/.Rbuildignore:
--------------------------------------------------------------------------------
1 | ^.*\.Rproj$
2 | ^\.Rproj\.user$
3 | \.travis\.yml$
4 | _config\.yml$
5 | _pkgdown.yml
6 | docs
7 | ^ANTsRNet\.Rproj$
8 | ^\.github$
9 | ^index\.html$
10 | ^requirements\.txt$
11 | ^_pkgdown\.yml$
12 | ^docs$
13 | ^pkgdown$
14 | ^codecov\.yml$
15 | ^appveyor\.yml$
16 |
--------------------------------------------------------------------------------
/R/RcppExports.R:
--------------------------------------------------------------------------------
1 | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
2 | # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
3 |
4 | fuzzyClustering <- function(data, centers, m) {
5 | .Call('_ANTsRNet_fuzzyClustering', PACKAGE = 'ANTsRNet', data, centers, m)
6 | }
7 |
8 |
--------------------------------------------------------------------------------
/man/pipe.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/utils-pipe.R
3 | \name{\%>\%}
4 | \alias{\%>\%}
5 | \title{Pipe operator}
6 | \usage{
7 | lhs \%>\% rhs
8 | }
9 | \description{
10 | See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
11 | }
12 | \keyword{internal}
13 |
--------------------------------------------------------------------------------
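The re-exported pipe behaves exactly like magrittr's `%>%`; a minimal usage sketch (nothing here is specific to ANTsRNet):

library( ANTsRNet )

# pipe a vector through two transformations
c( 1, 4, 9 ) %>% sqrt() %>% sum()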
/ANTsRNet.Rproj:
--------------------------------------------------------------------------------
1 | Version: 1.0
2 |
3 | RestoreWorkspace: No
4 | SaveWorkspace: No
5 | AlwaysSaveHistory: Default
6 |
7 | EnableCodeIndexing: Yes
8 | UseSpacesForTab: Yes
9 | NumSpacesForTab: 2
10 | Encoding: UTF-8
11 |
12 | RnwWeave: knitr
13 | LaTeX: pdfLaTeX
14 |
15 | AutoAppendNewline: Yes
16 | StripTrailingWhitespace: Yes
17 |
18 | BuildType: Package
19 | PackageUseDevtools: Yes
20 | PackageInstallArgs: --no-multiarch --with-keep.source
21 | PackageRoxygenize: rd,collate,namespace
22 |
--------------------------------------------------------------------------------
/man/layer_activation_log_softmax.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customActivationLayers.R
3 | \name{layer_activation_log_softmax}
4 | \alias{layer_activation_log_softmax}
5 | \title{Log softmax layer}
6 | \usage{
7 | layer_activation_log_softmax(object, axis = -1, trainable = TRUE)
8 | }
9 | \arguments{
10 | \item{axis}{Integer specifying which axis.}
11 |
12 | \item{trainable}{Whether the layer weights will be updated during training.}
13 | }
14 | \value{
15 | a keras layer tensor
16 | }
17 | \description{
18 | Creates a log softmax layer
19 | }
20 | \author{
21 | Tustison NJ
22 | }
23 |
--------------------------------------------------------------------------------
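For the log softmax wrapper documented above, a minimal sketch of composing it into a keras model; the input shape and dense layer are arbitrary choices for illustration, not taken from the package docs:

library( keras )
library( ANTsRNet )

input <- layer_input( shape = c( 10 ) )
output <- input %>%
  layer_dense( units = 5 ) %>%
  layer_activation_log_softmax( axis = -1L )   # log-probabilities over the last axis
model <- keras_model( inputs = input, outputs = output )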
/man/maximum_mean_discrepancy.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customMetrics.R
3 | \name{maximum_mean_discrepancy}
4 | \alias{maximum_mean_discrepancy}
5 | \title{Function for maximum-mean discrepancy}
6 | \usage{
7 | maximum_mean_discrepancy(y_true, y_pred, sigma = 1)
8 | }
9 | \arguments{
10 | \item{y_true}{True labels (Tensor)}
11 |
12 | \item{y_pred}{Predictions (Tensor of the same shape as \code{y_true})}
13 | }
14 | \value{
15 | mmd value
16 | }
17 | \description{
18 | \url{https://jmlr.csail.mit.edu/papers/volume13/gretton12a/gretton12a.pdf}
19 | }
20 | \author{
21 | Tustison NJ
22 | }
23 |
--------------------------------------------------------------------------------
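A sketch of using maximum_mean_discrepancy as a training metric, assuming the same custom_metric wiring shown for the other metrics in this package (e.g., peak_signal_to_noise_ratio):

library( ANTsRNet )
library( keras )

model <- createUnetModel2D( c( 64, 64, 1 ) )

metric_maximum_mean_discrepancy <-
  custom_metric( "maximum_mean_discrepancy", maximum_mean_discrepancy )

model %>% compile( loss = loss_categorical_crossentropy,
                   optimizer = optimizer_adam( lr = 0.0001 ),
                   metrics = c( metric_maximum_mean_discrepancy ) )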
/man/GMSD.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/superResolutionUtilities.R
3 | \name{GMSD}
4 | \alias{GMSD}
5 | \title{Gradient Magnitude Similarity Deviation}
6 | \usage{
7 | GMSD(x, y)
8 | }
9 | \arguments{
10 | \item{x}{input image.}
11 |
12 | \item{y}{input image.}
13 | }
14 | \value{
15 | scalar
16 | }
17 | \description{
18 | A fast and simple metric that correlates to perceptual quality
19 | }
20 | \examples{
21 |
22 | library( ANTsR )
23 |
24 | r16 <- antsImageRead( getANTsRData( 'r16' ) )
25 | r85 <- antsImageRead( getANTsRData( 'r85' ) )
26 | value <- GMSD( r16, r85 )
27 |
28 | }
29 | \author{
30 | Avants BB
31 | }
32 |
--------------------------------------------------------------------------------
/man/AttentionLayer2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/attentionUtilities.R
3 | \docType{class}
4 | \name{AttentionLayer2D}
5 | \alias{AttentionLayer2D}
6 | \title{Attention layer (2-D)}
7 | \value{
8 | output of tensor shape.
9 | }
10 | \description{
11 | Attention layer (2-D)
12 | }
13 | \section{Arguments}{
14 |
15 | \describe{
16 | \item{numberOfChannels}{number of channels.}
17 | }
18 | }
19 |
20 | \section{Details}{
21 |
22 | \code{$initialize} instantiates a new class.
23 |
24 | \code{$call} main body.
25 |
26 | \code{$compute_output_shape} computes the output shape.
27 | }
28 |
29 | \author{
30 | Tustison NJ
31 | }
32 |
--------------------------------------------------------------------------------
/man/AttentionLayer3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/attentionUtilities.R
3 | \docType{class}
4 | \name{AttentionLayer3D}
5 | \alias{AttentionLayer3D}
6 | \title{Attention layer (3-D)}
7 | \value{
8 | output of tensor shape.
9 | }
10 | \description{
11 | Attention layer (3-D)
12 | }
13 | \section{Arguments}{
14 |
15 | \describe{
16 | \item{numberOfChannels}{number of channels.}
17 | }
18 | }
19 |
20 | \section{Details}{
21 |
22 | \code{$initialize} instantiates a new class.
23 |
24 | \code{$call} main body.
25 |
26 | \code{$compute_output_shape} computes the output shape.
27 | }
28 |
29 | \author{
30 | Tustison NJ
31 | }
32 |
--------------------------------------------------------------------------------
/man/PSNR.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/superResolutionUtilities.R
3 | \name{PSNR}
4 | \alias{PSNR}
5 | \title{Peak signal-to-noise ratio between two images.}
6 | \usage{
7 | PSNR(x, y)
8 | }
9 | \arguments{
10 | \item{x}{input image.}
11 |
12 | \item{y}{input image.}
13 | }
14 | \value{
15 | the peak signal-to-noise ratio
16 | }
17 | \description{
18 | Peak signal-to-noise ratio between two images.
19 | }
20 | \examples{
21 |
22 | library( ANTsR )
23 |
24 | r16 <- antsImageRead( getANTsRData( 'r16' ) )
25 | r85 <- antsImageRead( getANTsRData( 'r85' ) )
26 | psnrValue <- PSNR( r16, r85 )
27 |
28 | }
29 | \author{
30 | Avants BB
31 | }
32 |
--------------------------------------------------------------------------------
/man/MSE.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/superResolutionUtilities.R
3 | \name{MSE}
4 | \alias{MSE}
5 | \title{Mean square error of a single image or between two images.}
6 | \usage{
7 | MSE(x, y = NULL)
8 | }
9 | \arguments{
10 | \item{x}{input image.}
11 |
12 | \item{y}{input image.}
13 | }
14 | \value{
15 | the mean squared error
16 | }
17 | \description{
18 | Mean square error of a single image or between two images.
19 | }
20 | \examples{
21 |
22 | library( ANTsR )
23 |
24 | r16 <- antsImageRead( getANTsRData( 'r16' ) )
25 | r85 <- antsImageRead( getANTsRData( 'r85' ) )
26 | mseValue <- MSE( r16, r85 )
27 |
28 | }
29 | \author{
30 | Avants BB (from redr)
31 | }
32 |
--------------------------------------------------------------------------------
/man/MAE.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/superResolutionUtilities.R
3 | \name{MAE}
4 | \alias{MAE}
5 | \title{Mean absolute error of a single image or between two images.}
6 | \usage{
7 | MAE(x, y = NULL)
8 | }
9 | \arguments{
10 | \item{x}{input image.}
11 |
12 | \item{y}{input image.}
13 | }
14 | \value{
15 | the mean absolute error
16 | }
17 | \description{
18 | Mean absolute error of a single image or between two images.
19 | }
20 | \examples{
21 |
22 | library( ANTsR )
23 |
24 | r16 <- antsImageRead( getANTsRData( 'r16' ) )
25 | r85 <- antsImageRead( getANTsRData( 'r85' ) )
26 | maeValue <- MAE( r16, r85 )
27 |
28 | }
29 | \author{
30 | Avants BB (from redr)
31 | }
32 |
--------------------------------------------------------------------------------
/man/padOrCropImageToSize.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/croppingAndPaddingUtilities.R
3 | \name{padOrCropImageToSize}
4 | \alias{padOrCropImageToSize}
5 | \title{Pad or crop image to a specified size}
6 | \usage{
7 | padOrCropImageToSize(image, size)
8 | }
9 | \arguments{
10 | \item{image}{Input ANTs image}
11 |
12 | \item{size}{size of the output image.}
13 | }
14 | \value{
15 | a padded/cropped image
16 | }
17 | \description{
18 | Pad or crop image to a specified size
19 | }
20 | \examples{
21 |
22 | library( ANTsR )
23 |
24 | image <- antsImageRead( getANTsRData( "r16" ) )
25 | paddedImage <- padOrCropImageToSize( image, c( 333, 333 ) )
26 |
27 | }
28 | \author{
29 | Tustison NJ
30 | }
31 |
--------------------------------------------------------------------------------
/man/sampleFromCategoricalDistribution.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mixtureDensityUtilities.R
3 | \name{sampleFromCategoricalDistribution}
4 | \alias{sampleFromCategoricalDistribution}
5 | \title{Sample from a categorical distribution}
6 | \usage{
7 | sampleFromCategoricalDistribution(distribution)
8 | }
9 | \arguments{
10 | \item{distribution}{input categorical distribution from which
11 | to sample.}
12 | }
13 | \value{
14 | a single sample
15 | }
16 | \description{
17 | Ported from:
18 | }
19 | \details{
20 | \preformatted{ https://github.com/cpmpercussion/keras-mdn-layer/
21 | }
22 | }
23 | \examples{
24 |
25 | library( keras )
26 |
27 |
28 | }
29 | \author{
30 | Tustison NJ
31 | }
32 |
--------------------------------------------------------------------------------
/man/cropImageCenter.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/croppingAndPaddingUtilities.R
3 | \name{cropImageCenter}
4 | \alias{cropImageCenter}
5 | \title{Crop the center of an image.}
6 | \usage{
7 | cropImageCenter(image, cropSize)
8 | }
9 | \arguments{
10 | \item{image}{Input ANTs image}
11 |
12 | \item{cropSize}{width, height, depth (if 3-D), and time (if 4-D) of
13 | the cropped image.}
14 | }
15 | \value{
16 | a cropped image
17 | }
18 | \description{
19 | Crop the center of an image.
20 | }
21 | \examples{
22 |
23 | library( ANTsR )
24 |
25 | image <- antsImageRead( getANTsRData( "r16" ) )
26 | croppedImage <- cropImageCenter( image, c( 64, 64 ) )
27 |
28 | }
29 | \author{
30 | Tustison NJ
31 | }
32 |
--------------------------------------------------------------------------------
/man/decodeUnet.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/unetUtilities.R
3 | \name{decodeUnet}
4 | \alias{decodeUnet}
5 | \title{Decoding function for the u-net prediction outcome}
6 | \usage{
7 | decodeUnet(yPredicted, domainImage)
8 | }
9 | \arguments{
10 | \item{yPredicted}{an array of shape (\code{batchSize}, \code{width},
11 | \code{height}, \code{numberOfSegmentationLabels})}
12 |
13 | \item{domainImage}{image defining the geometry of the returned probability
14 | images.}
15 | }
16 | \value{
17 | a list of lists of probability images.
18 | }
19 | \description{
20 | Function for translating the U-net predictions to ANTsR probability
21 | images.
22 | }
23 | \author{
24 | Tustison NJ
25 | }
26 |
--------------------------------------------------------------------------------
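A hedged sketch of decoding a prediction array back to ANTsR probability images with decodeUnet; the random array here simply matches the 256 x 256 "r16" test image and an assumed three segmentation labels:

library( ANTsR )
library( ANTsRNet )

domainImage <- antsImageRead( getANTsRData( "r16" ) )   # 256 x 256 2-D image

# hypothetical u-net output: batch of 1, spatial size of the domain image, 3 labels
yPredicted <- array( runif( 1 * 256 * 256 * 3 ), dim = c( 1, 256, 256, 3 ) )

probabilityImages <- decodeUnet( yPredicted, domainImage )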
/man/LogSoftmaxLayer.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customActivationLayers.R
3 | \docType{class}
4 | \name{LogSoftmaxLayer}
5 | \alias{LogSoftmaxLayer}
6 | \title{Creates a log softmax layer}
7 | \value{
8 | a log softmax layer
9 | }
10 | \description{
11 | Creates a log softmax layer taken from
12 | }
13 | \details{
14 | \url{https://github.com/tensorflow/tensorflow/pull/25514/files}
15 | }
16 | \section{Arguments}{
17 |
18 | \describe{
19 | \item{axis}{Integer specifying the axis.}
20 | }
21 | }
22 |
23 | \section{Details}{
24 |
25 | \code{$initialize} instantiates a new class.
26 | \code{$call} main body.
27 | \code{$compute_output_shape} computes the output shape.
28 | }
29 |
30 | \author{
31 | Tustison NJ
32 | }
33 |
--------------------------------------------------------------------------------
/man/getMixtureDensityLossFunction.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mixtureDensityUtilities.R
3 | \name{getMixtureDensityLossFunction}
4 | \alias{getMixtureDensityLossFunction}
5 | \title{Returns a loss function for the mixture density.}
6 | \usage{
7 | getMixtureDensityLossFunction(outputDimension, numberOfMixtures)
8 | }
9 | \arguments{
10 | \item{outputDimension}{output dimension}
11 |
12 | \item{numberOfMixtures}{number of mixture components}
13 | }
14 | \value{
15 | a function providing the mean square error accuracy
16 | }
17 | \description{
18 | Ported from:
19 | }
20 | \details{
21 | \preformatted{ https://github.com/cpmpercussion/keras-mdn-layer/
22 | }
23 | }
24 | \examples{
25 |
26 | library( keras )
27 |
28 |
29 | }
30 | \author{
31 | Tustison NJ
32 | }
33 |
--------------------------------------------------------------------------------
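A short sketch of obtaining the mixture density loss closure; the dimensions are arbitrary, and the commented compile() call is an assumed usage rather than one documented by the package:

library( keras )
library( ANTsRNet )

outputDimension <- 2
numberOfMixtures <- 3

mdnLoss <- getMixtureDensityLossFunction( outputDimension, numberOfMixtures )

# assumed usage: pass the closure as the loss when compiling an MDN model
# model %>% compile( loss = mdnLoss, optimizer = optimizer_adam() )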
/man/splitMixtureParameters.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mixtureDensityUtilities.R
3 | \name{splitMixtureParameters}
4 | \alias{splitMixtureParameters}
5 | \title{Splits the mixture parameters.}
6 | \usage{
7 | splitMixtureParameters(parameters, outputDimension, numberOfMixtures)
8 | }
9 | \arguments{
10 | \item{parameters}{vector parameter to split}
11 |
12 | \item{outputDimension}{output dimension}
13 |
14 | \item{numberOfMixtures}{number of mixture components}
15 | }
16 | \value{
17 | separate mixture parameters
18 | }
19 | \description{
20 | Ported from:
21 | }
22 | \details{
23 | \preformatted{ https://github.com/cpmpercussion/keras-mdn-layer/
24 | }
25 | }
26 | \examples{
27 |
28 | library( keras )
29 |
30 |
31 | }
32 | \author{
33 | Tustison NJ
34 | }
35 |
--------------------------------------------------------------------------------
/man/getMixtureDensitySamplingFunction.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mixtureDensityUtilities.R
3 | \name{getMixtureDensitySamplingFunction}
4 | \alias{getMixtureDensitySamplingFunction}
5 | \title{Returns a sampling function for the mixture density.}
6 | \usage{
7 | getMixtureDensitySamplingFunction(outputDimension, numberOfMixtures)
8 | }
9 | \arguments{
10 | \item{outputDimension}{output dimension}
11 |
12 | \item{numberOfMixtures}{number of mixture components}
13 | }
14 | \value{
15 | a function for sampling a mixture density
16 | }
17 | \description{
18 | Ported from:
19 | }
20 | \details{
21 | \preformatted{ https://github.com/cpmpercussion/keras-mdn-layer/
22 | }
23 | }
24 | \examples{
25 |
26 | library( keras )
27 |
28 |
29 | }
30 | \author{
31 | Tustison NJ
32 | }
33 |
--------------------------------------------------------------------------------
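Analogously, a sketch of obtaining the sampling closure; how it is subsequently applied to the network's mixture-parameter output is an assumption noted in the comment:

library( keras )
library( ANTsRNet )

samplingFunction <- getMixtureDensitySamplingFunction( outputDimension = 2,
                                                       numberOfMixtures = 3 )
# the returned closure would then be applied to the mixture-parameter tensor
# produced by the network (assumed usage).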
/man/encodeUnet.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/unetUtilities.R
3 | \name{encodeUnet}
4 | \alias{encodeUnet}
5 | \title{One-hot encoding function}
6 | \usage{
7 | encodeUnet(segmentationsArray, segmentationLabels = NULL)
8 | }
9 | \arguments{
10 | \item{segmentationsArray}{an array of shape (\code{batchSize}, \code{width},
11 | \code{height})}
12 |
13 | \item{segmentationLabels}{vector of segmentation labels. Note that a
14 | background label (typically 0) needs to be included.}
15 | }
16 | \value{
17 | an n-D array of shape
18 | \eqn{ batchSize \times width \times height \times numberOfSegmentationLabels }
19 | }
20 | \description{
21 | Function for translating the segmentations to a one-hot representation.
22 | }
23 | \author{
24 | Tustison NJ
25 | }
26 |
--------------------------------------------------------------------------------
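A sketch of one-hot encoding with encodeUnet, assuming a small batch of integer label arrays with labels 0, 1, and 2 (0 serving as the background label):

library( ANTsRNet )

# two 64 x 64 segmentation "images" with background label 0
segmentations <- array( sample( 0:2, 2 * 64 * 64, replace = TRUE ),
                        dim = c( 2, 64, 64 ) )

oneHot <- encodeUnet( segmentations, segmentationLabels = c( 0, 1, 2 ) )
dim( oneHot )   # batchSize x width x height x numberOfSegmentationLabels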
/man/mixture_density_network_softmax.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mixtureDensityUtilities.R
3 | \name{mixture_density_network_softmax}
4 | \alias{mixture_density_network_softmax}
5 | \title{Softmax function for mixture density with temperature adjustment}
6 | \usage{
7 | mixture_density_network_softmax(logits, temperature = 1)
8 | }
9 | \arguments{
10 | \item{logits}{input of logits/mixture weights to adjust}
11 |
12 | \item{temperature}{the temperature used to adjust the distribution (default 1.0)}
13 | }
14 | \value{
15 | softmax loss value
16 | }
17 | \description{
18 | Ported from:
19 | }
20 | \details{
21 | \preformatted{ https://github.com/cpmpercussion/keras-mdn-layer/
22 | }
23 | }
24 | \examples{
25 |
26 | library( keras )
27 |
28 |
29 | }
30 | \author{
31 | Tustison NJ
32 | }
33 |
--------------------------------------------------------------------------------
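A sketch of the temperature-adjusted softmax, assuming plain numeric logits are acceptable input; a higher temperature flattens the resulting mixture weights:

library( ANTsRNet )

logits <- c( 2.0, 1.0, 0.1 )

mixture_density_network_softmax( logits )                    # default temperature = 1
mixture_density_network_softmax( logits, temperature = 2 )   # flatter distribution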
/man/getMixtureDensityMseAccuracyFunction.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mixtureDensityUtilities.R
3 | \name{getMixtureDensityMseAccuracyFunction}
4 | \alias{getMixtureDensityMseAccuracyFunction}
5 | \title{Returns a MSE accuracy function for the mixture density.}
6 | \usage{
7 | getMixtureDensityMseAccuracyFunction(outputDimension, numberOfMixtures)
8 | }
9 | \arguments{
10 | \item{outputDimension}{output dimension}
11 |
12 | \item{numberOfMixtures}{number of mixture components}
13 | }
14 | \value{
15 | a function providing the mean square error accuracy
16 | }
17 | \description{
18 | Ported from:
19 | }
20 | \details{
21 | \preformatted{ https://github.com/cpmpercussion/keras-mdn-layer/
22 | }
23 | }
24 | \examples{
25 |
26 | library( keras )
27 |
28 |
29 | }
30 | \author{
31 | Tustison NJ
32 | }
33 |
--------------------------------------------------------------------------------
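A sketch of obtaining the MSE accuracy closure; wrapping it as a custom keras metric is an assumed usage, shown only in the comments:

library( keras )
library( ANTsRNet )

mseAccuracy <- getMixtureDensityMseAccuracyFunction( outputDimension = 2,
                                                     numberOfMixtures = 3 )

# assumed usage: wrap as a custom metric at compile time
# model %>% compile( loss = mdnLoss,
#                    optimizer = optimizer_adam(),
#                    metrics = custom_metric( "mdn_mse", mseAccuracy ) )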
/docs/link.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
13 |
--------------------------------------------------------------------------------
/man/padImageByFactor.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/croppingAndPaddingUtilities.R
3 | \name{padImageByFactor}
4 | \alias{padImageByFactor}
5 | \title{Pad an image based on a factor.}
6 | \usage{
7 | padImageByFactor(image, factor)
8 | }
9 | \arguments{
10 | \item{image}{Input ANTs image}
11 |
12 | \item{factor}{padding factor. Can be an integer or vector of size
13 | equal to the image dimensionality.}
14 | }
15 | \value{
16 | a padded image
17 | }
18 | \description{
19 | Pad image of size \code{(x, y, z)} to \code{(x', y', z')} where
20 | \code{(x', y', z')} is divisible by a user-specified factor.
21 | }
22 | \examples{
23 |
24 | library( ANTsR )
25 | image <- antsImageRead( getANTsRData( "r16" ) )
26 | image <- cropImage( image )
27 | paddedImage <- padImageByFactor( image, 4 )
28 |
29 | }
30 | \author{
31 | Tustison NJ, Avants BB
32 | }
33 |
--------------------------------------------------------------------------------
/tests/testthat/test-pretrained.R:
--------------------------------------------------------------------------------
1 | testthat::context("Downloading a pre-trained model")
2 | testthat::test_that("mriSuperResolution loads", {
3 | res = getPretrainedNetwork("mriSuperResolution")
4 | testthat::expect_true(file.exists(res))
5 | model = keras::load_model_hdf5(res)
6 | testthat::expect_is(model, "keras.engine.training.Model" )
7 | })
8 |
9 | # testthat::test_that("mriSuperResolution loads", {
10 | # all_files = getPretrainedNetwork()
11 | # all_files = setdiff(all_files, c("show", "mriSuperResolution"))
12 | # all_files = c("ctHumanLung")
13 | # all_networks = sapply(all_files, getPretrainedNetwork)
14 | # testthat::expect_true(all(file.exists(all_networks)))
15 | # keras::load_model_hdf5(all_networks[1])
16 | # models = lapply(all_networks, keras::load_model_hdf5)
17 | # model = keras::load_model_hdf5(res)
18 | # testthat::expect_is(model, "keras.engine.training.Model" )
19 | # })
20 |
--------------------------------------------------------------------------------
/man/ClusteringLayer.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/deepEmbeddedClusteringUtilities.R
3 | \docType{class}
4 | \name{ClusteringLayer}
5 | \alias{ClusteringLayer}
6 | \title{Clustering layer for Deep Embedded Clustering}
7 | \value{
8 | clustering layer
9 | }
10 | \description{
11 | Clustering layer for Deep Embedded Clustering
12 | }
13 | \section{Arguments}{
14 |
15 | \describe{
16 | \item{numberOfClusters}{number of clusters.}
17 | \item{initialClusterWeights}{initial cluster weights.}
18 | \item{alpha}{parameter.}
19 | \item{name}{layer name.}
20 | }
21 | }
22 |
23 | \section{Details}{
24 |
25 | \code{$initialize} instantiates a new class.
26 |
27 | \code{$call} main body.
28 |
29 | \code{$compute_output_shape} computes the output shape.
30 | }
31 |
32 | \examples{
33 | model = ClusteringLayer$new(numberOfClusters = 2)
34 | \dontrun{
35 | model$build(c(20, 20))
36 | }
37 | }
38 | \author{
39 | Tustison NJ
40 | }
41 |
--------------------------------------------------------------------------------
/man/layer_l2_normalization_2d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ssdUtilities.R
3 | \name{layer_l2_normalization_2d}
4 | \alias{layer_l2_normalization_2d}
5 | \alias{layer_l2_normalization_3d}
6 | \title{Normalization layer (2-D and 3-D)}
7 | \usage{
8 | layer_l2_normalization_2d(object, scale = 20, name = NULL, trainable = TRUE)
9 |
10 | layer_l2_normalization_3d(object, scale = 20, name = NULL, trainable = TRUE)
11 | }
12 | \arguments{
13 | \item{object}{Object to compose layer with. This is either a
14 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to,
15 | or another Layer which this layer will call.}
16 |
17 | \item{scale}{box scale}
18 |
19 | \item{name}{The name of the layer}
20 |
21 | \item{trainable}{Whether the layer weights will be updated during training.}
22 | }
23 | \value{
24 | a keras layer tensor
25 | }
26 | \description{
27 | Wraps a custom layer for the SSD network
28 | }
29 |
--------------------------------------------------------------------------------
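A sketch of composing the L2 normalization wrapper into a small feature-map pipeline; the convolution and input shape are illustrative only:

library( keras )
library( ANTsRNet )

input <- layer_input( shape = c( 64, 64, 3 ) )
features <- input %>%
  layer_conv_2d( filters = 8, kernel_size = c( 3, 3 ), padding = "same" ) %>%
  layer_l2_normalization_2d( scale = 20 )
model <- keras_model( inputs = input, outputs = features )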
/man/EfficientAttentionLayer2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/attentionUtilities.R
3 | \docType{class}
4 | \name{EfficientAttentionLayer2D}
5 | \alias{EfficientAttentionLayer2D}
6 | \title{Efficient attention layer (2-D)}
7 | \value{
8 | output of tensor shape.
9 | }
10 | \description{
11 | Efficient attention layer (2-D)
12 | }
13 | \section{Arguments}{
14 |
15 | \describe{
16 | \item{numberOfFiltersFG}{number of filters for F and G layers.}
17 | \item{numberOfFiltersH}{number of filters for H. If NA, only
18 | filter F is used, for efficiency.}
19 | \item{poolSize}{pool_size in max pool layer.}
20 | \item{doConcatenateFinalLayers}{concatenate final layer with input.
21 | Alternatively, add.}
22 | }
23 | }
24 |
25 | \section{Details}{
26 |
27 | \code{$initialize} instantiates a new class.
28 |
29 | \code{$call} main body.
30 |
31 | \code{$compute_output_shape} computes the output shape.
32 | }
33 |
34 | \author{
35 | BB Avants, NJ Tustison
36 | }
37 |
--------------------------------------------------------------------------------
/man/EfficientAttentionLayer3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/attentionUtilities.R
3 | \docType{class}
4 | \name{EfficientAttentionLayer3D}
5 | \alias{EfficientAttentionLayer3D}
6 | \title{Efficient attention layer (3-D)}
7 | \value{
8 | output of tensor shape.
9 | }
10 | \description{
11 | Efficient attention layer (3-D)
12 | }
13 | \section{Arguments}{
14 |
15 | \describe{
16 | \item{numberOfFiltersFG}{number of filters for F and G layers.}
17 | \item{numberOfFiltersH}{number of filters for H. If NA, only
18 | filter F is used, for efficiency.}
19 | \item{poolSize}{pool_size in max pool layer.}
20 | \item{doConcatenateFinalLayers}{concatenate final layer with input.
21 | Alternatively, add.}
22 | }
23 | }
24 |
25 | \section{Details}{
26 |
27 | \code{$initialize} instantiates a new class.
28 |
29 | \code{$call} main body.
30 |
31 | \code{$compute_output_shape} computes the output shape.
32 | }
33 |
34 | \author{
35 | BB Avants, NJ Tustison
36 | }
37 |
--------------------------------------------------------------------------------
/man/createNoBrainerUnetModel3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createCustomUnetModel.R
3 | \name{createNoBrainerUnetModel3D}
4 | \alias{createNoBrainerUnetModel3D}
5 | \title{Implementation of the "NoBrainer" U-net architecture}
6 | \usage{
7 | createNoBrainerUnetModel3D(inputImageSize)
8 | }
9 | \arguments{
10 | \item{inputImageSize}{Used for specifying the input tensor shape. The
11 | shape (or dimension) of that tensor is the image dimensions followed by
12 | the number of channels (e.g., red, green, and blue).}
13 | }
14 | \value{
15 | a u-net keras model
16 | }
17 | \description{
18 | Creates a keras model implementation of the u-net architecture
19 | available here:
20 | }
21 | \details{
22 | \preformatted{ \url{https://github.com/neuronets/nobrainer/}
23 | }
24 | }
25 | \examples{
26 |
27 | library( ANTsRNet )
28 |
29 | model <- createNoBrainerUnetModel3D( list( NULL, NULL, NULL, 1 ) )
30 |
31 | }
32 | \author{
33 | Tustison NJ
34 | }
35 |
--------------------------------------------------------------------------------
/man/SpatialTransformerLayer3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/spatialTransformerNetworkUtilities.R
3 | \docType{class}
4 | \name{SpatialTransformerLayer3D}
5 | \alias{SpatialTransformerLayer3D}
6 | \title{Spatial transformer layer (3-D)}
7 | \value{
8 | resampled batch images.
9 | }
10 | \description{
11 | Spatial transformer layer (3-D)
12 | }
13 | \section{Arguments}{
14 |
15 | \describe{
16 | \item{inputs}{list of size 2 where the first element contains the images and the
17 | second element contains the weights.}
18 | \item{resampledSize}{size of the resampled output images.}
19 | }
20 | }
21 |
22 | \section{Details}{
23 |
24 | \code{$initialize} instantiates a new class.
25 |
26 | \code{$call} main body.
27 |
28 | \code{$compute_output_shape} computes the output shape.
29 | }
30 |
31 | \examples{
32 | model = SpatialTransformerLayer3D$new(c(30L, 30L, 30L))
33 | model$compute_output_shape(input_shape = c(25, 25, 25))
34 |
35 |
36 | }
37 | \author{
38 | Tustison NJ
39 | }
40 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 | **Smartphone (please complete the following information):**
32 | - Device: [e.g. iPhone6]
33 | - OS: [e.g. iOS8.1]
34 | - Browser [e.g. stock browser, safari]
35 | - Version [e.g. 22]
36 |
37 | **Additional context**
38 | Add any other context about the problem here.
39 |
--------------------------------------------------------------------------------
/man/AttentionAugmentationLayer2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/attentionUtilities.R
3 | \docType{class}
4 | \name{AttentionAugmentationLayer2D}
5 | \alias{AttentionAugmentationLayer2D}
6 | \title{Attention augmentation layer (2-D)}
7 | \value{
8 | output of tensor shape (batchSize, height, width, depthOfQueries).
9 | }
10 | \description{
11 | Attention augmentation layer (2-D)
12 | }
13 | \section{Arguments}{
14 |
15 | \describe{
16 | \item{depthOfQueries}{number of filters for queries.}
17 | \item{depthOfValues}{number of filters for values.}
18 | \item{numberOfHeads}{number of attention heads to use. It is required
19 | that depthOfQueries/numberOfHeads > 0.}
20 | \item{isRelative}{whether or not to use relative encodings.}
21 | }
22 | }
23 |
24 | \section{Details}{
25 |
26 | \code{$initialize} instantiates a new class.
27 |
28 | \code{$call} main body.
29 |
30 | \code{$compute_output_shape} computes the output shape.
31 | }
32 |
33 | \author{
34 | Tustison NJ
35 | }
36 |
--------------------------------------------------------------------------------
/man/InpaintingDeepFillModel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createInpaintingDeepFillModel.R
3 | \docType{class}
4 | \name{InpaintingDeepFillModel}
5 | \alias{InpaintingDeepFillModel}
6 | \title{In-painting with contextual attention}
7 | \description{
8 | Original generative adversarial network (GAN) model from the
9 | paper:
10 | }
11 | \details{
12 | https://arxiv.org/abs/1801.07892
13 |
14 | and ported from the (TensorFlow) implementation:
15 |
16 | https://github.com/JiahuiYu/generative_inpainting
17 | }
18 | \section{Arguments}{
19 |
20 | \describe{
21 | \item{inputImageSize}{}
22 | }
23 | }
24 |
25 | \section{Details}{
26 |
27 | \code{$initialize} {instantiates a new class and builds the
28 | generator and discriminator.}
29 | \code{$buildGenerator}{build generator.}
30 | \code{$buildDiscriminator}{build discriminator.}
31 | }
32 |
33 | \examples{
34 | x = InpaintingDeepFillModel$new(c( 28, 28, 1 ))
35 | \dontrun{
36 | x$buildNetwork()
37 | }
38 |
39 | }
40 | \author{
41 | Tustison NJ
42 | }
43 |
--------------------------------------------------------------------------------
/man/SSIM.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/superResolutionUtilities.R
3 | \name{SSIM}
4 | \alias{SSIM}
5 | \title{Structural similarity index (SSI) between two images.}
6 | \usage{
7 | SSIM(x, y, K = c(0.01, 0.03))
8 | }
9 | \arguments{
10 | \item{x}{input image.}
11 |
12 | \item{y}{input image.}
13 |
14 | \item{K}{vector of length 2 which contains SSI parameters meant to stabilize
15 | the formula in case of weak denominators.}
16 | }
17 | \value{
18 | the structural similarity index
19 | }
20 | \description{
21 | Implementation of the SSI quantity for two images proposed in
22 | }
23 | \details{
24 | Z. Wang, A.C. Bovik, H.R. Sheikh, E.P. Simoncelli. "Image quality
25 | assessment: from error visibility to structural similarity". IEEE TIP.
26 | 13 (4): 600–612.
27 | }
28 | \examples{
29 |
30 | library( ANTsR )
31 |
32 | r16 <- antsImageRead( getANTsRData( 'r16' ) )
33 | r85 <- antsImageRead( getANTsRData( 'r85' ) )
34 | ssimValue <- SSIM( r16, r85 )
35 |
36 | }
37 | \author{
38 | Avants BB
39 | }
40 |
--------------------------------------------------------------------------------
/man/mriSuperResolution.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mriSuperResolution.R
3 | \name{mriSuperResolution}
4 | \alias{mriSuperResolution}
5 | \title{Super-resolution for MRI}
6 | \usage{
7 | mriSuperResolution(image, antsxnetCacheDirectory = NULL, verbose = FALSE)
8 | }
9 | \arguments{
10 | \item{image}{magnetic resonance image}
11 |
12 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
13 | template and model weights. Since these can be reused, if
14 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
15 | inst/extdata/ subfolder of the ANTsRNet package.}
16 |
17 | \item{verbose}{print progress.}
18 | }
19 | \value{
20 | super-resolution image.
21 | }
22 | \description{
23 | Perform super-resolution (2x) of MRI data using deep back projection network.
24 | }
25 | \examples{
26 | \dontrun{
27 | library( ANTsRNet )
28 |
29 | image <- antsImageRead( "t1.nii.gz" )
30 | imageSr <- mriSuperResolution( image )
31 | }
32 | }
33 | \author{
34 | Avants BB
35 | }
36 |
--------------------------------------------------------------------------------
/man/ResampleTensorLayer2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/resampleTensorUtilities.R
3 | \docType{class}
4 | \name{ResampleTensorLayer2D}
5 | \alias{ResampleTensorLayer2D}
6 | \title{Creates a resample tensor layer (2-D)}
7 | \value{
8 | a resampled version of the input tensor
9 | }
10 | \description{
11 | Creates a lambda layer which interpolates/resizes an input tensor based
12 | on the specified shape
13 | }
14 | \section{Arguments}{
15 |
16 | \describe{
17 | \item{shape}{A 2-D vector specifying the new shape.}
18 | \item{interpolationType}{Type of interpolation. Can be
19 | \code{'nearestNeighbor'},
20 | \code{'linear'}, \code{'bilinear'},
21 | \code{'cubic'}, or \code{'bicubic'}}
22 | \item{x}{}
23 | \item{mask}{}
24 | \item{input_shape}{}
25 | }
26 | }
27 |
28 | \section{Details}{
29 |
30 | \code{$initialize} instantiates a new class.
31 |
32 | \code{$call} main body.
33 |
34 | \code{$compute_output_shape} computes the output shape.
35 | }
36 |
37 | \author{
38 | Tustison NJ
39 | }
40 |
--------------------------------------------------------------------------------
/man/peak_signal_to_noise_ratio.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customMetrics.R
3 | \name{peak_signal_to_noise_ratio}
4 | \alias{peak_signal_to_noise_ratio}
5 | \title{Function to calculate peak-signal-to-noise ratio.}
6 | \usage{
7 | peak_signal_to_noise_ratio(y_true, y_pred)
8 | }
9 | \arguments{
10 | \item{y_true}{True labels (Tensor)}
11 |
12 | \item{y_pred}{Predictions (Tensor of the same shape as \code{y_true})}
13 | }
14 | \value{
15 | PSNR value
16 | }
17 | \description{
18 | Function to calculate peak-signal-to-noise ratio.
19 | }
20 | \examples{
21 |
22 | library( ANTsRNet )
23 | library( keras )
24 |
25 | model <- createUnetModel2D( c( 64, 64, 1 ) )
26 |
27 | metric_peak_signal_to_noise_ratio <-
28 | custom_metric( "peak_signal_to_noise_ratio",
29 | peak_signal_to_noise_ratio )
30 |
31 | model \%>\% compile( loss = loss_categorical_crossentropy,
32 | optimizer = optimizer_adam( lr = 0.0001 ),
33 | metrics = c( metric_peak_signal_to_noise_ratio ) )
34 |
35 | }
36 | \author{
37 | Tustison NJ
38 | }
39 |
--------------------------------------------------------------------------------
/man/ResampleTensorToTargetTensorLayer3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/resampleTensorUtilities.R
3 | \docType{class}
4 | \name{ResampleTensorToTargetTensorLayer3D}
5 | \alias{ResampleTensorToTargetTensorLayer3D}
6 | \title{Creates a resampled tensor (to target tensor) layer (3-D)}
7 | \value{
8 | a resampled version of the input tensor
9 | }
10 | \description{
11 | Creates a lambda layer which interpolates/resizes an input tensor based
12 | on the specified target tensor
13 | }
14 | \section{Arguments}{
15 |
16 | \describe{
17 | \item{interpolationType}{Type of interpolation. Can be
18 | \code{'nearestNeighbor'},
19 | \code{'linear'}, \code{'bilinear'},
20 | \code{'cubic'}, or \code{'bicubic'}}
21 | \item{x}{}
22 | \item{mask}{}
23 | \item{input_shape}{}
24 | }
25 | }
26 |
27 | \section{Details}{
28 |
29 | \code{$initialize} instantiates a new class.
30 |
31 | \code{$call} main body.
32 |
33 | \code{$compute_output_shape} computes the output shape.
34 | }
35 |
36 | \author{
37 | Tustison NJ
38 | }
39 |
--------------------------------------------------------------------------------
/man/ResampleTensorLayer3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/resampleTensorUtilities.R
3 | \docType{class}
4 | \name{ResampleTensorLayer3D}
5 | \alias{ResampleTensorLayer3D}
6 | \title{Creates a resampled tensor (to fixed size) layer (3-D)}
7 | \value{
8 | a resampled version of the input tensor
9 | }
10 | \description{
11 | Creates a lambda layer which interpolates/resizes an input tensor based
12 | on the specified shape
13 | }
14 | \section{Arguments}{
15 |
16 | \describe{
17 | \item{shape}{A 3-D vector specifying the new shape.}
18 | \item{interpolationType}{Type of interpolation. Can be
19 | \code{'nearestNeighbor'},
20 | \code{'linear'}, \code{'bilinear'},
21 | \code{'cubic'}, or \code{'bicubic'}}
22 | \item{x}{}
23 | \item{mask}{}
24 | \item{input_shape}{}
25 | }
26 | }
27 |
28 | \section{Details}{
29 |
30 | \code{$initialize} instantiates a new class.
31 |
32 | \code{$call} main body.
33 |
34 | \code{$compute_output_shape} computes the output shape.
35 | }
36 |
37 | \author{
38 | Tustison NJ
39 | }
40 |
--------------------------------------------------------------------------------
/man/linMatchIntensity.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/applyDBPN4x.R
3 | \name{linMatchIntensity}
4 | \alias{linMatchIntensity}
5 | \title{linMatchIntensity}
6 | \usage{
7 | linMatchIntensity(fromImg, toImg, polyOrder = 1, truncate = TRUE, mask)
8 | }
9 | \arguments{
10 | \item{fromImg}{image whose intensity function we will match to the \code{toImg}}
11 |
12 | \item{toImg}{defines the reference intensity function.}
13 |
14 | \item{polyOrder}{order of the polynomial fit. The default (1) is a linear fit.}
15 |
16 | \item{truncate}{boolean which turns on/off the clipping of intensity.}
17 |
18 | \item{mask}{mask the matching region}
19 | }
20 | \value{
21 | the \code{fromImg} matched to the \code{toImg}
22 | }
23 | \description{
24 | regression between two image intensity spaces
25 | }
26 | \examples{
27 | library(ANTsRCore)
28 | sourceImage <- antsImageRead( getANTsRData( "r16" ) )
29 | referenceImage <- antsImageRead( getANTsRData( "r64" ) )
30 | matchedImage <- linMatchIntensity( sourceImage, referenceImage )
31 | }
32 | \author{
33 | Avants BB
34 | }
35 |
--------------------------------------------------------------------------------
/man/pearson_correlation_coefficient.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customMetrics.R
3 | \name{pearson_correlation_coefficient}
4 | \alias{pearson_correlation_coefficient}
5 | \title{Function for Pearson correlation coefficient.}
6 | \usage{
7 | pearson_correlation_coefficient(y_true, y_pred)
8 | }
9 | \arguments{
10 | \item{y_true}{True labels (Tensor)}
11 |
12 | \item{y_pred}{Predictions (Tensor of the same shape as \code{y_true})}
13 | }
14 | \value{
15 | Correlation
16 | }
17 | \description{
18 | Function for Pearson correlation coefficient.
19 | }
20 | \examples{
21 |
22 | library( ANTsRNet )
23 | library( keras )
24 |
25 | model <- createUnetModel2D( c( 64, 64, 1 ) )
26 |
27 | metric_pearson_correlation_coefficient <-
28 | custom_metric( "pearson_correlation_coefficient",
29 | pearson_correlation_coefficient )
30 |
31 | model \%>\% compile( loss = loss_categorical_crossentropy,
32 | optimizer = optimizer_adam( lr = 0.0001 ),
33 | metrics = c( metric_pearson_correlation_coefficient ) )
34 |
35 | }
36 | \author{
37 | Tustison NJ
38 | }
39 |
--------------------------------------------------------------------------------
/man/jaccardSimilarity.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ssdUtilities.R
3 | \name{jaccardSimilarity}
4 | \alias{jaccardSimilarity}
5 | \title{Jaccard similarity between two sets of boxes.}
6 | \usage{
7 | jaccardSimilarity(boxes1, boxes2)
8 | }
9 | \arguments{
10 | \item{boxes1}{A 2-D array where each row corresponds to a single box
11 | consisting of the format (xmin,xmax,ymin,ymax) or
12 | (xmin,xmax,ymin,ymax,zmin,zmax)}
13 |
14 | \item{boxes2}{A 2-D array where each row corresponds to a single box
15 | consisting of the format (xmin,xmax,ymin,ymax) or
16 | (xmin,xmax,ymin,ymax,zmin,zmax)}
17 | }
18 | \value{
19 | the Jaccard similarity
20 | }
21 | \description{
22 | Function for determining the Jaccard or IoU (intersection over union)
23 | similarity measure between two sets of boxes.
24 | }
25 | \examples{
26 | vec2d = c(1, 10, 1, 10)
27 | vec2d_2 = c(1, 8, 1, 5)
28 | jaccardSimilarity(vec2d, vec2d_2)
29 | vec3d = c(1, 10, 1, 10, 1, 10)
30 | vec3d_2 = c(1, 8, 1, 5, 1, 10)
31 | jaccardSimilarity(vec3d, vec3d_2)
32 | }
33 | \author{
34 | Tustison NJ
35 | }
36 |
--------------------------------------------------------------------------------
/man/SpatialTransformerLayer2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/spatialTransformerNetworkUtilities.R
3 | \docType{class}
4 | \name{SpatialTransformerLayer2D}
5 | \alias{SpatialTransformerLayer2D}
6 | \title{Spatial transformer layer (2-D)}
7 | \value{
8 | resampled batch images.
9 | }
10 | \description{
11 | Spatial transformer layer (2-D)
12 | }
13 | \section{Arguments}{
14 |
15 | \describe{
16 | \item{inputs}{list of size 2 where the first element contains the images and the
17 | second element contains the weights.}
18 | \item{resampledSize}{size of the resampled output images.}
19 | }
20 | }
21 |
22 | \section{Details}{
23 |
24 | \code{$initialize} instantiates a new class.
25 |
26 | \code{$call} main body.
27 |
28 | \code{$compute_output_shape} computes the output shape.
29 | }
30 |
31 | \examples{
32 | resampledSize = c(30L, 30L)
33 | model = SpatialTransformerLayer2D$new(resampledSize)
34 | model$initialize(resampledSize)
35 | testthat::expect_error(model$initialize(5))
36 | model$compute_output_shape(input_shape = c(25, 25))
37 | }
38 | \author{
39 | Tustison NJ
40 | }
41 |
--------------------------------------------------------------------------------
/man/weighted_categorical_crossentropy.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customMetrics.R
3 | \name{weighted_categorical_crossentropy}
4 | \alias{weighted_categorical_crossentropy}
5 | \title{Function for weighted categorical cross entropy}
6 | \usage{
7 | weighted_categorical_crossentropy(y_true, y_pred, weights)
8 | }
9 | \arguments{
10 | \item{y_true}{True labels (Tensor)}
11 |
12 | \item{y_pred}{Predictions (Tensor of the same shape as \code{y_true})}
13 |
14 | \item{weights}{weights for each class}
15 | }
16 | \value{
17 | function value
18 | }
19 | \description{
20 | ported from this implementation:
21 | }
22 | \details{
23 | \url{https://gist.github.com/wassname/ce364fddfc8a025bfab4348cf5de852d}
24 | }
25 | \examples{
26 |
27 | library( ANTsRNet )
28 | library( keras )
29 |
30 | model <- createUnetModel2D( c( 64, 64, 1 ), numberOfOutputs = 2 )
31 |
32 | model \%>\% compile( loss = weighted_categorical_crossentropy( weights = c( 1, 1 ) ),
33 | optimizer = optimizer_adam( lr = 0.0001 ),
34 | metrics = "accuracy" )
35 |
36 | }
37 | \author{
38 | Tustison NJ
39 | }
40 |
--------------------------------------------------------------------------------
/man/ResampleTensorToTargetTensorLayer2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/resampleTensorUtilities.R
3 | \docType{class}
4 | \name{ResampleTensorToTargetTensorLayer2D}
5 | \alias{ResampleTensorToTargetTensorLayer2D}
6 | \title{Creates a resampled tensor (to target tensor) layer (2-D)}
7 | \value{
8 | a resampled version of the input tensor
9 | }
10 | \description{
11 | Creates a lambda layer which interpolates/resizes an input tensor based
12 | on the specified target tensor
13 | }
14 | \section{Arguments}{
15 |
16 | \describe{
17 | \item{targetTensor}{tensor of desired size.}
18 | \item{interpolationType}{Type of interpolation. Can be
19 | \code{'nearestNeighbor'},
20 | \code{'linear'}, \code{'bilinear'},
21 | \code{'cubic'}, or \code{'bicubic'}}
22 | \item{x}{}
23 | \item{mask}{}
24 | \item{input_shape}{}
25 | }
26 | }
27 |
28 | \section{Details}{
29 |
30 | \code{$initialize} instantiates a new class.
31 |
32 | \code{$call} main body.
33 |
34 | \code{$compute_output_shape} computes the output shape.
35 | }
36 |
37 | \author{
38 | Tustison NJ
39 | }
40 |
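41 | \examples{
42 | \dontrun{
43 | # Minimal sketch, not taken from the original documentation: this class is
44 | # normally reached through the wrapper layer_resample_tensor_to_target_tensor_2d().
45 | # The constructor signature below is an assumption, and the target tensor is
46 | # assumed to be supplied as the second element of the input list to $call().
47 | layer <- ResampleTensorToTargetTensorLayer2D$new( interpolationType = 'linear' )
48 | }
49 | }
50 | 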
--------------------------------------------------------------------------------
/man/createSysuMediaUnetModel2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createCustomUnetModel.R
3 | \name{createSysuMediaUnetModel2D}
4 | \alias{createSysuMediaUnetModel2D}
5 | \title{Implementation of the sysu_media U-net architecture}
6 | \usage{
7 | createSysuMediaUnetModel2D(inputImageSize)
8 | }
9 | \arguments{
10 | \item{inputImageSize}{Used for specifying the input tensor shape.
11 | This will be \code{c(200, 200, 2)} for t1/flair input and
12 | \code{c(200, 200, 1)} for flair-only input.}
13 | }
14 | \value{
15 | a u-net keras model
16 | }
17 | \description{
18 | Creates a keras model implementation of the u-net architecture
19 | used by the sysu_media team in the 2017 MICCAI WMH challenge, described
20 | here:
21 | }
22 | \details{
23 | \preformatted{\url{https://pubmed.ncbi.nlm.nih.gov/30125711/}
24 | }
25 |
26 | with the original implementation available here:\preformatted{\url{https://github.com/hongweilibran/wmh_ibbmTum}
27 | }
28 | }
29 | \examples{
30 | \dontrun{
31 |
32 | model <- createSysuMediaUnetModel2D( c( 200, 200, 1 ) )
33 |
34 | }
35 | }
36 | \author{
37 | Tustison NJ
38 | }
39 |
--------------------------------------------------------------------------------
/man/CycleGanModel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createCycleGanModel.R
3 | \docType{class}
4 | \name{CycleGanModel}
5 | \alias{CycleGanModel}
6 | \title{Cycle GAN model}
7 | \description{
8 | Cycle generative adversarial network from the paper:
9 | }
10 | \details{
11 | https://arxiv.org/pdf/1703.10593
12 |
13 | and ported from the Keras (python) implementation:
14 |
15 | https://github.com/eriklindernoren/Keras-GAN/blob/master/cyclegan/cyclegan.py
16 | }
17 | \section{Arguments}{
18 |
19 | \describe{
20 | \item{inputImageSize}{}
21 | \item{latentDimension}{}
22 | }
23 | }
24 |
25 | \section{Details}{
26 |
27 | \code{$initialize} {instantiates a new class and builds the
28 | generator and discriminator.}
29 | \code{$buildGenerator}{build generator.}
30 | \code{$buildDiscriminator}{build discriminator.}
31 | }
32 |
33 | \examples{
34 |
35 | library( keras )
36 | library( ANTsRNet )
37 |
38 | keras::backend()$clear_session()
39 | ganModel <- CycleGanModel$new(
40 | inputImageSize = c( 128, 128, 3 ) )
41 | ganModel$buildGenerator()
42 |
43 | }
44 | \author{
45 | Tustison NJ
46 | }
47 |
--------------------------------------------------------------------------------
/man/sampleFromOutput.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mixtureDensityUtilities.R
3 | \name{sampleFromOutput}
4 | \alias{sampleFromOutput}
5 | \title{Sample from a distribution}
6 | \usage{
7 | sampleFromOutput(
8 | parameters,
9 | outputDimension,
10 | numberOfMixtures,
11 | temperature = 1,
12 | sigmaTemperature = 1
13 | )
14 | }
15 | \arguments{
16 | \item{parameters}{vector parameter to split}
17 |
18 | \item{outputDimension}{output dimension}
19 |
20 | \item{numberOfMixtures}{number of mixture components}
21 |
22 | \item{temperature}{the temperature used to adjust the distribution
23 | (default 1.0), passed to \code{\link{mixture_density_network_softmax}}}
24 |
25 | \item{sigmaTemperature}{multiplier applied to \code{sigma} from the
26 | output of \code{\link{sampleFromCategoricalDistribution}}}
27 | }
28 | \value{
29 | a single sample
30 | }
31 | \description{
32 | Ported from:
33 | }
34 | \details{
35 | \preformatted{ https://github.com/cpmpercussion/keras-mdn-layer/
36 | }
37 | }
38 | \examples{
39 |
40 | library( keras )
41 |
42 |
43 | }
44 | \author{
45 | Tustison NJ
46 | }
47 |
--------------------------------------------------------------------------------
/man/createAutoencoderModel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createAutoencoderModel.R
3 | \name{createAutoencoderModel}
4 | \alias{createAutoencoderModel}
5 | \title{Function for creating a symmetric autoencoder model.}
6 | \usage{
7 | createAutoencoderModel(
8 | numberOfUnitsPerLayer,
9 | activation = "relu",
10 | initializer = "glorot_uniform"
11 | )
12 | }
13 | \arguments{
14 | \item{numberOfUnitsPerLayer}{vector defining the number of units
15 | in the encoding branch}
16 |
17 | \item{activation}{activation type for the dense layers}
18 |
19 | \item{initializer}{initializer type for the dense layers}
20 | }
21 | \value{
22 | two models: the encoder and auto-encoder
23 | }
24 | \description{
25 | Builds an autoencoder based on the specified array defining the
26 | number of units in the encoding branch. Ported to Keras R from the
27 | Keras python implementation here:
28 | }
29 | \details{
30 | \url{https://github.com/XifengGuo/DEC-keras}
31 | }
32 | \examples{
33 |
34 | library( ANTsRNet )
35 | library( keras )
36 |
37 | ae <- createAutoencoderModel( c( 784, 500, 500, 2000, 10 ) )
38 |
39 | }
40 | \author{
41 | Tustison NJ
42 | }
43 |
--------------------------------------------------------------------------------
/man/lungExtraction.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/lungExtraction.R
3 | \name{lungExtraction}
4 | \alias{lungExtraction}
5 | \title{Lung extraction}
6 | \usage{
7 | lungExtraction(
8 | image,
9 | modality = c("proton", "ct"),
10 | antsxnetCacheDirectory = NULL,
11 | verbose = FALSE
12 | )
13 | }
14 | \arguments{
15 | \item{image}{input 3-D lung image.}
16 |
17 | \item{modality}{image type. Options include "proton" or "ct".}
18 |
19 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
20 | template and model weights. Since these can be reused, if
21 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
22 | subdirectory ~/.keras/ANTsXNet/.}
23 |
24 | \item{verbose}{print progress.}
25 | }
26 | \value{
27 | segmentation and probability images
28 | }
29 | \description{
30 | Perform proton (H1) or CT lung extraction using a U-net architecture.
31 | }
32 | \examples{
33 | \dontrun{
34 | library( ANTsRNet )
35 | library( keras )
36 |
37 | image <- antsImageRead( "lung.nii.gz" )
38 | output <- lungExtraction( image, modality = "proton" )
39 | }
40 | }
41 | \author{
42 | Tustison NJ
43 | }
44 |
--------------------------------------------------------------------------------
/man/layer_attention_augmentation_2d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/attentionUtilities.R
3 | \name{layer_attention_augmentation_2d}
4 | \alias{layer_attention_augmentation_2d}
5 | \title{Attention augmentation layer (2-D)}
6 | \usage{
7 | layer_attention_augmentation_2d(
8 | object,
9 | depthOfQueries,
10 | depthOfValues,
11 | numberOfHeads,
12 | isRelative,
13 | trainable = TRUE
14 | )
15 | }
16 | \arguments{
17 | \item{object}{Object to compose layer with. This is either a
18 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to,
19 | or another Layer which this layer will call.}
20 |
21 | \item{depthOfQueries}{number of filters for queries.}
22 |
23 | \item{depthOfValues}{number of filters for values.}
24 |
25 | \item{numberOfHeads}{number of attention heads to use. It is required
26 | that \code{depthOfQueries} be evenly divisible by \code{numberOfHeads}.}
27 |
28 | \item{isRelative}{whether or not to use relative encodings.}
29 |
30 | \item{trainable}{Whether the layer weights will be updated during training.}
31 | }
32 | \value{
33 | a keras layer tensor
34 | }
35 | \description{
36 | Wraps the AttentionAugmentation2D layer.
37 | }
38 | \examples{
39 | \dontrun{
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/man/ScaleLayer.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/denseUnetUtilities.R
3 | \docType{class}
4 | \name{ScaleLayer}
5 | \alias{ScaleLayer}
6 | \title{Custom scale layer}
7 | \value{
8 | A keras layer
9 | }
10 | \description{
11 | Custom scale layer
12 | }
13 | \section{Arguments}{
14 |
15 | \describe{
16 | \item{axis}{integer specifying which axis to normalize.}
17 | \item{momentum}{momentum value used for computation of the exponential
18 | average of the mean and standard deviation.}
19 | \item{input_shape}{Dimensionality of the input (integer) not
20 | including the samples axis.}
21 | }
22 | }
23 |
24 | \section{Details}{
25 |
26 | \code{$initialize} instantiates a new class.
27 |
28 | \code{$call} main body.
29 |
30 | \code{$compute_output_shape} computes the output shape.
31 | }
32 |
33 | \examples{
34 | library(keras)
35 | inputImageSize = c( 256L, 256L, 1L )
36 | inputs <- keras::layer_input( shape = inputImageSize )
37 | outputs <- inputs \%>\% layer_zero_padding_2d( padding = c( 3L, 3L ) )
38 | layer_scale = ANTsRNet:::layer_scale
39 | outputs = outputs \%>\%
40 | layer_scale(axis = -1L)
41 | lay = ScaleLayer$new()
42 | \dontrun{
43 | lay$build(input_shape = inputImageSize)
44 | }
45 | }
46 | \author{
47 | Tustison NJ
48 | }
49 |
--------------------------------------------------------------------------------
/man/corticalThickness.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/corticalThickness.R
3 | \name{corticalThickness}
4 | \alias{corticalThickness}
5 | \title{Cortical thickness using deep learning}
6 | \usage{
7 | corticalThickness(t1, antsxnetCacheDirectory = NULL, verbose = FALSE)
8 | }
9 | \arguments{
10 | \item{t1}{input 3-D unprocessed T1-weighted brain image}
11 |
12 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
13 | template and model weights. Since these can be reused, if
14 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
15 | subdirectory ~/.keras/ANTsXNet/.}
16 |
17 | \item{verbose}{print progress.}
18 | }
19 | \value{
20 | Cortical thickness image and segmentation probability images.
21 | }
22 | \description{
23 | Perform KellyKapowski cortical thickness using \code{deepAtropos} for
24 | segmentation. Description concerning implementation and evaluation:
25 | }
26 | \details{
27 | \url{https://www.medrxiv.org/content/10.1101/2020.10.19.20215392v1}
28 | }
29 | \examples{
30 | \dontrun{
31 | library( ANTsRNet )
32 | library( keras )
33 |
34 | image <- antsImageRead( "t1w_image.nii.gz" )
35 | kk <- corticalThickness( image )
36 | }
37 | }
38 | \author{
39 | Tustison NJ
40 | }
41 |
--------------------------------------------------------------------------------
/man/layer_mixture_density.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mixtureDensityUtilities.R
3 | \name{layer_mixture_density}
4 | \alias{layer_mixture_density}
5 | \title{Mixture density layer}
6 | \usage{
7 | layer_mixture_density(
8 | object,
9 | outputDimension,
10 | numberOfMixtures,
11 | trainable = TRUE
12 | )
13 | }
14 | \arguments{
15 | \item{object}{Object to compose layer with. This is either a
16 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to,
17 | or another Layer which this layer will call.}
18 |
19 | \item{outputDimension}{output dimension}
20 |
21 | \item{numberOfMixtures}{number of Gaussians used to model the function}
22 |
23 | \item{trainable}{Whether the layer weights will be updated during training.}
24 | }
25 | \value{
26 | a keras layer tensor
27 | }
28 | \description{
29 | Wraps a custom mixture density layer.
30 | }
31 | \examples{
32 | \dontrun{
33 | model <- keras_model_sequential()
34 | inputShape = c(20, 20, 1)
35 | model = model \%>\%
36 | layer_conv_2d(filters = 32, kernel_size = c(3,3), activation = 'relu',
37 | input_shape = inputShape)
38 | model \%>\%
39 | layer_mixture_density(outputDimension = c(18, 18),
40 | numberOfMixtures = 3)
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/man/layer_resample_tensor_to_target_tensor_3d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/resampleTensorUtilities.R
3 | \name{layer_resample_tensor_to_target_tensor_3d}
4 | \alias{layer_resample_tensor_to_target_tensor_3d}
5 | \title{Resampling a spatial tensor (3-D).}
6 | \usage{
7 | layer_resample_tensor_to_target_tensor_3d(
8 | object,
9 | interpolationType = c("nearestNeighbor", "nearest", "linear", "bilinear", "cubic",
10 | "bicubic"),
11 | name = NULL,
12 | trainable = FALSE
13 | )
14 | }
15 | \arguments{
16 | \item{object}{Object to compose layer with. This is either a
17 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to,
18 | or another Layer which this layer will call.}
19 |
20 | \item{interpolationType}{type of interpolation for resampling. Can be
21 | \code{nearestNeighbor}, \code{nearest},
22 | \code{linear}, \code{bilinear},
23 | \code{cubic}, or \code{bicubic}.}
24 |
25 | \item{name}{The name of the layer}
26 |
27 | \item{trainable}{Whether the layer weights will be updated during training.}
28 | }
29 | \value{
30 | a keras layer tensor
31 | }
32 | \description{
33 | Resamples a spatial tensor based on the specified shape and interpolation type.
34 | }
35 | \author{
36 | Tustison NJ
37 | }
38 |
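39 | \examples{
40 | \dontrun{
41 | # Minimal sketch, not taken from the original documentation: the tensor to
42 | # resample and the target tensor are assumed to be passed together as a list.
43 | library( keras )
44 | inputs <- layer_input( shape = c( 16L, 16L, 16L, 1L ) )
45 | targets <- layer_input( shape = c( 32L, 32L, 32L, 1L ) )
46 | outputs <- list( inputs, targets ) \%>\%
47 |   layer_resample_tensor_to_target_tensor_3d( interpolationType = "linear" )
48 | }
49 | }
50 | 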
--------------------------------------------------------------------------------
/man/layer_resample_tensor_to_target_tensor_2d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/resampleTensorUtilities.R
3 | \name{layer_resample_tensor_to_target_tensor_2d}
4 | \alias{layer_resample_tensor_to_target_tensor_2d}
5 | \title{Resampling a spatial tensor to a target tensor (2-D).}
6 | \usage{
7 | layer_resample_tensor_to_target_tensor_2d(
8 | object,
9 | interpolationType = c("nearestNeighbor", "nearest", "linear", "bilinear", "cubic",
10 | "bicubic"),
11 | name = NULL,
12 | trainable = FALSE
13 | )
14 | }
15 | \arguments{
16 | \item{object}{Object to compose layer with. This is either a
17 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to,
18 | or another Layer which this layer will call.}
19 |
20 | \item{interpolationType}{type of interpolation for resampling. Can be
21 | \code{nearestNeighbor}, \code{nearest},
22 | \code{linear}, \code{bilinear},
23 | \code{cubic}, or \code{bicubic}.}
24 |
25 | \item{name}{The name of the layer}
26 |
27 | \item{trainable}{Whether the layer weights will be updated during training.}
28 | }
29 | \value{
30 | a keras layer tensor
31 | }
32 | \description{
33 | Resamples a spatial tensor based on a target tensor and interpolation type.
34 | }
35 | \author{
36 | Tustison NJ
37 | }
38 |
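39 | \examples{
40 | \dontrun{
41 | # Minimal sketch, not taken from the original documentation: the tensor to
42 | # resample and the target tensor are assumed to be passed together as a list.
43 | library( keras )
44 | inputs <- layer_input( shape = c( 64L, 64L, 1L ) )
45 | targets <- layer_input( shape = c( 128L, 128L, 1L ) )
46 | outputs <- list( inputs, targets ) \%>\%
47 |   layer_resample_tensor_to_target_tensor_2d( interpolationType = "linear" )
48 | }
49 | }
50 | 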
--------------------------------------------------------------------------------
/inst/CITATION:
--------------------------------------------------------------------------------
1 | citHeader("To cite ANTsRNet in publications use:")
2 |
3 | citEntry(entry = "Article",
4 | title = "Convolutional Neural Networks with Template-Based Data Augmentation for Functional Lung Image Quantification",
5 | author = personList(
6 | as.person("Nicholas J. Tustison"),
7 | as.person("Zixuan Lin"),
8 | as.person("Xue Feng"),
9 | as.person("Nicholas Cullen"),
10 | as.person("Jaime F. Mata"),
11 | as.person("Lucia Flors"),
12 | as.person("James C. Gee"),
13 | as.person("Talissa A. Altes"),
14 | as.person("John P. Mugler III"),
15 | as.person("Kun Qing")),
16 | journal = "Academic Radiology",
17 | year = "2018",
18 | volume = "",
19 | number = "",
20 | pages = "",
21 | url = "https://www.ncbi.nlm.nih.gov/pubmed/30195415",
22 |
23 | textVersion =
24 | paste(" Nicholas J. Tustison, Brian B. Avants, et al. (2018).",
25 | "Convolutional Neural Networks with Template-Based Data Augmentation for Functional Lung Image Quantification.",
26 | "Academic Radiology.",
27 | "URL https://www.ncbi.nlm.nih.gov/pubmed/30195415.")
28 | )
--------------------------------------------------------------------------------
/man/ContextualAttentionLayer3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createInpaintingDeepFillModel.R
3 | \docType{class}
4 | \name{ContextualAttentionLayer3D}
5 | \alias{ContextualAttentionLayer3D}
6 | \title{Contextual attention layer (3-D)}
7 | \value{
8 | output tensor with the same shape as the input.
9 | }
10 | \description{
11 | Contextual attention layer for generative image inpainting described in
12 | }
13 | \details{
14 | Jiahui Yu, et al., Generative Image Inpainting with Contextual Attention,
15 | CVPR 2018.
16 |
17 | available here:\preformatted{ \code{https://arxiv.org/abs/1801.07892}
18 | }
19 | }
20 | \section{Usage}{
21 |
22 | \preformatted{layer <- ContextualAttentionLayer3D$new( scale )
23 |
24 | layer$call( x, mask = NULL )
25 | layer$build( input_shape )
26 | layer$compute_output_shape( input_shape )
27 | }
28 | }
29 |
30 | \section{Arguments}{
31 |
32 | \describe{
33 | \item{layer}{A \code{process} object.}
34 | }
35 | }
36 |
37 | \section{Details}{
38 |
39 | \code{$initialize} instantiates a new class.
40 |
41 | \code{$build}
42 |
43 | \code{$call} main body.
44 |
45 | \code{$compute_output_shape} computes the output shape.
46 | }
47 |
48 | \examples{
49 | x = ContextualAttentionLayer3D$new()
50 | x$build()
51 | }
52 | \author{
53 | Tustison NJ
54 | }
55 |
--------------------------------------------------------------------------------
/man/layer_resample_tensor_3d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/resampleTensorUtilities.R
3 | \name{layer_resample_tensor_3d}
4 | \alias{layer_resample_tensor_3d}
5 | \title{Resampling a spatial tensor (3-D).}
6 | \usage{
7 | layer_resample_tensor_3d(
8 | object,
9 | shape,
10 | interpolationType = c("nearestNeighbor", "nearest", "linear", "bilinear", "cubic",
11 | "bicubic"),
12 | name = NULL,
13 | trainable = FALSE
14 | )
15 | }
16 | \arguments{
17 | \item{object}{Object to compose layer with. This is either a
18 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to,
19 | or another Layer which this layer will call.}
20 |
21 | \item{shape}{vector or list of length 3 specifying the shape of the output tensor.}
22 |
23 | \item{interpolationType}{type of interpolation for resampling. Can be
24 | \code{nearestNeighbor}, \code{nearest},
25 | \code{linear}, \code{bilinear},
26 | \code{cubic}, or \code{bicubic}.}
27 |
28 | \item{name}{The name of the layer}
29 |
30 | \item{trainable}{Whether the layer weights will be updated during training.}
31 | }
32 | \value{
33 | a keras layer tensor
34 | }
35 | \description{
36 | Resamples a spatial tensor based on the specified shape and interpolation type.
37 | }
38 | \author{
39 | Tustison NJ
40 | }
41 |
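42 | \examples{
43 | \dontrun{
44 | # Minimal usage sketch with illustrative shapes (not from the original docs).
45 | library( keras )
46 | inputs <- layer_input( shape = c( 16L, 16L, 16L, 1L ) )
47 | outputs <- inputs \%>\%
48 |   layer_resample_tensor_3d( shape = c( 32L, 32L, 32L ), interpolationType = "linear" )
49 | }
50 | }
51 | 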
--------------------------------------------------------------------------------
/man/SuperResolutionGanModel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createSuperResolutionGanModel.R
3 | \docType{class}
4 | \name{SuperResolutionGanModel}
5 | \alias{SuperResolutionGanModel}
6 | \title{Super resolution GAN model}
7 | \description{
8 | Super resolution generative adversarial network from the paper:
9 | }
10 | \details{
11 | https://arxiv.org/abs/1609.04802
12 |
13 | and ported from the Keras (python) implementation:
14 |
15 | https://github.com/eriklindernoren/Keras-GAN/blob/master/srgan/srgan.py
16 | }
17 | \section{Arguments}{
18 |
19 | \describe{
20 | \item{lowResolutionImageSize}{}
21 | \item{numberOfResidualBlocks}{}
22 | }
23 | }
24 |
25 | \section{Details}{
26 |
27 | \code{$initialize} {instantiates a new class and builds the
28 | generator and discriminator.}
29 | \code{$buildGenerator}{build generator.}
30 | \code{$buildDiscriminator}{build discriminator.}
31 | }
32 |
33 | \examples{
34 | \dontrun{
35 |
36 | library( keras )
37 | library( ANTsRNet )
38 |
39 | keras::backend()$clear_session()
40 |
41 | ganModel <- SuperResolutionGanModel$new(
42 | lowResolutionImageSize = c( 112, 112, 3 ) )
43 | testthat::expect_error({
44 | ganModel <- SuperResolutionGanModel$new(
45 | lowResolutionImageSize = c( 64, 65, 3 ) )
46 | })
47 | }
48 |
49 | }
50 | \author{
51 | Tustison NJ
52 | }
53 |
--------------------------------------------------------------------------------
/man/layer_resample_tensor_2d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/resampleTensorUtilities.R
3 | \name{layer_resample_tensor_2d}
4 | \alias{layer_resample_tensor_2d}
5 | \title{Creates a resampled tensor (to fixed size) layer (2-D)}
6 | \usage{
7 | layer_resample_tensor_2d(
8 | object,
9 | shape,
10 | interpolationType = c("nearestNeighbor", "nearest", "linear", "bilinear", "cubic",
11 | "bicubic"),
12 | name = NULL,
13 | trainable = FALSE
14 | )
15 | }
16 | \arguments{
17 | \item{object}{Object to compose layer with. This is either a
18 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to,
19 | or another Layer which this layer will call.}
20 |
21 | \item{shape}{vector or list of length 2 specifying the shape of the output tensor.}
22 |
23 | \item{interpolationType}{type of interpolation for resampling. Can be
24 | \code{nearestNeighbor}, \code{nearest},
25 | \code{linear}, \code{bilinear},
26 | \code{cubic}, or \code{bicubic}.}
27 |
28 | \item{name}{The name of the layer}
29 |
30 | \item{trainable}{Whether the layer weights will be updated during training.}
31 | }
32 | \value{
33 | a keras layer tensor
34 | }
35 | \description{
36 | Resamples a spatial tensor based on the specified shape and interpolation type.
37 | }
38 | \author{
39 | Tustison NJ
40 | }
41 |
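42 | \examples{
43 | \dontrun{
44 | # Minimal usage sketch with illustrative shapes (not from the original docs).
45 | library( keras )
46 | inputs <- layer_input( shape = c( 64L, 64L, 1L ) )
47 | outputs <- inputs \%>\%
48 |   layer_resample_tensor_2d( shape = c( 32L, 32L ), interpolationType = "linear" )
49 | }
50 | }
51 | 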
--------------------------------------------------------------------------------
/appveyor.yml:
--------------------------------------------------------------------------------
1 | # DO NOT CHANGE the "init" and "install" sections below
2 |
3 | # Download script file from GitHub
4 | init:
5 | ps: |
6 | $ErrorActionPreference = "Stop"
7 | Invoke-WebRequest http://raw.github.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "..\appveyor-tool.ps1"
8 | Import-Module '..\appveyor-tool.ps1'
9 |
10 |
11 | install:
12 | ps: Bootstrap
13 |
14 | cache:
15 | - C:\RLibrary
16 |
17 | environment:
18 | NOT_CRAN: true
19 | # env vars that may need to be set, at least temporarily, from time to time
20 | # see https://github.com/krlmlr/r-appveyor#readme for details
21 | # USE_RTOOLS: true
22 | # R_REMOTES_STANDALONE: true
23 | USE_RTOOLS: true
24 | PYTHON: "C:\\Python36"
25 |
26 | # Adapt as necessary starting from here
27 |
28 | build_script:
29 | - "%PYTHON%\\python.exe -m pip install -r requirements.txt"
30 | - travis-tool.sh install_deps
31 |
32 | test_script:
33 | - travis-tool.sh run_tests
34 |
35 | on_failure:
36 | - 7z a failure.zip *.Rcheck\*
37 | - appveyor PushArtifact failure.zip
38 |
39 | artifacts:
40 | - path: '*.Rcheck\**\*.log'
41 | name: Logs
42 |
43 | - path: '*.Rcheck\**\*.out'
44 | name: Logs
45 |
46 | - path: '*.Rcheck\**\*.fail'
47 | name: Logs
48 |
49 | - path: '*.Rcheck\**\*.Rout'
50 | name: Logs
51 |
52 | - path: '\*_*.tar.gz'
53 | name: Bits
54 |
55 | - path: '\*_*.zip'
56 | name: Bits
57 |
--------------------------------------------------------------------------------
/man/ContextualAttentionLayer2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createInpaintingDeepFillModel.R
3 | \docType{class}
4 | \name{ContextualAttentionLayer2D}
5 | \alias{ContextualAttentionLayer2D}
6 | \title{Contextual attention layer (2-D)}
7 | \value{
8 | output tensor with the same shape as the input.
9 | }
10 | \description{
11 | Contextual attention layer for generative image inpainting described in
12 | }
13 | \details{
14 | Jiahui Yu, et al., Generative Image Inpainting with Contextual Attention,
15 | CVPR 2018.
16 |
17 | available here:\preformatted{ \code{https://arxiv.org/abs/1801.07892}
18 | }
19 | }
20 | \section{Usage}{
21 |
22 | \preformatted{layer <- ContextualAttentionLayer2D$new( scale )
23 |
24 | layer$call( x, mask = NULL )
25 | layer$build( input_shape )
26 | layer$compute_output_shape( input_shape )
27 | }
28 | }
29 |
30 | \section{Arguments}{
31 |
32 | \describe{
33 | \item{layer}{A \code{process} object.}
34 | \item{scale}{feature scale. Default = 20}
35 | \item{x}{}
36 | \item{mask}{}
37 | \item{input_shape}{}
38 | }
39 | }
40 |
41 | \section{Details}{
42 |
43 | \code{$initialize} instantiates a new class.
44 |
45 | \code{$build}
46 |
47 | \code{$call} main body.
48 |
49 | \code{$compute_output_shape} computes the output shape.
50 | }
51 |
52 | \examples{
53 | x = ContextualAttentionLayer2D$new()
54 | x$build()
55 |
56 | }
57 | \author{
58 | Tustison NJ
59 | }
60 |
--------------------------------------------------------------------------------
/man/L2NormalizationLayer2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ssdUtilities.R
3 | \docType{class}
4 | \name{L2NormalizationLayer2D}
5 | \alias{L2NormalizationLayer2D}
6 | \title{L2 2-D normalization layer for SSD300/512 architecture.}
7 | \value{
8 | output tensor with the same shape as the input.
9 | }
10 | \description{
11 | L2 2-D normalization layer for SSD300/512 architecture described in
12 | }
13 | \details{
14 | Wei Liu, Andrew Rabinovich, and Alexander C. Berg. ParseNet: Looking Wider
15 | to See Better.
16 |
17 | available here:\preformatted{ \code{https://arxiv.org/abs/1506.04579}
18 | }
19 | }
20 | \section{Usage}{
21 |
22 | \preformatted{layer <- L2NormalizationLayer2D$new( scale )
23 |
24 | layer$call( x, mask = NULL )
25 | layer$build( input_shape )
26 | layer$compute_output_shape( input_shape )
27 | }
28 | }
29 |
30 | \section{Arguments}{
31 |
32 | \describe{
33 | \item{layer}{A \code{process} object.}
34 | \item{scale}{feature scale. Default = 20}
35 | \item{x}{}
36 | \item{mask}{}
37 | \item{input_shape}{}
38 | }
39 | }
40 |
41 | \section{Details}{
42 |
43 | \code{$initialize} instantiates a new class.
44 |
45 | \code{$build}
46 |
47 | \code{$call} main body.
48 |
49 | \code{$compute_output_shape} computes the output shape.
50 | }
51 |
52 | \examples{
53 | x = L2NormalizationLayer2D$new()
54 | \dontrun{
55 | x$build(input_shape = c(20, 20, 20, 3))
56 | }
57 | }
58 | \author{
59 | Tustison NJ
60 | }
61 |
--------------------------------------------------------------------------------
/man/basisWarp.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/randomImageTransformAugmentation.R
3 | \name{basisWarp}
4 | \alias{basisWarp}
5 | \title{Generate a deformable map from basis}
6 | \usage{
7 | basisWarp(
8 | deformationBasis,
9 | betaParameters,
10 | numberOfCompositions = 2,
11 | spatialSmoothing = 0
12 | )
13 | }
14 | \arguments{
15 | \item{deformationBasis}{list containing deformationBasis set}
16 |
17 | \item{betaParameters}{vector containing deformationBasis set parameters}
18 |
19 | \item{numberOfCompositions}{integer greater than or equal to one}
20 |
21 | \item{spatialSmoothing}{spatial smoothing for generated deformation}
22 | }
23 | \value{
24 | list of fields
25 | }
26 | \description{
27 | The function will generate a deformable transformation (via exponential map)
28 | from a basis of deformations.
29 | }
30 | \examples{
31 | \dontrun{
32 | library( ANTsR )
33 | i1 = ri( 1 ) \%>\% resampleImage( 4 )
34 | i2 = ri( 2 ) \%>\% resampleImage( 4 )
35 | reg = antsRegistration( i1, i2, 'SyN' )
36 | w = composeTransformsToField( i1, reg$fwd )
37 | bw = basisWarp( list( w, w ), c( 0.25, 0.25 ), 2, 0 )
38 | bwApp = applyAntsrTransformToImage( bw, i2, i1 )
39 | bw = basisWarp( list( w, w ), c( 0.25, 0.25 )*(-1.0), 2, 0 ) # inverse
40 | bwApp = applyAntsrTransformToImage( bw, i1, i2 )
41 | }
42 | }
43 | \seealso{
44 | \code{\link{randomImageTransformParametersBatchGenerator}}
45 | }
46 | \author{
47 | Avants BB
48 | }
49 |
--------------------------------------------------------------------------------
/man/regressionMatchImage.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/regressionMatchImage.R
3 | \name{regressionMatchImage}
4 | \alias{regressionMatchImage}
5 | \title{Image intensity normalization using linear regression.}
6 | \usage{
7 | regressionMatchImage(
8 | sourceImage,
9 | referenceImage,
10 | maskImage = NULL,
11 | polyOrder = 1,
12 | truncate = TRUE
13 | )
14 | }
15 | \arguments{
16 | \item{sourceImage}{image whose intensities we will match to the
17 | \code{referenceImage} intensities.}
18 |
19 | \item{referenceImage}{defines the reference intensity function.}
20 |
21 | \item{maskImage}{Defines voxels for regression modeling.}
22 |
23 | \item{polyOrder}{order of the polynomial fit. Default is 1 (linear fit).}
24 |
25 | \item{truncate}{boolean which turns on/off the clipping of intensities.}
26 | }
27 | \value{
28 | the \code{sourceImage} matched to the \code{referenceImage}.
29 | }
30 | \description{
31 | Image intensity normalization by regressing the image
32 | intensities of the reference image with the source image.
33 | }
34 | \examples{
35 | library(ANTsRCore)
36 | sourceImage <- antsImageRead( getANTsRData( "r16" ) )
37 | referenceImage <- antsImageRead( getANTsRData( "r64" ) )
38 | matchedImage <- regressionMatchImage( sourceImage, referenceImage )
39 | bad_source = sourceImage[1:200, 1:200]
40 | testthat::expect_error(regressionMatchImage( bad_source, referenceImage ))
41 | }
42 | \author{
43 | Avants BB
44 | }
45 |
--------------------------------------------------------------------------------
/man/L2NormalizationLayer3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ssdUtilities.R
3 | \docType{class}
4 | \name{L2NormalizationLayer3D}
5 | \alias{L2NormalizationLayer3D}
6 | \title{L2 3-D normalization layer for SSD300/512 architecture.}
7 | \value{
8 | output tensor with the same shape as the input.
9 | }
10 | \description{
11 | L2 3-D normalization layer for SSD300/512 architecture described in
12 | }
13 | \details{
14 | Wei Liu, Andrew Rabinovich, and Alexander C. Berg. ParseNet: Looking Wider
15 | to See Better.
16 |
17 | available here:\preformatted{ \code{https://arxiv.org/abs/1506.04579}
18 | }
19 | }
20 | \section{Usage}{
21 |
22 | \preformatted{layer <- L2NormalizationLayer3D$new( scale )
23 |
24 | layer$call( x, mask = NULL )
25 | layer$build( input_shape )
26 | layer$compute_output_shape( input_shape )
27 | }
28 | }
29 |
30 | \section{Arguments}{
31 |
32 | \describe{
33 | \item{layer}{A \code{process} object.}
34 | \item{scale}{feature scale. Default = 20}
35 | \item{x}{}
36 | \item{mask}{}
37 | \item{input_shape}{}
38 | }
39 | }
40 |
41 | \section{Details}{
42 |
43 | \code{$initialize} instantiates a new class.
44 |
45 | \code{$build}
46 |
47 | \code{$call} main body.
48 |
49 | \code{$compute_output_shape} computes the output shape.
50 | }
51 |
52 | \examples{
53 | x = L2NormalizationLayer3D$new()
54 | \dontrun{
55 | x$build(input_shape = c(20, 20, 20, 20, 4))
56 | }
57 | }
58 | \author{
59 | Tustison NJ
60 | }
61 |
--------------------------------------------------------------------------------
/man/MixtureDensityNetworkLayer.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/mixtureDensityUtilities.R
3 | \docType{class}
4 | \name{MixtureDensityNetworkLayer}
5 | \alias{MixtureDensityNetworkLayer}
6 | \title{Mixture density network layer}
7 | \value{
8 | A Keras Layer
9 | }
10 | \description{
11 | Mixture density network layer
12 | }
13 | \section{Arguments}{
14 |
15 | \describe{
16 | \item{outputDimension}{}
17 | \item{numberOfMixtures}{}
18 | }
19 | }
20 |
21 | \section{Details}{
22 |
23 | \code{$initialize} instantiates a new class.
24 |
25 | \code{$call} main body.
26 |
27 | \code{$compute_output_shape} computes the output shape.
28 |
29 | Create custom density layers for each parameter of the
30 | mixed Gaussians. (mu, sigma, pi). I could not get the approach
31 | from the original implementation to work:
32 |
33 | https://github.com/cpmpercussion/keras-mdn-layer/blob/master/mdn/__init__.py#L28-L73
34 |
35 | where the author used the keras dense layers to create the
36 | custom MDN layer and assign the trainable weights directly
37 | thus circumventing the add_weight() function. Instead, I
38 | recreated dense layer functionality using the keras definition
39 | here:
40 |
41 | https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L796-L937
42 | }
43 |
44 | \examples{
45 | MixtureDensityNetworkLayer$new(outputDimension = c(50, 48),
46 | numberOfMixtures = 3)
47 |
48 | }
49 | \author{
50 | Tustison NJ
51 | }
52 |
--------------------------------------------------------------------------------
/man/drawRectangles.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ssdUtilities.R
3 | \name{drawRectangles}
4 | \alias{drawRectangles}
5 | \title{Plotting function for 2-D object detection visualization.}
6 | \usage{
7 | drawRectangles(
8 | image,
9 | boxes,
10 | boxColors = "red",
11 | confidenceValues = NULL,
12 | captions = NULL
13 | )
14 | }
15 | \arguments{
16 | \item{image}{standard image using something like \pkg{jpeg::readJPEG}.}
17 |
18 | \item{boxes}{a data frame or matrix where each row has the
19 | format: xmin, xmax, ymin, ymax.}
20 |
21 | \item{boxColors}{Optional scalar or vector of length = \code{numberOfBoxes}
22 | used for determining the colors of the different boxes.}
23 |
24 | \item{confidenceValues}{Optional vector of length = \code{numberOfBoxes} where
25 | each element is in the range \verb{[0, 1]}. Used for determining border width.}
26 |
27 | \item{captions}{Optional vector of length = \code{numberOfBoxes} where
28 | each element is the caption rendered with each box.}
29 | }
30 | \description{
31 | Renders boxes on objects within rasterized images.
32 | }
33 | \examples{
34 | jpg = ANTsRCore::getANTsRData("r16")
35 | if (requireNamespace("jpeg", quietly = TRUE)) {
36 | image = jpeg::readJPEG(jpg)
37 | vec2d = c(1, 10, 1, 10)
38 | drawRectangles(image, vec2d)
39 | vec2d = rbind(
40 | c(1, 10, 1, 10),
41 | c(20, 40, 30, 40)
42 | )
43 | drawRectangles(image, vec2d)
44 | }
45 | }
46 | \author{
47 | Tustison NJ
48 | }
49 |
--------------------------------------------------------------------------------
/man/elBicho.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/lungSegmentation.R
3 | \name{elBicho}
4 | \alias{elBicho}
5 | \title{Functional lung segmentation.}
6 | \usage{
7 | elBicho(
8 | ventilationImage,
9 | mask,
10 | useCoarseSlicesOnly = TRUE,
11 | antsxnetCacheDirectory = NULL,
12 | verbose = FALSE
13 | )
14 | }
15 | \arguments{
16 | \item{ventilationImage}{input ventilation image}
17 |
18 | \item{mask}{input mask image}
19 |
20 | \item{useCoarseSlicesOnly}{if \code{TRUE}, apply network only in the
21 | dimension of greatest slice thickness. If \code{FALSE}, apply to all
22 | dimensions and average the results.}
23 |
24 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
25 | template and model weights. Since these can be reused, if
26 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
27 | inst/extdata/ subfolder of the ANTsRNet package.}
28 |
29 | \item{verbose}{print progress.}
30 | }
31 | \value{
32 | ventilation segmentation and corresponding probability images
33 | }
34 | \description{
35 | Perform functional lung segmentation using hyperpolarized gases.
36 | }
37 | \details{
38 | \url{https://pubmed.ncbi.nlm.nih.gov/30195415/}
39 | }
40 | \examples{
41 | \dontrun{
42 | library( ANTsRNet )
43 | library( keras )
44 |
45 | image <- antsImageRead( "flair.nii.gz" )
46 | probabilityMask <-sysuMediaWmhSegmentation( image )
47 | }
48 | }
49 | \author{
50 | Tustison NJ
51 | }
52 |
--------------------------------------------------------------------------------
/man/convertCoordinates.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ssdUtilities.R
3 | \name{convertCoordinates}
4 | \alias{convertCoordinates}
5 | \title{Convert coordinates to/from min/max representation from/to centroids/width}
6 | \usage{
7 | convertCoordinates(boxes, type = c("minmax2centroids", "centroids2minmax"))
8 | }
9 | \arguments{
10 | \item{boxes}{A vector or 2-D array where each row corresponds to a single box
11 | consisting of the format (xmin,xmax,ymin,ymax) or (centerx,centery,width,height)
12 | for 2-D vs. (xmin,xmax,ymin,ymax,zmin,zmax) or
13 | (centerx,centery,centerz,width,height,depth) for 3-D.}
14 |
15 | \item{type}{either \verb{'minmax2centroids'} or \verb{'centroids2minmax'}}
16 | }
17 | \value{
18 | a vector or 2-D array with the converted coordinates
19 | }
20 | \description{
21 | Function for converting box coordinates to/from min/max representation
22 | from/to centroids/width
23 | }
24 | \examples{
25 | vec2d = c(1, 10, 1, 10)
26 | convertCoordinates(vec2d)
27 | convertCoordinates(vec2d, type = "centroids2minmax")
28 | convertCoordinates(rbind( vec2d, vec2d, vec2d))
29 | convertCoordinates(rbind( vec2d, vec2d, vec2d), "centroids2minmax")
30 |
31 | vec3d = c(1, 10, 1, 10, 1, 10)
32 | convertCoordinates(vec3d)
33 | convertCoordinates(vec3d, type = "centroids2minmax")
34 | convertCoordinates(rbind( vec3d, vec3d, vec3d))
35 | convertCoordinates(rbind( vec3d, vec3d, vec3d), type = "centroids2minmax")
36 |
37 | }
38 | \author{
39 | Tustison NJ
40 | }
41 |
--------------------------------------------------------------------------------
/man/createHippMapp3rUnetModel3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createCustomUnetModel.R
3 | \name{createHippMapp3rUnetModel3D}
4 | \alias{createHippMapp3rUnetModel3D}
5 | \title{Implementation of the "HippMapp3r" U-net architecture}
6 | \usage{
7 | createHippMapp3rUnetModel3D(inputImageSize, doFirstNetwork = TRUE)
8 | }
9 | \arguments{
10 | \item{inputImageSize}{Used for specifying the input tensor shape. The
11 | shape (or dimension) of that tensor is the image dimensions followed by
12 | the number of channels (e.g., red, green, and blue).}
13 |
14 | \item{doFirstNetwork}{boolean dictating if the model built should be the
15 | first (initial) network or second (refinement) network.}
16 | }
17 | \value{
18 | a u-net keras model
19 | }
20 | \description{
21 | Creates a keras model implementation of the u-net architecture
22 | described here:
23 | }
24 | \details{
25 | \preformatted{\url{https://onlinelibrary.wiley.com/doi/pdf/10.1002/hbm.24811}
26 | }
27 |
28 | with the implementation available here:\preformatted{\url{https://github.com/mgoubran/HippMapp3r}
29 | }
30 | }
31 | \examples{
32 | \dontrun{
33 |
34 | model1 <- createHippMapp3rUnetModel3D( c( 160, 160, 128, 1 ), doFirstNetwork = TRUE )
35 | model2 <- createHippMapp3rUnetModel3D( c( 112, 112, 64, 1 ), doFirstNetwork = FALSE )
36 |
37 | json_config <- model_to_json( model1 )
38 | writeLines( json_config, "/Users/ntustison/Desktop/model1_config.json" )
39 |
40 | }
41 | }
42 | \author{
43 | Tustison NJ
44 | }
45 |
--------------------------------------------------------------------------------
/man/layer_anchor_box_2d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ssdUtilities.R
3 | \name{layer_anchor_box_2d}
4 | \alias{layer_anchor_box_2d}
5 | \alias{layer_anchor_box_3d}
6 | \title{Anchor box layer (2-D and 3-D)}
7 | \usage{
8 | layer_anchor_box_2d(
9 | object,
10 | imageSize,
11 | scale,
12 | nextScale,
13 | aspectRatios,
14 | variances,
15 | name = NULL,
16 | trainable = TRUE
17 | )
18 |
19 | layer_anchor_box_3d(
20 | object,
21 | imageSize,
22 | scale,
23 | nextScale,
24 | aspectRatios,
25 | variances,
26 | name = NULL,
27 | trainable = TRUE
28 | )
29 | }
30 | \arguments{
31 | \item{object}{Object to compose layer with. This is either a
32 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to,
33 | or another Layer which this layer will call.}
34 |
35 | \item{imageSize}{size of the image, passed to \code{\link{create_layer}}}
36 |
37 | \item{scale}{box scale, passed to \code{\link{create_layer}}}
38 |
39 | \item{nextScale}{box scale, passed to \code{\link{create_layer}}}
40 |
41 | \item{aspectRatios}{list of ratios used for the boxes,
42 | passed to \code{\link{create_layer}}}
43 |
44 | \item{variances}{list of variances, passed to \code{\link{create_layer}}}
45 |
46 | \item{name}{The name of the layer}
47 |
48 | \item{trainable}{logical indicating if it is trainable or not}
49 | }
50 | \value{
51 | a keras layer tensor
52 | }
53 | \description{
54 | Wraps a custom layer for the SSD network
55 | }
56 |
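57 | \examples{
58 | \dontrun{
59 | # Minimal sketch with illustrative SSD-style parameter values; the shapes and
60 | # values below are assumptions and are not taken from the original documentation.
61 | library( keras )
62 | featureMap <- layer_input( shape = c( 38L, 38L, 512L ) )
63 | anchors <- featureMap \%>\%
64 |   layer_anchor_box_2d( imageSize = c( 300, 300 ), scale = 0.1, nextScale = 0.2,
65 |     aspectRatios = c( 1.0, 2.0, 0.5 ), variances = rep( 1.0, 4 ) )
66 | }
67 | }
68 | 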
--------------------------------------------------------------------------------
/man/categorical_focal_loss.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customMetrics.R
3 | \name{categorical_focal_loss}
4 | \alias{categorical_focal_loss}
5 | \title{Function for categorical focal loss}
6 | \usage{
7 | categorical_focal_loss(y_true, y_pred, gamma = 2, alpha = 0.25)
8 | }
9 | \arguments{
10 | \item{y_true}{True labels (Tensor)}
11 |
12 | \item{y_pred}{Predictions (Tensor of the same shape as \code{y_true})}
13 |
14 | \item{gamma}{focusing parameter for modulating factor (1-p). Default = 2.0.}
15 |
16 | \item{alpha}{weighing factor in balanced cross entropy. Default = 0.25.}
17 | }
18 | \value{
19 | function value
20 | }
21 | \description{
22 | The categorical focal loss discussed in this paper:
23 | }
24 | \details{
25 | \url{https://arxiv.org/pdf/1708.02002.pdf}
26 |
27 | and ported from this implementation:
28 |
29 | \url{https://github.com/umbertogriffo/focal-loss-keras/blob/master/losses.py}
30 |
31 | Used to handle imbalanced classes.
32 | }
33 | \examples{
34 |
35 | library( ANTsRNet )
36 | library( keras )
37 |
38 | model <- createUnetModel2D( c( 64, 64, 1 ) )
39 |
40 | metric_categorical_focal_gain <-
41 | custom_metric( "categorical_focal_gain",
42 | categorical_focal_gain( alpha = 0.25, gamma = 2.0 ) )
43 |
44 | model \%>\% compile( loss = categorical_focal_loss( alpha = 0.25, gamma = 2.0 ),
45 | optimizer = optimizer_adam( lr = 0.0001 ),
46 | metrics = c( metric_categorical_focal_gain ) )
47 |
48 | }
49 | \author{
50 | Tustison NJ
51 | }
52 |
--------------------------------------------------------------------------------
/man/getANTsXNetData.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/getANTsXNetData.R
3 | \name{getANTsXNetData}
4 | \alias{getANTsXNetData}
5 | \title{getANTsXNetData}
6 | \usage{
7 | getANTsXNetData(
8 | fileId = c("show", "biobank", "croppedMni152", "mprage_hippmapp3r",
9 | "protonLungTemplate", "ctLungTemplate", "priorDktLabels", "priorDeepFlashLeftLabels",
10 | "priorDeepFlashRightLabels", "S_template3", "adni", "ixi", "kirby", "mni152", "nki",
11 | "nki10", "oasis"),
12 | targetFileName,
13 | antsxnetCacheDirectory = NULL
14 | )
15 | }
16 | \arguments{
17 | \item{fileId}{one of the permitted file ids or pass "show" to list all
18 | valid possibilities. Note that most require internet access to download.}
19 |
20 | \item{targetFileName}{optional target filename}
21 |
22 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
23 | template and model weights. Since these can be reused, if
24 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
25 | subdirectory ~/.keras/ANTsXNet/.}
26 | }
27 | \value{
28 | filename string
29 | }
30 | \description{
31 | Download data such as prefabricated templates and spatial priors.
32 | }
33 | \note{
34 | See \url{https://figshare.com/authors/Nick_Tustison/441144}
35 | or \url{https://figshare.com/authors/Brian_Avants/418551}
36 | for some more descriptions
37 | }
38 | \examples{
39 | \dontrun{
40 | net <- getANTsXNetData("biobank")
41 | }
42 | }
43 | \author{
44 | Avants BB
45 | }
46 |
--------------------------------------------------------------------------------
/man/createConvolutionalAutoencoderModel2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createConvolutionalAutoencoderModel.R
3 | \name{createConvolutionalAutoencoderModel2D}
4 | \alias{createConvolutionalAutoencoderModel2D}
5 | \title{Function for creating a 2-D symmetric convolutional autoencoder model.}
6 | \usage{
7 | createConvolutionalAutoencoderModel2D(
8 | inputImageSize,
9 | numberOfFiltersPerLayer = c(32, 64, 128, 10),
10 | convolutionKernelSize = c(5, 5),
11 | deconvolutionKernelSize = c(5, 5)
12 | )
13 | }
14 | \arguments{
15 | \item{inputImageSize}{vector defining spatial dimensions + channels}
16 |
17 | \item{numberOfFiltersPerLayer}{vector defining the number of convolutional
18 | filters in the encoding branch per layer}
19 |
20 | \item{convolutionKernelSize}{kernel size for the convolutional filters}
21 | 
22 | \item{deconvolutionKernelSize}{kernel size for the convolutional transpose
23 | filters}
24 | }
25 | \value{
26 | two models: the convolutional encoder and convolutional auto-encoder
27 | }
28 | \description{
29 | Builds a convolutional autoencoder based on the specified array
30 | defining the number of units in the encoding branch. Ported to
31 | Keras R from the Keras python implementation here:
32 | }
33 | \details{
34 | \url{https://github.com/XifengGuo/DCEC}
35 | }
36 | \examples{
37 |
38 | library( ANTsRNet )
39 | library( keras )
40 |
41 | ae <- createConvolutionalAutoencoderModel2D( c( 32, 32, 1 ) )
42 |
43 | }
44 | \author{
45 | Tustison NJ
46 | }
47 |
--------------------------------------------------------------------------------
/man/createConvolutionalAutoencoderModel3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createConvolutionalAutoencoderModel.R
3 | \name{createConvolutionalAutoencoderModel3D}
4 | \alias{createConvolutionalAutoencoderModel3D}
5 | \title{Function for creating a 3-D symmetric convolutional autoencoder model.}
6 | \usage{
7 | createConvolutionalAutoencoderModel3D(
8 | inputImageSize,
9 | numberOfFiltersPerLayer = c(32, 64, 128, 10),
10 | convolutionKernelSize = c(5, 5, 5),
11 | deconvolutionKernelSize = c(5, 5, 5)
12 | )
13 | }
14 | \arguments{
15 | \item{inputImageSize}{vector defining spatial dimensions + channels}
16 |
17 | \item{numberOfFiltersPerLayer}{vector defining the number of convolutional
18 | filters in the encoding branch per layer}
19 |
20 | \item{convolutionKernelSize}{kernel size for the convolutional filters}
21 | 
22 | \item{deconvolutionKernelSize}{kernel size for the convolutional transpose
23 | filters}
24 | }
25 | \value{
26 | two models: the convolutional encoder and convolutional auto-encoder
27 | }
28 | \description{
29 | Builds a convolutional autoencoder based on the specified array
30 | defining the number of units in the encoding branch. Ported to
31 | Keras R from the Keras python implementation here:
32 | }
33 | \details{
34 | \url{https://github.com/XifengGuo/DCEC}
35 | }
36 | \examples{
37 |
38 | library( ANTsRNet )
39 | library( keras )
40 |
41 | ae <- createConvolutionalAutoencoderModel3D( c( 32, 32, 32, 1 ) )
42 |
43 | }
44 | \author{
45 | Tustison NJ
46 | }
47 |
--------------------------------------------------------------------------------
/man/categorical_focal_gain.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customMetrics.R
3 | \name{categorical_focal_gain}
4 | \alias{categorical_focal_gain}
5 | \title{Function for categorical focal gain}
6 | \usage{
7 | categorical_focal_gain(y_true, y_pred, gamma = 2, alpha = 0.25)
8 | }
9 | \arguments{
10 | \item{y_true}{True labels (Tensor)}
11 |
12 | \item{y_pred}{Predictions (Tensor of the same shape as \code{y_true})}
13 |
14 | \item{gamma}{focusing parameter for modulating factor (1-p). Default = 2.0.}
15 |
16 | \item{alpha}{weighing factor in balanced cross entropy. Default = 0.25.}
17 | }
18 | \value{
19 | function value
20 | }
21 | \description{
22 | The negative of the categorical focal loss discussed
23 | in this paper:
24 | }
25 | \details{
26 | \url{https://arxiv.org/pdf/1708.02002.pdf}
27 |
28 | and ported from this implementation:
29 |
30 | \url{https://github.com/umbertogriffo/focal-loss-keras/blob/master/losses.py}
31 |
32 | Used to handle imbalanced classes.
33 | }
34 | \examples{
35 |
36 | library( ANTsRNet )
37 | library( keras )
38 |
39 | model <- createUnetModel2D( c( 64, 64, 1 ) )
40 |
41 | metric_categorical_focal_gain <-
42 | custom_metric( "categorical_focal_gain",
43 | categorical_focal_gain( alpha = 0.25, gamma = 2.0 ) )
44 |
45 | model \%>\% compile( loss = categorical_focal_loss( alpha = 0.25, gamma = 2.0 ),
46 | optimizer = optimizer_adam( lr = 0.0001 ),
47 | metrics = c( metric_categorical_focal_gain ) )
48 |
49 | }
50 | \author{
51 | Tustison NJ
52 | }
53 |
--------------------------------------------------------------------------------
/man/createSimpleFullyConvolutionalNeuralNetworkModel3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createCustomModel.R
3 | \name{createSimpleFullyConvolutionalNeuralNetworkModel3D}
4 | \alias{createSimpleFullyConvolutionalNeuralNetworkModel3D}
5 | \title{Implementation of the "SCFN" architecture for Brain/Gender prediction}
6 | \usage{
7 | createSimpleFullyConvolutionalNeuralNetworkModel3D(
8 | inputImageSize,
9 | numberOfFiltersPerLayer = c(32, 64, 128, 256, 256, 64),
10 | numberOfBins = 40,
11 | dropoutRate = 0.5,
12 | doExperimentalVariant = FALSE
13 | )
14 | }
15 | \arguments{
16 | \item{inputImageSize}{Used for specifying the input tensor shape. The
17 | shape (or dimension) of that tensor is the image dimensions followed by
18 | the number of channels (e.g., red, green, and blue).}
19 |
20 | \item{numberOfFiltersPerLayer}{number of filters for the convolutional layers}
21 |
22 | \item{numberOfBins}{number of bins for final softmax output.}
23 |
24 | \item{dropoutRate}{dropout rate before final convolution layer.}
25 | }
26 | \value{
27 | a SCFN keras model
28 | }
29 | \description{
30 | Creates a keras model implementation of the Simple Fully Convolutional
31 | Network model from the FMRIB group:
32 | }
33 | \details{
34 | \preformatted{ \url{https://github.com/ha-ha-ha-han/UKBiobank_deep_pretrain}
35 | }
36 | }
37 | \examples{
38 |
39 | library( ANTsRNet )
40 |
41 | model <- createSimpleFullyConvolutionalNeuralNetworkModel3D( list( NULL, NULL, NULL, 1 ) )
42 |
43 | }
44 | \author{
45 | Tustison NJ
46 | }
47 |
--------------------------------------------------------------------------------
/man/uvaSeg.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/uvaSeg.R
3 | \name{uvaSeg}
4 | \alias{uvaSeg}
5 | \title{Unsupervised variational autoencoder segmentation}
6 | \usage{
7 | uvaSeg(
8 | image,
9 | model,
10 | k,
11 | mask,
12 | returnProbabilities = FALSE,
13 | batchSize = 1028,
14 | standardize = TRUE,
15 | verbose = FALSE
16 | )
17 | }
18 | \arguments{
19 | \item{image}{input image}
20 |
21 | \item{model}{the model output from \code{uvaSegTrain}}
22 |
23 | \item{k}{number of clusters or cluster centers}
24 |
25 | \item{mask}{defining output segmentation space}
26 |
27 | \item{returnProbabilities}{boolean}
28 |
29 | \item{batchSize}{for the prediction}
30 |
31 | \item{standardize}{boolean controlling whether patches are standardized}
32 |
33 | \item{verbose}{boolean}
34 | }
35 | \value{
36 | segmentation and probability images are output
37 | }
38 | \description{
39 | Trains a variational autoencoding with a convolutional network. This is
40 | followed by k-means clustering to produce a segmentation and probabilities.
41 | }
42 | \examples{
43 | \dontrun{
44 | library(ANTsR)
45 | img <- ri( 1 ) \%>\% resampleImage( c(4,4) ) \%>\% iMath( "Normalize" )
46 | mask = randomMask( getMask( img ), 50 )
47 | patch = getNeighborhoodInMask( img, mask, c(3,3), boundary.condition = "NA" )
48 | uvaSegModel = uvaSegTrain( patch, k = 6 )
49 | tarImg = ri( 3 ) \%>\% resampleImage( c(4,4) )
50 | uvaSegmentation = uvaSeg(tarImg, uvaSegModel, k = 3, getMask( tarImg ) )
51 | }
52 | }
53 | \author{
54 | Avants BB
55 | }
56 |
--------------------------------------------------------------------------------
/DESCRIPTION:
--------------------------------------------------------------------------------
1 | Package: ANTsRNet
2 | Type: Package
3 | Title: Neural Networks for Medical Image Processing
4 | Version: 1.1
5 | Date: 2020-03-08
6 | Authors@R: c(
7 | person(c("Nicholas", "J"), "Tustison", role = c("aut", "cre"), email = "ntustison@gmail.com", comment = c(ORCID = "0000-0001-9418-5103")),
8 | person(c("Brian", "B"), "Avants", role = c("aut"), email = "stnava@gmail.com", comment = c(ORCID = "0000-0002-4212-3362")),
9 | person("John", "Muschelli", role = "ctb",
10 | comment = c(ORCID = "0000-0001-6469-1750"))
11 | )
12 | Maintainer: Nicholas J Tustison
13 | Description: Deep learning tools for medical image processing, interfacing
14 | with the 'ANTsR' package and Advanced Normalization Tools ('ANTs').
15 | License: Apache License (>=2.0)
16 | LazyLoad: yes
17 | Depends:
18 | R (>= 3.2)
19 | Imports:
20 | ANTsRCore,
21 | ANTsR,
22 | knitr,
23 | keras (>= 2.2.5.0),
24 | reticulate (>= 1.6.0.9002),
25 | tensorflow (>= 1.5),
26 | tfruns (>= 1.0),
27 | magrittr,
28 | zeallot,
29 | methods,
30 | mvtnorm,
31 | R6,
32 | Rcpp,
33 | stats
34 | Suggests:
35 | ggplot2,
36 | rmarkdown,
37 | testthat (>= 2.1.0),
38 | kirby21.t1,
39 | covr,
40 | jpeg
41 | StagedInstall: no
42 | Remotes:
43 | rstudio/reticulate,
44 | rstudio/keras
45 | SystemRequirements: Keras >= 2.0 (https://keras.io)
46 | Roxygen: list(markdown = TRUE)
47 | RoxygenNote: 7.1.1
48 | VignetteBuilder: knitr
49 | BugReports: https://github.com/ANTsX/ANTsRNet/issues
50 | URL: https://github.com/ANTsX/ANTsRNet
51 | Encoding: UTF-8
52 |
--------------------------------------------------------------------------------
/man/VanillaGanModel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createVanillaGanModel.R
3 | \docType{class}
4 | \name{VanillaGanModel}
5 | \alias{VanillaGanModel}
6 | \title{Vanilla GAN model}
7 | \description{
8 | Original generative adversarial network from the paper:
9 | }
10 | \details{
11 | https://arxiv.org/abs/1406.2661
12 |
13 | and ported from the Keras (python) implementation:
14 |
15 | https://github.com/eriklindernoren/Keras-GAN/blob/master/gan/gan.py
16 | }
17 | \section{Arguments}{
18 |
19 | \describe{
20 | \item{inputImageSize}{size of the input image (image dimensions followed by the number of channels).}
21 | \item{latentDimension}{dimension of the latent noise vector supplied to the generator.}
22 | }
23 | }
24 |
25 | \section{Details}{
26 |
27 | \code{$initialize}{instantiates a new class and builds the
28 | generator and discriminator.}
29 | \code{$buildGenerator}{build generator.}
30 | \code{$buildDiscriminator}{build discriminator.}
31 | }
32 |
33 | \examples{
34 | \dontrun{
35 |
36 | library( keras )
37 | library( ANTsRNet )
38 |
39 | keras::backend()$clear_session()
40 |
41 | # Let's use the mnist data set.
42 |
43 | mnist <- dataset_mnist()
44 |
45 | numberOfTrainingData <- length( mnist$train$y )
46 |
47 | inputImageSize <- c( dim( mnist$train$x[1,,] ), 1 )
48 |
49 | x <- array( data = mnist$train$x / 255, dim = c( numberOfTrainingData, inputImageSize ) )
50 | y <- mnist$train$y
51 |
52 | numberOfClusters <- length( unique( mnist$train$y ) )
53 |
54 | # Instantiate the GAN model
55 |
56 | ganModel <- VanillaGanModel$new(
57 | inputImageSize = inputImageSize,
58 | latentDimension = 100 )
59 |
60 | ganModel$train( x, numberOfEpochs = 100 )
61 | }
62 |
63 | }
64 | \author{
65 | Tustison NJ
66 | }
67 |
--------------------------------------------------------------------------------
/man/WassersteinGanModel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createWassersteinGanModel.R
3 | \docType{class}
4 | \name{WassersteinGanModel}
5 | \alias{WassersteinGanModel}
6 | \title{Wasserstein GAN model}
7 | \description{
8 | Wasserstein generative adversarial network from the paper:
9 | }
10 | \details{
11 | https://arxiv.org/abs/1701.07875
12 |
13 | and ported from the Keras (python) implementation:
14 |
15 | https://github.com/eriklindernoren/Keras-GAN/blob/master/wgan/wgan.py
16 | }
17 | \section{Arguments}{
18 |
19 | \describe{
20 | \item{inputImageSize}{size of the input image (image dimensions followed by the number of channels).}
21 | \item{latentDimension}{dimension of the latent noise vector supplied to the generator.}
22 | }
23 | }
24 |
25 | \section{Details}{
26 |
27 | \code{$initialize}{instantiates a new class and builds the
28 | generator and critic.}
29 | \code{$buildGenerator}{build generator.}
30 | \code{$buildCritic}{build critic.}
31 | }
32 |
33 | \examples{
34 | library( keras )
35 | library( ANTsRNet )
36 |
37 | keras::backend()$clear_session()
38 |
39 | # Let's use the mnist data set.
40 |
41 | mnist <- dataset_mnist()
42 |
43 | numberOfTrainingData <- length( mnist$train$y )
44 |
45 | inputImageSize <- c( dim( mnist$train$x[1,,] ), 1 )
46 |
47 | x <- array( data = mnist$train$x / 255, dim = c( numberOfTrainingData, inputImageSize ) )
48 | y <- mnist$train$y
49 |
50 | numberOfClusters <- length( unique( mnist$train$y ) )
51 |
52 | # Instantiate the WGAN model
53 |
54 | ganModel <- WassersteinGanModel$new(
55 | inputImageSize = inputImageSize,
56 | latentDimension = 100 )
57 | \donttest{
58 | ganModel$train( x, numberOfEpochs = 5 )
59 | }
60 |
61 | }
62 | \author{
63 | Tustison NJ
64 | }
65 |
--------------------------------------------------------------------------------
/man/createDenoisingAutoEncoderSuperResolutionModel2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in
3 | % R/createDenoisingAutoEncoderSuperResolutionModel.R
4 | \name{createDenoisingAutoEncoderSuperResolutionModel2D}
5 | \alias{createDenoisingAutoEncoderSuperResolutionModel2D}
6 | \title{2-D implementation of the denoising autoencoder image super resolution architecture.}
7 | \usage{
8 | createDenoisingAutoEncoderSuperResolutionModel2D(
9 | inputImageSize,
10 | convolutionKernelSizes = list(c(3, 3), c(5, 5)),
11 | numberOfEncodingLayers = 2,
12 | numberOfFilters = 64
13 | )
14 | }
15 | \arguments{
16 | \item{inputImageSize}{Used for specifying the input tensor shape. The
17 | shape (or dimension) of that tensor is the image dimensions followed by
18 | the number of channels (e.g., red, green, and blue). The batch size
19 | (i.e., number of training images) is not specified a priori.}
20 |
21 | \item{convolutionKernelSizes}{a 2-element list of 2-D vectors specifying the
22 | kernel size at each convolution layer. The first element is the kernel size
23 | of the encoding layers and the 2nd element is the kernel size of the final
24 | convolution layer.}
25 |
26 | \item{numberOfEncodingLayers}{the number of encoding layers.}
27 |
28 | \item{numberOfFilters}{the number of filters for each encoding layer.}
29 | }
30 | \value{
31 | a keras model for image super resolution
32 | }
33 | \description{
34 | 2-D implementation of the denoising autoencoder image super resolution architecture.
35 | }
36 | \examples{
37 | createDenoisingAutoEncoderSuperResolutionModel2D(c( 28, 28, 1 ))
38 | }
39 | \author{
40 | Tustison NJ
41 | }
42 |
--------------------------------------------------------------------------------
/man/layer_attention_2d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/attentionUtilities.R
3 | \name{layer_attention_2d}
4 | \alias{layer_attention_2d}
5 | \title{Attention layer (2-D)}
6 | \usage{
7 | layer_attention_2d(
8 | object,
9 | numberOfChannels,
10 | doGoogleBrainVersion = TRUE,
11 | trainable = TRUE
12 | )
13 | }
14 | \arguments{
15 | \item{object}{Object to compose layer with. This is either a
16 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to
17 | or another Layer which this layer will call.}
18 |
19 | \item{numberOfChannels}{number of channels of the input feature map.}
20 |
21 | \item{doGoogleBrainVersion}{boolean. Variant described at second url.}
22 |
23 | \item{trainable}{Whether the layer weights will be updated during training.}
24 | }
25 | \value{
26 | a keras layer tensor
27 | }
28 | \description{
29 | Wraps the AttentionLayer2D taken from the following python implementation
30 | }
31 | \details{
32 | \url{https://stackoverflow.com/questions/50819931/self-attention-gan-in-keras}
33 | \url{https://github.com/taki0112/Self-Attention-GAN-Tensorflow}
34 |
35 | based on the following paper:
36 |
37 | \url{https://arxiv.org/abs/1805.08318}
38 | }
39 | \examples{
40 |
41 | \dontrun{
42 | library( keras )
43 | library( ANTsRNet )
44 |
45 | inputShape <- c( 100, 100, 3 )
46 | input <- layer_input( shape = inputShape )
47 |
48 | numberOfFilters <- 64
49 | outputs <- input \%>\% layer_conv_2d( filters = numberOfFilters, kernel_size = 2 )
50 | outputs <- outputs \%>\% layer_attention_2d( numberOfFilters )
51 |
52 | model <- keras_model( inputs = input, outputs = outputs )
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/man/layer_attention_3d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/attentionUtilities.R
3 | \name{layer_attention_3d}
4 | \alias{layer_attention_3d}
5 | \title{Attention layer (3-D)}
6 | \usage{
7 | layer_attention_3d(
8 | object,
9 | numberOfChannels,
10 | doGoogleBrainVersion = TRUE,
11 | trainable = TRUE
12 | )
13 | }
14 | \arguments{
15 | \item{object}{Object to compose layer with. This is either a
16 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to
17 | or another Layer which this layer will call.}
18 |
19 | \item{numberOfChannels}{number of channels of the input feature map.}
20 |
21 | \item{doGoogleBrainVersion}{boolean. Variant described at second url.}
22 |
23 | \item{trainable}{Whether the layer weights will be updated during training.}
24 | }
25 | \value{
26 | a keras layer tensor
27 | }
28 | \description{
29 | Wraps the AttentionLayer3D taken from the following python implementation
30 | }
31 | \details{
32 | \url{https://stackoverflow.com/questions/50819931/self-attention-gan-in-keras}
33 | \url{https://github.com/taki0112/Self-Attention-GAN-Tensorflow}
34 |
35 | based on the following paper:
36 |
37 | \url{https://arxiv.org/abs/1805.08318}
38 | }
39 | \examples{
40 | \dontrun{
41 | library( keras )
42 | library( ANTsRNet )
43 |
44 | inputShape <- c( 100, 100, 100, 3 )
45 | input <- layer_input( shape = inputShape )
46 |
47 | numberOfFilters <- 64
48 | outputs <- input \%>\% layer_conv_3d( filters = numberOfFilters, kernel_size = 2 )
49 | outputs <- outputs \%>\% layer_attention_3d( numberOfFilters )
50 |
51 | model <- keras_model( inputs = input, outputs = outputs )
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/man/layer_spatial_transformer_2d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/spatialTransformerNetworkUtilities.R
3 | \name{layer_spatial_transformer_2d}
4 | \alias{layer_spatial_transformer_2d}
5 | \alias{layer_spatial_transformer_3d}
6 | \title{spatial transformer layer (2-D and 3-D)}
7 | \usage{
8 | layer_spatial_transformer_2d(
9 | object,
10 | resampledSize,
11 | transformType = "affine",
12 | interpolatorType = "linear",
13 | name = NULL
14 | )
15 |
16 | layer_spatial_transformer_3d(
17 | object,
18 | resampledSize,
19 | transformType = "affine",
20 | interpolatorType = "linear",
21 | name = NULL
22 | )
23 | }
24 | \arguments{
25 | \item{object}{Object to compose layer with. This is either a
26 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to,
27 | or another Layer which this layer will call.}
28 |
29 | \item{resampledSize}{size of the output in voxels}
30 |
31 | \item{transformType}{the spatial transform}
32 |
33 | \item{interpolatorType}{interpolation used for the sampling}
34 |
35 | \item{name}{The name of the layer}
36 | }
37 | \value{
38 | a keras layer tensor
39 | }
40 | \description{
41 | Wraps a custom spatial transformer layer
42 | }
43 | \examples{
44 | \dontrun{
45 | model <- keras_model_sequential()
46 | input_shape = c(20, 20, 1)
47 | model = model \%>\%
48 | layer_conv_2d(filters = 32, kernel_size = c(3,3), activation = 'relu',
49 | input_shape = input_shape)
50 | model \%>\%
51 | layer_spatial_transformer_2d(resampledSize = c(50, 50))
52 | model \%>\%
53 | layer_spatial_transformer_2d(resampledSize = c(50, 50, 1))
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/man/createDenoisingAutoEncoderSuperResolutionModel3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in
3 | % R/createDenoisingAutoEncoderSuperResolutionModel.R
4 | \name{createDenoisingAutoEncoderSuperResolutionModel3D}
5 | \alias{createDenoisingAutoEncoderSuperResolutionModel3D}
6 | \title{3-D implementation of the denoising autoencoder image super resolution architecture.}
7 | \usage{
8 | createDenoisingAutoEncoderSuperResolutionModel3D(
9 | inputImageSize,
10 | convolutionKernelSizes = list(c(3, 3, 3), c(5, 5, 5)),
11 | numberOfEncodingLayers = 2,
12 | numberOfFilters = 64
13 | )
14 | }
15 | \arguments{
16 | \item{inputImageSize}{Used for specifying the input tensor shape. The
17 | shape (or dimension) of that tensor is the image dimensions followed by
18 | the number of channels (e.g., red, green, and blue). The batch size
19 | (i.e., number of training images) is not specified a priori.}
20 |
21 | \item{convolutionKernelSizes}{a 2-element list of 3-D vectors specifying the
22 | kernel size at each convolution layer. The first element is the kernel size
23 | of the encoding layers and the 2nd element is the kernel size of the final
24 | convolution layer.}
25 |
26 | \item{numberOfEncodingLayers}{the number of encoding layers.}
27 |
28 | \item{numberOfFilters}{the number of filters for each encoding layer.}
29 | }
30 | \value{
31 | a keras model for image super resolution
32 | }
33 | \description{
34 | 3-D implementation of the denoising autoencoder image super resolution architecture.
35 | }
36 | \examples{
37 | createDenoisingAutoEncoderSuperResolutionModel3D(c( 28, 28, 28, 1 ))
38 | gc()
39 | }
40 | \author{
41 | Tustison NJ
42 | }
43 |
--------------------------------------------------------------------------------
/man/reconstructImageFromPatches.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/reconstructImageFromPatches.R
3 | \name{reconstructImageFromPatches}
4 | \alias{reconstructImageFromPatches}
5 | \title{Reconstruct image from a list of patches.}
6 | \usage{
7 | reconstructImageFromPatches(
8 | patches,
9 | domainImage,
10 | strideLength = 1,
11 | domainImageIsMask = FALSE
12 | )
13 | }
14 | \arguments{
15 | \item{patches}{List or array of patches defining an image. Patches are assumed
16 | to have the same format as returned by \code{extractImagePatches}.}
17 |
18 | \item{domainImage}{Image or mask to define the geometric information of the
19 | reconstructed image. If this is a mask image, the reconstruction will only
20 | use patches in the mask.}
21 |
22 | \item{strideLength}{Defines the sequential patch overlap for
23 | \code{maxNumberOfPatches = all}. Can be an image-dimensional vector or a scalar.}
24 |
25 | \item{domainImageIsMask}{boolean specifying whether the domain image is a
26 | mask used to limit the region of reconstruction from the patches.}
27 | }
28 | \value{
29 | an ANTs image.
30 | }
31 | \description{
32 | Reconstruct image from a list of patches.
33 | }
34 | \examples{
35 |
36 | library( ANTsR )
37 |
38 | image <- antsImageRead( getANTsRData( "r16" ) )
39 | patchSet <- extractImagePatches( image, c( 64, 64 ), "all", c( 8, 8 ) )
40 | imageReconstructed <-
41 | reconstructImageFromPatches( patchSet, image, c( 8, 8 ) )
42 | testthat::expect_equal(as.array(image), as.array(imageReconstructed))
43 | rm(image); gc()
44 | rm(patchSet); gc()
45 | rm(imageReconstructed); gc()
46 | }
47 | \author{
48 | Tustison NJ
49 | }
50 |
--------------------------------------------------------------------------------
/man/createResNetSuperResolutionModel2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createResNetSuperResolutionModel.R
3 | \name{createResNetSuperResolutionModel2D}
4 | \alias{createResNetSuperResolutionModel2D}
5 | \title{2-D implementation of the ResNet image super resolution architecture.}
6 | \usage{
7 | createResNetSuperResolutionModel2D(
8 | inputImageSize,
9 | convolutionKernelSize = c(3, 3),
10 | numberOfFilters = 64,
11 | numberOfResidualBlocks = 5,
12 | numberOfResNetBlocks = 1
13 | )
14 | }
15 | \arguments{
16 | \item{inputImageSize}{Used for specifying the input tensor shape. The
17 | shape (or dimension) of that tensor is the image dimensions followed by
18 | the number of channels (e.g., red, green, and blue). The batch size
19 | (i.e., number of training images) is not specified a priori.}
20 |
21 | \item{convolutionKernelSize}{a vector specifying the kernel size for
22 | convolution.}
23 |
24 | \item{numberOfFilters}{the number of filters for each encoding layer.}
25 |
26 | \item{numberOfResidualBlocks}{the number of residual blocks.}
27 |
28 | \item{numberOfResNetBlocks}{the number of resNet blocks. Each block
29 | will double the upsampling amount.}
30 | }
31 | \value{
32 | a keras model for ResNet image super resolution
33 | }
34 | \description{
35 | Creates a keras model of the ResNet image super resolution deep learning
36 | framework based on the following python implementation:
37 | }
38 | \details{
39 | \preformatted{ \url{https://github.com/titu1994/Image-Super-Resolution}
40 | }
41 | }
42 | \examples{
43 | createResNetSuperResolutionModel2D(c(256, 256, 3))
44 | }
45 | \author{
46 | Tustison NJ
47 | }
48 |
--------------------------------------------------------------------------------
/tests/testthat/test-alexNetModel.R:
--------------------------------------------------------------------------------
1 | testthat::context("AlexModels-2D")
2 |
3 | testthat::test_that("Creating 2D Models", {
4 | if (keras::is_keras_available()) {
5 | model <- createAlexNetModel2D( inputImageSize = c(20L, 20L, 1L),
6 | numberOfClassificationLabels = 2,
7 | batch_size = 1)
8 | cat("First Model is done")
9 | testthat::expect_is(model, "keras.engine.training.Model" )
10 | testthat::expect_equal(model$count_params(), 123023618L)
11 | testthat::expect_equal(length(model$weights), 16L)
12 | rm(model); gc(); gc()
13 | Sys.sleep(2); gc(); gc()
14 |
15 |
16 | model <- createAlexNetModel2D( inputImageSize = c(20L, 20L, 1L),
17 | numberOfClassificationLabels = 3,
18 | batch_size = 1)
19 | cat("Second Model is done")
20 | testthat::expect_is(model, "keras.engine.training.Model" )
21 | testthat::expect_equal(model$count_params(), 123027715L)
22 | testthat::expect_equal(length(model$weights), 16L)
23 | rm(model); gc(); gc()
24 | Sys.sleep(2); gc(); gc()
25 |
26 |
27 | model <- createAlexNetModel2D( inputImageSize = c(20L, 20L, 1L),
28 | numberOfClassificationLabels = 10,
29 | mode = "regression",
30 | batch_size = 1)
31 | testthat::expect_is(model, "keras.engine.training.Model" )
32 | testthat::expect_equal(model$count_params(), 123056394L)
33 | testthat::expect_equal(length(model$weights), 16L)
34 | rm(model); gc(); gc()
35 | Sys.sleep(2); gc(); gc()
36 |
37 |
38 | }
39 | })
40 |
--------------------------------------------------------------------------------
/man/DeepConvolutionalGanModel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createDeepConvolutionalGanModel.R
3 | \docType{class}
4 | \name{DeepConvolutionalGanModel}
5 | \alias{DeepConvolutionalGanModel}
6 | \title{Deep convolutional GAN (DCGAN) model}
7 | \description{
8 | Deep convolutional generative adversarial network from the paper:
9 | }
10 | \details{
11 | https://arxiv.org/abs/1511.06434
12 |
13 | and ported from the Keras (python) implementation:
14 |
15 | https://github.com/eriklindernoren/Keras-GAN/blob/master/dcgan/dcgan.py
16 | }
17 | \section{Arguments}{
18 |
19 | \describe{
20 | \item{inputImageSize}{size of the input image (image dimensions followed by the number of channels).}
21 | \item{latentDimension}{dimension of the latent noise vector supplied to the generator.}
22 | }
23 | }
24 |
25 | \section{Details}{
26 |
27 | \code{$initialize}{instantiates a new class and builds the
28 | generator and discriminator.}
29 | \code{$buildGenerator}{build generator.}
30 | \code{$buildDiscriminator}{build discriminator.}
31 | }
32 |
33 | \examples{
34 |
35 | library( keras )
36 | library( ANTsRNet )
37 |
38 | keras::backend()$clear_session()
39 |
40 | # Let's use the mnist data set.
41 |
42 | mnist <- dataset_mnist()
43 |
44 | numberOfTrainingData <- length( mnist$train$y )
45 |
46 | inputImageSize <- c( dim( mnist$train$x[1,,] ), 1 )
47 |
48 | x <- array( data = mnist$train$x / 255, dim = c( numberOfTrainingData, inputImageSize ) )
49 | y <- mnist$train$y
50 |
51 | numberOfClusters <- length( unique( mnist$train$y ) )
52 |
53 | # Instantiate the DCGAN model
54 |
55 | ganModel <- DeepConvolutionalGanModel$new(
56 | inputImageSize = inputImageSize,
57 | latentDimension = 100 )
58 | \donttest{
59 | ganModel$train( x, numberOfEpochs = 2 )
60 | }
61 |
62 | }
63 | \author{
64 | Tustison NJ
65 | }
66 |
--------------------------------------------------------------------------------
/man/createResNetSuperResolutionModel3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createResNetSuperResolutionModel.R
3 | \name{createResNetSuperResolutionModel3D}
4 | \alias{createResNetSuperResolutionModel3D}
5 | \title{3-D implementation of the ResNet image super resolution architecture.}
6 | \usage{
7 | createResNetSuperResolutionModel3D(
8 | inputImageSize,
9 | convolutionKernelSize = c(3, 3, 3),
10 | numberOfFilters = 64,
11 | numberOfResidualBlocks = 5,
12 | numberOfResNetBlocks = 1
13 | )
14 | }
15 | \arguments{
16 | \item{inputImageSize}{Used for specifying the input tensor shape. The
17 | shape (or dimension) of that tensor is the image dimensions followed by
18 | the number of channels (e.g., red, green, and blue). The batch size
19 | (i.e., number of training images) is not specified a priori.}
20 |
21 | \item{convolutionKernelSize}{a vector specifying the kernel size for
22 | convolution.}
23 |
24 | \item{numberOfFilters}{the number of filters for each encoding layer.}
25 |
26 | \item{numberOfResidualBlocks}{the number of residual blocks.}
27 |
28 | \item{numberOfResNetBlocks}{the number of resNet blocks. Each block
29 | will double the upsampling amount.}
30 | }
31 | \value{
32 | a keras model for ResNet image super resolution
33 | }
34 | \description{
35 | Creates a keras model of the ResNet image super resolution deep learning
36 | framework based on the following python implementation:
37 | }
38 | \details{
39 | \preformatted{ \url{https://github.com/titu1994/Image-Super-Resolution}
40 | }
41 | }
42 | \examples{
43 | inputImageSize = c(256, 256, 30, 1)
44 | createResNetSuperResolutionModel3D(inputImageSize)
45 | }
46 | \author{
47 | Tustison NJ
48 | }
49 |
--------------------------------------------------------------------------------
/R/mriSuperResolution.R:
--------------------------------------------------------------------------------
1 | #' Super-resolution for MRI
2 | #'
3 | #' Perform super-resolution (2x) of MRI data using deep back projection network.
4 | #'
5 | #' @param image magnetic resonance image
6 | #' @param antsxnetCacheDirectory destination directory for storing the downloaded
7 | #' template and model weights. Since these can be reused, if
8 | #' \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
9 | #' inst/extdata/ subfolder of the ANTsRNet package.
10 | #' @param verbose print progress.
11 | #' @return super-resolution image.
12 | #' @author Avants BB
13 | #' @examples
14 | #' \dontrun{
15 | #' library( ANTsRNet )
16 | #'
17 | #' image <- antsImageRead( "t1.nii.gz" )
18 | #' imageSr <- mriSuperResolution( image )
19 | #' }
20 | #' @export
21 | mriSuperResolution <- function( image, antsxnetCacheDirectory = NULL, verbose = FALSE )
22 | {
23 | if( image@dimension != 3 )
24 | {
25 | stop( "Input image dimension must be 3." )
26 | }
27 |
28 | if( is.null( antsxnetCacheDirectory ) )
29 | {
30 | antsxnetCacheDirectory <- "ANTsXNet"
31 | }
32 |
33 | modelAndWeightsFileName <- "mindmapsSR_16_ANINN222_0.h5"
34 | if( verbose == TRUE )
35 | {
36 | cat( "MRI super-resolution: retrieving model weights.\n" )
37 | }
38 | modelAndWeightsFileName <- getPretrainedNetwork( "mriSuperResolution", modelAndWeightsFileName, antsxnetCacheDirectory = antsxnetCacheDirectory )
39 | modelSR <- load_model_hdf5( modelAndWeightsFileName )
40 |
41 | imageSR <- applySuperResolutionModelToImage( image, modelSR, targetRange = c( -127.5, 127.5 ) )
42 | imageSR = regressionMatchImage( imageSR, resampleImageToTarget( image, imageSR ), polyOrder = 1 )
43 |
44 | return( imageSR )
45 | }
46 |
47 |
--------------------------------------------------------------------------------
/man/createExpandedSuperResolutionModel2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createExpandedSuperResolutionModel.R
3 | \name{createExpandedSuperResolutionModel2D}
4 | \alias{createExpandedSuperResolutionModel2D}
5 | \title{2-D implementation of the expanded image super resolution architecture.}
6 | \usage{
7 | createExpandedSuperResolutionModel2D(
8 | inputImageSize,
9 | convolutionKernelSizes = list(c(9, 9), c(1, 1), c(3, 3), c(5, 5), c(5, 5)),
10 | numberOfFilters = c(64, 32, 32, 32)
11 | )
12 | }
13 | \arguments{
14 | \item{inputImageSize}{Used for specifying the input tensor shape. The
15 | shape (or dimension) of that tensor is the image dimensions followed by
16 | the number of channels (e.g., red, green, and blue). The batch size
17 | (i.e., number of training images) is not specified a priori.}
18 |
19 | \item{convolutionKernelSizes}{a list of 2-D vectors specifying the kernel
20 | size at each convolution layer. Default values are the same as given in
21 | the original paper. The length of kernel size vectors must be 1 greater
22 | than the vector length of the number of filters.}
23 |
24 | \item{numberOfFilters}{a vector containing the number of filters for each
25 | convolutional layer. Default values are the same as given in the original
26 | paper.}
27 | }
28 | \value{
29 | a keras model for image super resolution
30 | }
31 | \description{
32 | Creates a keras model of the expanded image super resolution deep learning
33 | framework based on the following python implementation:
34 | }
35 | \details{
36 | \preformatted{ \url{https://github.com/titu1994/Image-Super-Resolution}
37 | }
38 | }
39 | \examples{
40 | createExpandedSuperResolutionModel2D(c( 100, 100, 1 ))
41 | }
42 | \author{
43 | Tustison NJ
44 | }
45 |
--------------------------------------------------------------------------------
/man/histogramWarpImageIntensities.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/histogramWarpImageIntensities.R
3 | \name{histogramWarpImageIntensities}
4 | \alias{histogramWarpImageIntensities}
5 | \title{Transform image intensities based on histogram mapping.}
6 | \usage{
7 | histogramWarpImageIntensities(
8 | image,
9 | breakPoints = c(0.25, 0.5, 0.75),
10 | displacements = NULL,
11 | clampEndPoints = c(FALSE, FALSE),
12 | sdDisplacements = 0.05,
13 | transformDomainSize = 20
14 | )
15 | }
16 | \arguments{
17 | \item{image}{input image.}
18 |
19 | \item{breakPoints}{parametric points at which the intensity transform
20 | displacements are specified between [0, 1]. Alternatively, a single
21 | number can be given and the sequence is linearly spaced in [0, 1].}
22 |
23 | \item{displacements}{displacements to define intensity warping. Length
24 | must equal the length of \code{breakPoints}. Alternatively, if \code{NULL},
25 | random displacements are chosen (random normal: mean = 0, sd = \code{sdDisplacements}).}
26 |
27 | \item{clampEndPoints}{specify non-zero intensity change at the ends of the histogram.}
28 |
29 | \item{sdDisplacements}{characterize the randomness of the intensity displacement.}
30 |
31 | \item{transformDomainSize}{Defines the sampling resolution of the B-spline warping.}
32 | }
33 | \value{
34 | warped intensity image
35 | }
36 | \description{
37 | Apply B-spline 1-D maps to an input image for intensity warping.
38 | }
39 | \examples{
40 |
41 | library( ANTsR )
42 | image <- antsImageRead( getANTsRData( "r16" ) )
43 | transformedImage <- histogramWarpImageIntensities( image, transformDomainSize = 10 )
44 | rm(image); gc()
45 | rm(transformedImage); gc()
46 | }
47 | \author{
48 | Tustison NJ
49 | }
50 |
--------------------------------------------------------------------------------
/R/deepBackProjectionUtilities.R:
--------------------------------------------------------------------------------
1 | #' Apply a pretrained deep back projection model for super resolution.
2 | #'
3 | #' Helper function for applying a pretrained deep back projection model.
4 | #' Apply a patch-wise trained network to perform super-resolution. Can be applied
5 | #' to variable sized inputs. Warning: This function may be better used on CPU
6 | #' unless the GPU can accommodate the full image size. Warning 2: The global
7 | #' intensity range (min to max) of the output will match the input where the
8 | #' range is taken over all channels.
9 | #'
10 | #' @param image input image.
11 | #' @param model pretrained model or filename (cf \code{getPretrainedNetwork}).
12 | #' @param targetRange a vector defining the \code{c(min, max)} of each input
13 | #' image (e.g., -127.5, 127.5). Output images will be scaled
14 | #' back to original intensity. This range should match the
15 | #' mapping used in the training of the network.
16 | #' @param batchSize batch size used for the prediction call.
17 | #' @param regressionOrder if specified, then apply the function
18 | #' \code{regressionMatchImage} with
19 | #' \code{polyOrder = regressionOrder}.
20 | #' @param verbose If \code{TRUE}, show status messages.
21 | #' @return super-resolution image upscaled to resolution specified by the network.
22 | #' @author Avants BB
23 | #' @examples
24 | #' \dontrun{
25 | #' image <- applyDeepBackProjectionModel( ri( 1 ), getPretrainedNetwork( "dbpn4x" ) )
26 | #' }
27 | # @export applyDeepBackProjectionModel
28 | applyDeepBackProjectionModel <- function( image, model,
29 | targetRange = c( -127.5, 127.5 ), batchSize = 32, regressionOrder = NA,
30 | verbose = FALSE )
31 | {
32 |   # Minimal sketch: delegate to the package's generic patch-wise
33 |   # super-resolution helper, which shares this signature
34 |   # (see applySuperResolutionModelToImage).
35 |   return( applySuperResolutionModelToImage( image, model,
36 |     targetRange = targetRange, batchSize = batchSize,
37 |     regressionOrder = regressionOrder, verbose = verbose ) )
38 | }
39 |
--------------------------------------------------------------------------------
/man/uvaSegTrain.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/uvaSeg.R
3 | \name{uvaSegTrain}
4 | \alias{uvaSegTrain}
5 | \title{Unsupervised variational autoencoder training}
6 | \usage{
7 | uvaSegTrain(patches, k, convControl, standardize = TRUE, patches2)
8 | }
9 | \arguments{
10 | \item{patches}{input patch matrix, see \code{getNeighborhoodInMask}}
11 |
12 | \item{k}{number of embedding layers}
13 |
14 | \item{convControl}{optional named list with control parameters ( see code )
15 | \itemize{
16 | \item{hiddenAct}{ activation function for hidden layers eg relu}
17 | \item{img_chns}{ eg 1 number of channels}
18 | \item{filters}{ eg 32L}
19 | \item{conv_kern_sz}{ eg 1L}
20 | \item{front_kernel_size}{ eg 2L}
21 | \item{intermediate_dim}{ eg 32L}
22 | \item{epochs}{ eg 50}
23 | \item{batch_size}{ eg 32}
24 | \item{squashAct}{ activation function for squash layers eg sigmoid}
25 | \item{tensorboardLogDirectory}{ tensorboard logs stored here }
26 | }}
27 |
28 | \item{standardize}{boolean controlling whether patches are standardized}
29 |
30 | \item{patches2}{input target patch matrix, see \code{getNeighborhoodInMask},
31 | may be useful for super-resolution}
32 | }
33 | \value{
34 | model is output
35 | }
36 | \description{
37 | Trains a variational autoencoder with a convolutional network. The resulting
38 | model can be passed to \code{uvaSeg}, where k-means clustering produces a segmentation and probabilities.
39 | }
40 | \examples{
41 |
42 | \dontrun{
43 |
44 | library(ANTsR)
45 | img <- ri( 1 ) \%>\% resampleImage( c(4,4) ) \%>\% iMath( "Normalize" )
46 | mask = randomMask( getMask( img ), 50 )
47 | r = c( 3, 3 )
48 | patch = getNeighborhoodInMask( img, mask, r, boundary.condition = "NA" )
49 | uvaSegModel = uvaSegTrain( patch, 6 )
50 | }
51 |
52 | }
53 | \author{
54 | Avants BB
55 | }
56 |
--------------------------------------------------------------------------------
/man/createDeepDenoiseSuperResolutionModel2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createDeepDenoiseSuperResolutionModel.R
3 | \name{createDeepDenoiseSuperResolutionModel2D}
4 | \alias{createDeepDenoiseSuperResolutionModel2D}
5 | \title{2-D implementation of the deep denoise image super resolution architecture.}
6 | \usage{
7 | createDeepDenoiseSuperResolutionModel2D(
8 | inputImageSize,
9 | layers = 2,
10 | lowestResolution = 64,
11 | convolutionKernelSize = c(3, 3),
12 | poolSize = c(2, 2),
13 | strides = c(2, 2)
14 | )
15 | }
16 | \arguments{
17 | \item{inputImageSize}{Used for specifying the input tensor shape. The
18 | shape (or dimension) of that tensor is the image dimensions followed by
19 | the number of channels (e.g., red, green, and blue). The batch size
20 | (i.e., number of training images) is not specified a priori.}
21 |
22 | \item{layers}{number of architecture layers.}
23 |
24 | \item{lowestResolution}{number of filters at the beginning and end of
25 | the architecture.}
26 |
27 | \item{convolutionKernelSize}{2-D vector defining the kernel size
28 | during the encoding path}
29 |
30 | \item{poolSize}{2-D vector defining the region for each pooling layer.}
31 |
32 | \item{strides}{2-D vector describing the stride length in each direction.}
33 | }
34 | \value{
35 | a keras model for image super resolution
36 | }
37 | \description{
38 | Creates a keras model of the deep denoise image super resolution deep learning
39 | framework based on the following python implementation:
40 | }
41 | \details{
42 | \preformatted{ \url{https://github.com/titu1994/Image-Super-Resolution}
43 | }
44 | }
45 | \examples{
46 | \dontrun{
47 | createDeepDenoiseSuperResolutionModel2D(c(256L, 256L, 3L))
48 | }
49 | }
50 | \author{
51 | Tustison NJ
52 | }
53 |
--------------------------------------------------------------------------------
/man/createExpandedSuperResolutionModel3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createExpandedSuperResolutionModel.R
3 | \name{createExpandedSuperResolutionModel3D}
4 | \alias{createExpandedSuperResolutionModel3D}
5 | \title{3-D implementation of the expanded image super resolution architecture.}
6 | \usage{
7 | createExpandedSuperResolutionModel3D(
8 | inputImageSize,
9 | convolutionKernelSizes = list(c(9, 9, 9), c(1, 1, 1), c(3, 3, 3), c(5, 5, 5), c(5, 5,
10 | 5)),
11 | numberOfFilters = c(64, 32, 32, 32)
12 | )
13 | }
14 | \arguments{
15 | \item{inputImageSize}{Used for specifying the input tensor shape. The
16 | shape (or dimension) of that tensor is the image dimensions followed by
17 | the number of channels (e.g., red, green, and blue). The batch size
18 | (i.e., number of training images) is not specified a priori.}
19 |
20 | \item{convolutionKernelSizes}{a list of 3-D vectors specifying the kernel
21 | size at each convolution layer. Default values are the same as given in
22 | the original paper. The length of kernel size vectors must be 1 greater
23 | than the vector length of the number of filters.}
24 |
25 | \item{numberOfFilters}{a vector containing the number of filters for each
26 | convolutional layer. Default values are the same as given in the original
27 | paper.}
28 | }
29 | \value{
30 | a keras model for image super resolution
31 | }
32 | \description{
33 | Creates a keras model of the expanded image super resolution deep learning
34 | framework based on the following python implementation:
35 | }
36 | \details{
37 | \preformatted{ \url{https://github.com/titu1994/Image-Super-Resolution}
38 | }
39 | }
40 | \examples{
41 | createExpandedSuperResolutionModel3D(c( 100, 100, 100, 1 ))
42 | }
43 | \author{
44 | Tustison NJ
45 | }
46 |
--------------------------------------------------------------------------------
/man/createDeepDenoiseSuperResolutionModel3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createDeepDenoiseSuperResolutionModel.R
3 | \name{createDeepDenoiseSuperResolutionModel3D}
4 | \alias{createDeepDenoiseSuperResolutionModel3D}
5 | \title{3-D implementation of the deep denoise image super resolution architecture.}
6 | \usage{
7 | createDeepDenoiseSuperResolutionModel3D(
8 | inputImageSize,
9 | layers = 2,
10 | lowestResolution = 64,
11 | convolutionKernelSize = c(3, 3, 3),
12 | poolSize = c(2, 2, 2),
13 | strides = c(2, 2, 2)
14 | )
15 | }
16 | \arguments{
17 | \item{inputImageSize}{Used for specifying the input tensor shape. The
18 | shape (or dimension) of that tensor is the image dimensions followed by
19 | the number of channels (e.g., red, green, and blue). The batch size
20 | (i.e., number of training images) is not specified a priori.}
21 |
22 | \item{layers}{number of architecture layers.}
23 |
24 | \item{lowestResolution}{number of filters at the beginning and end of
25 | the architecture.}
26 |
27 | \item{convolutionKernelSize}{3-D vector defining the kernel size
28 | during the encoding path}
29 |
30 | \item{poolSize}{3-D vector defining the region for each pooling layer.}
31 |
32 | \item{strides}{3-D vector describing the stride length in each direction.}
33 | }
34 | \value{
35 | a keras model for image super resolution
36 | }
37 | \description{
38 | Creates a keras model of the deep denoise image super resolution deep learning
39 | framework based on the following python implementation:
40 | }
41 | \details{
42 | \preformatted{ \url{https://github.com/titu1994/Image-Super-Resolution}
43 | }
44 | }
45 | \examples{
46 | \dontrun{
47 | createDeepDenoiseSuperResolutionModel3D(c(256L, 256L, 45L, 1L))
48 | }
49 | }
50 | \author{
51 | Tustison NJ
52 | }
53 |
--------------------------------------------------------------------------------
/R/customActivationLayers.R:
--------------------------------------------------------------------------------
1 | #' Creates a log softmax layer
2 | #'
3 | #' Creates a log softmax layer taken from
4 | #'
5 | #' \url{https://github.com/tensorflow/tensorflow/pull/25514/files}
6 | #'
7 | #' @docType class
8 | #'
9 | #' @section Arguments:
10 | #' \describe{
11 | #' \item{axis}{Integer specifying the axis.}
12 | #' }
13 | #'
14 | #' @section Details:
15 | #' \code{$initialize} instantiates a new class.
16 | #' \code{$call} main body.
17 | #' \code{$compute_output_shape} computes the output shape.
18 | #'
19 | #' @author Tustison NJ
20 | #'
21 | #' @return a log softmax layer
22 | #'
23 | #' @name LogSoftmaxLayer
24 | NULL
25 |
26 | #' @export
27 | LogSoftmaxLayer <- R6::R6Class( "LogSoftmaxLayer",
28 |
29 | inherit = KerasLayer,
30 |
31 | lock_objects = FALSE,
32 |
33 | public = list(
34 |
35 | axis = -1L,
36 |
37 | initialize = function( axis = -1L )
38 | {
39 | self$axis = axis
40 | },
41 |
42 | call = function( inputs, mask = NULL )
43 | {
44 | clippedInputs <- tensorflow::tf$keras$backend$clip( inputs, 1.0e-7, 1.0 )
45 | return( tensorflow::tf$nn$log_softmax( clippedInputs, axis = self$axis ) )
46 | },
47 |
48 | compute_output_shape = function( input_shape )
49 | {
50 | return( input_shape )
51 | }
52 | )
53 | )
54 |
55 | #' Log softmax layer
56 | #'
57 | #' Creates a log softmax layer
58 | #'
59 | #' @param object Object to compose layer with, e.g., a keras model or layer to which this layer is added.
60 | #' @param axis Integer specifying which axis.
61 | #' @param trainable Whether the layer weights will be updated during training.
62 | #' @return a keras layer tensor
63 | #' @author Tustison NJ
64 | #' @import keras
65 | #' @export
66 | layer_activation_log_softmax <- function( object, axis = -1, trainable = TRUE ) {
67 |   create_layer( LogSoftmaxLayer, object,
68 |     list( axis = axis, trainable = trainable ) )
69 | }
70 |
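71 | # Usage sketch (a minimal, illustrative example assuming a working keras /
72 | # tensorflow installation; layer sizes are arbitrary):
73 | #   library( keras )
74 | #   input <- layer_input( shape = c( 10 ) )
75 | #   output <- input %>% layer_dense( units = 5 ) %>% layer_activation_log_softmax()
76 | #   model <- keras_model( inputs = input, outputs = output )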
--------------------------------------------------------------------------------
/man/ImprovedWassersteinGanModel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createImprovedWassersteinGanModel.R
3 | \docType{class}
4 | \name{ImprovedWassersteinGanModel}
5 | \alias{ImprovedWassersteinGanModel}
6 | \title{Improved Wasserstein GAN model}
7 | \description{
8 | Improved Wasserstein generative adversarial network (with
9 | gradient penalty) from the paper:
10 | }
11 | \details{
12 | https://arxiv.org/abs/1704.00028
13 |
14 | and ported from the Keras (python) implementation:
15 |
16 | https://github.com/eriklindernoren/Keras-GAN/blob/master/wgan_gp/wgan_gp.py
17 | }
18 | \section{Arguments}{
19 |
20 | \describe{
21 | \item{inputImageSize}{size of the input image (image dimensions followed by the number of channels).}
22 | \item{latentDimension}{dimension of the latent noise vector supplied to the generator.}
23 | }
24 | }
25 |
26 | \section{Details}{
27 |
28 | \code{$initialize}{instantiates a new class and builds the
29 | generator and critic.}
30 | \code{$buildGenerator}{build generator.}
31 | \code{$buildCritic}{build critic.}
32 | }
33 |
34 | \examples{
35 |
36 | library( keras )
37 | library( ANTsRNet )
38 |
39 | keras::backend()$clear_session()
40 |
41 | # Let's use the mnist data set.
42 |
43 | mnist <- dataset_mnist()
44 |
45 | numberOfTrainingData <- length( mnist$train$y )
46 |
47 | inputImageSize <- c( dim( mnist$train$x[1,,] ), 1 )
48 |
49 | x <- array( data = mnist$train$x / 255, dim = c( numberOfTrainingData, inputImageSize ) )
50 | y <- mnist$train$y
51 |
52 | numberOfClusters <- length( unique( mnist$train$y ) )
53 |
54 | # Instantiate the WGAN model
55 |
56 | ganModel <- ImprovedWassersteinGanModel$new(
57 | inputImageSize = inputImageSize,
58 | latentDimension = 100 )
59 |
60 | \dontrun{
61 | ganModel$train( x, numberOfEpochs = 2 )
62 | }
63 | tryCatch({tensorflow::tf$compat$v1$enable_eager_execution()},
64 |   error = function(e) {})
65 | }
66 | \author{
67 | Tustison NJ
68 | }
69 |
--------------------------------------------------------------------------------
/man/layer_efficient_attention_2d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/attentionUtilities.R
3 | \name{layer_efficient_attention_2d}
4 | \alias{layer_efficient_attention_2d}
5 | \title{Efficient attention layer (2-D)}
6 | \usage{
7 | layer_efficient_attention_2d(
8 | object,
9 | numberOfFiltersFG = 4L,
10 | numberOfFiltersH = 8L,
11 | kernelSize = 1L,
12 | poolSize = 2L,
13 | doConcatenateFinalLayers = FALSE,
14 | trainable = TRUE
15 | )
16 | }
17 | \arguments{
18 | \item{object}{Object to compose layer with. This is either a
19 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to
20 | or another Layer which this layer will call.}
21 |
22 | \item{numberOfFiltersFG}{number of filters for F and G layers.}
23 |
24 | \item{numberOfFiltersH}{number of filters for H. If \code{= NA}, only
25 | use filter \code{F} for efficiency.}
26 |
27 | \item{kernelSize}{kernel size in convolution layer.}
28 |
29 | \item{poolSize}{pool size in max pool layer.}
30 |
31 | \item{doConcatenateFinalLayers}{concatenate final layer with input.
32 | Alternatively, add. Default = FALSE}
33 | }
34 | \value{
35 | a keras layer tensor
36 | }
37 | \description{
38 | Wraps the EfficientAttentionLayer2D modified from the following python implementation
39 | }
40 | \details{
41 | \url{https://github.com/taki0112/Self-Attention-GAN-Tensorflow}
42 |
43 | based on the following paper:
44 |
45 | \url{https://arxiv.org/abs/1805.08318}
46 | }
47 | \examples{
48 |
49 | \dontrun{
50 | library( keras )
51 | library( ANTsRNet )
52 |
53 | inputShape <- c( 100, 100, 3 )
54 | input <- layer_input( shape = inputShape )
55 |
56 | numberOfFiltersFG <- 64L
57 | outputs <- input \%>\% layer_efficient_attention_2d( numberOfFiltersFG )
58 |
59 | model <- keras_model( inputs = input, outputs = outputs )
60 | }
61 |
62 | }
63 |
--------------------------------------------------------------------------------
/man/layer_efficient_attention_3d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/attentionUtilities.R
3 | \name{layer_efficient_attention_3d}
4 | \alias{layer_efficient_attention_3d}
5 | \title{Efficient attention layer (3-D)}
6 | \usage{
7 | layer_efficient_attention_3d(
8 | object,
9 | numberOfFiltersFG = 4L,
10 | numberOfFiltersH = 8L,
11 | kernelSize = 1L,
12 | poolSize = 2L,
13 | doConcatenateFinalLayers = FALSE,
14 | trainable = TRUE
15 | )
16 | }
17 | \arguments{
18 | \item{object}{Object to compose layer with. This is either a
19 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to
20 | or another Layer which this layer will call.}
21 |
22 | \item{numberOfFiltersFG}{number of filters for F and G layers.}
23 |
24 | \item{numberOfFiltersH}{number of filters for H. If \code{= NA}, only
25 | use filter \code{F} for efficiency.}
26 |
27 | \item{kernelSize}{kernel size in convolution layer.}
28 |
29 | \item{poolSize}{pool size in max pool layer.}
30 |
31 | \item{doConcatenateFinalLayers}{concatenate final layer with input.
32 | Alternatively, add. Default = FALSE}
33 | }
34 | \value{
35 | a keras layer tensor
36 | }
37 | \description{
38 | Wraps the EfficientAttentionLayer3D modified from the following python implementation
39 | }
40 | \details{
41 | \url{https://github.com/taki0112/Self-Attention-GAN-Tensorflow}
42 |
43 | based on the following paper:
44 |
45 | \url{https://arxiv.org/abs/1805.08318}
46 | }
47 | \examples{
48 |
49 | \dontrun{
50 | library( keras )
51 | library( ANTsRNet )
52 |
53 | inputShape <- c( 100, 100, 100, 3 )
54 | input <- layer_input( shape = inputShape )
55 |
56 | numberOfFiltersFG <- 64L
57 | outputs <- input \%>\% layer_efficient_attention_3d( numberOfFiltersFG )
58 |
59 | model <- keras_model( inputs = input, outputs = outputs )
60 | }
61 |
62 | }
63 |
--------------------------------------------------------------------------------
/man/LossSSD.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customMetrics.R
3 | \docType{class}
4 | \name{LossSSD}
5 | \alias{LossSSD}
6 | \title{Loss function for the SSD deep learning architecture.}
7 | \value{
8 | an SSD loss function
9 | }
10 | \description{
11 | Creates an R6 class object for use with the SSD deep learning architecture
12 | based on the paper
13 | }
14 | \details{
15 | W. Liu, D. Anguelov, D. Erhan, C. Szegedy, S. Reed, C-Y. Fu, A. Berg.
16 | SSD: Single Shot MultiBox Detector.
17 |
18 | available here:\preformatted{ \url{https://arxiv.org/abs/1512.02325}
19 | }
20 | }
21 | \section{Usage}{
22 |
23 | \preformatted{ssdLoss <- LossSSD$new( dimension = 2L, backgroundRatio = 3L,
24 | minNumberOfBackgroundBoxes = 0L, alpha = 1.0,
25 | numberOfClassificationLabels )
26 |
27 | ssdLoss$smooth_l1_loss( y_true, y_pred )
28 | ssdLoss$log_loss( y_true, y_pred )
29 | ssdLoss$compute_loss( y_true, y_pred )
30 | }
31 | }
32 |
33 | \section{Arguments}{
34 |
35 | \describe{
36 | \item{ssdLoss}{A \code{process} object.}
37 | \item{dimension}{image dimensionality.}
38 | \item{backgroundRatio}{The maximum ratio of background to foreground
39 | for weighting in the loss function. Is rounded to the nearest integer.
40 | Default is 3.}
41 | \item{minNumberOfBackgroundBoxes}{The minimum number of background boxes
42 | to use in loss computation \emph{per batch}. Should reflect a value in
43 | proportion to the batch size. Default is 0.}
44 | \item{alpha}{Weighting factor for the localization loss in total loss
45 | computation.}
46 | \item{numberOfClassificationLabels}{number of classes including background.}
47 | }
48 | }
49 |
50 | \section{Details}{
51 |
52 | \code{$smooth_l1_loss} smooth loss
53 |
54 | \code{$log_loss} log loss
55 |
56 | \code{$compute_loss} computes total loss.
57 | }
58 |
59 | \author{
60 | Tustison NJ
61 | }
62 |
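63 | \examples{
64 | \dontrun{
65 | # Minimal sketch assuming a working keras/tensorflow installation; the
66 | # constructor call follows the Usage section above and the value of
67 | # numberOfClassificationLabels is only illustrative.
68 | ssdLoss <- LossSSD$new( dimension = 2L, backgroundRatio = 3L,
69 |   minNumberOfBackgroundBoxes = 0L, alpha = 1.0,
70 |   numberOfClassificationLabels = 21L )
71 | # Pass the bound method as the loss when compiling an SSD model, e.g.,
72 | # model \%>\% compile( loss = ssdLoss$compute_loss, optimizer = optimizer_adam() )
73 | }
74 | }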
--------------------------------------------------------------------------------
/man/applySuperResolutionModelToImage.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/superResolutionUtilities.R
3 | \name{applySuperResolutionModelToImage}
4 | \alias{applySuperResolutionModelToImage}
5 | \title{Apply a pretrained model for super resolution.}
6 | \usage{
7 | applySuperResolutionModelToImage(
8 | image,
9 | model,
10 | targetRange = c(-127.5, 127.5),
11 | batchSize = 32,
12 | regressionOrder = NA,
13 | verbose = FALSE
14 | )
15 | }
16 | \arguments{
17 | \item{image}{input image.}
18 |
19 | \item{model}{pretrained model or filename (cf \code{getPretrainedNetwork}).}
20 |
21 | \item{targetRange}{a vector defining the \code{c(min, max)} of each input
22 | image (e.g., -127.5, 127.5). Output images will be scaled
23 | back to original intensity. This range should match the
24 | mapping used in the training of the network.}
25 |
26 | \item{batchSize}{batch size used for the prediction call.}
27 |
28 | \item{regressionOrder}{if specified, then apply the function
29 | \code{regressionMatchImage} with
30 | \code{polyOrder = regressionOrder}.}
31 |
32 | \item{verbose}{If \code{TRUE}, show status messages.}
33 | }
34 | \value{
35 | super-resolution image upscaled to resolution specified by the network.
36 | }
37 | \description{
38 | Helper function for applying a pretrained super resolution model.
39 | Apply a patch-wise trained network to perform super-resolution. Can be applied
40 | to variable sized inputs. Warning: This function may be better used on CPU
41 | unless the GPU can accommodate the full image size. Warning 2: The global
42 | intensity range (min to max) of the output will match the input where the
43 | range is taken over all channels.
44 | }
45 | \examples{
46 | \dontrun{
47 | image <- applySuperResolutionModelToImage( ri( 1 ), getPretrainedNetwork( "dbpn4x" ) )
48 | }
49 | }
50 | \author{
51 | Avants BB
52 | }
53 |
--------------------------------------------------------------------------------
/man/createImageSuperResolutionModel2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createImageSuperResolutionModel.R
3 | \name{createImageSuperResolutionModel2D}
4 | \alias{createImageSuperResolutionModel2D}
5 | \title{2-D implementation of the image super resolution deep learning architecture.}
6 | \usage{
7 | createImageSuperResolutionModel2D(
8 | inputImageSize,
9 | convolutionKernelSizes = list(c(9, 9), c(1, 1), c(5, 5)),
10 | numberOfFilters = c(64, 32)
11 | )
12 | }
13 | \arguments{
14 | \item{inputImageSize}{Used for specifying the input tensor shape. The
15 | shape (or dimension) of that tensor is the image dimensions followed by
16 | the number of channels (e.g., red, green, and blue). The batch size
17 | (i.e., number of training images) is not specified a priori.}
18 |
19 | \item{convolutionKernelSizes}{a list of 2-D vectors specifying the kernel
20 | size at each convolution layer. Default values are the same as given in
21 | the original paper. The length of kernel size vectors must be 1 greater
22 | than the vector length of the number of filters.}
23 |
24 | \item{numberOfFilters}{a vector containing the number of filters for each
25 | convolutional layer. Default values are the same as given in the original
26 | paper.}
27 | }
28 | \value{
29 | a keras model for image super resolution
30 | }
31 | \description{
32 | Creates a keras model of the image super resolution deep learning framework
33 | based on the paper available here:
34 | }
35 | \details{
36 | \preformatted{ \url{https://arxiv.org/pdf/1501.00092}
37 | }
38 |
39 | This particular implementation is based on the following python
40 | implementation:\preformatted{ \url{https://github.com/titu1994/Image-Super-Resolution}
41 | }
42 | }
43 | \examples{
44 | createImageSuperResolutionModel2D(c( 100, 100, 1 ))
45 | gc()
46 | }
47 | \author{
48 | Tustison NJ
49 | }
50 |
--------------------------------------------------------------------------------
/man/applyDeepBackProjectionModel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/deepBackProjectionUtilities.R
3 | \name{applyDeepBackProjectionModel}
4 | \alias{applyDeepBackProjectionModel}
5 | \title{Apply a pretrained deep back projection model for super resolution.}
6 | \usage{
7 | applyDeepBackProjectionModel(
8 | image,
9 | model,
10 | targetRange = c(-127.5, 127.5),
11 | batchSize = 32,
12 | regressionOrder = NA,
13 | verbose = FALSE
14 | )
15 | }
16 | \arguments{
17 | \item{image}{input image.}
18 |
19 | \item{model}{pretrained model or filename (cf \code{getPretrainedNetwork}).}
20 |
21 | \item{targetRange}{a vector defining the \code{c(min, max)} of each input
22 | image (e.g., -127.5, 127.5). Output images will be scaled
23 | back to original intensity. This range should match the
24 | mapping used in the training of the network.}
25 |
26 | \item{batchSize}{batch size used for the prediction call.}
27 |
28 | \item{regressionOrder}{if specified, then apply the function
29 | \code{regressionMatchImage} with
30 | \code{polyOrder = regressionOrder}.}
31 |
32 | \item{verbose}{If \code{TRUE}, show status messages.}
33 | }
34 | \value{
35 | super-resolution image upscaled to resolution specified by the network.
36 | }
37 | \description{
38 | Helper function for applying a pretrained deep back projection model.
39 | Apply a patch-wise trained network to perform super-resolution. Can be applied
40 | to variable sized inputs. Warning: This function may be better used on CPU
41 | unless the GPU can accommodate the full image size. Warning 2: The global
42 | intensity range (min to max) of the output will match the input where the
43 | range is taken over all channels.
44 | }
45 | \examples{
46 | \dontrun{
47 | image <- applyDeepBackProjectionModel( ri( 1 ), getPretrainedNetwork( "dbpn4x" ) )
48 | }
49 | }
50 | \author{
51 | Avants BB
52 | }
53 |
--------------------------------------------------------------------------------
/man/createImageSuperResolutionModel3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createImageSuperResolutionModel.R
3 | \name{createImageSuperResolutionModel3D}
4 | \alias{createImageSuperResolutionModel3D}
5 | \title{3-D implementation of the image super resolution deep learning architecture.}
6 | \usage{
7 | createImageSuperResolutionModel3D(
8 | inputImageSize,
9 | convolutionKernelSizes = list(c(9, 9, 9), c(1, 1, 1), c(5, 5, 5)),
10 | numberOfFilters = c(64, 32)
11 | )
12 | }
13 | \arguments{
14 | \item{inputImageSize}{Used for specifying the input tensor shape. The
15 | shape (or dimension) of that tensor is the image dimensions followed by
16 | the number of channels (e.g., red, green, and blue). The batch size
17 | (i.e., number of training images) is not specified a priori.}
18 |
19 | \item{convolutionKernelSizes}{a list of 3-D vectors specifying the kernel
20 | size at each convolution layer. Default values are the same as given in
21 | the original paper. The length of kernel size vectors must be 1 greater
22 | than the vector length of the number of filters.}
23 |
24 | \item{numberOfFilters}{a vector containing the number of filters for each
25 | convolutional layer. Default values are the same as given in the original
26 | paper.}
27 | }
28 | \value{
29 | a keras model for image super resolution
30 | }
31 | \description{
32 | Creates a keras model of the image super resolution deep learning framework
33 | based on the paper available here:
34 | }
35 | \details{
36 | \preformatted{ \url{https://arxiv.org/pdf/1501.00092}
37 | }
38 |
39 | This particular implementation is based on the following python
40 | implementation:\preformatted{ \url{https://github.com/titu1994/Image-Super-Resolution}
41 | }
42 | }
43 | \examples{
44 | createImageSuperResolutionModel3D(c( 100, 100, 100, 1 ))
45 | }
46 | \author{
47 | Tustison NJ
48 | }
49 |
--------------------------------------------------------------------------------
/man/layer_attention_augmented_convolution_block_2d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/attentionUtilities.R
3 | \name{layer_attention_augmented_convolution_block_2d}
4 | \alias{layer_attention_augmented_convolution_block_2d}
5 | \title{Creates a 2-D attention augmented convolutional block}
6 | \usage{
7 | layer_attention_augmented_convolution_block_2d(
8 | inputLayer,
9 | numberOfOutputFilters,
10 | kernelSize = c(3, 3),
11 | strides = c(1, 1),
12 | depthOfQueries = 0.2,
13 | depthOfValues = 0.2,
14 | numberOfAttentionHeads = 8,
15 | useRelativeEncodings = TRUE
16 | )
17 | }
18 | \arguments{
19 | \item{inputLayer}{input keras layer.}
20 |
21 | \item{numberOfOutputFilters}{number of output filters.}
22 |
23 | \item{kernelSize}{convolution kernel size.}
24 |
25 | \item{strides}{convolution strides.}
26 |
27 | \item{depthOfQueries}{Defines the number of filters for the queries or \code{k}.
28 | Either absolute or, if \code{< 1.0}, number of \code{k} filters =
29 | \code{depthOfQueries * numberOfOutputFilters}.}
30 |
31 | \item{depthOfValues}{Defines the number of filters for the values or \code{v}.
32 | Either absolute or, if \code{< 1.0}, number of \code{v} filters =
33 | \code{depthOfValues * numberOfOutputFilters}.}
34 |
35 | \item{numberOfAttentionHeads}{number of attention heads. Note that
36 | \code{as.integer(kDepth/numberOfAttentionHeads)} must be greater than 0 (default = 8).}
37 |
38 | \item{useRelativeEncodings}{boolean for whether to use relative encodings
39 | (default = TRUE).}
40 | }
41 | \value{
42 | a keras tensor
43 | }
44 | \description{
45 | Creates a 2-D attention augmented convolutional layer as described in the paper
46 | }
47 | \details{
48 | \url{https://arxiv.org/abs/1904.09925}
49 |
50 | with the implementation ported from the following repository
51 |
52 | \url{https://github.com/titu1994/keras-attention-augmented-convs}
53 | }
54 | \author{
55 | Tustison NJ
56 | }
57 |
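
Since this man page has no examples section, here is a minimal hedged sketch of how the block might be composed in a functional keras model (the input shape and parameter values are illustrative assumptions, not taken from the package documentation):

    library( ANTsRNet )
    library( keras )

    inputs <- layer_input( shape = c( 64, 64, 1 ) )
    # depthOfQueries = 0.5 gives 8 query filters, so 8 / 4 heads > 0 as required
    outputs <- layer_attention_augmented_convolution_block_2d( inputs,
      numberOfOutputFilters = 16, kernelSize = c( 3, 3 ), strides = c( 1, 1 ),
      depthOfQueries = 0.5, depthOfValues = 0.5, numberOfAttentionHeads = 4 )
    model <- keras_model( inputs = inputs, outputs = outputs )
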
--------------------------------------------------------------------------------
/man/layer_contextual_attention_2d.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createInpaintingDeepFillModel.R
3 | \name{layer_contextual_attention_2d}
4 | \alias{layer_contextual_attention_2d}
5 | \alias{layer_contextual_attention_3d}
6 | \title{Contextual attention layer (2-D and 3-D)}
7 | \usage{
8 | layer_contextual_attention_2d(
9 | object,
10 | kernelSize = 3L,
11 | stride = 1L,
12 | dilationRate = 1L,
13 | fusionKernelSize = 0L,
14 | name = NULL,
15 | trainable = FALSE
16 | )
17 |
18 | layer_contextual_attention_3d(
19 | object,
20 | kernelSize = 3L,
21 | stride = 1L,
22 | dilationRate = 1L,
23 | fusionKernelSize = 0L,
24 | name = NULL,
25 | trainable = FALSE
26 | )
27 | }
28 | \arguments{
29 | \item{object}{Object to compose layer with. This is either a
30 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to,
31 | or another Layer which this layer will call.}
32 |
33 | \item{kernelSize}{integer specifying convolution size}
34 |
35 | \item{stride}{integer specifying the stride length for sampling the tensor}
36 |
37 | \item{dilationRate}{integer specifying the dilation rate}
38 |
39 | \item{fusionKernelSize}{integer kernel size used to enhance the saliency of large patches}
40 |
41 | \item{name}{The name of the layer}
42 |
43 | \item{trainable}{Whether the layer weights will be updated during training.}
44 | }
45 | \value{
46 | a keras layer tensor
47 | }
48 | \description{
49 | Contextual attention layer for generative image inpainting described in
50 | }
51 | \details{
52 | Jiahui Yu, et al., Generative Image Inpainting with Contextual Attention,
53 | CVPR 2018.
54 |
55 | available here:\preformatted{ \url{https://arxiv.org/abs/1801.07892}
56 | }
57 | }
58 | \examples{
59 | layer_contextual_attention_2d()
60 | layer_contextual_attention_3d()
61 | keras::keras_model_sequential() \%>\%
62 | layer_contextual_attention_2d(fusionKernelSize = 2)
63 | keras::keras_model_sequential() \%>\%
64 | layer_contextual_attention_3d()
65 | }
66 |
--------------------------------------------------------------------------------
/man/applySuperResolutionModel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/applyDBPN4x.R
3 | \name{applySuperResolutionModel}
4 | \alias{applySuperResolutionModel}
5 | \title{applySuperResolutionModel}
6 | \usage{
7 | applySuperResolutionModel(
8 | image,
9 | model,
10 | targetRange,
11 | batch_size = 32,
12 | linmatchOrder,
13 | mask,
14 | verbose = FALSE
15 | )
16 | }
17 | \arguments{
18 | \item{image}{input image}
19 |
20 | \item{model}{model object or filename see \code{getPretrainedNetwork}}
21 |
22 | \item{targetRange}{a vector defining the min and max of the input image,
23 | e.g., \code{c(-127.5, 127.5)}. Output images will be scaled back to the original
24 | intensity range. This range should match the mapping used in the training of the network.}
25 |
26 | \item{batch_size}{for prediction call}
27 |
28 | \item{linmatchOrder}{if not missing, then apply \code{linMatchIntensity} with the given fit parameter}
29 | 
30 | \item{mask}{restrict the intensity rescaling parameters to the region within the mask}
31 |
32 | \item{verbose}{If \code{TRUE}, show status messages}
33 | }
34 | \value{
35 | image upscaled to resolution provided by network
36 | }
37 | \description{
38 | Apply pretrained super-resolution network
39 | }
40 | \details{
41 | Apply a patch-wise trained network to perform super-resolution. Can be applied
42 | to variable sized inputs. Warning: This function may be better used on CPU
43 | unless the GPU can accommodate the full image size. Warning 2: The global
44 | intensity range (min to max) of the output will match the input where the
45 | range is taken over all channels.
46 | }
47 | \examples{
48 | \dontrun{
49 | library(ANTsRCore)
50 | library(keras)
51 | orig_img = antsImageRead( getANTsRData( "r16" ) )
52 | # input needs to be 48x48
53 | img = resampleImage(orig_img, resampleParams = rep(256/48, 2))
54 | model = getPretrainedNetwork( "dbpn4x" )
55 | simg <- applySuperResolutionModel(img, model = model)
56 | plot(orig_img)
57 | plot(img)
58 | plot(simg)
59 | }
60 | }
61 | \author{
62 | Avants BB
63 | }
64 |
--------------------------------------------------------------------------------
/man/AnchorBoxLayer2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ssdUtilities.R
3 | \docType{class}
4 | \name{AnchorBoxLayer2D}
5 | \alias{AnchorBoxLayer2D}
6 | \title{Anchor box layer for SSD architecture (2-D).}
7 | \value{
8 | a 5-D tensor with shape
9 | \eqn{ batchSize \times widthSize \times heightSize \times numberOfBoxes \times 8 }
10 | In the last dimension, the first 4 values correspond to the
11 | 2-D coordinates of the bounding boxes and the other 4 are the variances.
12 | }
13 | \description{
14 | Anchor box layer for SSD architecture (2-D).
15 | }
16 | \section{Usage}{
17 |
18 | \preformatted{anchorBoxGenerator <- AnchorBoxLayer2D$new( imageSize,
19 | scale, nextScale, aspectRatios = c( '1:1', '2:1', '1:2' ),
20 | variances = 1.0 )
21 |
22 | anchorBoxGenerator$call( x, mask = NULL )
23 | anchorBoxGenerator$compute_output_shape( input_shape )
24 | }
25 | }
26 |
27 | \section{Arguments}{
28 |
29 | \describe{
30 | \item{anchorBoxGenerator}{The instantiated \code{AnchorBoxLayer2D} object.}
31 | \item{imageSize}{size of the input image.}
32 | \item{scale}{scale of each box (in pixels).}
33 | \item{nextScale}{next scale of each box (in pixels).}
34 | \item{aspectRatios}{vector describing the geometries of the anchor boxes
35 | for this layer.}
36 | \item{variances}{a list of 4 floats > 0 with scaling factors for the encoded
37 | predicted box coordinates. A variance value of 1.0 would apply no scaling at
38 | all to the predictions, while values in (0,1) upscale the encoded
39 | predictions and values greater than 1.0 downscale the encoded predictions.
40 | Defaults to 1.0.}
41 | \item{x}{input tensor.}
42 | \item{mask}{optional mask tensor.}
43 | \item{input_shape}{shape of the input tensor.}
44 | }
45 | }
46 |
47 | \section{Details}{
48 |
49 | \code{$initialize} instantiates a new class.
50 |
51 | \code{$call} main body.
52 |
53 | \code{$compute_output_shape} computes the output shape.
54 | }
55 |
56 | \examples{
57 | x = AnchorBoxLayer2D$new(imageSize = c(20, 20),
58 | scale = 2, nextScale = 2)
59 | x$build()
60 | }
61 | \author{
62 | Tustison NJ
63 | }
64 |
--------------------------------------------------------------------------------
/man/deepAtropos.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/deepAtropos.R
3 | \name{deepAtropos}
4 | \alias{deepAtropos}
5 | \title{Six tissue segmentation}
6 | \usage{
7 | deepAtropos(
8 | t1,
9 | doPreprocessing = TRUE,
10 | antsxnetCacheDirectory = NULL,
11 | verbose = FALSE,
12 | debug = FALSE
13 | )
14 | }
15 | \arguments{
16 | \item{t1}{raw or preprocessed 3-D T1-weighted brain image.}
17 |
18 | \item{doPreprocessing}{perform preprocessing. See description above.}
19 |
20 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
21 | template and model weights. Since these can be resused, if
22 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
23 | inst/extdata/ subfolder of the ANTsRNet package.}
24 |
25 | \item{verbose}{print progress.}
26 |
27 | \item{debug}{return feature images in the last layer of the u-net model.}
28 | }
29 | \value{
30 | list consisting of the segmentation image and probability images for
31 | each label.
32 | }
33 | \description{
34 | Perform Atropos-style six tissue segmentation using deep learning
35 | }
36 | \details{
37 | The labeling is as follows:
38 | \itemize{
39 | \item{Label 0:}{background}
40 | \item{Label 1:}{CSF}
41 | \item{Label 2:}{gray matter}
42 | \item{Label 3:}{white matter}
43 | \item{Label 4:}{deep gray matter}
44 | \item{Label 5:}{brain stem}
45 | \item{Label 6:}{cerebellum}
46 | }
47 |
48 | Preprocessing on the training data consisted of:
49 | \itemize{
50 | \item n4 bias correction,
51 | \item denoising,
52 | \item brain extraction, and
53 | \item affine registration to MNI.
54 | }
55 | The input T1 should undergo the same steps. If the input T1 is the raw
56 | T1, these steps can be performed by the internal preprocessing, i.e., set
57 | \code{doPreprocessing = TRUE}.
58 | }
59 | \examples{
60 | \dontrun{
61 | library( ANTsRNet )
62 | library( keras )
63 |
64 | image <- antsImageRead( "t1.nii.gz" )
65 | results <- deepAtropos( image )
66 | }
67 | }
68 | \author{
69 | Tustison NJ
70 | }
71 |
--------------------------------------------------------------------------------
/man/AnchorBoxLayer3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ssdUtilities.R
3 | \docType{class}
4 | \name{AnchorBoxLayer3D}
5 | \alias{AnchorBoxLayer3D}
6 | \title{Anchor box layer for SSD architecture (3-D).}
7 | \value{
8 | a 6-D tensor with shape
9 | \eqn{ batchSize \times widthSize \times heightSize \times depthSize \times numberOfBoxes \times 12 }
10 | In the last dimension, the first 6 values correspond to the
11 | 3-D coordinates of the bounding boxes and the other 6 are the variances.
12 | }
13 | \description{
14 | Anchor box layer for SSD architecture (3-D).
15 | }
16 | \section{Usage}{
17 |
18 | \preformatted{anchorBoxGenerator <- AnchorBoxLayer3D$new( imageSize,
19 | scale, nextScale, aspectRatios = c( '1:1:1', '2:1:1', '1:2:1', '1:1:2' ),
20 | variances = 1.0 )
21 |
22 | anchorBoxGenerator$call( x, mask = NULL )
23 | anchorBoxGenerator$compute_output_shape( input_shape )
24 | }
25 | }
26 |
27 | \section{Arguments}{
28 |
29 | \describe{
30 | \item{anchorBoxGenerator}{The instantiated \code{AnchorBoxLayer3D} object.}
31 | \item{imageSize}{size of the input image.}
32 | \item{scale}{scale of each box (in pixels).}
33 | \item{nextScale}{next scale of each box (in pixels).}
34 | \item{aspectRatios}{vector describing the geometries of the anchor boxes
35 | for this layer.}
36 | \item{variances}{a list of 6 floats > 0 with scaling factors for the encoded
37 | predicted box coordinates. A variance value of 1.0 would apply no scaling at
38 | all to the predictions, while values in (0,1) upscale the encoded
39 | predictions and values greater than 1.0 downscale the encoded predictions.
40 | Defaults to 1.0.}
41 | \item{x}{input tensor.}
42 | \item{mask}{optional mask tensor.}
43 | \item{input_shape}{shape of the input tensor.}
44 | }
45 | }
46 |
47 | \section{Details}{
48 |
49 | \code{$initialize} instantiates a new class.
50 |
51 | \code{$call} main body.
52 |
53 | \code{$compute_output_shape} computes the output shape.
54 | }
55 |
56 | \examples{
57 | x = AnchorBoxLayer3D$new(imageSize = c(20, 20, 20),
58 | scale = 2, nextScale = 2)
59 | x$build()
60 | }
61 | \author{
62 | Tustison NJ
63 | }
64 |
--------------------------------------------------------------------------------
/man/multilabel_surface_loss.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customMetrics.R
3 | \name{multilabel_surface_loss}
4 | \alias{multilabel_surface_loss}
5 | \title{Function for surface loss}
6 | \usage{
7 | multilabel_surface_loss(y_true, y_pred, dimensionality = 3L)
8 | }
9 | \arguments{
10 | \item{y_true}{True labels (Tensor)}
11 |
12 | \item{y_pred}{Predictions (Tensor of the same shape as \code{y_true})}
13 | }
14 | \value{
15 | function value
16 | }
17 | \description{
18 | \url{https://pubmed.ncbi.nlm.nih.gov/33080507/}
19 | }
20 | \details{
21 | ported from this implementation:
22 |
23 | \url{https://github.com/LIVIAETS/boundary-loss/blob/master/keras_loss.py}
24 |
25 | Note: Assumption is that y_true is a one-hot representation
26 | of the segmentation batch. The background (label 0) should
27 | be included but is not used in the calculation.
28 | }
29 | \examples{
30 |
31 | library( ANTsRNet )
32 | library( keras )
33 |
34 | model <- createUnetModel2D( c( 64, 64, 1 ), numberOfOutputs = 2 )
35 |
36 | model \%>\% compile( loss = multilabel_surface_loss,
37 | optimizer = optimizer_adam( lr = 0.0001 ),
38 | metrics = "accuracy" )
39 |
40 | ########################################
41 | #
42 | # Run in isolation
43 | #
44 |
45 | library( ANTsR )
46 |
47 | r16 <- antsImageRead( getANTsRData( "r16" ) )
48 | r16seg <- kmeansSegmentation( r16, 3 )$segmentation
49 | r16array <- array( data = as.array( r16seg ), dim = c( 1, dim( r16seg ) ) )
50 | r16tensor <- tensorflow::tf$convert_to_tensor( encodeUnet( r16array, c( 0, 1, 2, 3 ) ) )
51 |
52 | r64 <- antsImageRead( getANTsRData( "r64" ) )
53 | r64seg <- kmeansSegmentation( r64, 3 )$segmentation
54 | r64array <- array( data = as.array( r64seg ), dim = c( 1, dim( r64seg ) ) )
55 | r64tensor <- tensorflow::tf$convert_to_tensor( encodeUnet( r64array, c( 0, 1, 2, 3 ) ) )
56 |
57 | surface_loss <- multilabel_surface_loss( r16tensor, r64tensor, dimensionality = 2L )
58 | loss_value <- surface_loss( r16tensor, r64tensor )$numpy()
59 |
60 | }
61 | \author{
62 | Tustison NJ
63 | }
64 |
--------------------------------------------------------------------------------
/docs/bootstrap-toc.css:
--------------------------------------------------------------------------------
1 | /*!
2 | * Bootstrap Table of Contents v0.4.1 (http://afeld.github.io/bootstrap-toc/)
3 | * Copyright 2015 Aidan Feldman
4 | * Licensed under MIT (https://github.com/afeld/bootstrap-toc/blob/gh-pages/LICENSE.md) */
5 |
6 | /* modified from https://github.com/twbs/bootstrap/blob/94b4076dd2efba9af71f0b18d4ee4b163aa9e0dd/docs/assets/css/src/docs.css#L548-L601 */
7 |
8 | /* All levels of nav */
9 | nav[data-toggle='toc'] .nav > li > a {
10 | display: block;
11 | padding: 4px 20px;
12 | font-size: 13px;
13 | font-weight: 500;
14 | color: #767676;
15 | }
16 | nav[data-toggle='toc'] .nav > li > a:hover,
17 | nav[data-toggle='toc'] .nav > li > a:focus {
18 | padding-left: 19px;
19 | color: #563d7c;
20 | text-decoration: none;
21 | background-color: transparent;
22 | border-left: 1px solid #563d7c;
23 | }
24 | nav[data-toggle='toc'] .nav > .active > a,
25 | nav[data-toggle='toc'] .nav > .active:hover > a,
26 | nav[data-toggle='toc'] .nav > .active:focus > a {
27 | padding-left: 18px;
28 | font-weight: bold;
29 | color: #563d7c;
30 | background-color: transparent;
31 | border-left: 2px solid #563d7c;
32 | }
33 |
34 | /* Nav: second level (shown on .active) */
35 | nav[data-toggle='toc'] .nav .nav {
36 | display: none; /* Hide by default, but at >768px, show it */
37 | padding-bottom: 10px;
38 | }
39 | nav[data-toggle='toc'] .nav .nav > li > a {
40 | padding-top: 1px;
41 | padding-bottom: 1px;
42 | padding-left: 30px;
43 | font-size: 12px;
44 | font-weight: normal;
45 | }
46 | nav[data-toggle='toc'] .nav .nav > li > a:hover,
47 | nav[data-toggle='toc'] .nav .nav > li > a:focus {
48 | padding-left: 29px;
49 | }
50 | nav[data-toggle='toc'] .nav .nav > .active > a,
51 | nav[data-toggle='toc'] .nav .nav > .active:hover > a,
52 | nav[data-toggle='toc'] .nav .nav > .active:focus > a {
53 | padding-left: 28px;
54 | font-weight: 500;
55 | }
56 |
57 | /* from https://github.com/twbs/bootstrap/blob/e38f066d8c203c3e032da0ff23cd2d6098ee2dd6/docs/assets/css/src/docs.css#L631-L634 */
58 | nav[data-toggle='toc'] .nav > .active > ul {
59 | display: block;
60 | }
61 |
--------------------------------------------------------------------------------
/man/hippMapp3rSegmentation.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/hippMapp3rSegmentation.R
3 | \name{hippMapp3rSegmentation}
4 | \alias{hippMapp3rSegmentation}
5 | \title{hippMapp3rSegmentation}
6 | \usage{
7 | hippMapp3rSegmentation(
8 | t1,
9 | doPreprocessing = TRUE,
10 | antsxnetCacheDirectory = NULL,
11 | verbose = FALSE
12 | )
13 | }
14 | \arguments{
15 | \item{doPreprocessing}{perform preprocessing. See description above.}
16 |
17 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
18 | template and model weights. Since these can be reused, if
19 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
20 | subdirectory ~/.keras/ANTsXNet/.}
21 |
22 | \item{verbose}{print progress.}
23 |
24 | \item{t1}{input 3-D T1-weighted brain image.}
25 | }
26 | \value{
27 | labeled hippocampal mask (ANTsR image)
28 | }
29 | \description{
30 | Perform HippMapp3r (hippocampal) segmentation described in
31 | }
32 | \details{
33 | \url{https://www.ncbi.nlm.nih.gov/pubmed/31609046}
34 | 
35 | with models and architecture ported from
36 | 
37 | \url{https://github.com/mgoubran/HippMapp3r}
38 | 
39 | Additional documentation and attribution resources found at
40 | 
41 | \url{https://hippmapp3r.readthedocs.io/en/latest/}
42 |
43 | Preprocessing consists of:
44 | \itemize{
45 | \item n4 bias correction and
46 | \item brain extraction.
47 | }
48 | The input T1 should undergo the same steps. If the input T1 is the raw
49 | T1, these steps can be performed by the internal preprocessing, i.e., set
50 | \code{doPreprocessing = TRUE}.
51 | }
52 | \examples{
53 | \dontrun{
54 | library( ANTsRNet )
55 | library( keras )
57 |
58 | url <- "https://github.com/mgoubran/HippMapp3r/blob/master/data/test_case/mprage.nii.gz?raw=true"
59 | imageFile <- "head.nii.gz"
60 | download.file( url, imageFile )
61 | image <- antsImageRead( imageFile )
62 | imageN4 <- n4BiasFieldCorrection( image, verbose = TRUE )
63 | segmentation <- hippMapp3rSegmentation( imageN4, verbose = TRUE )
64 | }
65 | }
66 | \author{
67 | Tustison NJ
68 | }
69 |
--------------------------------------------------------------------------------
/man/layer_instance_normalization.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customNormalizationLayers.R
3 | \name{layer_instance_normalization}
4 | \alias{layer_instance_normalization}
5 | \title{Instance normalization layer}
6 | \usage{
7 | layer_instance_normalization(
8 | object,
9 | axis = NULL,
10 | epsilon = 0.001,
11 | center = TRUE,
12 | scale = TRUE,
13 | betaInitializer = "zeros",
14 | gammaInitializer = "ones",
15 | betaRegularizer = NULL,
16 | gammaRegularizer = NULL,
17 | betaConstraint = NULL,
18 | gammaConstraint = NULL,
19 | trainable = TRUE
20 | )
21 | }
22 | \arguments{
23 | \item{object}{Object to compose layer with. This is either a
24 | \link[keras:keras_model_sequential]{keras::keras_model_sequential} to add the layer to,
25 | or another Layer which this layer will call.}
26 |
27 | \item{axis}{Integer specifying which axis should be normalized, typically
28 | the feature axis. For example, after a Conv2D layer with
29 | \code{channels_first}, set axis = 1. Setting \code{axis=-1L} will
30 | normalize all values in each instance of the batch. Axis 0
31 | is the batch dimension for tensorflow backend so we throw an
32 | error if \code{axis = 0}.}
33 |
34 | \item{epsilon}{Small float added to the variance to avoid dividing by 0.}
35 |
36 | \item{center}{If TRUE, add \code{beta} offset to normalized tensor.}
37 |
38 | \item{scale}{If TRUE, multiply by \code{gamma}.}
39 |
40 | \item{betaInitializer}{Initializer for the beta weight.}
41 | 
42 | \item{gammaInitializer}{Initializer for the gamma weight.}
43 |
44 | \item{betaRegularizer}{Regularizer for the beta weight.}
45 |
46 | \item{gammaRegularizer}{Regularizer for the gamma weight.}
47 |
48 | \item{betaConstraint}{Optional constraint for the beta weight.}
49 |
50 | \item{gammaConstraint}{Optional constraint for the gamma weight.}
51 |
52 | \item{trainable}{Whether the layer weights will be updated during training.}
53 | }
54 | \value{
55 | a keras layer tensor
56 | }
57 | \description{
58 | Creates an instance normalization layer
59 | }
60 | \author{
61 | Tustison NJ
62 | }
63 |
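
A minimal hedged usage sketch (not part of the man page; the surrounding convolution layer and shapes are illustrative assumptions):

    library( ANTsRNet )
    library( keras )

    model <- keras_model_sequential() %>%
      layer_conv_2d( filters = 8, kernel_size = c( 3, 3 ),
                     input_shape = c( 64, 64, 1 ) ) %>%
      layer_instance_normalization() %>%  # defaults: axis = NULL, center and scale enabled
      layer_activation( "relu" )
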
--------------------------------------------------------------------------------
/man/InstanceNormalizationLayer.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customNormalizationLayers.R
3 | \docType{class}
4 | \name{InstanceNormalizationLayer}
5 | \alias{InstanceNormalizationLayer}
6 | \title{Creates an instance normalization layer}
7 | \value{
8 | an instance normalization layer
9 | }
10 | \description{
11 | Creates an instance normalization layer as described in the paper
12 | }
13 | \details{
14 | \url{https://arxiv.org/abs/1701.02096}
15 |
16 | with the implementation ported from the following python implementation
17 |
18 | \url{https://github.com/keras-team/keras-contrib/blob/master/keras_contrib/layers/normalization/instancenormalization.py}
19 | }
20 | \section{Arguments}{
21 |
22 | \describe{
23 | \item{axis}{Integer specifying which axis should be normalized, typically
24 | the feature axis. For example, after a Conv2D layer with
25 | \code{channels_first}, set axis = 2. Setting \code{axis=-1L} will
26 | normalize all values in each instance of the batch. Axis 1
27 | is the batch dimension for tensorflow backend so we throw an
28 | error if \code{axis = 1}.}
29 | \item{epsilon}{Small float added to the variance to avoid dividing by 0.}
30 | \item{center}{If TRUE, add \code{beta} offset to normalized tensor.}
31 | \item{scale}{If TRUE, multiply by \code{gamma}.}
32 | \item{betaInitializer}{Initializer for the beta weight.}
33 | \item{gammaInitializer}{Initializer for the gamma weight.}
34 | \item{betaRegularizer}{Regularizer for the beta weight.}
35 | \item{gammaRegularizer}{Regularizer for the gamma weight.}
36 | \item{betaConstraint}{Optional constraint for the beta weight.}
37 | \item{gammaConstraint}{Optional constraint for the gamma weight.}
38 | }
39 | }
40 |
41 | \section{Details}{
42 |
43 | \code{$initialize} instantiates a new class.
44 |
45 | \code{$call} main body.
46 |
47 | \code{$compute_output_shape} computes the output shape.
48 | }
49 |
50 | \examples{
51 | InstanceNormalizationLayer$new()
52 | InstanceNormalizationLayer$new(axis = 2L)
53 | testthat::expect_error(InstanceNormalizationLayer$new(axis = 1L))
54 |
55 | }
56 | \author{
57 | Tustison NJ
58 | }
59 |
--------------------------------------------------------------------------------
/man/longitudinalCorticalThickness.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/corticalThickness.R
3 | \name{longitudinalCorticalThickness}
4 | \alias{longitudinalCorticalThickness}
5 | \title{Longitudinal cortical thickness using deep learning}
6 | \usage{
7 | longitudinalCorticalThickness(
8 | t1s,
9 | initialTemplate = "oasis",
10 | numberOfIterations = 1,
11 | refinementTransform = "antsRegistrationSyNQuick[a]",
12 | antsxnetCacheDirectory = NULL,
13 | verbose = FALSE
14 | )
15 | }
16 | \arguments{
17 | \item{t1s}{input list of 3-D unprocessed T1-weighted brain images from a single subject}
18 |
19 | \item{initialTemplate}{input image to define the orientation of the SST. Can be a string
20 | (see \code{getANTsXNetData}) or a specified template. This allows the user to create a
21 | SST outside of this routine.}
22 |
23 | \item{numberOfIterations}{Defines the number of iterations for refining the SST.}
24 |
25 | \item{refinementTransform}{Transform for defining the refinement registration transform.
26 | See options in \code{antsRegistration}.}
27 |
28 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
29 | template and model weights. Since these can be reused, if
30 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
31 | subdirectory ~/.keras/ANTsXNet/.}
32 |
33 | \item{verbose}{print progress.}
34 | }
35 | \value{
36 | List consisting of the SST, and a (sub-)list for each subject consisting of
37 | the preprocessed image, cortical thickness image, segmentation probability images,
38 | and affine mapping to the SST.
39 | }
40 | \description{
41 | Perform KellyKapowski cortical thickness longitudinally using \code{deepAtropos}
42 | for segmentation of the derived single-subject template. It takes inspiration from
43 | the work described here:
44 | }
45 | \details{
46 | \url{https://pubmed.ncbi.nlm.nih.gov/31356207/}
47 | }
48 | \examples{
49 | \dontrun{
50 | library( ANTsRNet )
51 | library( keras )
52 |
53 | image <- antsImageRead( "t1w_image.nii.gz" )
54 | kk <- longitudinalCorticalThickness( list( image ) )
55 | }
56 | }
57 | \author{
58 | Tustison NJ, Avants BB
59 | }
60 |
--------------------------------------------------------------------------------
/man/brainAge.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/brainAge.R
3 | \name{brainAge}
4 | \alias{brainAge}
5 | \title{BrainAGE}
6 | \usage{
7 | brainAge(
8 | image,
9 | doPreprocessing = TRUE,
10 | numberOfSimulations = 0,
11 | sdAffine = 0.01,
12 | antsxnetCacheDirectory = NULL,
13 | verbose = TRUE
14 | )
15 | }
16 | \arguments{
17 | \item{image}{input 3-D T1-weighted brain image.}
18 |
19 | \item{doPreprocessing}{boolean dictating whether prescribed
20 | preprocessing is performed (brain extraction, bias correction,
21 | normalization to template).}
22 |
23 | \item{numberOfSimulations}{number of random affine perturbations to
24 | transform the input.}
25 |
26 | \item{sdAffine}{defines the standard deviation of the affine transformation
27 | parameter.}
28 |
29 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
30 | template and model weights. Since these can be reused, if
31 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
32 | inst/extdata/ subfolder of the ANTsRNet package.}
33 |
34 | \item{verbose}{print progress.}
35 | }
36 | \value{
37 | predicted age and binned confidence values
38 | }
39 | \description{
40 | Estimate BrainAge from a T1-weighted MR image using the DeepBrainNet
41 | architecture and weights described here:
42 | }
43 | \details{
44 | \url{https://github.com/vishnubashyam/DeepBrainNet}
45 |
46 | and described in the following article:
47 |
48 | \url{https://academic.oup.com/brain/article-abstract/doi/10.1093/brain/awaa160/5863667?redirectedFrom=fulltext}
49 |
50 | Preprocessing on the training data consisted of:
51 | \itemize{
52 | \item n4 bias correction,
53 | \item brain extraction, and
54 | \item affine registration to MNI.
55 | }
56 | The input T1 should undergo the same steps. If the input T1 is the raw
57 | T1, these steps can be performed by the internal preprocessing, i.e., set
58 | \code{doPreprocessing = TRUE}.
59 | }
60 | \examples{
61 | \dontrun{
62 | library( ANTsRNet )
63 | library( keras )
64 |
65 | image <- antsImageRead( "t1w_image.nii.gz" )
66 | estimatedBrainAge <- brainAge( image )
67 | }
68 | }
69 | \author{
70 | Tustison NJ
71 | }
72 |
--------------------------------------------------------------------------------
/R/createAutoencoderModel.R:
--------------------------------------------------------------------------------
1 | #' Function for creating a symmetric autoencoder model.
2 | #'
3 | #' Builds an autoencoder based on the specified array defining the
4 | #' number of units in the encoding branch. Ported to Keras R from the
5 | #' Keras python implementation here:
6 | #'
7 | #' \url{https://github.com/XifengGuo/DEC-keras}
8 | #'
9 | #' @param numberOfUnitsPerLayer vector defining the number of units
10 | #' in the encoding branch
11 | #' @param activation activation type for the dense layers
12 | #' @param initializer initializer type for the dense layers
13 | #'
14 | #' @return two models: the encoder and auto-encoder
15 | #'
16 | #' @author Tustison NJ
17 | #' @examples
18 | #'
19 | #' library( ANTsRNet )
20 | #' library( keras )
21 | #'
22 | #' ae <- createAutoencoderModel( c( 784, 500, 500, 2000, 10 ) )
23 | #'
24 | #' @export
25 |
26 | createAutoencoderModel <- function( numberOfUnitsPerLayer,
27 | activation = 'relu',
28 | initializer = 'glorot_uniform' )
29 | {
30 | numberOfEncodingLayers <- as.integer( length( numberOfUnitsPerLayer ) - 1 )
31 |
32 | inputs <- layer_input( shape = c( as.integer( numberOfUnitsPerLayer[1] ) ) )
33 |
34 | encoder <- inputs
35 |
36 | for( i in seq_len( numberOfEncodingLayers - 1 ) )
37 | {
38 | encoder <- encoder %>%
39 | layer_dense( numberOfUnitsPerLayer[i+1],
40 | activation = activation, kernel_initializer = initializer )
41 | }
42 |
43 | encoder <- encoder %>%
44 | layer_dense( units = tail( numberOfUnitsPerLayer, 1 ) )
45 |
46 | autoencoder <- encoder
47 |
48 | for( i in seq( from = numberOfEncodingLayers, to = 2, by = -1 ) )
49 | {
50 | autoencoder <- autoencoder %>%
51 | layer_dense( numberOfUnitsPerLayer[i],
52 | activation = activation, kernel_initializer = initializer )
53 | }
54 |
55 | autoencoder <- autoencoder %>%
56 | layer_dense( numberOfUnitsPerLayer[1], kernel_initializer = initializer )
57 |
58 | return( list(
59 | autoencoderModel = keras_model( inputs = inputs, outputs = autoencoder ),
60 | encoderModel = keras_model( inputs = inputs, outputs = encoder ) ) )
61 | }
62 |
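
As a hedged illustration of how the returned pair of models might be used (the toy data and training settings are assumptions, not taken from the source):

    library( ANTsRNet )
    library( keras )

    models <- createAutoencoderModel( c( 784, 500, 500, 2000, 10 ) )

    # compile and briefly fit the full autoencoder on random toy data
    models$autoencoderModel %>% compile( loss = "mse", optimizer = optimizer_adam() )
    x <- matrix( runif( 128 * 784 ), nrow = 128 )
    models$autoencoderModel %>% fit( x, x, epochs = 1, batch_size = 16, verbose = 0 )

    # the encoder alone maps inputs to the 10-dimensional latent code
    latentCodes <- predict( models$encoderModel, x )
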
--------------------------------------------------------------------------------
/tests/testthat/test-alexNetModel3D.R:
--------------------------------------------------------------------------------
1 | testthat::context("AlexModels-3D")
2 |
3 | testthat::test_that("Creating 3D Models", {
4 | if (keras::is_keras_available()) {
5 | model <- createAlexNetModel3D(
6 | inputImageSize = c(20L, 20L, 19L, 1L),
7 | numberOfClassificationLabels = 2,
8 | numberOfDenseUnits = 256,
9 | batch_size = 1)
10 | testthat::expect_is(model, "keras.engine.training.Model" )
11 | testthat::expect_equal(model$count_params(), 46963394L)
12 | testthat::expect_equal(length(model$weights), 16L)
13 | rm(model); gc(); gc()
14 | Sys.sleep(2); gc(); gc()
15 |
16 | model <- createAlexNetModel3D(
17 | inputImageSize = c(20L, 20L, 20L, 1L),
18 | numberOfClassificationLabels = 3,
19 | numberOfDenseUnits = 256,
20 | batch_size = 1)
21 | testthat::expect_is(model, "keras.engine.training.Model" )
22 | testthat::expect_equal(model$count_params(), 46963651L)
23 | testthat::expect_equal(length(model$weights), 16L)
24 | rm(model); gc(); gc()
25 | Sys.sleep(2); gc(); gc()
26 |
27 | model <- createAlexNetModel3D(
28 | inputImageSize = c(20L, 20L, 20L, 1L),
29 | numberOfClassificationLabels = 2,
30 | mode = "regression",
31 | numberOfDenseUnits = 256,
32 | batch_size = 1 )
33 | testthat::expect_is(model, "keras.engine.training.Model" )
34 | testthat::expect_equal(model$count_params(), 46963394L)
35 | testthat::expect_equal(length(model$weights), 16L)
36 | rm(model); gc(); gc()
37 | Sys.sleep(2); gc(); gc()
38 |
39 | }
40 | })
41 |
42 |
43 | testthat::test_that("Creating Big 3D Models", {
44 | if (keras::is_keras_available()) {
45 | testthat::skip_on_travis()
46 | model <- createAlexNetModel3D(
47 | inputImageSize = c(20L, 20L, 20L, 1L),
48 | numberOfClassificationLabels = 2,
49 | mode = "regression",
50 | numberOfDenseUnits = 1024,
51 | batch_size = 1 )
52 | testthat::expect_is(model, "keras.engine.training.Model" )
53 | testthat::expect_equal(model$count_params(), 164734658L)
54 | testthat::expect_equal(length(model$weights), 16L)
55 | rm(model); gc(); gc()
56 | Sys.sleep(2); gc(); gc()
57 | }
58 | })
59 |
--------------------------------------------------------------------------------
/man/createFullyConvolutionalVggModel2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createVggModel.R
3 | \name{createFullyConvolutionalVggModel2D}
4 | \alias{createFullyConvolutionalVggModel2D}
5 | \title{2-D implementation of the VGG deep learning architecture without classification
6 | layers.}
7 | \usage{
8 | createFullyConvolutionalVggModel2D(
9 | inputImageSize,
10 | layers = c(1, 2, 3, 4, 4),
11 | lowestResolution = 64,
12 | convolutionKernelSize = c(3, 3),
13 | poolSize = c(2, 2),
14 | strides = c(2, 2),
15 | dropoutRate = 0,
16 | style = 19,
17 | activation = "relu"
18 | )
19 | }
20 | \arguments{
21 | \item{inputImageSize}{Used for specifying the input tensor shape. The
22 | shape (or dimension) of that tensor is the image dimensions followed by
23 | the number of channels (e.g., red, green, and blue). The batch size
24 | (i.e., number of training images) is not specified a priori.}
25 |
26 | \item{layers}{a vector determining the number of filters defined at
27 | each layer.}
28 |
29 | \item{lowestResolution}{number of filters at the beginning.}
30 |
31 | \item{convolutionKernelSize}{2-d vector defining the kernel size
32 | during the encoding path}
33 |
34 | \item{poolSize}{2-d vector defining the region for each pooling layer.}
35 |
36 | \item{strides}{2-d vector describing the stride length in each direction.}
37 |
38 | \item{dropoutRate}{float between 0 and 1 to use between dense layers.}
39 |
40 | \item{style}{\verb{'16'} or \verb{'19'} for VGG16 or VGG19, respectively.}
41 |
42 | \item{activation}{activation function for convolutional layer.}
43 | }
44 | \value{
45 | a VGG keras model
46 | }
47 | \description{
48 | Creates a keras model of the Vgg deep learning architecture for image
49 | recognition based on the paper
50 | }
51 | \details{
52 | K. Simonyan and A. Zisserman, Very Deep Convolutional Networks for
53 | Large-Scale Image Recognition
54 |
55 | available here:\preformatted{ \url{https://arxiv.org/abs/1409.1556}
56 | }
57 |
58 | This particular implementation was influenced by the following python
59 | implementation:\preformatted{ \url{https://gist.github.com/baraldilorenzo/8d096f48a1be4a2d660d}
60 | }
61 | }
62 | \author{
63 | Tustison NJ
64 | }
65 |
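
This man page lacks an examples section; a minimal hedged sketch follows (the input size and lowestResolution value are illustrative assumptions):

    library( ANTsRNet )
    library( keras )

    model <- createFullyConvolutionalVggModel2D( c( 128, 128, 1 ),
      lowestResolution = 16, style = 19 )
    summary( model )
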
--------------------------------------------------------------------------------
/man/sysuMediaWmhSegmentation.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/whiteMatterHyperintensitySegmentation.R
3 | \name{sysuMediaWmhSegmentation}
4 | \alias{sysuMediaWmhSegmentation}
5 | \title{White matter hyperintensity segmentation}
6 | \usage{
7 | sysuMediaWmhSegmentation(
8 | flair,
9 | t1 = NULL,
10 | doPreprocessing = TRUE,
11 | useEnsemble = TRUE,
12 | useAxialSlicesOnly = TRUE,
13 | antsxnetCacheDirectory = NULL,
14 | verbose = FALSE
15 | )
16 | }
17 | \arguments{
18 | \item{flair}{input 3-D FLAIR brain image.}
19 |
20 | \item{t1}{input 3-D T1-weighted brain image (assumed to be aligned to
21 | the flair, if specified).}
22 |
23 | \item{doPreprocessing}{perform n4 bias correction?}
24 |
25 | \item{useEnsemble}{boolean to check whether to use all 3 sets of weights.}
26 |
27 | \item{useAxialSlicesOnly}{if \code{TRUE}, use original implementation which
28 | was trained on axial slices. If \code{FALSE}, use ANTsXNet variant
29 | implementation which applies the slice-by-slice models to all 3 dimensions
30 | and averages the results.}
31 |
32 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
33 | template and model weights. Since these can be reused, if
34 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
35 | inst/extdata/ subfolder of the ANTsRNet package.}
36 |
37 | \item{verbose}{print progress.}
38 | }
39 | \value{
40 | WMH segmentation probability image
41 | }
42 | \description{
43 | Perform WMH segmentation using the winning submission in the MICCAI
44 | 2017 challenge by the sysu_media team using FLAIR or T1/FLAIR. The
45 | MICCAI challenge is discussed in
46 | }
47 | \details{
48 | \url{https://pubmed.ncbi.nlm.nih.gov/30908194/}
49 |
50 | with the sysu_media team's entry discussed in
51 |
52 | \url{https://pubmed.ncbi.nlm.nih.gov/30125711/}
53 |
54 | with the original implementation available here:\preformatted{\url{https://github.com/hongweilibran/wmh_ibbmTum}
55 | }
56 | }
57 | \examples{
58 | \dontrun{
59 | library( ANTsRNet )
60 | library( keras )
61 |
62 | image <- antsImageRead( "flair.nii.gz" )
63 | probabilityMask <- sysuMediaWmhSegmentation( image )
64 | }
65 | }
66 | \author{
67 | Tustison NJ
68 | }
69 |
--------------------------------------------------------------------------------
/man/multilabel_dice_coefficient.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/customMetrics.R
3 | \name{multilabel_dice_coefficient}
4 | \alias{multilabel_dice_coefficient}
5 | \title{Dice function for multilabel segmentation problems}
6 | \usage{
7 | multilabel_dice_coefficient(
8 | y_true,
9 | y_pred,
10 | dimensionality = 3L,
11 | smoothingFactor = 0
12 | )
13 | }
14 | \arguments{
15 | \item{y_true}{True labels (Tensor)}
16 |
17 | \item{y_pred}{Predictions (Tensor of the same shape as \code{y_true})}
18 |
19 | \item{dimensionality}{image dimension.}
20 |
21 | \item{smoothingFactor}{parameter for smoothing the metric.}
22 | }
23 | \value{
24 | Dice value (negative)
25 | }
26 | \description{
27 | Note: Assumption is that y_true is a one-hot representation
28 | of the segmentation batch. The background (label 0) should
29 | be included but is not used in the calculation.
30 | }
31 | \examples{
32 |
33 | library( ANTsR )
34 | library( ANTsRNet )
35 | library( keras )
36 |
37 | model <- createUnetModel2D( c( 64, 64, 1 ) )
38 |
39 | dice_loss <- multilabel_dice_coefficient( smoothingFactor = 0.1 )
40 |
41 | model \%>\% compile( loss = dice_loss,
42 | optimizer = optimizer_adam( lr = 0.0001 ) )
43 |
44 | ########################################
45 | #
46 | # Run in isolation
47 | #
48 |
49 | library( ANTsR )
50 |
51 | r16 <- antsImageRead( getANTsRData( "r16" ) )
52 | r16seg <- kmeansSegmentation( r16, 3 )$segmentation
53 | r16array <- array( data = as.array( r16seg ), dim = c( 1, dim( r16seg ) ) )
54 | r16tensor <- tensorflow::tf$convert_to_tensor( encodeUnet( r16array, c( 0, 1, 2, 3 ) ) )
55 |
56 | r64 <- antsImageRead( getANTsRData( "r64" ) )
57 | r64seg <- kmeansSegmentation( r64, 3 )$segmentation
58 | r64array <- array( data = as.array( r64seg ), dim = c( 1, dim( r64seg ) ) )
59 | r64tensor <- tensorflow::tf$convert_to_tensor( encodeUnet( r64array, c( 0, 1, 2, 3 ) ) )
60 |
61 | dice_loss <- multilabel_dice_coefficient( r16tensor, r64tensor, dimensionality = 2L )
62 | loss_value <- dice_loss( r16tensor, r64tensor )$numpy()
63 |
64 | # Compare with
65 | # overlap_value <- labelOverlapMeasures( r16seg, r64seg )$MeanOverlap[1]
66 |
67 | rm(model); gc()
68 | }
69 | \author{
70 | Tustison NJ
71 | }
72 |
--------------------------------------------------------------------------------
/man/createFullyConvolutionalVggModel3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createVggModel.R
3 | \name{createFullyConvolutionalVggModel3D}
4 | \alias{createFullyConvolutionalVggModel3D}
5 | \title{3-D implementation of the VGG deep learning architecture without classification
6 | layers.}
7 | \usage{
8 | createFullyConvolutionalVggModel3D(
9 | inputImageSize,
10 | layers = c(1, 2, 3, 4, 4),
11 | lowestResolution = 64,
12 | convolutionKernelSize = c(3, 3, 3),
13 | poolSize = c(2, 2, 2),
14 | strides = c(2, 2, 2),
15 | dropoutRate = 0,
16 | style = 19,
17 | activation = "relu"
18 | )
19 | }
20 | \arguments{
21 | \item{inputImageSize}{Used for specifying the input tensor shape. The
22 | shape (or dimension) of that tensor is the image dimensions followed by
23 | the number of channels (e.g., red, green, and blue). The batch size
24 | (i.e., number of training images) is not specified a priori.}
25 |
26 | \item{layers}{a vector determining the number of filters defined at
27 | each layer.}
28 |
29 | \item{lowestResolution}{number of filters at the beginning.}
30 |
31 | \item{convolutionKernelSize}{3-d vector defining the kernel size
32 | during the encoding path}
33 |
34 | \item{poolSize}{3-d vector defining the region for each pooling layer.}
35 |
36 | \item{strides}{3-d vector describing the stride length in each direction.}
37 |
38 | \item{dropoutRate}{float between 0 and 1 to use between dense layers.}
39 |
40 | \item{style}{\verb{'16'} or \verb{'19'} for VGG16 or VGG19, respectively.}
41 |
42 | \item{activation}{activation function for convolutional layer.}
43 | }
44 | \value{
45 | a VGG keras model
46 | }
47 | \description{
48 | Creates a keras model of the Vgg deep learning architecture for image
49 | recognition based on the paper
50 | }
51 | \details{
52 | K. Simonyan and A. Zisserman, Very Deep Convolutional Networks for
53 | Large-Scale Image Recognition
54 |
55 | available here:\preformatted{ \url{https://arxiv.org/abs/1409.1556}
56 | }
57 |
58 | This particular implementation was influenced by the following python
59 | implementation:\preformatted{ \url{https://gist.github.com/baraldilorenzo/8d096f48a1be4a2d660d}
60 | }
61 | }
62 | \author{
63 | Tustison NJ
64 | }
65 |
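
A corresponding hedged sketch for the 3-D variant (the input size and parameter values are illustrative assumptions, not from the package documentation):

    library( ANTsRNet )

    model <- createFullyConvolutionalVggModel3D( c( 64, 64, 64, 1 ),
      lowestResolution = 8, style = 16 )
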
--------------------------------------------------------------------------------
/man/decodeSsd2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ssdUtilities.R
3 | \name{decodeSsd2D}
4 | \alias{decodeSsd2D}
5 | \title{Decoding function for 2-D Y_train}
6 | \usage{
7 | decodeSsd2D(
8 | yPredicted,
9 | imageSize,
10 | confidenceThreshold = 0.5,
11 | overlapThreshold = 0.45
12 | )
13 | }
14 | \arguments{
15 | \item{yPredicted}{The predicted output produced by the SSD model expected to
16 | be an array of shape (\code{batchSize}, \code{numberOfBoxes},
17 | \code{numberOfClasses} + 4 + 4 + 4)
18 | where the additional 4's along the third dimension correspond to the box
19 | coordinates (centerx, centery, width, height), dummy variables, and the variances.
20 | \code{numberOfClasses} includes the background class.}
21 |
22 | \item{imageSize}{2-D vector specifying the spatial domain of the input
23 | images.}
24 |
25 | \item{confidenceThreshold}{Float between 0 and 1. The minimum
26 | classification value required for a given box to be considered a "positive
27 | prediction." A lower value will result in better recall while a higher
28 | value yields higher precision results. Default = 0.5.}
29 |
30 | \item{overlapThreshold}{\code{NULL} or a float between 0 and 1. If \code{NULL} then
31 | no non-maximum suppression will be performed. Otherwise, a greedy non-
32 | maximal suppression is performed following confidence thresholding. In
33 | other words all boxes with Jaccard similarities > \code{overlapThreshold} will
34 | be removed from the set of predictions. Default = 0.45.}
35 | }
36 | \value{
37 | a list of length \code{batchSize} where each element comprises a 2-D
38 | array where each row describes a single box using the following six elements
39 | (classId, confidenceValue, xmin, xmax, ymin, ymax)
40 | }
41 | \description{
42 | Function for translating the predictions from the SSD model output to
43 | boxes, (centerx, centery, width, height), for subsequent usage.
44 | }
45 | \details{
46 | This particular implementation was heavily influenced by the following
47 | python and R implementations:\preformatted{ \url{https://github.com/pierluigiferrari/ssd_keras}
48 | \url{https://github.com/rykov8/ssd_keras}
49 | \url{https://github.com/gsimchoni/ssdkeras}
50 | }
51 | }
52 | \author{
53 | Tustison NJ
54 | }
55 |
--------------------------------------------------------------------------------
/man/ewDavid.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/whiteMatterHyperintensitySegmentation.R
3 | \name{ewDavid}
4 | \alias{ewDavid}
5 | \title{White matter hyperintensity probabilistic segmentation}
6 | \usage{
7 | ewDavid(
8 | flair,
9 | t1,
10 | doPreprocessing = TRUE,
11 | doSlicewise = TRUE,
12 | whichAxes = "max",
13 | antsxnetCacheDirectory = NULL,
14 | verbose = FALSE
15 | )
16 | }
17 | \arguments{
18 | \item{flair}{input 3-D FLAIR brain image.}
19 |
20 | \item{t1}{input 3-D T1-weighted brain image (assumed to be aligned to
21 | the flair).}
22 |
23 | \item{doPreprocessing}{perform preprocessing. See description above.}
24 |
25 | \item{doSlicewise}{apply 2-D model along direction of maximal slice thickness.}
26 |
27 | \item{whichAxes}{apply 2-D model to 1 or more axes. In addition to a scalar
28 | or vector, e.g., \code{whichAxes = c(1, 3)}, one can use "max" for the
29 | axis with maximum anisotropy (default) or "all" for all axes.}
30 |
31 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
32 | template and model weights. Since these can be reused, if
33 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
34 | inst/extdata/ subfolder of the ANTsRNet package.}
35 |
36 | \item{verbose}{print progress.}
37 |
38 | \item{debug}{return feature images in the last layer of the u-net model.}
39 | }
40 | \value{
41 | list consisting of the segmentation image and probability images for
42 | each label.
43 | }
44 | \description{
45 | Perform white matter hyperintensity probabilistic segmentation
46 | using deep learning
47 | }
48 | \details{
49 | Preprocessing on the training data consisted of:
50 | \itemize{
51 | \item n4 bias correction,
52 | \item brain extraction, and
53 | \item affine registration to MNI.
54 | }
55 | The input T1 should undergo the same steps. If the input T1 is the raw
56 | T1, these steps can be performed by the internal preprocessing, i.e., set
57 | \code{doPreprocessing = TRUE}.
58 | }
59 | \examples{
60 | \dontrun{
61 | library( ANTsRNet )
62 | library( keras )
63 |
64 | t1 <- antsImageRead( "t1.nii.gz" )
65 | flair <- antsImageRead( "flair.nii.gz" )
66 | results <- ewDavid( flair, t1 )
67 | }
68 | }
69 | \author{
70 | Tustison NJ
71 | }
72 |
--------------------------------------------------------------------------------
/man/decodeSsd3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/ssdUtilities.R
3 | \name{decodeSsd3D}
4 | \alias{decodeSsd3D}
5 | \title{Decoding function for 3-D Y_train}
6 | \usage{
7 | decodeSsd3D(
8 | yPredicted,
9 | imageSize,
10 | confidenceThreshold = 0.5,
11 | overlapThreshold = 0.45
12 | )
13 | }
14 | \arguments{
15 | \item{yPredicted}{The predicted output produced by the SSD model expected to
16 | be an array of shape (\code{batchSize}, \code{numberOfBoxes},
17 | \code{numberOfClasses} + 6 + 6 + 6)
18 | where the additional 6's along the third dimension correspond to the box
19 | coordinates (centerx, centery, centerz, width, height, depth), dummy variables, and the variances.
20 | \code{numberOfClasses} includes the background class.}
21 |
22 | \item{imageSize}{3-D vector specifying the spatial domain of the input
23 | images.}
24 |
25 | \item{confidenceThreshold}{Float between 0 and 1. The minimum
26 | classification value required for a given box to be considered a "positive
27 | prediction." A lower value will result in better recall while a higher
28 | value yields higher precision results. Default = 0.5.}
29 |
30 | \item{overlapThreshold}{\code{NULL} or a float between 0 and 1. If
31 | \code{NULL} then no non-maximum suppression will be performed. Otherwise, a
32 | greedy non-maximal suppression is performed following confidence thresholding.
33 | In other words all boxes with Jaccard similarities > \code{overlapThreshold}
34 | will be removed from the set of predictions. Default = 0.45.}
35 | }
36 | \value{
37 | a list of length \code{batchSize} where each element comprises a 2-D
38 | array where each row describes a single box using the following eight elements
39 | (classId, confidenceValue, xmin, xmax, ymin, ymax, zmin, zmax).
40 | }
41 | \description{
42 | Function for translating the predictions from the SSD model output to
43 | boxes, (centerx, centery, centerz, width, height, depth), for subsequent usage.
44 | }
45 | \details{
46 | This particular implementation was heavily influenced by the following
47 | python and R implementations:\preformatted{ \url{https://github.com/pierluigiferrari/ssd_keras}
48 | \url{https://github.com/rykov8/ssd_keras}
49 | \url{https://github.com/gsimchoni/ssdkeras}
50 | }
51 | }
52 | \author{
53 | Tustison NJ
54 | }
55 |
--------------------------------------------------------------------------------
/man/extractImagePatches.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/extractImagePatches.R
3 | \name{extractImagePatches}
4 | \alias{extractImagePatches}
5 | \title{Extract 2-D or 3-D image patches.}
6 | \usage{
7 | extractImagePatches(
8 | image,
9 | patchSize,
10 | maxNumberOfPatches = "all",
11 | strideLength = 1,
12 | maskImage = NULL,
13 | randomSeed,
14 | returnAsArray = FALSE,
15 | randomize = TRUE
16 | )
17 | }
18 | \arguments{
19 | \item{image}{Input ANTs image with one or more components.}
20 |
21 | \item{patchSize}{Width, height, and depth (if 3-D) of patches.}
22 |
23 | \item{maxNumberOfPatches}{Maximum number of patches returned. If
24 | "all" is specified, then all patches in sequence (defined by the
25 | \code{strideLength}) are extracted.}
26 |
27 | \item{strideLength}{Defines the sequential patch overlap for
28 | \code{maxNumberOfPatches = "all"}. Can be a image-dimensional vector or a scalar.}
29 |
30 | \item{maskImage}{optional image specifying the sampling region for
31 | the patches when \code{maxNumberOfPatches} does not equal "all".
32 | The way we constrain patch selection using a mask is by forcing
33 | each returned patch to have a masked voxel at its center.}
34 |
35 | \item{randomSeed}{integer seed that allows reproducible patch extraction
36 | across runs.}
37 |
38 | \item{returnAsArray}{specifies the return type of the function. If
39 | \code{FALSE} (default) the return type is a list where each element is
40 | a single patch. Otherwise the return type is an array of size
41 | \code{dim( numberOfPatches, patchSize )}.}
42 |
43 | \item{randomize}{boolean controlling whether we randomize indices when masking.}
44 | }
45 | \value{
46 | a list (or array) of image patches.
47 | }
48 | \description{
49 | Extract 2-D or 3-D image patches.
50 | }
51 | \examples{
52 |
53 | library( ANTsR )
54 |
55 | image <- antsImageRead( getANTsRData( "r16" ) )
56 | maskImage <- getMask( image, 1, 1000 )
57 | patchSet1 <- extractImagePatches( image, c( 32, 32 ), 10, c( 32, 32 ), randomSeed = 0 )
58 | patchSet2 <- extractImagePatches( image, c( 32, 32 ), 10, c( 32, 32 ), randomSeed = 1 )
59 | patchSet3 <- extractImagePatches( image, c( 32, 32 ), 10, c( 32, 32 ), maskImage, randomSeed = 0 )
60 |
61 | }
62 | \author{
63 | Tustison NJ
64 | }
65 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | # R for travis: see documentation at https://docs.travis-ci.com/user/languages/r
2 | language: r
3 |
4 | dist: xenial
5 | services:
6 | - xvfb
7 |
8 | sudo: false
9 | warnings_are_errors: false # setting this false b/c of rcpp attribute error
10 |
11 | env:
12 | global:
13 | - KERAS_BACKEND="tensorflow"
14 | - KERAS_IMPLEMENTATION="tensorflow"
15 |
16 | addons:
17 | apt:
18 | packages:
19 | - python3
20 | - libstdc++6
21 |
22 | cache:
23 | packages: true
24 | directories:
25 | - $HOME/.keras
26 | - $HOME/.cache/pip
27 |
28 | before_install:
29 | - tlmgr install index
30 | - export PACKAGE_NAME=ANTsRNet
31 | - wget -O travis_helpers.sh http://bit.ly/travis_helpers
32 | - cat travis_helpers.sh; source travis_helpers.sh
33 | - rm travis_helpers.sh
34 | - v8_install ;
35 | - Rscript -e 'install.packages("rsvd")'
36 | - Rscript -e 'install.packages("magic")'
37 | - Rscript -e 'install.packages(c("psych","colormap"))'
38 | - Rscript -e 'install.packages("RcppEigen")'
39 | - gh_binary_install stnava/ITKR ANTsX/ANTsRCore ANTsX/ANTsR
40 |
41 |
42 | before_script:
43 | # - pyenv global 3.6
44 | # - pip install --upgrade pip==9.0.3
45 | # - pip2.7 install -q --upgrade --ignore-installed --user travis virtualenv
46 | # - pip2.7 install -q --user setuptools wheel
47 | # - pip2.7 install -q --upgrade --ignore-installed --user travis keras h5py pyyaml requests Pillow scipy theano tensorflow
48 | - export TRAVIS_PYTHON_VERSION=3.6
49 | - setup_python ${TRAVIS_PYTHON_VERSION} false;
50 | - export CI_PYTHON_VERSION=${TRAVIS_PYTHON_VERSION}
51 | - export PATH=/opt/python/${TRAVIS_PYTHON_VERSION}/bin:$PATH
52 | - ls ~/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin || exit 0 ;
53 | - source ~/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/activate || exit 0;
54 | - python --version
55 | - python3 --version
56 | - pip --version
57 | - install_python_requirements -q ;
58 | # - R -e 'tensorflow::install_tensorflow(version = "2.0.0")'
59 |
60 | # before_cache: Rscript -e 'remotes::install_cran("pkgdown")'
61 | # deploy:
62 | # provider: script
63 | # script: Rscript -e 'pkgdown::deploy_site_github()'
64 | # skip_cleanup: true
65 |
66 | before_deploy:
67 | - Rscript -e 'covr::codecov(type = "all", commentDonttest = FALSE)'
68 |
--------------------------------------------------------------------------------
/man/brainExtraction.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/brainExtraction.R
3 | \name{brainExtraction}
4 | \alias{brainExtraction}
5 | \title{Brain extraction}
6 | \usage{
7 | brainExtraction(
8 | image,
9 | modality = c("t1", "t1v0", "t1nobrainer", "t1combined", "t2", "flair", "bold", "fa",
10 | "t1t2infant", "t1infant", "t2infant"),
11 | antsxnetCacheDirectory = NULL,
12 | verbose = FALSE
13 | )
14 | }
15 | \arguments{
16 | \item{image}{input 3-D brain image (or list of images for multi-modal scenarios).}
17 |
18 | \item{modality}{image type. Options include:
19 | \itemize{
20 | \item{"t1": }{T1-weighted MRI---ANTs-trained. Update from "t1v0"}
21 | \item{"t1v0": }{T1-weighted MRI---ANTs-trained.}
22 | \item{"t1nobrainer": }{T1-weighted MRI---FreeSurfer-trained: h/t Satra Ghosh and Jakub Kaczmarzyk.}
23 | \item{"t1combined": }{Brian's combination of "t1" and "t1nobrainer". One can also specify
24 | "t1combined\link{X}" where X is the morphological radius. X = 12 by default.}
25 | \item{"flair": }{FLAIR MRI.}
26 | \item{"t2": }{T2-w MRI.}
27 | \item{"bold": }{3-D BOLD MRI.}
28 | \item{"fa": }{Fractional anisotropy.}
29 | \item{"t1t2infant": }{Combined T1-w/T2-w infant MRI h/t Martin Styner.}
30 | \item{"t1infant": }{T1-w infant MRI h/t Martin Styner.}
31 | \item{"t2infant": }{T2-w infant MRI h/t Martin Styner.}
32 | }}
33 |
34 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
35 | template and model weights. Since these can be reused, if
36 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
37 | subdirectory ~/.keras/ANTsXNet/.}
38 |
39 | \item{verbose}{print progress.}
40 | }
41 | \value{
42 | brain probability mask (ANTsR image)
43 | }
44 | \description{
45 | Perform brain extraction of T1-weighted, FA, BOLD, and related modalities
46 | using a U-net architecture and ANTs-based training data. "NoBrainer" is
47 | also possible, where brain extraction uses a U-net and FreeSurfer
48 | training data ported from the repository referenced in Details.
49 | }
50 | \details{
51 | \url{https://github.com/neuronets/nobrainer-models}
52 | }
53 | \examples{
54 | \dontrun{
55 | library( ANTsRNet )
56 | library( keras )
57 |
58 | image <- antsImageRead( "t1w_image.nii.gz" )
59 | probabilityMask <- brainExtraction( image, modality = "t1" )
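60 | 
61 | # Illustrative only: "t1combined[12]" sets the morphological radius described
62 | # in the modality argument, and "~/antsxnet_cache" is a hypothetical directory
63 | # passed as antsxnetCacheDirectory to redirect the downloaded weights.
64 | probabilityMaskCombined <- brainExtraction( image, modality = "t1combined[12]",
65 |   antsxnetCacheDirectory = "~/antsxnet_cache", verbose = TRUE )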
60 | }
61 | }
62 | \author{
63 | Tustison NJ
64 | }
65 |
--------------------------------------------------------------------------------
/man/createEnhancedDeepSuperResolutionModel2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/createUpDownResNetSuperResolutionModel.R
3 | \name{createEnhancedDeepSuperResolutionModel2D}
4 | \alias{createEnhancedDeepSuperResolutionModel2D}
5 | \title{2-D implementation of the EDSR super resolution architecture.}
6 | \usage{
7 | createEnhancedDeepSuperResolutionModel2D(
8 | inputImageSize,
9 | convolutionKernelSize = c(3, 3),
10 | numberOfFilters = 256,
11 | numberOfResidualBlocks = 32,
12 | scale = 2,
13 | numberOfLossFunctions = 1,
14 | numberOfOutputChannels = 1,
15 | doBatchNormalization = FALSE,
16 | interpolation = c("bilinear", "nearest", "conv")
17 | )
18 | }
19 | \arguments{
20 | \item{inputImageSize}{Used for specifying the input tensor shape. The
21 | shape (or dimension) of that tensor is the image dimensions followed by
22 | the number of channels (e.g., red, green, and blue). The batch size
23 | (i.e., number of training images) is not specified a priori.}
24 |
25 | \item{convolutionKernelSize}{a vector specifying the kernel size for
26 | convolution.}
27 |
28 | \item{numberOfFilters}{the number of filters for each encoding layer.}
29 |
30 | \item{numberOfResidualBlocks}{the number of residual blocks.}
31 |
32 | \item{scale}{the upsampling factor: 2, 4, or 8}
33 | 
34 | \item{numberOfLossFunctions}{the number of data targets, e.g., 2 for two targets}
35 | 
36 | \item{numberOfOutputChannels}{the number of output channels}
37 | 
38 | \item{doBatchNormalization}{boolean indicating whether to include batch normalization in the residual blocks}
39 | 
40 | \item{interpolation}{method used in the upscaling block: "nearest", "bilinear", or "conv"}
41 | }
42 | \value{
43 | a keras model for EDSR image super resolution
44 | }
45 | \description{
46 | Creates a keras model of the enhanced deep super-resolution (EDSR)
47 | framework for image super resolution.
48 | }
49 | \examples{
50 | model = createEnhancedDeepSuperResolutionModel2D(c( 28, 28, 1 ))
51 | rm(model); gc()
52 | model = createEnhancedDeepSuperResolutionModel2D(c( 28, 28, 1 ),
53 | doBatchNormalization = TRUE,
54 | interpolation = "conv", scale = 4)
55 | rm(model); gc()
56 | model = createEnhancedDeepSuperResolutionModel2D(c( 28, 28, 1 ),
57 | doBatchNormalization = TRUE,
58 | numberOfLossFunctions = 2,
59 | interpolation = "conv", scale = 8)
60 | rm(model); gc()
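61 | # Illustrative sketch (not run): compiling the model for training with a
62 | # mean-squared-error loss; the "adam" optimizer is an arbitrary choice, not
63 | # something prescribed by this function.
64 | \dontrun{
65 | library( keras )
66 | model = createEnhancedDeepSuperResolutionModel2D(c( 28, 28, 1 ))
67 | compile( model, optimizer = "adam", loss = "mse" )
68 | rm(model); gc()
69 | }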
61 | }
62 | \author{
63 | Tustison NJ, Avants BB
64 | }
65 |
--------------------------------------------------------------------------------
/docs/docsearch.js:
--------------------------------------------------------------------------------
1 | $(function() {
2 |
3 | // register a handler to move the focus to the search bar
4 | // upon pressing shift + "/" (i.e. "?")
5 | $(document).on('keydown', function(e) {
6 | if (e.shiftKey && e.keyCode == 191) {
7 | e.preventDefault();
8 | $("#search-input").focus();
9 | }
10 | });
11 |
12 | $(document).ready(function() {
13 | // do keyword highlighting
14 | /* modified from https://jsfiddle.net/julmot/bL6bb5oo/ */
15 | var mark = function() {
16 |
17 | var referrer = document.URL ;
18 | var paramKey = "q" ;
19 |
20 | if (referrer.indexOf("?") !== -1) {
21 | var qs = referrer.substr(referrer.indexOf('?') + 1);
22 | var qs_noanchor = qs.split('#')[0];
23 | var qsa = qs_noanchor.split('&');
24 | var keyword = "";
25 |
26 | for (var i = 0; i < qsa.length; i++) {
27 | var currentParam = qsa[i].split('=');
28 |
29 | if (currentParam.length !== 2) {
30 | continue;
31 | }
32 |
33 | if (currentParam[0] == paramKey) {
34 | keyword = decodeURIComponent(currentParam[1].replace(/\+/g, "%20"));
35 | }
36 | }
37 |
38 | if (keyword !== "") {
39 | $(".contents").unmark({
40 | done: function() {
41 | $(".contents").mark(keyword);
42 | }
43 | });
44 | }
45 | }
46 | };
47 |
48 | mark();
49 | });
50 | });
51 |
52 | /* Search term highlighting ------------------------------*/
53 |
54 | function matchedWords(hit) {
55 | var words = [];
56 |
57 | var hierarchy = hit._highlightResult.hierarchy;
58 | // loop to fetch from lvl0, lvl1, etc.
59 | for (var idx in hierarchy) {
60 | words = words.concat(hierarchy[idx].matchedWords);
61 | }
62 |
63 | var content = hit._highlightResult.content;
64 | if (content) {
65 | words = words.concat(content.matchedWords);
66 | }
67 |
68 | // return unique words
69 | var words_uniq = [...new Set(words)];
70 | return words_uniq;
71 | }
72 |
73 | function updateHitURL(hit) {
74 |
75 | var words = matchedWords(hit);
76 | var url = "";
77 |
78 | if (hit.anchor) {
79 | url = hit.url_without_anchor + '?q=' + escape(words.join(" ")) + '#' + hit.anchor;
80 | } else {
81 | url = hit.url + '?q=' + escape(words.join(" "));
82 | }
83 |
84 | return url;
85 | }
86 |
--------------------------------------------------------------------------------
/man/applySuperResolutionModelPatch.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/applyDBPN4x.R
3 | \name{applySuperResolutionModelPatch}
4 | \alias{applySuperResolutionModelPatch}
5 | \title{applySuperResolutionModelPatch}
6 | \usage{
7 | applySuperResolutionModelPatch(
8 | image,
9 | model,
10 | targetRange,
11 | lowResolutionPatchSize = 128,
12 | strideLength = 16,
13 | batch_size = 32,
14 | mask,
15 | verbose = FALSE
16 | )
17 | }
18 | \arguments{
19 | \item{image}{input image}
20 |
21 | \item{model}{model object or filename; see \code{getPretrainedNetwork}}
22 | 
23 | \item{targetRange}{a vector defining the min and max of the input image,
24 | e.g., c( -127.5, 127.5 ). Output images will be scaled back to the original intensity range.
25 | This range should match the mapping used during training of the network.}
26 |
27 | \item{lowResolutionPatchSize}{size of patches to upsample}
28 |
29 | \item{strideLength}{voxel/pixel steps between patches}
30 |
31 | \item{batch_size}{batch size for the prediction call}
32 | 
33 | \item{mask}{optional mask; intensity rescaling parameters are estimated only within the mask}
34 |
35 | \item{verbose}{If \code{TRUE}, show status messages}
36 | }
37 | \value{
38 | image upscaled to resolution provided by network
39 | }
40 | \description{
41 | Apply pretrained super-resolution network by stitching together patches.
42 | }
43 | \details{
44 | Apply a patch-wise trained network to perform super-resolution. Can be applied
45 | to variable sized inputs. Warning: This function may be better used on CPU
46 | unless the GPU can accommodate the full patch size. Warning 2: The global
47 | intensity range (min to max) of the output will match the input where the
48 | range is taken over all channels.
49 | }
50 | \examples{
51 | \dontrun{
52 | library(ANTsRCore)
53 | library( keras )
54 | orig_img = antsImageRead( getANTsRData( "r16" ) )
55 | # downsample the 256x256 input to roughly 48x48 to create a low-resolution example
56 | model = createDeepBackProjectionNetworkModel2D( list(NULL,NULL, 1) )
57 | img = resampleImage(orig_img, resampleParams = rep(256/48, 2))
58 | simg <- applySuperResolutionModelPatch( img,
59 | model = model, lowResolutionPatchSize = 8, strideLength = 2)
60 | simgm <- applySuperResolutionModelPatch( img, mask = getMask( img ),
61 | model = model, lowResolutionPatchSize = 8, strideLength = 2)
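62 | # Illustrative: targetRange should match the intensity mapping used when the
63 | # network was trained; c( -127.5, 127.5 ) is just the example range from the
64 | # argument description, not a value tied to this toy model.
65 | simgt <- applySuperResolutionModelPatch( img, model = model,
66 |   targetRange = c( -127.5, 127.5 ),
67 |   lowResolutionPatchSize = 8, strideLength = 2 )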
62 | plot( orig_img )
63 | plot( img )
64 | plot( simg )
65 | plot( simgm )
66 | }
67 | }
68 | \author{
69 | Avants BB
70 | }
71 |
--------------------------------------------------------------------------------
/man/extractImagePatchCoordinates.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/extractImagePatches.R
3 | \name{extractImagePatchCoordinates}
4 | \alias{extractImagePatchCoordinates}
5 | \title{Extract 2-D or 3-D image patch coordinates.}
6 | \usage{
7 | extractImagePatchCoordinates(
8 | image,
9 | patchSize,
10 | maxNumberOfPatches = "all",
11 | strideLength = 1,
12 | maskImage = NULL,
13 | physicalCoordinates = TRUE,
14 | cornerCoordinates = TRUE,
15 | randomSeed,
16 | randomize = TRUE
17 | )
18 | }
19 | \arguments{
20 | \item{image}{Input ANTs image with one or more components.}
21 |
22 | \item{patchSize}{Width, height, and depth (if 3-D) of patches.}
23 |
24 | \item{maxNumberOfPatches}{Maximum number of patches returned. If
25 | "all" is specified, then all patches in sequence (defined by the
26 | \code{strideLength}) are extracted.}
27 | 
28 | \item{strideLength}{Defines the sequential patch overlap for
29 | \code{maxNumberOfPatches = "all"}. Can be a scalar or a vector of length equal to the image dimension.}
30 | 
31 | \item{maskImage}{optional image specifying the sampling region for
32 | the patches when \code{maxNumberOfPatches} does not equal "all".
33 | Patch selection is constrained by the mask by forcing
34 | each returned patch to have a masked voxel at its center.}
35 |
36 | \item{physicalCoordinates}{boolean to determine whether indices or spatial
37 | coordinates are returned.}
38 |
39 | \item{cornerCoordinates}{boolean to determine whether indices or spatial
40 | coordinates of the corner or the center are returned.}
41 |
42 | \item{randomSeed}{integer seed that allows reproducible patch extraction
43 | across runs.}
44 |
45 | \item{randomize}{boolean controlling whether we randomize indices when masking.}
46 | }
47 | \value{
48 | a matrix of image patch spatial or index coordinates.
49 | }
50 | \description{
51 | Extract 2-D or 3-D image patch coordinates.
52 | }
53 | \examples{
54 |
55 | library( ANTsR )
56 |
57 | image <- antsImageRead( getANTsRData( "r16" ) )
58 | maskImage <- getMask( image, 1, 1000 )
59 | patchCoordsP <- extractImagePatchCoordinates( image, c( 32, 32 ), 10, c( 32, 32 ),
60 | maskImage, randomSeed = 0, physicalCoordinates = TRUE )
61 | patchCoordsI <- extractImagePatchCoordinates( image, c( 32, 32 ), 10, c( 32, 32 ),
62 | maskImage, randomSeed = 0, physicalCoordinates = FALSE )
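63 | 
64 | # Illustrative: extract coordinates for every patch in sequence using a
65 | # scalar stride; the stride of 16 is an arbitrary choice for this example.
66 | patchCoordsAll <- extractImagePatchCoordinates( image, c( 32, 32 ),
67 |   maxNumberOfPatches = "all", strideLength = 16,
68 |   physicalCoordinates = FALSE, randomSeed = 0 )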
63 |
64 | }
65 | \author{
66 | Tustison NJ, Avants B
67 | }
68 |
--------------------------------------------------------------------------------
/man/createSimpleClassificationWithSpatialTransformerNetworkModel2D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in
3 | % R/createSimpleClassificationWithSpatialTransformerNetworkModel.R
4 | \name{createSimpleClassificationWithSpatialTransformerNetworkModel2D}
5 | \alias{createSimpleClassificationWithSpatialTransformerNetworkModel2D}
6 | \title{2-D implementation of the spatial transformer network.}
7 | \usage{
8 | createSimpleClassificationWithSpatialTransformerNetworkModel2D(
9 | inputImageSize,
10 | resampledSize = c(30, 30),
11 | numberOfClassificationLabels = 10
12 | )
13 | }
14 | \arguments{
15 | \item{inputImageSize}{Used for specifying the input tensor shape. The
16 | shape (or dimension) of that tensor is the image dimensions followed by
17 | the number of channels (e.g., red, green, and blue). The batch size
18 | (i.e., number of training images) is not specified a priori.}
19 |
20 | \item{resampledSize}{resampled size of the transformed input images.}
21 |
22 | \item{numberOfClassificationLabels}{Number of classes.}
23 | }
24 | \value{
25 | a keras model
26 | }
27 | \description{
28 | Creates a keras model of the spatial transformer network:
29 | }
30 | \details{
31 | \url{https://arxiv.org/abs/1506.02025}
32 | 
33 | based on the following python Keras model:
34 | 
35 | \url{https://github.com/oarriaga/STN.keras/blob/master/src/models/STN.py}
36 | }
37 | \examples{
38 |
39 | library( ANTsRNet )
40 | library( keras )
41 |
42 | mnistData <- dataset_mnist()
43 | numberOfLabels <- 10
44 |
45 | # Extract a small subset for something that can run quickly
46 |
47 | X_trainSmall <- mnistData$train$x[1:100,,]
48 | X_trainSmall <- array( data = X_trainSmall, dim = c( dim( X_trainSmall ), 1 ) )
49 | Y_trainSmall <- to_categorical( mnistData$train$y[1:100], numberOfLabels )
50 |
51 | X_testSmall <- mnistData$test$x[1:10,,]
52 | X_testSmall <- array( data = X_testSmall, dim = c( dim( X_testSmall ), 1 ) )
53 | Y_testSmall <- to_categorical( mnistData$test$y[1:10], numberOfLabels )
54 |
55 | # We add a dimension of 1 to specify the channel size
56 |
57 | inputImageSize <- c( dim( X_trainSmall )[2:3], 1 )
58 |
59 | \dontrun{
60 | model <- createSimpleClassificationWithSpatialTransformerNetworkModel2D(
61 | inputImageSize = inputImageSize,
62 | resampledSize = c( 30, 30 ), numberOfClassificationLabels = numberOfLabels )
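63 | 
64 | # Illustrative training sketch: compile and fit as a standard keras
65 | # classifier; the optimizer, epoch count, and batch size are arbitrary
66 | # choices for this small subset, not package defaults.
67 | compile( model, loss = "categorical_crossentropy", optimizer = "adam",
68 |   metrics = "accuracy" )
69 | fit( model, X_trainSmall, Y_trainSmall, epochs = 1, batch_size = 10,
70 |   validation_data = list( X_testSmall, Y_testSmall ) )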
63 | }
64 | }
65 | \author{
66 | Tustison NJ
67 | }
68 |
--------------------------------------------------------------------------------
/man/createSimpleClassificationWithSpatialTransformerNetworkModel3D.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in
3 | % R/createSimpleClassificationWithSpatialTransformerNetworkModel.R
4 | \name{createSimpleClassificationWithSpatialTransformerNetworkModel3D}
5 | \alias{createSimpleClassificationWithSpatialTransformerNetworkModel3D}
6 | \title{3-D implementation of the spatial transformer network.}
7 | \usage{
8 | createSimpleClassificationWithSpatialTransformerNetworkModel3D(
9 | inputImageSize,
10 | resampledSize = c(30, 30, 30),
11 | numberOfClassificationLabels = 10
12 | )
13 | }
14 | \arguments{
15 | \item{inputImageSize}{Used for specifying the input tensor shape. The
16 | shape (or dimension) of that tensor is the image dimensions followed by
17 | the number of channels (e.g., red, green, and blue). The batch size
18 | (i.e., number of training images) is not specified a priori.}
19 |
20 | \item{resampledSize}{resampled size of the transformed input images.}
21 |
22 | \item{numberOfClassificationLabels}{Number of classes.}
23 | }
24 | \value{
25 | a keras model
26 | }
27 | \description{
28 | Creates a keras model of the spatial transformer network:
29 | }
30 | \details{
31 | \url{https://arxiv.org/abs/1506.02025}
32 | 
33 | based on the following python Keras model:
34 | 
35 | \url{https://github.com/oarriaga/STN.keras/blob/master/src/models/STN.py}
36 | }
37 | \examples{
38 |
39 | \dontrun{
40 |
41 | library( ANTsRNet )
42 | library( keras )
43 |
44 | mnistData <- dataset_mnist()
45 | numberOfLabels <- 10
46 |
47 | # Extract a small subset for something that can run quickly
48 |
49 | X_trainSmall <- mnistData$train$x[1:100,,]
50 | X_trainSmall <- array( data = X_trainSmall, dim = c( dim( X_trainSmall ), 1 ) )
51 | Y_trainSmall <- to_categorical( mnistData$train$y[1:100], numberOfLabels )
52 |
53 | X_testSmall <- mnistData$test$x[1:10,,]
54 | X_testSmall <- array( data = X_testSmall, dim = c( dim( X_testSmall ), 1 ) )
55 | Y_testSmall <- to_categorical( mnistData$test$y[1:10], numberOfLabels )
56 |
57 | # The 3-D model expects a 3-D input plus a channel dimension; e.g., the 2-D
58 | # MNIST arrays above would need to be stacked or replicated into volumes.
59 | inputImageSize <- c( 28, 28, 28, 1 )
60 | 
61 | model <- createSimpleClassificationWithSpatialTransformerNetworkModel3D(
62 |   inputImageSize = inputImageSize,
63 |   resampledSize = c( 30, 30, 30 ), numberOfClassificationLabels = numberOfLabels )
64 |
65 | }
66 | }
67 | \author{
68 | Tustison NJ
69 | }
70 |
--------------------------------------------------------------------------------
/man/getPretrainedNetwork.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/getPretrainedNetwork.R
3 | \name{getPretrainedNetwork}
4 | \alias{getPretrainedNetwork}
5 | \title{getPretrainedNetwork}
6 | \usage{
7 | getPretrainedNetwork(
8 | fileId = c("show", "brainAgeGender", "brainAgeFmrib", "brainAgeDeepBrainNet",
9 | "brainExtraction", "brainExtractionT1", "brainExtractionT2", "brainExtractionFLAIR",
10 | "brainExtractionBOLD", "brainExtractionFA", "brainExtractionNoBrainer",
11 | "brainExtractionInfantT1T2", "brainExtractionInfantT1", "brainExtractionInfantT2",
12 | "brainSegmentation", "brainSegmentationPatchBased", "ctHumanLung", "dbpn4x",
13 | "deepFlash", "deepFlashLeft8", "deepFlashRight8", "deepFlashLeft16",
14 | "deepFlashRight16", "deepFlashLeft16new", "deepFlashRight16new", "denoising",
15 | "dktInner", "dktOuter", "dktOuterWithSpatialPriors", "elBicho",
16 | "ewDavidWmhSegmentationWeights", "ewDavidWmhSegmentationSlicewiseWeights",
17 | "ewDavidWmhSegmentationSlicewiseT1OnlyWeights", "functionalLungMri",
18 | "hippMapp3rInitial", "hippMapp3rRefine", "koniqMBCS", "koniqMS", "koniqMS2",
19 | "koniqMS3", "mriSuperResolution", "protonLungMri",
20 | "sixTissueOctantBrainSegmentation", "sysuMediaWmhFlairOnlyModel0",
21 | "sysuMediaWmhFlairOnlyModel1", "sysuMediaWmhFlairOnlyModel2",
22 | "sysuMediaWmhFlairT1Model0", "sysuMediaWmhFlairT1Model1",
23 | "sysuMediaWmhFlairT1Model2", "tidsQualityAssessment",
24 | "wholeTumorSegmentationT2Flair"),
25 | targetFileName,
26 | antsxnetCacheDirectory = NULL
27 | )
28 | }
29 | \arguments{
30 | \item{fileId}{one of the permitted file ids or pass "show" to list all
31 | valid possibilities. Note that most require internet access to download.}
32 |
33 | \item{targetFileName}{optional target filename}
34 |
35 | \item{antsxnetCacheDirectory}{destination directory for storing the downloaded
36 | template and model weights so they can be reused. If
37 | \code{is.null(antsxnetCacheDirectory)}, these data will be downloaded to the
38 | subdirectory ~/.keras/ANTsXNet/.}
39 | }
40 | \value{
41 | filename string
42 | }
43 | \description{
44 | Downloads pretrained network/weights.
45 | }
46 | \note{
47 | See \url{https://figshare.com/authors/Nick_Tustison/441144}
48 | or \url{https://figshare.com/authors/Brian_Avants/418551}
49 | for more descriptions.
50 | }
51 | \examples{
52 | \dontrun{
53 | net <- getPretrainedNetwork( "dbpn4x" )
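54 | # List all valid file ids without downloading anything.
55 | getPretrainedNetwork( "show" )
56 | # Illustrative: store the weights in a user-chosen cache directory
57 | # ("~/antsxnet_cache" is a hypothetical path).
58 | net2 <- getPretrainedNetwork( "dbpn4x",
59 |   antsxnetCacheDirectory = "~/antsxnet_cache" )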
54 | }
55 | }
56 | \author{
57 | Avants BB
58 | }
59 |
--------------------------------------------------------------------------------