├── .Rbuildignore ├── .github ├── .gitignore └── workflows │ ├── coverage.yaml │ ├── pkgdown.yaml │ ├── rcmdcheck.yaml │ └── rhub.yaml ├── .gitignore ├── DESCRIPTION ├── NAMESPACE ├── NEWS.md ├── R ├── RStoolbox-package.R ├── RcppExports.R ├── classifyQA.R ├── cloudMask.R ├── coregisterImages.R ├── encodeQA.R ├── estimateHaze.R ├── fCover.R ├── fortify-raster.R ├── getMeta.R ├── getValidation.R ├── ggR.R ├── ggRGB.R ├── histMatch.R ├── internalFunctions.R ├── mesma.R ├── mlc.R ├── normImage.R ├── oneHotEncode.R ├── panSharpen.R ├── pifMatch.R ├── radCor.R ├── rasterCVA.R ├── rasterEntropy.R ├── rasterPCA.R ├── readEE.R ├── readMeta.R ├── readSLI.R ├── rescaleImage.R ├── rsOpts.R ├── sam.R ├── saveRSTBX.R ├── spectralIndices.R ├── stackMeta.R ├── superClass.R ├── sysdata.rda ├── tasseledCap.R ├── topCor.R ├── unsuperClass.R ├── utils-pipe.R ├── validateMap.R └── zzz.R ├── README.md ├── _pkgdown.yml ├── cran-comments.md ├── data-raw ├── generate_sysdata.R └── sun_earth_dists.rds ├── inst ├── CITATION └── external │ ├── EarthExplorer_LS8.txt │ ├── landsat │ ├── LT52240631988227CUB02_B1.TIF │ ├── LT52240631988227CUB02_B2.TIF │ ├── LT52240631988227CUB02_B3.TIF │ ├── LT52240631988227CUB02_B4.TIF │ ├── LT52240631988227CUB02_B5.TIF │ ├── LT52240631988227CUB02_B6.TIF │ ├── LT52240631988227CUB02_B7.TIF │ └── LT52240631988227CUB02_MTL.txt │ ├── lsat.rds │ ├── rlogo.rds │ ├── sen2.rds │ ├── srtm_lsat.rds │ ├── srtm_sen2.rds │ ├── trainingPoints_rlogo.rds │ ├── trainingPolygons_lsat.rds │ ├── trainingPolygons_sen2.rds │ ├── vegSpec.sli │ └── vegSpec.sli.hdr ├── man-roxygen ├── examples_SLI.R ├── examples_cloudMask.R └── spectralIndices_table.R ├── man ├── ImageMetaData.Rd ├── RStoolbox.Rd ├── classifyQA.Rd ├── cloudMask.Rd ├── cloudShadowMask.Rd ├── coregisterImages.Rd ├── decodeQA.Rd ├── encodeQA.Rd ├── estimateHaze.Rd ├── fCover.Rd ├── figures │ └── logo.png ├── fortifySpatRaster.Rd ├── getMeta.Rd ├── getValidation.Rd ├── ggR.Rd ├── ggRGB.Rd ├── histMatch.Rd ├── lsat.Rd ├── mesma.Rd ├── normImage.Rd ├── oneHotEncode.Rd ├── panSharpen.Rd ├── pifMatch.Rd ├── pipe.Rd ├── predict.unsuperClass.Rd ├── radCor.Rd ├── rasterCVA.Rd ├── rasterEntropy.Rd ├── rasterPCA.Rd ├── readEE.Rd ├── readMeta.Rd ├── readSLI.Rd ├── rescaleImage.Rd ├── rlogo.Rd ├── rsOpts.Rd ├── sam.Rd ├── saveRSTBX.Rd ├── sen2.Rd ├── spectralIndices.Rd ├── srtm.Rd ├── srtm_sen2.Rd ├── stackMeta.Rd ├── superClass.Rd ├── tasseledCap.Rd ├── topCor.Rd ├── unsuperClass.Rd ├── validateMap.Rd └── writeSLI.Rd ├── prep-release └── makeRelease.sh ├── src ├── Makevars ├── Makevars.win ├── RcppExports.cpp ├── classQA.cpp ├── entropy.cpp ├── gainOffsetRescale.cpp ├── init.c ├── memory.cpp ├── memory.h ├── nnls_solver.cpp ├── normImage.cpp ├── oneHot.cpp ├── predictKmeans.cpp ├── predictMlc.cpp ├── pwSimilarity.cpp ├── rescaleImage.cpp ├── sam.cpp └── spectralIndices.cpp └── tests ├── testthat.R └── testthat ├── helper-testUtils.R ├── test-classifyQA.R ├── test-cloudMask.R ├── test-coregisterImages.R ├── test-encodeQA.R ├── test-entropy.R ├── test-estimateHaze.R ├── test-fCover.R ├── test-gainOffsetRescale.R ├── test-getMeta.R ├── test-getValidation.R ├── test-ggplot.R ├── test-histMatch.R ├── test-internalFunctions.R ├── test-mesma.R ├── test-mlc.R ├── test-multicore.R ├── test-normImage.R ├── test-oneHotEncode.R ├── test-panSharpen.R ├── test-pifMatch.R ├── test-radCor.R ├── test-rasterCVA.R ├── test-rasterPCA.R ├── test-readEE.R ├── test-readMeta.R ├── test-rescaleImage.R ├── test-rsOpts.R ├── test-sam.R ├── test-saveReadRSTBX.R ├── test-sli.R ├── 
test-spectralIndices.R ├── test-stackMeta.R ├── test-superClass.R ├── test-tasseledCap.R ├── test-topCor.R ├── test-unsuperClass.R ├── test-validateMap.R └── testdata ├── earthexplorer ├── EE_LANDSAT_5.txt ├── EE_LANDSAT_7.txt ├── EE_LANDSAT_8.txt └── EE_LANDSAT_NEWFORMAT.csv ├── metadata ├── LC08_L1TP_193024_20180824_20200831_02_T1_MTL.txt ├── LC08_L1TP_195025_20130707_20170503_01_T1_MTL.txt ├── LC80980762015235LGN00.xml ├── LE07_L1TP_160031_20110416_20161210_01_T1.xml ├── LE07_L1TP_160031_20110416_20161210_01_T1_MTL.TXT ├── LM50490251987214PAC00_MTL.txt ├── LT05_L1TP_047027_20101006_20160512_01_T1_MTL.txt ├── LT05_L1TP_218072_20100801_20161015_01_T1_MTL.txt └── mss_MTL.txt └── sli ├── a.sli ├── a.sli.hdr ├── b.sli ├── b.sli.hdr ├── c.sli ├── c.sli.hdr ├── d.sli └── d.sli.hdr /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^CRAN-RELEASE$ 2 | ^\.travis\.yml$ 3 | inst/demo 4 | .project 5 | .idea 6 | .git 7 | .gitignore 8 | .Rprofile 9 | man-roxygen 10 | data-raw 11 | prep-release 12 | cran-comments.md 13 | .README.md.html 14 | pkg-build.sh 15 | ^.*\.Rproj$ 16 | ^\.Rproj\.user$ 17 | revdep 18 | ^\.github$ 19 | ^logo\.png$ 20 | ^CRAN-SUBMISSION$ 21 | _pkgdown.yml 22 | -------------------------------------------------------------------------------- /.github/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | -------------------------------------------------------------------------------- /.github/workflows/coverage.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [main, master] 4 | pull_request: 5 | branches: [main, master] 6 | 7 | name: test-coverage 8 | 9 | jobs: 10 | test-coverage: 11 | runs-on: macOS-latest 12 | env: 13 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 14 | 15 | steps: 16 | - uses: actions/checkout@v2 17 | 18 | - uses: r-lib/actions/setup-r@v2 19 | 20 | - uses: r-lib/actions/setup-pandoc@v2 21 | 22 | - name: Query dependencies 23 | run: | 24 | install.packages('remotes') 25 | saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) 26 | writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") 27 | shell: Rscript {0} 28 | 29 | - name: Restore R package cache 30 | uses: actions/cache@v2 31 | with: 32 | path: ${{ env.R_LIBS_USER }} 33 | key: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} 34 | restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- 35 | 36 | - name: Install dependencies 37 | run: | 38 | install.packages(c("remotes")) 39 | remotes::install_deps(dependencies = TRUE) 40 | remotes::install_cran("covr") 41 | shell: Rscript {0} 42 | 43 | - name: Test coverage 44 | run: covr::codecov(token = "${{ secrets.CODECOV_TOKEN }}") 45 | shell: Rscript {0} -------------------------------------------------------------------------------- /.github/workflows/pkgdown.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | branches: [main, master] 8 | release: 9 | types: [published] 10 | workflow_dispatch: 11 | 12 | name: pkgdown 13 | 14 | jobs: 15 | pkgdown: 16 | runs-on: ubuntu-latest 17 | # Only restrict concurrency for non-PR jobs 18 | concurrency: 19 | group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }} 20 | env: 21 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 22 | permissions: 23 | contents: write 24 | steps: 25 | - uses: actions/checkout@v4 26 | 27 | - uses: r-lib/actions/setup-pandoc@v2 28 | 29 | - uses: r-lib/actions/setup-r@v2 30 | with: 31 | use-public-rspm: true 32 | 33 | - uses: r-lib/actions/setup-r-dependencies@v2 34 | with: 35 | extra-packages: any::pkgdown, local::. 36 | needs: website 37 | 38 | - name: Build site 39 | run: pkgdown::build_site_github_pages(new_process = FALSE, install = FALSE) 40 | shell: Rscript {0} 41 | 42 | - name: Deploy to GitHub pages 🚀 43 | if: github.event_name != 'pull_request' 44 | uses: JamesIves/github-pages-deploy-action@v4.5.0 45 | with: 46 | clean: false 47 | branch: gh-pages 48 | folder: docs 49 | -------------------------------------------------------------------------------- /.github/workflows/rcmdcheck.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/master/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | branches: [main, master] 8 | 9 | name: CI 10 | 11 | jobs: 12 | CI: 13 | runs-on: ${{ matrix.config.os }} 14 | 15 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 16 | 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | config: 21 | - {os: macOS-latest, r: 'release'} 22 | - {os: windows-latest, r: 'release'} 23 | - {os: windows-latest, r: 'devel'} 24 | - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'} 25 | - {os: ubuntu-latest, r: 'release'} 26 | - {os: ubuntu-latest, r: 'oldrel-1'} 27 | 28 | env: 29 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 30 | R_KEEP_PKG_SOURCE: yes 31 | 32 | steps: 33 | - uses: actions/checkout@v3 34 | 35 | - uses: r-lib/actions/setup-pandoc@v2 36 | 37 | - name: Install xquartz 38 | if: runner.os == 'macOS' 39 | run: brew install --cask xquartz 40 | 41 | - uses: r-lib/actions/setup-r@v2 42 | with: 43 | r-version: ${{ matrix.config.r }} 44 | http-user-agent: ${{ matrix.config.http-user-agent }} 45 | use-public-rspm: true 46 | Ncpus: '2' 47 | 48 | - name: Install dependencies (Win) 49 | if: runner.os == 'windows' 50 | uses: r-lib/actions/setup-r-dependencies@v2 51 | with: 52 | extra-packages: any::rcmdcheck 53 | needs: check 54 | error_on: ${{ env.RCMDCHECK_ERROR_ON || 'error,warning' }} 55 | 56 | - name: Install dependencies (Non-Win) 57 | if: runner.os != 'windows' 58 | uses: r-lib/actions/setup-r-dependencies@v2 59 | with: 60 | extra-packages: any::rcmdcheck, cran/randomForest, cran/XML 61 | needs: check 62 | error_on: ${{ env.RCMDCHECK_ERROR_ON || 'error,warning' }} 63 | 64 | - name: Install RF 65 | if: ${{ matrix.config.r != 'oldrel-1' }} 66 | run: install.packages("randomForest") 67 | shell: Rscript {0} 68 | 69 | - uses: r-lib/actions/check-r-package@v2 70 | with: 71 | upload-snapshots: true 72 | -------------------------------------------------------------------------------- /.github/workflows/rhub.yaml: 
-------------------------------------------------------------------------------- 1 | # R-hub's generic GitHub Actions workflow file. It's canonical location is at 2 | # https://github.com/r-hub/actions/blob/v1/workflows/rhub.yaml 3 | # You can update this file to a newer version using the rhub2 package: 4 | # 5 | # rhub::rhub_setup() 6 | # 7 | # It is unlikely that you need to modify this file manually. 8 | 9 | name: R-hub 10 | run-name: "${{ github.event.inputs.id }}: ${{ github.event.inputs.name || format('Manually run by {0}', github.triggering_actor) }}" 11 | 12 | on: 13 | workflow_dispatch: 14 | inputs: 15 | config: 16 | description: 'A comma separated list of R-hub platforms to use.' 17 | type: string 18 | default: 'linux,windows,macos' 19 | name: 20 | description: 'Run name. You can leave this empty now.' 21 | type: string 22 | id: 23 | description: 'Unique ID. You can leave this empty now.' 24 | type: string 25 | 26 | jobs: 27 | 28 | setup: 29 | runs-on: ubuntu-latest 30 | outputs: 31 | containers: ${{ steps.rhub-setup.outputs.containers }} 32 | platforms: ${{ steps.rhub-setup.outputs.platforms }} 33 | 34 | steps: 35 | # NO NEED TO CHECKOUT HERE 36 | - uses: r-hub/actions/setup@v1 37 | with: 38 | config: ${{ github.event.inputs.config }} 39 | id: rhub-setup 40 | 41 | linux-containers: 42 | needs: setup 43 | if: ${{ needs.setup.outputs.containers != '[]' }} 44 | runs-on: ubuntu-latest 45 | name: ${{ matrix.config.label }} 46 | strategy: 47 | fail-fast: false 48 | matrix: 49 | config: ${{ fromJson(needs.setup.outputs.containers) }} 50 | container: 51 | image: ${{ matrix.config.container }} 52 | 53 | steps: 54 | - uses: r-hub/actions/checkout@v1 55 | - uses: r-hub/actions/platform-info@v1 56 | with: 57 | token: ${{ secrets.RHUB }} 58 | job-config: ${{ matrix.config.job-config }} 59 | - uses: r-hub/actions/setup-deps@v1 60 | with: 61 | token: ${{ secrets.RHUB }} 62 | job-config: ${{ matrix.config.job-config }} 63 | - uses: r-hub/actions/run-check@v1 64 | with: 65 | token: ${{ secrets.RHUB }} 66 | job-config: ${{ matrix.config.job-config }} 67 | 68 | other-platforms: 69 | needs: setup 70 | if: ${{ needs.setup.outputs.platforms != '[]' }} 71 | runs-on: ${{ matrix.config.os }} 72 | name: ${{ matrix.config.label }} 73 | strategy: 74 | fail-fast: false 75 | matrix: 76 | config: ${{ fromJson(needs.setup.outputs.platforms) }} 77 | 78 | steps: 79 | - uses: r-hub/actions/checkout@v1 80 | - uses: r-hub/actions/setup-r@v1 81 | with: 82 | job-config: ${{ matrix.config.job-config }} 83 | token: ${{ secrets.RHUB }} 84 | - uses: r-hub/actions/platform-info@v1 85 | with: 86 | token: ${{ secrets.RHUB }} 87 | job-config: ${{ matrix.config.job-config }} 88 | - uses: r-hub/actions/setup-deps@v1 89 | with: 90 | job-config: ${{ matrix.config.job-config }} 91 | token: ${{ secrets.RHUB }} 92 | - uses: r-hub/actions/run-check@v1 93 | with: 94 | job-config: ${{ matrix.config.job-config }} 95 | token: ${{ secrets.RHUB }} 96 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .project 2 | .settings 3 | .Rhistory 4 | *~ 5 | *.o 6 | *.so 7 | *.dll 8 | *.README.md.html 9 | _Rmd 10 | .Rproj.user 11 | RStoolbox.Rproj 12 | revdep 13 | *Rplots.pdf 14 | CRAN_RELEASE 15 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: RStoolbox 2 | Type: Package 3 | Title: 
Remote Sensing Data Analysis 4 | Version: 1.0.2.1 5 | Date: 2025-02-02 6 | Authors@R: c( 7 | person("Benjamin", "Leutner", role= "aut", email="rstoolboxpackage@gmail.com", comment = c(ORCID = "0000-0002-6893-2002")), 8 | person("Ned", "Horning", role ="aut", email="horning@amnh.org"), 9 | person("Jakob", "Schwalb-Willmann", role ="aut", email="movevis@schwalb-willmann.de", comment = c(ORCID = "0000-0003-2665-1509")), 10 | person("Robert J.", "Hijmans", role = "ctb", email = "r.hijmans@gmail.com",comment = c(ORCID = "0000-0001-5872-2872")), 11 | person("Konstantin", "Mueller", role = c("aut", "cre"), email = "konstantinfinn.mueller@gmx.de", comment = c(ORCID = "0000-0001-6540-3124")) 12 | ) 13 | Description: Toolbox for remote sensing image processing and analysis such as 14 | calculating spectral indexes, principal component transformation, unsupervised 15 | and supervised classification or fractional cover analyses. 16 | URL: https://bleutner.github.io/RStoolbox/, https://github.com/bleutner/RStoolbox 17 | BugReports: https://github.com/bleutner/RStoolbox/issues 18 | Encoding: UTF-8 19 | Depends: 20 | R (>= 3.5.0) 21 | Imports: 22 | caret (>= 6.0-79), 23 | sf, 24 | terra (>= 1.6-41), 25 | XML, 26 | dplyr, 27 | ggplot2, 28 | tidyr, 29 | reshape2, 30 | lifecycle, 31 | exactextractr, 32 | Rcpp, 33 | methods, 34 | magrittr 35 | Suggests: 36 | randomForest, 37 | lattice, 38 | kernlab, 39 | e1071, 40 | gridExtra, 41 | pls, 42 | testthat 43 | LinkingTo: Rcpp, 44 | RcppArmadillo 45 | License: GPL (>=3) 46 | RoxygenNote: 7.3.2 47 | LazyData: true 48 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(predict,superClass) 4 | S3method(predict,unsuperClass) 5 | S3method(print,mapValidation) 6 | S3method(print,superClass) 7 | S3method(print,unsuperClass) 8 | S3method(summary,ImageMetaData) 9 | export("%>%") 10 | export(ImageMetaData) 11 | export(classifyQA) 12 | export(cloudMask) 13 | export(cloudShadowMask) 14 | export(coregisterImages) 15 | export(decodeQA) 16 | export(encodeQA) 17 | export(estimateHaze) 18 | export(fCover) 19 | export(fortifySpatRaster) 20 | export(getMeta) 21 | export(getValidation) 22 | export(ggR) 23 | export(ggRGB) 24 | export(histMatch) 25 | export(mesma) 26 | export(normImage) 27 | export(oneHotEncode) 28 | export(panSharpen) 29 | export(pifMatch) 30 | export(radCor) 31 | export(rasterCVA) 32 | export(rasterEntropy) 33 | export(rasterPCA) 34 | export(readEE) 35 | export(readMeta) 36 | export(readRSTBX) 37 | export(readSLI) 38 | export(rescaleImage) 39 | export(rsOpts) 40 | export(sam) 41 | export(saveRSTBX) 42 | export(spectralIndices) 43 | export(stackMeta) 44 | export(superClass) 45 | export(tasseledCap) 46 | export(topCor) 47 | export(unsuperClass) 48 | export(validateMap) 49 | export(writeSLI) 50 | import(sf) 51 | import(terra) 52 | importFrom(Rcpp,sourceCpp) 53 | importFrom(XML,xmlParse) 54 | importFrom(XML,xmlToList) 55 | importFrom(caret,confusionMatrix) 56 | importFrom(caret,createDataPartition) 57 | importFrom(caret,createFolds) 58 | importFrom(caret,getTrainPerf) 59 | importFrom(caret,postResample) 60 | importFrom(caret,train) 61 | importFrom(caret,trainControl) 62 | importFrom(dplyr,filter) 63 | importFrom(dplyr,group_by) 64 | importFrom(dplyr,mutate) 65 | importFrom(dplyr,summarize) 66 | importFrom(exactextractr,exact_extract) 67 | importFrom(ggplot2,aes) 68 | 
importFrom(ggplot2,aes_string) 69 | importFrom(ggplot2,annotation_raster) 70 | importFrom(ggplot2,coord_equal) 71 | importFrom(ggplot2,facet_wrap) 72 | importFrom(ggplot2,fortify) 73 | importFrom(ggplot2,geom_blank) 74 | importFrom(ggplot2,geom_raster) 75 | importFrom(ggplot2,ggplot) 76 | importFrom(ggplot2,scale_fill_discrete) 77 | importFrom(ggplot2,scale_fill_gradientn) 78 | importFrom(ggplot2,scale_fill_identity) 79 | importFrom(grDevices,hsv) 80 | importFrom(graphics,abline) 81 | importFrom(graphics,par) 82 | importFrom(lifecycle,deprecate_warn) 83 | importFrom(lifecycle,deprecated) 84 | importFrom(lifecycle,is_present) 85 | importFrom(magrittr,"%>%") 86 | importFrom(methods,as) 87 | importFrom(methods,show) 88 | importFrom(reshape2,melt) 89 | importFrom(stats,approxfun) 90 | importFrom(stats,coefficients) 91 | importFrom(stats,complete.cases) 92 | importFrom(stats,cor) 93 | importFrom(stats,cov) 94 | importFrom(stats,cov.wt) 95 | importFrom(stats,ecdf) 96 | importFrom(stats,kmeans) 97 | importFrom(stats,knots) 98 | importFrom(stats,lm) 99 | importFrom(stats,loadings) 100 | importFrom(stats,setNames) 101 | importFrom(terra,nlyr) 102 | importFrom(terra,rast) 103 | importFrom(terra,selectRange) 104 | importFrom(terra,which.min) 105 | importFrom(tidyr,complete) 106 | importFrom(tidyr,pivot_wider) 107 | importFrom(utils,capture.output) 108 | importFrom(utils,data) 109 | importFrom(utils,read.csv) 110 | importFrom(utils,read.delim) 111 | importFrom(utils,read.table) 112 | importFrom(utils,str) 113 | importFrom(utils,write.table) 114 | useDynLib(RStoolbox) 115 | -------------------------------------------------------------------------------- /R/RcppExports.R: -------------------------------------------------------------------------------- 1 | # Generated by using Rcpp::compileAttributes() -> do not edit by hand 2 | # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 3 | 4 | classQA <- function(x, rcl) { 5 | .Call('_RStoolbox_classQA', PACKAGE = 'RStoolbox', x, rcl) 6 | } 7 | 8 | entropyCpp <- function(x) { 9 | .Call('_RStoolbox_entropyCpp', PACKAGE = 'RStoolbox', x) 10 | } 11 | 12 | gainOffsetRescale <- function(x, g, o, clamp) { 13 | .Call('_RStoolbox_gainOffsetRescale', PACKAGE = 'RStoolbox', x, g, o, clamp) 14 | } 15 | 16 | availableRAMCpp <- function(ram) { 17 | .Call('_RStoolbox_availableRAMCpp', PACKAGE = 'RStoolbox', ram) 18 | } 19 | 20 | nnls_solver <- function(x, A, iterate = 400L, tolerance = 0.000001) { 21 | .Call('_RStoolbox_nnls_solver', PACKAGE = 'RStoolbox', x, A, iterate, tolerance) 22 | } 23 | 24 | normImageCpp <- function(x, M, S) { 25 | .Call('_RStoolbox_normImageCpp', PACKAGE = 'RStoolbox', x, M, S) 26 | } 27 | 28 | oneHotCpp <- function(x, classes, bg, fg, na_rm) { 29 | .Call('_RStoolbox_oneHotCpp', PACKAGE = 'RStoolbox', x, classes, bg, fg, na_rm) 30 | } 31 | 32 | predKmeansCpp <- function(x, centers, returnDistance = FALSE) { 33 | .Call('_RStoolbox_predKmeansCpp', PACKAGE = 'RStoolbox', x, centers, returnDistance) 34 | } 35 | 36 | predictMlcCpp <- function(newdata, model, nclasses) { 37 | .Call('_RStoolbox_predictMlcCpp', PACKAGE = 'RStoolbox', newdata, model, nclasses) 38 | } 39 | 40 | pwSimilarityCpp <- function(img, ref, method) { 41 | .Call('_RStoolbox_pwSimilarityCpp', PACKAGE = 'RStoolbox', img, ref, method) 42 | } 43 | 44 | rescaleImageCpp <- function(x, scal, xmin, ymin) { 45 | .Call('_RStoolbox_rescaleImageCpp', PACKAGE = 'RStoolbox', x, scal, xmin, ymin) 46 | } 47 | 48 | specSimC <- function(x, em) { 49 | .Call('_RStoolbox_specSimC', PACKAGE = 
'RStoolbox', x, em) 50 | } 51 | 52 | spectralIndicesCpp <- function(x, indices, redBand, blueBand, greenBand, nirBand, redEdge1Band, redEdge2Band, redEdge3Band, swir1Band, swir2Band, swir3Band, maskLayer, maskValue, L, s, G, C1, C2, Levi, swir2ccc, swir2cdiff, sf, formulas) { 53 | .Call('_RStoolbox_spectralIndicesCpp', PACKAGE = 'RStoolbox', x, indices, redBand, blueBand, greenBand, nirBand, redEdge1Band, redEdge2Band, redEdge3Band, swir1Band, swir2Band, swir3Band, maskLayer, maskValue, L, s, G, C1, C2, Levi, swir2ccc, swir2cdiff, sf, formulas) 54 | } 55 | 56 | -------------------------------------------------------------------------------- /R/fortify-raster.R: -------------------------------------------------------------------------------- 1 | #' Fortify method for classes from the terra package. 2 | #' 3 | #' @param x \code{SpatRaster} object to convert into a dataframe. 4 | #' @param maxpixels Integer. Maximum number of pixels to sample 5 | #' @rdname fortifySpatRaster 6 | #' @usage fortifySpatRaster(x, maxpixels = 50000) 7 | #' @return Returns a data.frame with coordinates (x,y) and corresponding raster values. 8 | #' @name fortifySpatRaster 9 | #' @examples 10 | #' r_df <- fortifySpatRaster(rlogo) 11 | #' head(r_df) 12 | #' @export 13 | fortifySpatRaster <- function(x, maxpixels = 50000){ 14 | raster <- spatSample(x, maxpixels, method = "regular", as.raster = TRUE) 15 | as.data.frame(raster, xy = TRUE) 16 | } 17 | -------------------------------------------------------------------------------- /R/getMeta.R: -------------------------------------------------------------------------------- 1 | #' Extract bandwise information from ImageMetaData 2 | #' 3 | #' This is an accessor function to quickly access information stored in ImageMetaData, e.g. scale factor per band. 4 | #' Intended for use with imagery which was imported using stackMeta. Will return parameters using the actual band order in img. 5 | #' 6 | #' @param img SpatRaster or character vector with band names. 7 | #' @param metaData ImageMetaData or path to meta data file. 8 | #' @param what Character. Parameter to extract. Either data descriptors, or conversion parameters (see Details for options) 9 | #' @export 10 | #' @return 11 | #' If \code{what} is one of \code{c('CALRAD', 'CALBT', 'CALREF')} a data.frame is returned with bands in rows (order corresponding to \code{img} band order). 12 | #' Otherwise a named numeric vector with the corresponding parameter is returned (layernames as names). 
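#' For instance, \code{getMeta(lsat_t, metaData = meta, what = "SCALE_FACTOR")} from the examples below yields a named vector along the lines of \code{c(B1_dn = 1, B2_dn = 1, ...)} (values shown here are purely illustrative and depend on the metadata file).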
13 | #' @details 14 | #' Possible metadata parameters (\code{what} argument): 15 | #' 16 | #' Data descriptors 17 | #' \tabular{ll}{ 18 | #' 'FILES' \tab \cr 19 | #' 'QUANTITY' \tab \cr 20 | #' 'CATEGORY' \tab \cr 21 | #' 'NA_VALUE' \tab \cr 22 | #' 'SATURATE_VALUE' \tab \cr 23 | #' 'SCALE_FACTOR' \tab \cr 24 | #' 'DATA_TYPE' \tab \cr 25 | #' 'SPATIAL_RESOLUTION' \tab \cr 26 | #' } 27 | #' Conversion parameters 28 | #' \tabular{ll}{ 29 | #' 'CALRAD' \tab Conversion parameters from DN to radiance \cr 30 | #' 'CALBT' \tab Conversion parameters from radiance to brightness temperature \cr 31 | #' 'CALREF' \tab Conversion parameters from DN to reflectance (Landsat 8 only) \cr 32 | #' } 33 | #' @examples 34 | #' ## Import example data 35 | #' mtlFile <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package="RStoolbox") 36 | #' meta <- readMeta(mtlFile) 37 | #' lsat_t <- stackMeta(mtlFile) 38 | #' 39 | #' ## Get integer scale factors 40 | #' getMeta(lsat_t, metaData = meta, what = "SCALE_FACTOR") 41 | #' 42 | #' ## Conversion factors for brightness temperature 43 | #' getMeta("B6_dn", metaData = meta, what = "CALBT") 44 | #' 45 | #' ## Conversion factors to top-of-atmosphere radiance 46 | #' ## Band order not corresponding to metaData order 47 | #' getMeta(lsat_t[[5:1]], metaData = meta, what = "CALRAD") 48 | #' 49 | #' ## Get integer scale factors 50 | #' getMeta(lsat_t, metaData = meta, what = "SCALE_FACTOR") 51 | #' 52 | #' ## Get file basenames 53 | #' getMeta(lsat_t, metaData = meta, what = "FILES") 54 | #' 55 | getMeta <- function(img, metaData, what){ 56 | 57 | if(inherits(metaData, "character")) { 58 | metaData <- readMeta(metaData) 59 | } else if(!inherits(metaData, "ImageMetaData")){ 60 | stop("metaData must be character or ImageMetaData") 61 | } 62 | 63 | stopifnot(what %in% c(names(metaData$DATA), "CALREF", "CALRAD", "CALBT") & length(what) == 1) 64 | 65 | if(inherits(img, "SpatRaster")){ 66 | bds <- names(img) 67 | } else if (inherits(img,"character")) { 68 | bds <- img 69 | } else { 70 | stop("img must be a SpatRaster or character") 71 | } 72 | 73 | if(what %in% c("CALREF", "CALRAD", "CALBT")) { 74 | if(length(metaData[[what]]) == 1) stop(paste(what, "is not populated in metaData")) 75 | present <- bds %in% rownames(metaData[[what]]) 76 | if (any(!present)) stop("Bands ", paste0(bds[!present], collapse = ", ") , " are not present in metaData.\n", 77 | "Available bands: ", rownames(metaData[[what]]) , collapse = ", ") 78 | out <- metaData[[what]][bds,] 79 | } else { 80 | present <- bds %in% rownames(metaData$DATA) 81 | if (any(!present)) stop("Bands ", paste0(bds[!present], collapse = ", ") , " are not present in metaData.\n", 82 | "Available bands: ", paste0(rownames(metaData$DATA), collapse = ", ")) 83 | out <- metaData$DATA[bds,what] 84 | names(out) <- bds 85 | } 86 | 87 | return(out) 88 | 89 | } 90 | 91 | -------------------------------------------------------------------------------- /R/getValidation.R: -------------------------------------------------------------------------------- 1 | #' Extract validation results from superClass objects 2 | #' 3 | #' @param x superClass object or caret::confusionMatrix 4 | #' @param from Character. 'testset' extracts the results from independent validation with testset. 'cv' extracts cross-validation results. 5 | #' @param metrics Character. Only relevant in classification mode (ignored for regression models). 
6 | #' Select 'overall' for overall accuracy metrics, 'classwise' for classwise metrics, 7 | #' 'confmat' for the confusion matrix itself and 'caret' to return the whole caret::confusionMatrix object. 8 | #' @export 9 | #' @return Returns a data.frame with validation results. 10 | #' If metrics = 'confmat' or 'caret' will return a table or the full caret::confusionMatrix object, respectively. 11 | #' @examples 12 | #' library(pls) 13 | #' ## Fit classifier (splitting training into 70\% training data, 30\% validation data) 14 | #' train <- readRDS(system.file("external/trainingPoints_rlogo.rds", package="RStoolbox")) 15 | #' SC <- superClass(rlogo, trainData = train, responseCol = "class", 16 | #' model="pls", trainPartition = 0.7) 17 | #' ## Independent testset-validation 18 | #' getValidation(SC) 19 | #' getValidation(SC, metrics = "classwise") 20 | #' ## Cross-validation based 21 | #' getValidation(SC, from = "cv") 22 | getValidation <- function(x, from = "testset", metrics = "overall"){ 23 | 24 | stopifnot(inherits(x, c("superClass", "mapValidation", "confusionMatrix")) , 25 | metrics %in% c("overall", "classwise", "confmat", "caret"), 26 | from %in% c("testset", "cv") 27 | ) 28 | if(inherits(x, "mapValidation")) x <- x$performance 29 | if(inherits(x, "superClass") && from == "testset" && inherits(x$validation, "character")){ 30 | stop("No independent validation was performed during model fitting. Use from='cv' to extract cross-validation performance.") 31 | } 32 | if(inherits(x,"confusionMatrix") || x$model$modelType == "Classification"){ 33 | 34 | if(inherits(x,"confusionMatrix")) { 35 | confMat <- x 36 | } else if(from == "cv") { 37 | confMat <- confusionMatrix(as.factor(x$model$pred$pred), as.factor(x$model$pred$obs)) 38 | } else { 39 | confMat <- x$validation$performance 40 | } 41 | 42 | if(metrics == "overall") { 43 | perf <- as.data.frame(t(confMat$overall)) 44 | } else if (metrics == "classwise"){ 45 | perf <- confMat$byClass 46 | perf <- data.frame(class = gsub("Class:", "", rownames(perf)), perf) 47 | rownames(perf) <- NULL 48 | } else if (metrics == "confmat"){ 49 | return(confMat$table) 50 | } else if (metrics == "caret"){ 51 | return(confMat) 52 | } 53 | } else { 54 | ## Regression 55 | if(from=="testset"){ 56 | perf <- x$validation$performance 57 | } else { 58 | bestPerf <- x$model$bestTune 59 | colnames(bestPerf) <- gsub("^\\.", "", colnames(bestPerf)) 60 | perf <- merge(x$model$results, bestPerf)[,-c(1:length(bestPerf))] 61 | } 62 | } 63 | if(inherits(x, "confusionMatrix")) { 64 | model <- from <- NA 65 | } else { 66 | model <- x$model$method 67 | } 68 | perf <- data.frame(model = model, validation = from, perf) 69 | 70 | return(perf) 71 | } 72 | 73 | -------------------------------------------------------------------------------- /R/mlc.R: -------------------------------------------------------------------------------- 1 | #' Maximum Likelihood Classification 2 | #' @param x matrix with predictors 3 | #' @param y vector with classes (factor) 4 | #' @param ... not used 5 | #' @keywords internal 6 | #' @noRd 7 | mlc <- function(x, y, ...){ 8 | classes <- setNames(nm=levels(y)) 9 | mod <- lapply(classes, function(ci){ 10 | cl <- y %in% ci 11 | 12 | if(!any(cl)) return(list(warn=TRUE, msg=paste("No data for class", ci))) 13 | 14 | cod <- cov(x[cl,]) 15 | ## Check&Fix for singularity due to fully correlated variables 16 | ## TODO: exclude correlated variables on a per class basis? 
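## Background on the fix below: a duplicated row in the class covariance matrix means two
## predictors are perfectly correlated, so the matrix is singular and chol() would fail.
## Adding a tiny, strictly increasing offset (multiples of 1e-10) to the diagonal entries of
## the affected rows restores positive definiteness while barely changing the estimates.
## The fitted class model then stores the class means `m`, `D` = -log|Sigma| (computed from
## the Cholesky diagonal) and the inverse covariance `I`, which predictMlcCpp() uses when
## evaluating the per-class Gaussian discriminant.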
17 | dups <- duplicated(cod) 18 | warn <- any(dups) 19 | if(warn) { 20 | diag(cod) <- diag(cod) + cumsum(1e-10*dups)*dups 21 | } 22 | 23 | upper <- chol(cod) 24 | 25 | list(m = colMeans(x[cl,]), 26 | D = -2*sum(log(diag(upper))), 27 | I = provideDimnames(chol2inv(upper), base=dimnames(upper)), 28 | warn = warn) 29 | }) 30 | 31 | warn <- classes[ vapply(mod, "[[", FALSE, "warn")] 32 | if(length(warn)) warning("Covariance matrix of class/classes ", paste0(warn, collapse =", "), " is singular, i.e. holds perfectly correlated variables.") 33 | for(i in seq_along(mod)) mod[[i]][["warn"]] <- NULL 34 | mod[["levels"]] <- unique(y) 35 | mod 36 | } 37 | 38 | #' Predict Maximum Likelihood Classification 39 | #' 40 | #' @param modelFit model result from mlc 41 | #' @param newdata Matrix. New data. 42 | #' @param ... not used 43 | #' @noRd 44 | #' @keywords internal 45 | predict.mlc <- function(modelFit, newdata, ...){ 46 | if(inherits(modelFit, "train")) modelFit <- modelFit$finalModel 47 | classes <- modelFit$obsLevels 48 | pred <- predictMlcCpp(newdata, model = modelFit, nclasses = length(classes)) 49 | factor(classes[pred[,1]], classes) 50 | } 51 | 52 | #' Predict Maximum Likelihood Classification - Probabilities 53 | #' 54 | #' @param modelFit model result from mlc 55 | #' @param newdata Matrix. New data. 56 | #' @param ... not used 57 | #' @noRd 58 | #' @keywords internal 59 | predict.mlc.prob <- function(modelFit, newdata, ...){ 60 | if(inherits(modelFit, "train")) modelFit <- modelFit$finalModel 61 | classes <- modelFit$obsLevels 62 | if(is.data.frame(newdata)) newdata <- as.matrix(newdata) 63 | pred <- predictMlcCpp(newdata, model = modelFit, nclasses = length(classes)) 64 | pred <- pred[,-1] 65 | colnames(pred) <- classes 66 | pred 67 | } 68 | 69 | #' Define caret custom model for maximum likelihood classification 70 | #' @noRd 71 | #' @keywords internal 72 | mlcCaret <- list( 73 | label = "Maximum Likelihood Classification", 74 | library = NULL, 75 | type = "Classification", 76 | parameters = data.frame(parameter = "parameter", class = "class", label = "label"), 77 | grid = function (x, y, len = NULL, ...) {data.frame(parameter = "none")}, 78 | fit = mlc, 79 | predict = predict.mlc, 80 | prob = predict.mlc.prob, 81 | sort = function(x) x, 82 | levels = function(x) levels(x$levels) 83 | ) 84 | -------------------------------------------------------------------------------- /R/normImage.R: -------------------------------------------------------------------------------- 1 | #' Normalize Raster Images: Center and Scale 2 | #' 3 | #' For each pixel subtracts the mean of the raster layer and optionally divide by its standard deviation. 4 | #' 5 | #' @param img SpatRaster. Image to transform. Transformation will be performed separately for each layer. 6 | #' @param norm Logical. Perform normalization (scaling) in addition to centering, i.e. divide by standard deviation. 7 | #' @param ... further arguments passed to \link[terra]{writeRaster}. 8 | #' @return 9 | #' Returns a SpatRaster with the same number layers as input layers with each layer being centered and optionally normalized. 10 | #' @export 11 | #' @examples 12 | #' library(terra) 13 | #' ## Load example data 14 | #' 15 | #' ## Normalization: Center and Scale 16 | #' rlogo_center_norm <- normImage(rlogo) 17 | #' hist(rlogo_center_norm) 18 | #' 19 | #' ## Centering 20 | #' rlogo_center <- normImage(rlogo, norm = FALSE) 21 | normImage <- function(img, norm = TRUE, ...) 
{ 22 | img <- .toTerra(img) 23 | 24 | if(inherits(img, "SpatRaster")) { 25 | out <- scale(img, TRUE, norm) 26 | if("filename" %in% names(list(...))) out <- terra::writeRaster(out, ...) 27 | } else if(.canProcInMem(img)) { 28 | out <- img 29 | out[] <- scale(img[], center = TRUE, scale = norm) 30 | if("filename" %in% names(list(...))) out <- terra::writeRaster(out, ...) 31 | } else { 32 | means <- as.numeric(t(terra::global(img, "mean"))) 33 | names(means) <- names(img) 34 | sds <- if(norm){ 35 | as.numeric(t(terra::global(img, "mean"))) 36 | names(means) <- names(img) 37 | }else { 38 | rep(1, nlyr(img)) 39 | } 40 | 41 | stop() 42 | 43 | sds[sds == 0] <- 1 44 | if(nlyr(img) == 1) { 45 | out <- app(img, function(x) {(x - means)/sds}, ...) 46 | } else { 47 | out <- app(img, function(x) normImageCpp(x, M = means, S = sds), ...) 48 | } 49 | } 50 | return(out) 51 | } 52 | -------------------------------------------------------------------------------- /R/oneHotEncode.R: -------------------------------------------------------------------------------- 1 | #' One-hot encode a raster or vector 2 | #' 3 | #' Splits a categorical raster layer (or a vector) into a multilayer raster (or matrix). 4 | #' 5 | #' @param img SpatRaster or integer/numeric vector containing multiple classes 6 | #' @param classes integer: vector of classes which should be extracted 7 | #' @param background integer: background value (default = 0) 8 | #' @param foreground integer: foreground value (default = 1) 9 | #' @param na.rm logical: if \code{TRUE}, \code{NA}s will be coerced to the \code{background} value. 10 | #' @param ... further arguments passed to \link[terra]{writeRaster}. Ignored if img is not a SpatRaster, but a numeric/integer vector or matrix 11 | #' @return A SpatRaster with as many layers as there are classes. 12 | #' Pixels matching the class of interest are set to 1, backround values by default are set to 0 (see background argument) 13 | #' @export 14 | #' @examples 15 | #' \donttest{ 16 | #' sc <- unsuperClass(rlogo, nClasses = 3) 17 | #' 18 | #' ## one-hot encode 19 | #' sc_oneHot <- oneHotEncode(sc$map, classes = c(1,2,3)) 20 | #' 21 | #' ## check results 22 | #' sc_oneHot 23 | #' } 24 | oneHotEncode <- function(img, classes, background = 0, foreground = 1, na.rm = FALSE, ...) { 25 | stopifnot(inherits(img, c("SpatRaster", "integer", "numeric", "matrix"))) 26 | if (inherits(img, "SpatRaster")) { 27 | img <- .toTerra(img) 28 | if (nlyr(img) > 1) { 29 | warning(paste0("oneHotEncode() currently works on single layers only, but `img` has ", nlyr(img), " layers.", 30 | "\nDefaulting to first layer.", 31 | "\nSubmit a feature request at if you need it for more layers."), 32 | call. = FALSE) 33 | } 34 | out <- app(img[[1]], 35 | function(x, cl = classes, bg = background, fg = foreground, na.rm) 36 | oneHotCpp(x, classes = cl, bg = bg, fg = fg, na_rm = na.rm), 37 | na.rm = na.rm, ...) 38 | names(out) <- paste0("c_", classes) 39 | } else { 40 | out <- oneHotCpp(img, classes = classes, bg = background, fg = foreground, na_rm = na.rm) 41 | colnames(out) <- paste0("c_", classes) 42 | } 43 | out 44 | } 45 | -------------------------------------------------------------------------------- /R/rasterCVA.R: -------------------------------------------------------------------------------- 1 | #' Change Vector Analysis 2 | #' 3 | #' Calculates angle and magnitude of change vectors. 4 | #' Dimensionality is limited to two bands per image. 5 | #' 6 | #' @param x SpatRaster with two layers. 
This will be the reference/origin for the change calculations. Both rasters (x and y) need to correspond to each other, i.e. same resolution, extent and origin. 7 | #' @param y SpatRaster with two layers. Both rasters (x and y) need to correspond to each other, i.e. same resolution, extent and origin. 8 | #' @param tmf Numeric. Threshold median factor (optional). Used to calculate a threshold magnitude for which pixels are considered stable, i.e. no change. Calculated as \code{tmf * median(magnitude[magnitude > 0])}. 9 | #' @param nct Numeric. No-change threshold (optional). Alternative to \code{tmf}. Sets an absolute threshold. Change magnitudes below \code{nct} are considered stable and set to NA. 10 | #' @param ... further arguments passed to writeRaster 11 | #' @details 12 | #' Change Vector Analysis (CVA) is used to identify spectral changes between two images of the same scene which were acquired at different times. 13 | #' CVA is limited to two bands per image. For each pixel it calculates the change vector in the two-dimensional spectral space. 14 | #' For example, for a given pixel in images A and B the change vector of the red and nir bands is calculated between the coordinate pairs (red_A | nir_A) and (red_B | nir_B). 15 | #' 16 | #' The coordinate system is defined by the order of the input bands: the first band defines the x-axis and the second band the y-axis, respectively. 17 | #' Angles are returned *in degrees* beginning with 0 degrees pointing 'north', i.e. the y-axis, i.e. the second band. 18 | #' 19 | #' 20 | #' @return 21 | #' Returns a SpatRaster with two layers: change vector angle and change vector magnitude 22 | #' @export 23 | #' @examples 24 | #' library(terra) 25 | #' pca <- rasterPCA(lsat)$map 26 | #' 27 | #' ## Do change vector analysis 28 | #' cva <- rasterCVA(pca[[1:2]], pca[[3:4]]) 29 | #' cva 30 | rasterCVA <- function(x, y, tmf = NULL, nct = NULL, ...) { 31 | x <- .toTerra(x) 32 | y <- .toTerra(y) 33 | 34 | if(nlyr(x) != 2 | nlyr(y) != 2) 35 | stop("need two rasters with two layers each") 36 | 37 | doClamp <- !is.null(tmf) || !is.null(nct) 38 | 39 | if(!is.null(tmf) && !is.null(nct)) { 40 | stop("'tmf' and 'nct' are exclusive options, cannot use both.", call. = FALSE) 41 | } 42 | 43 | if(!is.null(tmf)) { 44 | maxMag <- sqrt(sum((as.numeric(t(terra::global(x, "max", na.rm=T))) - as.numeric(t(terra::global(y, "max", na.rm=T))) )^2))*2 45 | medianBuckets <- seq(1e-10, maxMag, length.out = 2e5) 46 | RStoolbox_rasterCVAEnv <- new.env() 47 | RStoolbox_rasterCVAEnv$medianTable <- 0 48 | } 49 | anglefun <- function(values,tmf,...) { 50 | dif <- values[,3:4] - values[,1:2] 51 | magnitude <- sqrt(rowSums(dif^2)) 52 | 53 | if(!is.null(tmf)) { 54 | RStoolbox_rasterCVAEnv$medianTable <- RStoolbox_rasterCVAEnv$medianTable + table(cut(magnitude, medianBuckets), useNA = "no") 55 | } 56 | 57 | angle <- rep(0, length(magnitude)) 58 | sel <- !is.na(magnitude) 59 | angle[sel] <- atan2(dif[sel,1],dif[sel,2]) / pi * 180 60 | negang <- angle < 0 61 | angle[negang] <- 360 + angle[negang] 62 | angle[!sel] <- NA 63 | cbind(angle, magnitude) 64 | } 65 | 66 | X <- c(x,y) 67 | out <- rast(x, 2) 68 | 69 | names(out) <- c("angle", "magnitude") 70 | ellips <- list(...) 71 | 72 | if(.canProcInMem(X, 2) ) { 73 | out[] <- anglefun(values(X), tmf) 74 | if(!doClamp && !is.null(ellips$filename)){ 75 | out <- writeRaster(out, ...) 
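## The branch above handles rasters small enough to process in memory; the else-branch
## below streams the same anglefun() computation block-wise (readStart/writeStart over
## terra::blocks) so that images too large to fit in memory are processed chunk by chunk.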
76 | } 77 | } else { 78 | magfile <- if(!is.null(ellips$filename) && !doClamp) ellips$filename else .terraTmpFile() 79 | X <- readStart(X) 80 | out <- writeStart(out, filename = magfile, ...) 81 | tr <- terra::blocks(out) 82 | for (i in 1:tr$n) { 83 | vo <- values(X, row=tr$row[i], nrows=tr$nrows[i]) 84 | vo <- anglefun(vo, tmf) 85 | out <- writeValues(out, vo, tr$row[i]) 86 | } 87 | out <- writeStop(out) 88 | X <- readStop(X) 89 | } 90 | 91 | if(doClamp) { 92 | if(!is.null(tmf)) { 93 | ci <- which(cumsum(RStoolbox_rasterCVAEnv$medianTable) > sum(RStoolbox_rasterCVAEnv$medianTable) / 2)[1] 94 | medianEstimate <- mean(medianBuckets[c(ci,ci+1)]) 95 | nct <- tmf * medianEstimate 96 | rm(RStoolbox_rasterCVAEnv) 97 | } 98 | out <- do.call(terra::clamp, c(list(x = out, lower=nct), ellips)) 99 | 100 | names(out) <- c("angle", "magnitude") 101 | } 102 | 103 | return(out) 104 | } 105 | 106 | -------------------------------------------------------------------------------- /R/rasterEntropy.R: -------------------------------------------------------------------------------- 1 | #' Multi-layer Pixel Entropy 2 | #' 3 | #' Shannon entropy is calculated for each pixel based on its layer values. 4 | #' To be used with categorical / integer valued rasters. 5 | #' 6 | #' Entropy is calculated as -sum(p log(p)); p being the class frequency per pixel. 7 | #' 8 | #' @param img SpatRaster 9 | #' @param ... additional arguments passed to writeRaster 10 | #' @return 11 | #' SpatRaster "entropy" 12 | #' @export 13 | #' @examples 14 | #' re <- rasterEntropy(rlogo) 15 | #' ggR(re, geom_raster = TRUE) 16 | rasterEntropy <- function(img, ...){ 17 | img <- .toTerra(img) 18 | if(nlyr(img) <= 1) 19 | stop("img must have at least two layers") 20 | out <- app(img, fun = entropyCpp, ...) 21 | out <- .updateLayerNames(out, "entropy") 22 | out 23 | } 24 | 25 | 26 | -------------------------------------------------------------------------------- /R/readEE.R: -------------------------------------------------------------------------------- 1 | #' Tidy import tool for EarthExplorer .csv export files 2 | #' 3 | #' Imports and tidies CSV files exported from EarthExplorer into data.frames and annotates missing fields. 4 | #' 5 | #' @param x Character or list. One or more paths to EarthExplorer export files. 6 | #' @return data.frame 7 | #' @details 8 | #' The \href{https://earthexplorer.usgs.gov/}{EarthExplorer} CSV file can be produced from the search results page. Above the results click on 'export results' and select 'comma (,) delimited'. 9 | #' 10 | #' Note that only a subset of columns deemed interesting is imported. Please contact the maintainer if you think an omitted column should be included. 
11 | #' 12 | #' @export 13 | #' @examples 14 | #' library(ggplot2) 15 | #' ee <- readEE(system.file("external/EarthExplorer_LS8.txt", package = "RStoolbox")) 16 | #' 17 | #' ## Scenes with cloud cover < 20% 18 | #' ee[ee$Cloud.Cover < 20,] 19 | #' 20 | #' ## Available time-series 21 | #' ggplot(ee) + 22 | #' geom_segment(aes(x = Date, xend = Date, y = 0, yend = 100 - Cloud.Cover, 23 | #' col = as.factor(Year))) + 24 | #' scale_y_continuous(name = "Scene quality (% clear sky)") 25 | #' 26 | readEE <- function(x) { 27 | 28 | llee <- lapply(x, function(ix){ 29 | if(!file.exists(ix)) stop(paste0("Can't find file ", ix), call.=FALSE ) 30 | df <- read.csv(ix, stringsAsFactors = FALSE, quote = "", fileEncoding = "latin1") 31 | namesLut <- c("Scene.Cloud.Cover"="Cloud.Cover", "Acquisition.Date"="Date.Acquired", "Day.Night.Indicator"="Day.Night") 32 | for(i in names(namesLut)) { 33 | names(df)[names(df) == i] <- namesLut[i] 34 | } 35 | allLScats <- c("Landsat.Scene.Identifier", "WRS.Path", "WRS.Row", "Data.Category", "Cloud.Cover", 36 | "Station.Identifier", "Day.Night", "Data.Type.Level.1", "Date.Acquired", 37 | "Sun.Elevation", "Sun.Azimuth", "Geometric.RMSE.Model.X", 38 | "Geometric.RMSE.Model.Y", "Display.ID", "Ordering.ID", "Download.Link", "Browse.Link") 39 | 40 | inter <- allLScats %in% colnames(df) 41 | df <- df[,allLScats[inter]] 42 | df[,allLScats[!inter]] <- NA 43 | df <- df[, allLScats] 44 | df$Date <- as.POSIXct(df$Date.Acquired, format = "%Y/%m/%d") 45 | df$Doy <- as.numeric(format(df$Date, format = "%j")) 46 | df$Year <- as.numeric(format(df$Date, format = "%Y")) 47 | df$Satellite <- paste0("LS", substr(df$Landsat.Scene.Identifier, 3, 3)) 48 | df$Num <- as.numeric(substr(df$Landsat.Scene.Identifier,3,3)) 49 | df 50 | }) 51 | out <- do.call("rbind", llee) 52 | return(out) 53 | } 54 | 55 | -------------------------------------------------------------------------------- /R/rsOpts.R: -------------------------------------------------------------------------------- 1 | #' Set global options for RStoolbox 2 | #' 3 | #' shortcut to options(RStoolbox.*) 4 | #' 5 | #' @param verbose Logical. If \code{TRUE} many functions will print status messages about the current processing step. By default verbose mode is disabled. 6 | #' @param idxdb List. The list conatins the formal calculation of spectral indices. Modify this list to pipe your own spectral index through the internal C++ calculation of RStoolbox. 7 | #' @export 8 | #' @return 9 | #' No return, just a setter for the verbosiness and the index-database of the RStoolbox package. For latter, see the example of Rstoolbox::spectralIndices() 10 | #' @examples 11 | #' rsOpts(verbose=TRUE) 12 | #' 13 | rsOpts <- function(verbose=NULL, idxdb=NULL){ 14 | if(!is.null(verbose)){ 15 | options(RStoolbox.verbose=verbose) 16 | } 17 | if(!is.null(idxdb)){ 18 | options(RStoolbox.idxdb=idxdb) 19 | } 20 | } 21 | 22 | 23 | -------------------------------------------------------------------------------- /R/sam.R: -------------------------------------------------------------------------------- 1 | #' Spectral Angle Mapper 2 | #' 3 | #' Calculates the angle in spectral space between pixels and a set of reference spectra (endmembers) for image classification based on spectral similarity. 4 | #' 5 | #' @param img SpatRaster. Remote sensing imagery. 6 | #' @param em Matrix or data.frame with endmembers. Each row should contain the endmember spectrum of a class, i.e. columns correspond to bands in \code{img}. It is reccomended to set the rownames to class names. 
7 | #' @param angles Logical. If \code{TRUE} a SpatRaster with one layer per endmember will be returned, containing the spectral angles. 8 | #' @param ... further arguments to be passed to \code{\link[terra]{writeRaster}} 9 | #' @export 10 | #' @details 11 | #' For each pixel the spectral angle mapper calculates the angle between the vector defined by the pixel values and each endmember vector. The result of this is 12 | #' one raster layer for each endmember containing the spectral angle. The smaller the spectral angle the more similar a pixel is to a given endmember class. 13 | #' In a second step one can then go ahead and enforce thresholds of maximum angles or simply classify each pixel to the most similar endmember. 14 | #' @return SpatRaster 15 | #' If \code{angles = FALSE} a single layer will be returned in which each pixel is assigned to the closest endmember class (integer pixel values correspond to row order of \code{em}). 16 | #' @examples 17 | #' library(terra) 18 | #' library(ggplot2) 19 | #' 20 | #' ## Sample endmember spectra 21 | #' ## First location is water, second is open agricultural vegetation 22 | #' pts <- data.frame(x = c(624720, 627480), y = c(-414690, -411090)) 23 | #' endmembers <- extract(lsat, pts) 24 | #' rownames(endmembers) <- c("water", "vegetation") 25 | #' 26 | #' ## Calculate spectral angles 27 | #' lsat_sam <- sam(lsat, endmembers, angles = TRUE) 28 | #' plot(lsat_sam) 29 | #' 30 | #' ## Classify based on minimum angle 31 | #' lsat_sam <- sam(lsat, endmembers, angles = FALSE) 32 | #' 33 | #' ggR(lsat_sam, forceCat = TRUE, geom_raster=TRUE) + 34 | #' scale_fill_manual(values = c("blue", "green"), labels = c("water", "vegetation")) 35 | sam <- function(img, em, angles = FALSE, ...){ 36 | img <- .toTerra(img) 37 | 38 | if(is.vector(em)) { 39 | em <- matrix(em, nrow = 1, ncol=length(em)) 40 | } else if (is.data.frame(em)) { 41 | em <- as.matrix(em, ncol=length(colnames(em))) 42 | } 43 | 44 | em <- em[, colnames(em) != "ID", drop=F] 45 | if(ncol(em) != nlyr(img)) stop("The number of columns in em must match the number of bands in x.") 46 | if(!angles && nrow(em) == 1){ 47 | stop(paste0("With only one class an image classification does not make sense.", 48 | "\nUse angles=TRUE to calculate the spectral angles for your class without adding a classification on top."), 49 | call. = FALSE ) 50 | } 51 | 52 | ## Calculate angles 53 | out <- app(img, fun = function(xi, emc=em) {specSimC(x=xi, em=emc)}, ... ) 54 | if(is.null(rownames(em))) rownames(em) <- paste0("s", 1:nrow(em)) 55 | names(out) <- paste0(rownames(em), "_sa") 56 | 57 | ## Select minimum angle 58 | if(!angles) { 59 | out <- which.min(out) 60 | names(out) <- "class" 61 | } 62 | return(out) 63 | 64 | } -------------------------------------------------------------------------------- /R/saveRSTBX.R: -------------------------------------------------------------------------------- 1 | #' Save and Read RStoolbox Classification Results 2 | #' 3 | #' Saves objects of classes unsuperClass, superClass, rasterPCA and fCover to 4 | #' file. Useful to archive the fitted models. 5 | #' 6 | #' @return 7 | #' The output of saveRSTBX will be at least two files written to disk: 8 | #' a) an .rds file containing the object itself and 9 | #' b) the raster file (depending on the driver you choose this can be more than two files). 10 | #' 11 | #' @note All files must be kept in the same directory to read the full object back into R 12 | #' by means of readRSTBX. 
You can move them to another location but you'll have to move *all* of them 13 | #' (just like you would with Shapefiles). In case the raster file(s) is missing, readRSTBX will still 14 | #' return the object but the raster will be missing. 15 | #' 16 | #' writeRSTBX and readRSTBX are convenience wrappers around saveRDS, readRDS. This means 17 | #' you can read all files created this way also with base functionality as long as you don't move your files. 18 | #' This is because x$map is a SpatRaster object and hence contains only a static link to the file on disk. 19 | #' 20 | #' @param x RStoolbox object of classes c("fCover", "rasterPCA", "superClass", "unsuperClass") 21 | #' @param filename Character. Path and filename. Any file extension will be ignored. 22 | #' @param format Character. Driver to use for the raster file 23 | #' @param ... further arguments passed to writeRaster 24 | #' @export 25 | #' @examples 26 | #' \dontrun{ 27 | #' input <- rlogo 28 | #' ## Create filename 29 | #' file <- paste0(tempdir(), "/test", runif(1)) 30 | #' ## Run PCA 31 | #' rpc <- rasterPCA(input, nSample = 100) 32 | #' ## Save object 33 | #' saveRSTBX(rpc, filename=file) 34 | #' ## Which files were written? 35 | #' list.files(tempdir(), pattern = basename(file)) 36 | #' ## Re-read files 37 | #' re_rpc <- readRSTBX(file) 38 | #' ## Remove files 39 | #' file.remove(list.files(tempdir(), pattern = basename(file), full = TRUE)) 40 | #' } 41 | #' @name saveRSTBX 42 | NULL 43 | 44 | #' @describeIn saveRSTBX Save RStoolbox object to file 45 | saveRSTBX <- function(x, filename, format = "raster", ...){ 46 | stopifnot(inherits(x, "RStoolbox")) 47 | 48 | if(!inherits(x$map, "SpatRaster")){ 49 | x$map <- .toTerra(x$map) 50 | } 51 | 52 | rdsFile <- rastFile <- .fullPath(filename) 53 | rdsFile <- paste0(rdsFile, ".rds") 54 | rastFile <- paste0(rastFile, .rasterExtension(format)) 55 | 56 | f <- terra::sources(x$map) 57 | 58 | if(inMemory(x$map)){ 59 | x$map <- terra::wrap(writeRaster(x$map, filename = rastFile, ...)) 60 | } else { 61 | if(f != rastFile){ 62 | x$map <- terra::wrap(writeRaster(x$map, filename = rastFile, ...)) 63 | } 64 | } 65 | 66 | base::saveRDS(x, rdsFile) 67 | } 68 | 69 | #' @describeIn saveRSTBX Read files saved with saveRSTBX 70 | #' @export 71 | readRSTBX <- function(filename){ 72 | rdsFile <- rastFile <- .fullPath(filename) 73 | 74 | x <- readRDS(rdsFile) 75 | 76 | try(x$map <- terra::unwrap(x$map), silent = FALSE) 77 | 78 | if(!inherits(x, "RStoolbox")) 79 | stop(filename, "is not a RStoolbox object.", call. = FALSE) 80 | 81 | if(inherits(x$map, "SpatRaster")){ 82 | rastFile <- paste0(rastFile, terra::sources(x$map)) 83 | if(!file.exists(rastFile)) { 84 | warning("Corresponding raster file ", rastFile, " cannot be found. \nThe *.rds and the raster file must be located in the same directory.") 85 | x$map <- "Raster map not found" 86 | } 87 | } 88 | x 89 | } 90 | -------------------------------------------------------------------------------- /R/sysdata.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/R/sysdata.rda -------------------------------------------------------------------------------- /R/tasseledCap.R: -------------------------------------------------------------------------------- 1 | #' Tasseled Cap Transformation 2 | #' 3 | #' Calculates brightness, greenness and wetness from multispectral imagery. 
4 | #' Currently implemented Landsat 4 TM, Landsat 5 TM, Landsat 7ETM+, Landsat 8 OLI, MODIS, QuickBird, Spot5 and RapidEye. 5 | #' 6 | #' @param img SpatRaster. Input image. Band order must correspond to sensor specifications (see Details and Examples) 7 | #' @param sat Character. Sensor; one of: c("Landsat4TM", "Landsat5TM", "Landsat7ETM", "Landsat8OLI", "MODIS", "QuickBird", "Spot5", "RapidEye"). Case is irrelevant. 8 | #' @param ... Further arguments passed to writeRaster. 9 | #' @export 10 | #' @details 11 | #' Currently implemented: Landsat 4 TM, Landsat 5 TM, Landsat 7ETM+, Landsat 8 OLI, MODIS, QuickBird, Spot5, RapdiEye. 12 | #' Input data must be in top of atmosphere reflectance. 13 | #' Moreover, bands must be provided in ascending order as listed in the table below. 14 | #' Irrelevant bands, such as Landsat Thermal Bands or QuickBird/Spot5 Panchromatic Bands must be omitted. 15 | #' Required bands are: 16 | #' \tabular{rrrl}{ 17 | #' sat \tab bands \tab coefficients \tab data unit\cr 18 | #' Landsat4TM \tab 1,2,3,4,5,7 \tab Crist 1985 \tab reflectance \cr 19 | #' Landsat5TM \tab 1,2,3,4,5,7 \tab Crist 1985 \tab reflectance \cr 20 | #' Landsat7ETM \tab 1,2,3,4,5,7 \tab Huang 2002 \tab reflectance \cr 21 | #' Landsat8OLI \tab 2,3,4,5,6,7 \tab Baig 2014 \tab reflectance \cr 22 | #' MODIS \tab 1,2,3,4,5,6,7 \tab Lobser 2007 \tab reflectance \cr 23 | #' QuickBird \tab 2,3,4,5 \tab Yarbrough 2005 \tab reflectance \cr 24 | #' Spot5 \tab 2,3,4,5 \tab Ivtis 2008 \tab reflectance \cr 25 | #' RapidEye \tab 1,2,3,4,5 \tab Schoenert 2014 \tab reflectance \cr 26 | #' } 27 | #' @references 28 | #' Crist (1985) "A TM Tasseled Cap Equivalent Transformation for Reflectance Factor Data." Remote Sensing of Environment 17 (3): 301-306 29 | #' 30 | #' Huang et al. (2002) "Derivation of a Tasselled Cap Transformation Based on Landsat 7 At-Satellite Reflectance." International Journal of Remote Sensing 23 (8): 1741-1748 31 | #' 32 | #' Baig et al. (2014) "Derivation of a Tasselled Cap Transformation Based on Landsat 8 At-Satellite Reflectance." Remote Sensing Letters 5 (5): 423-431. 33 | #' 34 | #' Lobser et al. (2007) "MODIS Tasselled Cap: Land Cover Characteristics Expressed through Transformed MODIS Data." International Journal of Remote Sensing 28 (22): 5079-5101. 35 | #' 36 | #' Yarbrough et al. (2005) "QuickBird 2 tasseled cap transform coefficients: a comparison of derivation methods." Pecora 16 Global Priorities in Land Remote Sensing: 23-27. 37 | #' 38 | #' Ivits et al. (2008) "Orthogonal transformation of segmented SPOT5 images." Photogrammetric Engineering & Remote Sensing 74 (11): 1351-1364. 39 | #' 40 | #' Schoenert et al. (2014) "Derivation of tasseled cap coefficients for RapidEye data." Earth Resources and Environmental Remote Sensing/GIS Applications V (9245): 92450Qs. 41 | #' @return 42 | #' Returns a SpatRaster with the thee bands: brigthness, greenness, and (soil) wetness. 43 | #' @examples 44 | #' library(terra) 45 | #' 46 | #' ## Run tasseled cap (exclude thermal band 6) 47 | #' lsat_tc <- tasseledCap(lsat[[c(1:5,7)]], sat = "Landsat5TM") 48 | #' lsat_tc 49 | #' plot(lsat_tc) 50 | tasseledCap <- function(img, sat, ...) { 51 | img <- .toTerra(img) 52 | 53 | sat <- tolower(sat) 54 | if(!sat %in% c("landsat4tm" , "landsat5tm" , "landsat7etm" ,"landsat8oli", "modis", "quickbird", "spot5", "rapideye")) stop("Sensor not implemented. 
See ?tasseledCap for options.") 55 | 56 | if(nlyr(img) != nrow(.TCcoefs[[sat]])) stop("Number of layers does not match required number of layers") 57 | 58 | tct <- function(x, cof = .TCcoefs[[sat]]) { 59 | x %*% cof 60 | } 61 | 62 | out <- app(img, fun = tct, ...) 63 | out <- .updateLayerNames(out, colnames(.TCcoefs[[sat]])) 64 | out 65 | } 66 | 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /R/utils-pipe.R: -------------------------------------------------------------------------------- 1 | #' Pipe operator 2 | #' 3 | #' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details. 4 | #' 5 | #' @name %>% 6 | #' @rdname pipe 7 | #' @keywords internal 8 | #' @export 9 | #' @importFrom magrittr %>% 10 | #' @usage lhs \%>\% rhs 11 | #' @param lhs A value or the magrittr placeholder. 12 | #' @param rhs A function call using the magrittr semantics. 13 | #' @return The result of calling `rhs(lhs)`. 14 | NULL 15 | -------------------------------------------------------------------------------- /R/validateMap.R: -------------------------------------------------------------------------------- 1 | 2 | #' Map accuracy assessment 3 | #' 4 | #' validate a map from a classification or regression model. This can be useful to update the accuracy assessment after filtering, e.g. for a minimum mapping unit. 5 | #' 6 | #' @param map SpatRaster. The classified map. 7 | #' @param valData sf object with validation data (POLYGONs or POINTs). 8 | #' @param nSamplesV Integer. Number of pixels to sample for validation (only applies to polygons). 9 | #' @param responseCol Character. Column containing the validation data in attribute table of \code{valData}. 10 | #' @param mode Character. Either 'classification' or 'regression'. 11 | #' @param classMapping optional data.frame with columns \code{'class'} and \code{'classID'} defining the mapping from raster integers to class names. 
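#' A minimal sketch of such a mapping (class names purely illustrative) would be
#' \code{data.frame(classID = 1:3, class = c("water", "forest", "urban"))};
#' in practice it is typically taken straight from a fitted model, e.g. \code{sc$classMapping} as used in the examples below.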
12 | #' @return 13 | #' Returns a structured list containing the performance measures (confusion matrix and accuracy statistics for classification, or regression fit statistics) and the sampled validation set 14 | #' @export 15 | #' @examples 16 | #' library(caret) 17 | #' library(terra) 18 | #' 19 | #' ## Training data 20 | #' poly <- readRDS(system.file("external/trainingPolygons_lsat.rds", package="RStoolbox")) 21 | #' 22 | #' ## Split training data into training and validation set (50%-50%) 23 | #' splitIn <- createDataPartition(poly$class, p = .5)[[1]] 24 | #' train <- poly[splitIn,] 25 | #' val <- poly[-splitIn,] 26 | #' 27 | #' ## Classify (deliberately poorly) 28 | #' sc <- superClass(lsat, trainData = train, responseCol = "class", nSamples = 50, model = "mlc") 29 | #' 30 | #' ## Polish map with majority filter 31 | #' 32 | #' polishedMap <- focal(sc$map, matrix(1,3,3), fun = modal) 33 | #' 34 | #' ## Validation 35 | #' ## Before filtering 36 | #' val0 <- validateMap(sc$map, valData = val, responseCol = "class", 37 | #' classMapping = sc$classMapping) 38 | #' ## After filtering 39 | #' val1 <- validateMap(polishedMap, valData = val, responseCol = "class", 40 | #' classMapping = sc$classMapping) 41 | validateMap <- function(map, valData, responseCol, nSamplesV = 500, mode = "classification", classMapping = NULL){ 42 | map <- .toTerra(map) 43 | valData <- .toSf(valData) 44 | 45 | stopifnot(responseCol %in% names(valData), mode %in% c("classification", "regression")) 46 | 47 | valiSet <- .samplePixels(v = valData, r = map, responseCol = responseCol, 48 | nSamples = nSamplesV, trainCells = NULL, classMapping = classMapping) 49 | colnames(valiSet[[1]]) <- c("reference", "prediction") 50 | if(mode=="classification") { 51 | if(!is.null(classMapping)) { 52 | valiSet[[1]][,"prediction"] <- classMapping[match(valiSet[[1]][,"prediction"], classMapping$classID),"class"] 53 | } 54 | performance <- confusionMatrix(as.factor(valiSet[[1]][,"prediction"]), reference = as.factor(valiSet[[1]][,"reference"])) 55 | } else { 56 | performance <- postResample(pred = valiSet[[1]][,"prediction"], obs = valiSet[[1]][,"reference"]) 57 | } 58 | valiSet <- do.call("cbind",valiSet) 59 | colnames(valiSet) <- c("reference", "prediction", "cell") 60 | out <- list(performance = performance, validationSet = valiSet) 61 | structure(out, class = c("mapValidation", "RStoolbox")) 62 | 63 | } 64 | 65 | 66 | 67 | #' @method print mapValidation 68 | #' @export 69 | print.mapValidation <- function(x,...){ 70 | cat("$performance\n") 71 | print(x$performance) 72 | cat("\n$validationSet\n") 73 | if(nrow(x$validationSet) < 6){ 74 | print(x$validationSet) 75 | } else { 76 | cat("Total number of validation samples: ", nrow(x$validationSet), "\n") 77 | cat("Validation samples per class:") 78 | print(table(x$validationSet$reference)) 79 | cat("\n") 80 | print(x$validationSet[c(1:3),]) 81 | cat("...\n") 82 | write.table(format(tail(x$validationSet,3), justify="right"), col.names=F, quote=F) 83 | } 84 | } -------------------------------------------------------------------------------- /R/zzz.R: -------------------------------------------------------------------------------- 1 | .onAttach <- function(libname = find.package("RStoolbox"), pkgname = "RStoolbox") { 2 | packageStartupMessage("This is version ", utils::packageVersion(pkgname), " of ", pkgname) 3 | 4 | lsat_rs <- terra::readRDS(system.file("external", "lsat.rds", package = pkgname)) 5 | assign("lsat", lsat_rs, envir=as.environment("package:RStoolbox")) 6 | 7 | srtml_rs <- terra::readRDS(system.file("external", "srtm_lsat.rds", package = pkgname)) 8 |
assign("srtm", srtml_rs, envir=as.environment("package:RStoolbox")) 9 | 10 | rlogo_rs <- terra::readRDS(system.file("external", "rlogo.rds", package = pkgname)) 11 | assign("rlogo", rlogo_rs, envir=as.environment("package:RStoolbox")) 12 | 13 | sen2_rs <- terra::readRDS(system.file("external", "sen2.rds", package = pkgname)) 14 | assign("sen2", sen2_rs, envir=as.environment("package:RStoolbox")) 15 | 16 | srtms_rs <- terra::readRDS(system.file("external", "srtm_sen2.rds", package = pkgname)) 17 | assign("srtm_sen2", srtms_rs, envir=as.environment("package:RStoolbox")) 18 | 19 | } 20 | -------------------------------------------------------------------------------- /_pkgdown.yml: -------------------------------------------------------------------------------- 1 | url: https://bleutner.github.io/RStoolbox/ 2 | 3 | template: 4 | bootstrap: 5 5 | 6 | development: 7 | mode: release 8 | 9 | home: 10 | links: 11 | - text: Find out more about our work 12 | href: http://remote-sensing.org 13 | 14 | navbar: 15 | #type: inverse 16 | left: 17 | - text: "News" 18 | href: news/index.html 19 | - text: "Functions" 20 | href: reference/index.html 21 | 22 | right: 23 | - icon: fa-github fa-lg fab 24 | href: http://www.github.com/bleutner -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | This release fixes test results as requested by CRAN(Ripley/10.2.2022). 2 | Plus additional functional changes and fixes. 3 | 4 | ### R CMD checks 5 | ### Test environments 6 | * Ubuntu 20.04 64bit (oldrel, release, devel) 7 | * winbuilder (oldrel, release, devel) 8 | * MacOS (release) 9 | 10 | ### R CMD check results 11 | There were no ERRORs or WARNINGs 12 | 13 | ### Downstream dependencies 14 | fieldRS OK 15 | foster OK 16 | rtsVis OK 17 | PlanetNICFI OK 18 | spatialEco OK 19 | 20 | ### Changelog: 21 | #### RStoolbox 0.3.0 22 | ###### New: 23 | * `rasterCVA()` by default no longer enforces a minimal change magnitude (can still be accomplished with the `tmf` argument). 24 | Also a new argument `nct` allows to fix this threshold to a user selected value instead of deriving it based on the median of the observed change magnitudes. 25 | * `unsuperClass()` has a new argument `output` which allows to return the distances to all cluster centers as raster layers, instead of the class itself 26 | * added spectral index kNDVI in `spectralIndices()` as suggested by Camps-Valls et al (2021) 27 | * added support for `terra::SpatRast` objects throughout RStoolbox (as alternative to `raster` objects). Note: internal functionality is still based on `raster`. 28 | 29 | ##### Changes: 30 | * arguments `master` and `slave` in `coregisterImages()` were deprecated in favor of `ref` and `img`, respectively (closes #63, suggested by @MatthiasSiewert) 31 | 32 | ##### Fixes: 33 | * `rasterCVA()` estimates median values now for entire rasters and not per chunk 34 | * `cloudMask()` now returns NA for non-clouds instead of NaN 35 | * `topCor()` now works for tiny rasters as well (fixes #55, reported by @latenooker) 36 | * `rasterPCA()` now correctly considers the number observations in face of missing values (fixes #79, reported by @andliszmmu) 37 | * `superClass()` now accepts different geometries for trainData and valData (fixes #73, suggested by @Silviculturalist) 38 | * fix `readMeta()` for MTL files delivered with Landsat collection data (fixes #71, reported by @jkoellin et al.) 
39 | -------------------------------------------------------------------------------- /data-raw/sun_earth_dists.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/data-raw/sun_earth_dists.rds -------------------------------------------------------------------------------- /inst/CITATION: -------------------------------------------------------------------------------- 1 | citHeader("Please cite RStoolbox, e.g. when you use it in projects or publications:") 2 | 3 | bibentry( 4 | bibtype = "Article", 5 | title = "RStoolbox: An R package for Remote Sensing Data Analysis", 6 | author = "Konstantin Müller and Jakob Schwalb-Willmann and Martin Wegmann and Benjamin Leutner", 7 | journal = "Methods in Ecology and Evolution", 8 | doi = "10.1111/2041-210X.14451", 9 | year = "2024" 10 | ) 11 | -------------------------------------------------------------------------------- /inst/external/EarthExplorer_LS8.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/EarthExplorer_LS8.txt -------------------------------------------------------------------------------- /inst/external/landsat/LT52240631988227CUB02_B1.TIF: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/landsat/LT52240631988227CUB02_B1.TIF -------------------------------------------------------------------------------- /inst/external/landsat/LT52240631988227CUB02_B2.TIF: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/landsat/LT52240631988227CUB02_B2.TIF -------------------------------------------------------------------------------- /inst/external/landsat/LT52240631988227CUB02_B3.TIF: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/landsat/LT52240631988227CUB02_B3.TIF -------------------------------------------------------------------------------- /inst/external/landsat/LT52240631988227CUB02_B4.TIF: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/landsat/LT52240631988227CUB02_B4.TIF -------------------------------------------------------------------------------- /inst/external/landsat/LT52240631988227CUB02_B5.TIF: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/landsat/LT52240631988227CUB02_B5.TIF -------------------------------------------------------------------------------- /inst/external/landsat/LT52240631988227CUB02_B6.TIF: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/landsat/LT52240631988227CUB02_B6.TIF -------------------------------------------------------------------------------- /inst/external/landsat/LT52240631988227CUB02_B7.TIF: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/landsat/LT52240631988227CUB02_B7.TIF -------------------------------------------------------------------------------- /inst/external/lsat.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/lsat.rds -------------------------------------------------------------------------------- /inst/external/rlogo.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/rlogo.rds -------------------------------------------------------------------------------- /inst/external/sen2.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/sen2.rds -------------------------------------------------------------------------------- /inst/external/srtm_lsat.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/srtm_lsat.rds -------------------------------------------------------------------------------- /inst/external/srtm_sen2.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/srtm_sen2.rds -------------------------------------------------------------------------------- /inst/external/trainingPoints_rlogo.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/trainingPoints_rlogo.rds -------------------------------------------------------------------------------- /inst/external/trainingPolygons_lsat.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/trainingPolygons_lsat.rds -------------------------------------------------------------------------------- /inst/external/trainingPolygons_sen2.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/trainingPolygons_sen2.rds -------------------------------------------------------------------------------- /inst/external/vegSpec.sli: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/inst/external/vegSpec.sli -------------------------------------------------------------------------------- /man-roxygen/examples_SLI.R: -------------------------------------------------------------------------------- 1 | #' @examples 2 | #' 3 | #' ## Example data 4 | #' sliFile <- system.file("external/vegSpec.sli", package="RStoolbox") 5 | #' sliTmpFile <- paste0(tempdir(),"/vegetationSpectra.sli") 6 | #' 7 | #' ## Read spectral library 8 | #' sli <- readSLI(sliFile) 9 | #' head(sli) 
10 | #' plot(sli[,1:2], col = "orange", type = "l") 11 | #' lines(sli[,c(1,3)], col = "green") 12 | #' 13 | #' ## Write to binary spectral library 14 | #' writeSLI(sli, path = sliTmpFile) 15 | -------------------------------------------------------------------------------- /man-roxygen/examples_cloudMask.R: -------------------------------------------------------------------------------- 1 | #' @examples 2 | #' library(ggplot2) 3 | #' ## Import Landsat example subset 4 | #' ## We have two tiny clouds in the east 5 | #' \donttest{ggRGB(lsat, stretch = "lin")} 6 | #' 7 | #' ## Calculate cloud index 8 | #' cldmsk <- cloudMask(lsat, blue = 1, tir = 6) 9 | #' \donttest{ggR(cldmsk, 2, geom_raster = TRUE) } 10 | #' 11 | #' ## Define threshold (re-use the previously calculated index) 12 | #' ## Everything above the threshold is masked 13 | #' ## In addition we apply a region-growing around the core cloud pixels 14 | #' cldmsk_final <- cloudMask(cldmsk, threshold = 0.1, buffer = 5) 15 | #' 16 | #' ## Plot cloudmask 17 | #' \donttest{ggRGB(lsat, stretch = "lin") + 18 | #' ggR(cldmsk_final[[1]], ggLayer = TRUE, forceCat = TRUE, geom_raster = TRUE) + 19 | #' scale_fill_manual(values = c("red"), na.value = NA) 20 | #' } 21 | #' ## Estimate cloud shadow displacement 22 | #' ## Interactively (click on cloud pixels and the corresponding shadow pixels) 23 | #' \dontrun{ shadow <- cloudShadowMask(lsat, cldmsk_final, nc = 2) } 24 | #' 25 | #' ## Non-interactively. Pre-defined shadow displacement estimate (shiftEstimate) 26 | #' \donttest{shadow <- cloudShadowMask(lsat, cldmsk_final, shiftEstimate = c(-16,-6))} 27 | #' 28 | #' ## Plot 29 | #' \donttest{csmask <- terra::merge(cldmsk_final[[1]], shadow) 30 | #' ggRGB(lsat, stretch = "lin") + 31 | #' ggR(csmask, ggLayer = TRUE, forceCat = TRUE, geom_raster = TRUE) + 32 | #' scale_fill_manual(values = c("blue", "yellow"), 33 | #' labels = c("shadow", "cloud"), na.value = NA) 34 | #' } 35 | -------------------------------------------------------------------------------- /man-roxygen/spectralIndices_table.R: -------------------------------------------------------------------------------- 1 | #' @details 2 | #' \code{spectralIndices} calculates all indices in one go in C++, which is more efficient than calculating each index separately (for large rasters). 3 | #' By default all indices which can be calculated from the specified bands will be calculated. If you don't want all of them, use the \code{indices} argument to specify exactly which indices are to be calculated. 4 | #' See the table below for index names and required bands. 5 | #' 6 | #' Index values outside the valid value ranges (if such a range exists) will be set to NA. For example a pixel with NDVI > 1 will be set to NA. 7 | #' 8 | #' 9 | #' <% fr <- sapply(.IDXdbFormulae, function(x) paste0("\\code{",paste0(names(formals(x)), collapse=", "),"}")) %> 10 | #' <% dl <- sapply(.IDXdbFormulae, function(x) paste0("\\eqn", paste0(body(x), collapse=""), "}")) %> 11 | #' <% fn <- sapply(.IDX.REFdb[names(.IDXdbFormulae)],"[",2) %> 12 | #' <% sr <- sapply(.IDX.REFdb[names(.IDXdbFormulae)],"[",1) %> 13 | #' <% df <- data.frame(Index = names(.IDXdbFormulae), Description = fn, Source = sr, Bands = fr, Formula = dl) %> 14 | #' <%= .df2tab(df, "lllll") %> 15 | #' 16 | #' 17 | #' Some indices require additional parameters, such as the slope of the soil line, which are specified via a list to the \code{coefs} argument.
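#' For instance, a sketch of overriding the soil brightness factor for SAVI could look like this
#' (the value of \code{L} is purely illustrative; band numbers refer to the Landsat 5 example data):
#' \preformatted{
#' savi <- spectralIndices(lsat, red = 3, nir = 4, indices = "SAVI", coefs = list(L = 0.25))
#' }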
18 | #' Although the defaults are sensible, values like the soil brightness factor \code{L} for SAVI should be adapted depending on the characteristics of the scene. 19 | #' The coefficients are: 20 | #' \tabular{lll}{ 21 | #' \strong{Coefficient} \tab \strong{Description} \tab \strong{Affected Indices} \cr 22 | #' \code{s} \tab slope of the soil line \tab DVI, WDVI \cr 23 | #' \code{L_evi, C1, C2, G} \tab various \tab EVI \cr 24 | #' \code{L} \tab soil brightness factor \tab SAVI, SATVI \cr 25 | #' \code{swir2ccc} \tab minimum swir2 value (completely closed forest canopy) \tab NDVIC \cr 26 | #' \code{swir2coc} \tab maximum swir2 value (completely open canopy) \tab NDVIC \cr 27 | #' } 28 | #' 29 | #' 30 | #' The wavelength band names are defined following Schowengerdt 2007, p10. 31 | #' The last column shows, as an example, which Landsat 5 TM bands correspond to which wavelength range definition. 32 | #' <%= .df2tab(.wavlDB, "lllllll") %> 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /man/ImageMetaData.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/readMeta.R 3 | \name{ImageMetaData} 4 | \alias{ImageMetaData} 5 | \title{ImageMetaData Class} 6 | \usage{ 7 | ImageMetaData( 8 | file = NA, 9 | format = NA, 10 | sat = NA, 11 | sen = NA, 12 | scene = NA, 13 | colNum = NA, 14 | colTier = NA, 15 | proj = NA, 16 | date = NA, 17 | pdate = NA, 18 | path = NA, 19 | row = NA, 20 | az = NA, 21 | selv = NA, 22 | esd = NA, 23 | files = NA, 24 | bands = NA, 25 | quant = NA, 26 | cat = NA, 27 | na = NA, 28 | vsat = NA, 29 | scal = NA, 30 | dtyp = NA, 31 | calrad = NA, 32 | calref = NA, 33 | calbt = NA, 34 | radRes = NA, 35 | spatRes = NA 36 | ) 37 | } 38 | \arguments{ 39 | \item{file}{Character. Metadata file} 40 | 41 | \item{format}{Character. Metadata format, e.g. xml, mtl} 42 | 43 | \item{sat}{Character. Satellite platform} 44 | 45 | \item{sen}{Character. Sensor} 46 | 47 | \item{scene}{Character. Scene_ID} 48 | 49 | \item{colNum}{Character. Collection number} 50 | 51 | \item{colTier}{Character. Collection tier} 52 | 53 | \item{proj}{CRS. Projection.} 54 | 55 | \item{date}{POSIXct. Acquisition date.} 56 | 57 | \item{pdate}{POSIXct. Processing date.} 58 | 59 | \item{path}{Integer. Path.} 60 | 61 | \item{row}{Integer. Row.} 62 | 63 | \item{az}{Numeric. Sun azimuth} 64 | 65 | \item{selv}{Numeric. Sun elevation} 66 | 67 | \item{esd}{Numeric. Earth-sun distance} 68 | 69 | \item{files}{Character vector. Files containing the data, e.g. tiff files} 70 | 71 | \item{bands}{Character vector. Band names} 72 | 73 | \item{quant}{Character vector. Quantity, one of c("dn", "tra", "tre", "sre", "bt", "idx", "angle")} 74 | 75 | \item{cat}{Character vector. Category, e.g. c("image", "pan", "index", "qa", "aux")} 76 | 77 | \item{na}{Numeric vector. No-data value per band} 78 | 79 | \item{vsat}{Numeric vector. Saturation value per band} 80 | 81 | \item{scal}{Numeric vector. Scale factor per band, e.g. if data was scaled to 1000*reflectance for integer conversion.} 82 | 83 | \item{dtyp}{Character vector. Data type per band.} 84 | 85 | \item{calrad}{data.frame. Calibration coefficients for dn->radiance conversion. Must have columns 'gain' and 'offset'. Rows named according to \code{bands}.} 86 | 87 | \item{calref}{data.frame. Calibration coefficients for dn->reflectance conversion. Must have columns 'gain' and 'offset'.
Rows named according to \code{bands}.} 88 | 89 | \item{calbt}{data.frame. Calibration coefficients for dn->brightness temperature conversion. Must have columns 'K1' and 'K2'. Rows named according to \code{bands}.} 90 | 91 | \item{radRes}{Numeric vector. Radiometric resolution per band.} 92 | 93 | \item{spatRes}{Numeric vector. Spatial resolution per band.} 94 | } 95 | \value{ 96 | Returns a structured, fully customizable meta-data table of a file 97 | } 98 | \description{ 99 | ImageMetaData Class 100 | } 101 | -------------------------------------------------------------------------------- /man/RStoolbox.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/RStoolbox-package.R 3 | \name{RStoolbox} 4 | \alias{RStoolbox} 5 | \title{RStoolbox: A Collection of Remote Sensing Tools} 6 | \description{ 7 | The RStoolbox package provides a set of functions which simplify performing standard remote sensing tasks in R. 8 | } 9 | \section{Data Import and Export}{ 10 | 11 | 12 | \itemize{ 13 | \item \code{\link{readMeta}}: import Landsat metadata from MTL or XML files 14 | \item \code{\link{stackMeta}}, \code{\link{getMeta}}: load Landsat bands based on metadata 15 | \item \code{\link{readSLI} & \link{writeSLI}}: read and write ENVI spectral libraries 16 | \item \code{\link{saveRSTBX} & \link{readRSTBX}}: save and re-import RStoolbox classification objects (model and map) 17 | \item \code{\link{readEE}}: import and tidy EarthExplorer search results 18 | } 19 | } 20 | 21 | \section{Data Pre-Processing}{ 22 | 23 | 24 | \itemize{ 25 | \item \code{\link{radCor}}: radiometric conversions and corrections. Primarily, yet not exclusively, intended for Landsat data processing. DN to radiance to reflectance conversion as well as DOS approaches 26 | \item \code{\link{topCor}}: topographic illumination correction 27 | \item \code{\link{cloudMask} & \link{cloudShadowMask}}: mask clouds and cloud shadows in Landsat or other imagery which comes with a thermal band 28 | \item \code{\link{classifyQA}}: extract layers from Landsat 8 QA bands, e.g. cloud confidence 29 | \item \code{\link{encodeQA} & \link{decodeQA}}: encode/decode Landsat 16-bit QA bands. 
30 | \item \code{\link{rescaleImage}}: rescale image to match min/max from another image or a specified min/max range 31 | \item \code{\link{normImage}}: normalize imagery by centering and scaling 32 | \item \code{\link{oneHotEncode}}: one-hot encode a raster or vector 33 | \item \code{\link{histMatch}}: matches the histograms of two scenes 34 | \item \code{\link{pifMatch}}: matches one scene to another based on linear regression of Pseudo-Invariant Features (PIF) 35 | \item \code{\link{coregisterImages}}: co-register images based on mutual information 36 | \item \code{\link{panSharpen}}: sharpen a coarse resolution image with a high resolution image (typically panchromatic) 37 | \item \code{\link{estimateHaze}}: estimate image haze for Dark Object Subtraction (DOS) 38 | } 39 | } 40 | 41 | \section{Data Analysis}{ 42 | 43 | 44 | \itemize{ 45 | \item \code{\link{spectralIndices}}: calculate a set of predefined multispectral indices like NDVI 46 | \item \code{\link{tasseledCap}}: tasseled cap transformation 47 | \item \code{\link{sam}}: spectral angle mapper 48 | \item \code{\link{rasterPCA}}: principal components transform for raster data 49 | \item \code{\link{rasterCVA}}: change vector analysis 50 | \item \code{\link{rasterEntropy}}: calculates shannon entropy 51 | \item \code{\link{unsuperClass}}: unsupervised classification 52 | \item \code{\link{superClass}}, \code{\link{validateMap}}, \code{\link{getValidation}}: supervised classification and validation 53 | \item \code{\link{fCover}}: fractional cover of coarse resolution imagery based on high resolution classification 54 | \item \code{\link{mesma}}: spectral unmixing using Multiple Endmember Spectral Mixture Analysis (MESMA) 55 | } 56 | } 57 | 58 | \section{Data Display}{ 59 | 60 | 61 | \itemize{ 62 | \item \code{\link{ggR}}: single raster layer plotting with ggplot2 63 | \item \code{\link{ggRGB}}: efficient plotting of remote sensing imagery in RGB with ggplot2 64 | } 65 | } 66 | 67 | \keyword{"RStoolbox"} 68 | \keyword{earth-observation} 69 | \keyword{remote-sensing} 70 | \keyword{spatial-data-analysis} 71 | -------------------------------------------------------------------------------- /man/classifyQA.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/classifyQA.R 3 | \name{classifyQA} 4 | \alias{classifyQA} 5 | \title{Classify Landsat QA bands} 6 | \usage{ 7 | classifyQA( 8 | img, 9 | type = c("background", "cloud", "cirrus", "snow", "water"), 10 | confLayers = FALSE, 11 | sensor = "OLI", 12 | legacy = "collection1", 13 | ... 14 | ) 15 | } 16 | \arguments{ 17 | \item{img}{SpatRaster. Landsat 8 OLI QA band.} 18 | 19 | \item{type}{Character. Classes which should be returned. One or more of c("background", "cloud", "cirrus","snow", "water").} 20 | 21 | \item{confLayers}{Logical. Return one layer per class classified by confidence levels, i.e. cloud:low, cloud:med, cloud:high.} 22 | 23 | \item{sensor}{Sensor to encode. Options: \code{c("OLI", "TIRS", "ETM+", "TM", "MSS")}.} 24 | 25 | \item{legacy}{Encoding systematic Options: \code{c("collection1", "pre_collection")}. Default is "collection1" for the Landsat Collection 1 8-bit quality designations. 
Use "pre_collection" for imagery downloaded before the Collection 1 quality designations were introduced} 26 | 27 | \item{...}{further arguments passed to \link[terra]{writeRaster}} 28 | } 29 | \value{ 30 | Returns a SpatRaster with maximal five classes: 31 | \tabular{rr}{ 32 | class \tab value \cr 33 | background \tab 1L \cr 34 | cloud \tab 2L \cr 35 | cirrus \tab 3L \cr 36 | snow \tab 4L \cr 37 | water \tab 5L \cr 38 | } 39 | Values outside of these classes are returned as NA. 40 | If \code{confLayers = TRUE} then a RasterStack with one layer per condition (except 'background') is returned, whereby each layer contains the confidence level of the condition. 41 | \tabular{rr}{ 42 | Confidence \tab value \cr 43 | low \tab 1L \cr 44 | med \tab 2L \cr 45 | high \tab 3L \cr 46 | } 47 | } 48 | \description{ 49 | extracts five classes from QA band: background, cloud, cirrus, snow and water. 50 | } 51 | \details{ 52 | By default each class is queried for *high* confidence. See \link{encodeQA} for details. To return the different confidence levels per condition use \code{confLayers=TRUE}. 53 | This approach corresponds to the way LandsatLook Quality Images are produced by the USGS. 54 | } 55 | \examples{ 56 | library(terra) 57 | qa <- rast(ncol = 100, nrow=100, val = sample(1:2^14, 10000)) 58 | 59 | ## QA classes 60 | qacs <- classifyQA(img = qa) 61 | ## Confidence levels 62 | qacs_conf <- classifyQA(img = qa, confLayers = TRUE) 63 | } 64 | \seealso{ 65 | \link{encodeQA} \link{decodeQA} 66 | } 67 | -------------------------------------------------------------------------------- /man/cloudShadowMask.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/cloudMask.R 3 | \name{cloudShadowMask} 4 | \alias{cloudShadowMask} 5 | \title{Cloud Shadow Masking for Flat Terrain} 6 | \usage{ 7 | cloudShadowMask( 8 | img, 9 | cm, 10 | nc = 5, 11 | shiftEstimate = NULL, 12 | preciseShift = NULL, 13 | quantile = 0.2, 14 | returnShift = FALSE 15 | ) 16 | } 17 | \arguments{ 18 | \item{img}{SpatRaster containing the scene} 19 | 20 | \item{cm}{SpatRaster. Cloud mask (typically the result of \code{\link{cloudMask}})} 21 | 22 | \item{nc}{Integer. Number of control points. A few points (default) are fine because the final shift is estimated by \link{coregisterImages}.} 23 | 24 | \item{shiftEstimate}{NULL or numeric vector of length two (x,y). Estimated displacement of shadows in map units. If \code{NULL}, the user will be asked to select control points interactively.} 25 | 26 | \item{preciseShift}{NULL or numeric vector of length two (x,y). Use this if cloud/cloud-shadow displacement is already known, e.g. from a previous run of \code{cloudShadowMask}.} 27 | 28 | \item{quantile}{Numeric (between 0 and 1). Quantile threshold used for image co-registration. By default the 20\% quantile of the total intensity (sum) of the image is used as potential shadow mask.} 29 | 30 | \item{returnShift}{Logical. Return a numeric vector containing the shift parameters. Useful if you estimate parameters on a subset of the image.} 31 | } 32 | \value{ 33 | Returns a RasterLayer with the cloud shadow mask (0 = shadow, NA = not-shadow). 34 | } 35 | \description{ 36 | Intended for interactive use, \code{cloudShadowMask} will ask the user to select a few 37 | corresponding cloud/cloudShadow pixels which will be used to estimate coordinates 38 | for a linear cloudmask shift. 
39 | } 40 | \details{ 41 | This is a very simplistic approach to cloud shadow masking (simple shift of the cloud mask). It is not image based and accuracy will suffer from clouds at different altitudes. However, just as cloudMask 42 | this is a quick and easy to use tool for Landsat data if you're just working on a few scenes and don't have fMask or CDR data at hand. Although for some test scenes 43 | it does perform surprisingly well. 44 | } 45 | \examples{ 46 | library(ggplot2) 47 | ## Import Landsat example subset 48 | ## We have two tiny clouds in the east 49 | \donttest{ggRGB(lsat, stretch = "lin")} 50 | 51 | ## Calculate cloud index 52 | cldmsk <- cloudMask(lsat, blue = 1, tir = 6) 53 | \donttest{ggR(cldmsk, 2, geom_raster = TRUE) } 54 | 55 | ## Define threshold (re-use the previously calculated index) 56 | ## Everything above the threshold is masked 57 | ## In addition we apply a region-growing around the core cloud pixels 58 | cldmsk_final <- cloudMask(cldmsk, threshold = 0.1, buffer = 5) 59 | 60 | ## Plot cloudmask 61 | \donttest{ggRGB(lsat, stretch = "lin") + 62 | ggR(cldmsk_final[[1]], ggLayer = TRUE, forceCat = TRUE, geom_raster = TRUE) + 63 | scale_fill_manual(values = c("red"), na.value = NA) 64 | } 65 | #' ## Estimate cloud shadow displacement 66 | ## Interactively (click on cloud pixels and the corresponding shadow pixels) 67 | \dontrun{ shadow <- cloudShadowMask(lsat, cldmsk_final, nc = 2) } 68 | 69 | ## Non-interactively. Pre-defined shadow displacement estimate (shiftEstimate) 70 | \donttest{shadow <- cloudShadowMask(lsat, cldmsk_final, shiftEstimate = c(-16,-6))} 71 | 72 | ## Plot 73 | \donttest{csmask <- terra::merge(cldmsk_final[[1]], shadow) 74 | ggRGB(lsat, stretch = "lin") + 75 | ggR(csmask, ggLayer = TRUE, forceCat = TRUE, geom_raster = TRUE) + 76 | scale_fill_manual(values = c("blue", "yellow"), 77 | labels = c("shadow", "cloud"), na.value = NA) 78 | } 79 | } 80 | \seealso{ 81 | \link{cloudMask} 82 | } 83 | -------------------------------------------------------------------------------- /man/coregisterImages.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/coregisterImages.R 3 | \name{coregisterImages} 4 | \alias{coregisterImages} 5 | \title{Image to Image Co-Registration based on Mutual Information} 6 | \usage{ 7 | coregisterImages( 8 | img, 9 | ref, 10 | shift = 3, 11 | shiftInc = 1, 12 | nSamples = 100, 13 | reportStats = FALSE, 14 | verbose, 15 | nBins = 100, 16 | master = deprecated(), 17 | slave = deprecated(), 18 | ... 19 | ) 20 | } 21 | \arguments{ 22 | \item{img}{SpatRaster. Image to shift to match reference image. \code{img} and \code{ref} must have equal numbers of bands.} 23 | 24 | \item{ref}{SpatRaster. Reference image. \code{img} and \code{ref} must have equal numbers of bands.} 25 | 26 | \item{shift}{Numeric or matrix. If numeric, then shift is the maximal absolute radius (in pixels of \code{img} resolution) which \code{img} is shifted (\code{seq(-shift, shift, by=shiftInc)}). 27 | If shift is a matrix it must have two columns (x shift and y shift), then only these shift values will be tested.} 28 | 29 | \item{shiftInc}{Numeric. Shift increment (in pixels, but not restricted to integer). Ignored if \code{shift} is a matrix.} 30 | 31 | \item{nSamples}{Integer. Number of samples to calculate mutual information.} 32 | 33 | \item{reportStats}{Logical. If \code{FALSE} it will return only the shifted images. 
Otherwise it will return the shifted image in a list containing stats such as mutual information per shift and joint histograms.} 34 | 35 | \item{verbose}{Logical. Print status messages. Overrides global RStoolbox.verbose option.} 36 | 37 | \item{nBins}{Integer. Number of bins to calculate joint histogram.} 38 | 39 | \item{master}{DEPRECATED! Argument was renamed. Please use \code{ref} from now on.} 40 | 41 | \item{slave}{DEPRECATED! Argument was renamed. Please use \code{img} from now on.} 42 | 43 | \item{...}{further arguments passed to \code{\link[terra]{writeRaster}}.} 44 | } 45 | \value{ 46 | \code{reportStats=FALSE} returns a SpatRaster (x-y shifted image). 47 | \code{reportStats=TRUE} returns a list containing a data.frame with mutual information per shift ($MI), the shift of maximum MI ($bestShift), 48 | the joint histograms per shift in a list ($jointHist) and the shifted image ($coregImg). 49 | } 50 | \description{ 51 | Shifts an image to match a reference image. Matching is based on maximum 52 | mutual information. 53 | } 54 | \details{ 55 | Currently only a simple linear x - y shift is considered and tested. No higher order shifts (e.g. rotation, non-linear transformation) are performed. This means that your imagery 56 | should already be properly geometrically corrected. 57 | 58 | \href{https://en.wikipedia.org/wiki/Mutual_information}{Mutual information} is a similarity metric originating from information theory. 59 | Roughly speaking, the higher the mutual information of two data-sets, the higher is their shared information content, i.e. their similarity. 60 | When two images are exactly co-registered their mutual information is maximal. By trying different image shifts, we aim to find the best overlap which maximises the mutual information. 61 | } 62 | \examples{ 63 | library(terra) 64 | library(ggplot2) 65 | library(reshape2) 66 | reference <- rlogo 67 | ## Shift reference 2 pixels to the right and 3 up 68 | missreg <- shift(reference, 2, 3) 69 | 70 | ## Compare shift 71 | p <- ggR(reference, sat = 1, alpha = .5) 72 | p + ggR(missreg, sat = 1, hue = .5, alpha = 0.5, ggLayer=TRUE) 73 | 74 | ## Coregister images (and report statistics) 75 | coreg <- coregisterImages(missreg, ref = reference, 76 | nSamples = 500, reportStats = TRUE) 77 | 78 | ## Plot mutual information per shift 79 | ggplot(coreg$MI) + geom_raster(aes(x,y,fill=mi)) 80 | 81 | ## Plot joint histograms per shift (x/y shift in facet labels) 82 | \donttest{ 83 | df <- melt(coreg$jointHist) 84 | df$L1 <- factor(df$L1, levels = names(coreg$jointHist)) 85 | df[df$value == 0, "value"] <- NA ## don't display p = 0 86 | ggplot(df) + geom_raster(aes(x = Var2, y = Var1,fill=value)) + facet_wrap(~L1) + 87 | scale_fill_gradientn(name = "p", colours = heat.colors(10), na.value = NA) 88 | } 89 | ## Compare correction 90 | ggR(reference, sat = 1, alpha = .5) + 91 | ggR(coreg$coregImg, sat = 1, hue = .5, alpha = 0.5, ggLayer=TRUE) 92 | } 93 | -------------------------------------------------------------------------------- /man/decodeQA.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/encodeQA.R 3 | \name{decodeQA} 4 | \alias{decodeQA} 5 | \title{Decode QA flags to bit-words} 6 | \usage{ 7 | decodeQA(x) 8 | } 9 | \arguments{ 10 | \item{x}{Integer (16bit)} 11 | } 12 | \value{ 13 | Returns the decoded QA values from an integer 14 | } 15 | \description{ 16 | Intended for use with Landsat 16-bit QA bands. 
Decodes pixel quality flags from integer to bit-words. 17 | } 18 | \examples{ 19 | decodeQA(53248) 20 | } 21 | \seealso{ 22 | \link{encodeQA} 23 | } 24 | -------------------------------------------------------------------------------- /man/encodeQA.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/encodeQA.R 3 | \name{encodeQA} 4 | \alias{encodeQA} 5 | \title{Encode QA Conditions to Integers} 6 | \usage{ 7 | encodeQA( 8 | fill = "no", 9 | terrainOcclusion = "no", 10 | radSaturation = "na", 11 | cloudMask = "all", 12 | cloud = "all", 13 | cloudShadow = "all", 14 | snow = "all", 15 | cirrus = "all", 16 | droppedPixel = "no", 17 | water = "all", 18 | droppedFrame = "no", 19 | sensor = "OLI", 20 | legacy = "collection1" 21 | ) 22 | } 23 | \arguments{ 24 | \item{fill}{Designated fill. Options: \code{c("yes", "no", "all")}.} 25 | 26 | \item{terrainOcclusion}{Terrain induced occlusion. Options: \code{c("yes", "no", "all")}.} 27 | 28 | \item{radSaturation}{Number of bands that contain radiometric saturation. Options: \code{c("na", "low", "med", "high", "all")} for no bands, 1-2 bands, 3-4 bands, 5 or more bands contain saturation.} 29 | 30 | \item{cloudMask}{Cloud mask. Options: \code{c("yes", "no", "all")}.} 31 | 32 | \item{cloud}{Cloud confidence. Options: \code{c("na", "low", "med", "high", "all")}.} 33 | 34 | \item{cloudShadow}{Cloud shadow confidence. Options: \code{c("yes", "no", "all")}.} 35 | 36 | \item{snow}{Snow / ice confidence. Options: \code{c("na", "low", "med", "high", "all")}.} 37 | 38 | \item{cirrus}{Cirrus confidence. Options: \code{c("na", "low", "med", "high", "all")}.} 39 | 40 | \item{droppedPixel}{Dropped pixel. Options: \code{c("yes", "no", "all")}.} 41 | 42 | \item{water}{Water confidence. Options: \code{c("na", "low", "med", "high", "all")}.} 43 | 44 | \item{droppedFrame}{Dropped frame. Options: \code{c("yes", "no", "all")}.} 45 | 46 | \item{sensor}{Sensor to encode. Options: \code{c("OLI", "TIRS", "ETM+", "TM", "MSS")}.} 47 | 48 | \item{legacy}{Encoding systematic Options: \code{c("collection1", "pre_collection")}. Default is "collection1" for the Landsat Collection 1 8-bit quality designations. Use "pre_collection" for imagery downloaded before the Collection 1 quality designations were introduced} 49 | } 50 | \value{ 51 | Returns the Integer value for the QA values 52 | } 53 | \description{ 54 | Intended for use with Landsat 16-bit QA bands. Converts pixel quality flags from human readable to integer, which can then be used to 55 | subset a QA image. Please be aware of the default settings which differ for different parameters. 56 | Depending on, which \code{sensor} and \code{legacy} is selected, some quality parameters are not used, since the sequences of available bitwise quality designations differ per sensor and collection. 57 | } 58 | \note{ 59 | Only currently populated bits are available as arguments. 
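For illustration, the encoded integers can be matched against a QA layer to build a mask (a sketch; the QA raster below is simulated):
\preformatted{
library(terra)
qa <- rast(ncol = 100, nrow = 100, vals = sample(1:2^14, 10000))
cloudVals <- encodeQA(cloud = "high")
cloudPixels <- qa \%in\% cloudVals
}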
60 | } 61 | \examples{ 62 | encodeQA(snow = "low", cirrus = c("med", "high"), cloud = "high") 63 | } 64 | \references{ 65 | \url{https://www.usgs.gov/landsat-missions/landsat-collection-1-level-1-quality-assessment-band} for Collection 1 quality designations (\code{legacy = "collection1"}) 66 | } 67 | -------------------------------------------------------------------------------- /man/estimateHaze.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/estimateHaze.R 3 | \name{estimateHaze} 4 | \alias{estimateHaze} 5 | \title{Estimate Image Haze for Dark Object Subtraction (DOS)} 6 | \usage{ 7 | estimateHaze( 8 | x, 9 | hazeBands, 10 | darkProp = 0.01, 11 | maxSlope = TRUE, 12 | plot = FALSE, 13 | returnTables = FALSE 14 | ) 15 | } 16 | \arguments{ 17 | \item{x}{SpatRaster or a previous result from \code{estimateHaze} with \code{returnTables = TRUE} from which to estimate haze} 18 | 19 | \item{hazeBands}{Integer or Character. Band number or bandname from which to estimate atmospheric haze (optional if x contains only one layer)} 20 | 21 | \item{darkProp}{Numeric. Proportion of pixels estimated to be dark.} 22 | 23 | \item{maxSlope}{Logical. Use \code{darkProp} only as an upper boundary and search for the DN of maximum slope in the histogram below this value.} 24 | 25 | \item{plot}{Logical. Option to display histograms and haze values} 26 | 27 | \item{returnTables}{Logical. Option to return the frequency table per layer. Only takes effect if x is a SpatRaster. If x is a result of estimateHaze tables will always be returned.} 28 | } 29 | \value{ 30 | If returnTables is FALSE (default). Then a vector of length(hazeBands) containing the estimated haze DNs will be returned. 31 | If returnTables is TRUE a list with two components will be returned. The list element 'SHV' contains the haze values, while 'table' 32 | contains another list with the sampled frequency tables. The latter can be re-used to try different darkProp thresholds without having to sample 33 | the raster again. 34 | } 35 | \description{ 36 | estimates the digital number (DN) pixel value of *dark* objects for the visible wavelength range. 37 | } 38 | \details{ 39 | It is assumed that any radiation originating from *dark* pixels is due to atmospheric haze and 40 | not the reflectance of the surface itself (the surface is dark, i.e. it has a reflectance close to zero). 41 | Hence, the haze values are estimates of path radiance, which can be subtracted in a dark object subtraction (DOS) procedure (see \code{\link{radCor}}) 42 | 43 | Atmospheric haze affects almost exclusively the visible wavelength range. Therefore, typically, you'd only want to estimate haze in blue, green and red bands, occasionally also in the nir band. 
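A typical follow-up is to pass the estimated haze values on to \code{\link{radCor}} for dark object subtraction. A minimal sketch (using the Landsat example metadata shipped with the package; see \code{\link{radCor}} for the exact arguments):
\preformatted{
mtlFile <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package = "RStoolbox")
meta    <- readMeta(mtlFile)
haze    <- estimateHaze(lsat, hazeBands = 1:3)
lsat_sdos <- radCor(lsat, metaData = meta, method = "sdos",
                    hazeValues = haze, hazeBands = 1:3)
}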
44 | } 45 | \examples{ 46 | ## Estimate haze for blue, green and red band 47 | haze <- estimateHaze(lsat, hazeBands = 1:3, plot = FALSE) 48 | haze 49 | 50 | ## Find threshold interactively 51 | #### Return the frequency tables for re-use 52 | #### avoids having to sample the Raster again and again 53 | haze <- estimateHaze(lsat, hazeBands = 1:3, returnTables = TRUE) 54 | ## Use frequency table instead of lsat and fiddle with 55 | haze <- estimateHaze(haze, hazeBands = 1:3, darkProp = .1, plot = FALSE) 56 | haze$SHV 57 | } 58 | -------------------------------------------------------------------------------- /man/figures/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/man/figures/logo.png -------------------------------------------------------------------------------- /man/fortifySpatRaster.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fortify-raster.R 3 | \name{fortifySpatRaster} 4 | \alias{fortifySpatRaster} 5 | \title{Fortify method for classes from the terra package.} 6 | \usage{ 7 | fortifySpatRaster(x, maxpixels = 50000) 8 | } 9 | \arguments{ 10 | \item{x}{\code{SpatRaster} object to convert into a dataframe.} 11 | 12 | \item{maxpixels}{Integer. Maximum number of pixels to sample} 13 | } 14 | \value{ 15 | Returns a data.frame with coordinates (x,y) and corresponding raster values. 16 | } 17 | \description{ 18 | Fortify method for classes from the terra package. 19 | } 20 | \examples{ 21 | r_df <- fortifySpatRaster(rlogo) 22 | head(r_df) 23 | } 24 | -------------------------------------------------------------------------------- /man/getMeta.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/getMeta.R 3 | \name{getMeta} 4 | \alias{getMeta} 5 | \title{Extract bandwise information from ImageMetaData} 6 | \usage{ 7 | getMeta(img, metaData, what) 8 | } 9 | \arguments{ 10 | \item{img}{SpatRaster or character vector with band names.} 11 | 12 | \item{metaData}{ImageMetaData or path to meta data file.} 13 | 14 | \item{what}{Character. Parameter to extract. Either data descriptors, or conversion parameters (see Details for options)} 15 | } 16 | \value{ 17 | If \code{what} is one of \code{c('CALRAD', 'CALBT', 'CALREF')} a data.frame is returned with bands in rows (order corresponding to \code{img} band order). 18 | Otherwise a named numeric vector with the corresponding parameter is returned (layernames as names). 19 | } 20 | \description{ 21 | This is an accessor function to quickly access information stored in ImageMetaData, e.g. scale factor per band. 22 | Intended for use with imagery which was imported using stackMeta. Will return parameters using the actual band order in img. 
23 | } 24 | \details{ 25 | Possible metadata parameters (\code{what} argument): 26 | 27 | Data descriptors 28 | \tabular{ll}{ 29 | 'FILES' \tab \cr 30 | 'QUANTITY' \tab \cr 31 | 'CATEGORY' \tab \cr 32 | 'NA_VALUE' \tab \cr 33 | 'SATURATE_VALUE' \tab \cr 34 | 'SCALE_FACTOR' \tab \cr 35 | 'DATA_TYPE' \tab \cr 36 | 'SPATIAL_RESOLUTION' \tab \cr 37 | } 38 | Conversion parameters 39 | \tabular{ll}{ 40 | 'CALRAD' \tab Conversion parameters from DN to radiance \cr 41 | 'CALBT' \tab Conversion parameters from radiance to brightness temperature \cr 42 | 'CALREF' \tab Conversion parameters from DN to reflectance (Landsat 8 only) \cr 43 | } 44 | } 45 | \examples{ 46 | ## Import example data 47 | mtlFile <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package="RStoolbox") 48 | meta <- readMeta(mtlFile) 49 | lsat_t <- stackMeta(mtlFile) 50 | 51 | ## Get integer scale factors 52 | getMeta(lsat_t, metaData = meta, what = "SCALE_FACTOR") 53 | 54 | ## Conversion factors for brightness temperature 55 | getMeta("B6_dn", metaData = meta, what = "CALBT") 56 | 57 | ## Conversion factors to top-of-atmosphere radiance 58 | ## Band order not corresponding to metaData order 59 | getMeta(lsat_t[[5:1]], metaData = meta, what = "CALRAD") 60 | 61 | ## Get integer scale factors 62 | getMeta(lsat_t, metaData = meta, what = "SCALE_FACTOR") 63 | 64 | ## Get file basenames 65 | getMeta(lsat_t, metaData = meta, what = "FILES") 66 | 67 | } 68 | -------------------------------------------------------------------------------- /man/getValidation.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/getValidation.R 3 | \name{getValidation} 4 | \alias{getValidation} 5 | \title{Extract validation results from superClass objects} 6 | \usage{ 7 | getValidation(x, from = "testset", metrics = "overall") 8 | } 9 | \arguments{ 10 | \item{x}{superClass object or caret::confusionMatrix} 11 | 12 | \item{from}{Character. 'testset' extracts the results from independent validation with testset. 'cv' extracts cross-validation results.} 13 | 14 | \item{metrics}{Character. Only relevant in classification mode (ignored for regression models). 15 | Select 'overall' for overall accuracy metrics, 'classwise' for classwise metrics, 16 | 'confmat' for the confusion matrix itself and 'caret' to return the whole caret::confusionMatrix object.} 17 | } 18 | \value{ 19 | Returns a data.frame with validation results. 20 | If metrics = 'confmat' or 'caret' will return a table or the full caret::confusionMatrix object, respectively. 
21 | } 22 | \description{ 23 | Extract validation results from superClass objects 24 | } 25 | \examples{ 26 | library(pls) 27 | ## Fit classifier (splitting training into 70\\% training data, 30\\% validation data) 28 | train <- readRDS(system.file("external/trainingPoints_rlogo.rds", package="RStoolbox")) 29 | SC <- superClass(rlogo, trainData = train, responseCol = "class", 30 | model="pls", trainPartition = 0.7) 31 | ## Independent testset-validation 32 | getValidation(SC) 33 | getValidation(SC, metrics = "classwise") 34 | ## Cross-validation based 35 | getValidation(SC, from = "cv") 36 | } 37 | -------------------------------------------------------------------------------- /man/histMatch.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/histMatch.R 3 | \name{histMatch} 4 | \alias{histMatch} 5 | \title{Image to Image Contrast Matching} 6 | \usage{ 7 | histMatch( 8 | x, 9 | ref, 10 | xmask = NULL, 11 | refmask = NULL, 12 | nSamples = 1e+05, 13 | intersectOnly = TRUE, 14 | paired = TRUE, 15 | forceInteger = FALSE, 16 | returnFunctions = FALSE, 17 | ... 18 | ) 19 | } 20 | \arguments{ 21 | \item{x}{SpatRaster. Source raster which is to be modified.} 22 | 23 | \item{ref}{SpatRaster. Reference raster, to which x will be matched.} 24 | 25 | \item{xmask}{RasterLayer or SpatRaster. Mask layer for \code{x} to exclude pixels which might distort the histogram, i.e. are not present in \code{ref}. Any NA pixel in \code{xmask} will be ignored (\code{maskvalue = NA}).} 26 | 27 | \item{refmask}{RasterLayer or SpatRaster. Mask layer for \code{ref}. Any NA pixel in \code{refmask} will be ignored (\code{maskvalue = NA}).} 28 | 29 | \item{nSamples}{Integer. Number of random samples from each image to build the histograms.} 30 | 31 | \item{intersectOnly}{Logical. If \code{TRUE} sampling will only take place in the overlap extent of the two rasters. Otherwise the full rasters will be used for sampling.} 32 | 33 | \item{paired}{Logical. If \code{TRUE} the corresponding pixels will be used in the overlap.} 34 | 35 | \item{forceInteger}{Logical. Force integer output.} 36 | 37 | \item{returnFunctions}{Logical. If \code{TRUE} the matching functions will be returned instead of applying them to \code{x}.} 38 | 39 | \item{...}{Further arguments to be passed to \link[terra]{writeRaster}.} 40 | } 41 | \value{ 42 | A SpatRaster of \code{x} adjusted to the histogram of \code{ref}. If \code{returnFunctions = TRUE} a list of functions (one for each layer) will be returned instead. 43 | } 44 | \description{ 45 | Performs image to image contrast adjustments based on histogram matching using empirical cumulative 46 | distribution functions from both images. 47 | } 48 | \note{ 49 | \code{x} and \code{ref} must have the same number of layers. 50 | } 51 | \examples{ 52 | library(ggplot2) 53 | library(terra) 54 | ## Original image a (+1 to prevent log(0)) 55 | img_a <- rlogo + 1 56 | ## Degraded image b 57 | img_b <- log(img_a) 58 | ## Cut-off half the image (just for better display) 59 | img_b[, 1:50] <- NA 60 | 61 | ## Compare Images before histMatching 62 | ggRGB(img_a,1,2,3)+ 63 | ggRGB(img_b, 1,2,3, ggLayer = TRUE, stretch = "lin", q = 0:1) + 64 | geom_vline(aes(xintercept = 50))+ 65 | ggtitle("Img_a vs. 
Img_b") 66 | 67 | ## Do histogram matching 68 | img_b_matched <- histMatch(img_b, img_a) 69 | 70 | ## Compare Images after histMatching 71 | ggRGB(img_a, 1, 2, 3)+ 72 | ggRGB(img_b_matched, 1, 2, 3, ggLayer = TRUE, stretch = "lin", q = 0:1) + 73 | geom_vline(aes(xintercept = 50))+ 74 | ggtitle("Img_a vs. Img_b_matched") 75 | 76 | ## Histogram comparison 77 | opar <- par(mfrow = c(1, 3), no.readonly = TRUE) 78 | img_a[,1:50] <- NA 79 | redLayers <- c(img_a, img_b, img_b_matched)[[c(1,4,7)]] 80 | names(redLayers) <- c("img_a", "img_b", "img_b_matched") 81 | 82 | hist(redLayers) 83 | ## Reset par 84 | par(opar) 85 | } 86 | \references{ 87 | Richards and Jia: Remote Sensing Digital Image Analysis. Springer, Berlin, Heidelberg, Germany, 439pp. 88 | } 89 | -------------------------------------------------------------------------------- /man/lsat.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/RStoolbox-package.R 3 | \docType{data} 4 | \name{lsat} 5 | \alias{lsat} 6 | \title{Landsat 5TM Example Data} 7 | \usage{ 8 | lsat 9 | } 10 | \description{ 11 | Subset of Landsat 5 TM Scene: LT52240631988227CUB02 12 | Contains all seven bands in DN format. 13 | } 14 | \examples{ 15 | ggRGB(lsat, stretch = "sqrt") 16 | } 17 | \keyword{datasets} 18 | -------------------------------------------------------------------------------- /man/normImage.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normImage.R 3 | \name{normImage} 4 | \alias{normImage} 5 | \title{Normalize Raster Images: Center and Scale} 6 | \usage{ 7 | normImage(img, norm = TRUE, ...) 8 | } 9 | \arguments{ 10 | \item{img}{SpatRaster. Image to transform. Transformation will be performed separately for each layer.} 11 | 12 | \item{norm}{Logical. Perform normalization (scaling) in addition to centering, i.e. divide by standard deviation.} 13 | 14 | \item{...}{further arguments passed to \link[terra]{writeRaster}.} 15 | } 16 | \value{ 17 | Returns a SpatRaster with the same number layers as input layers with each layer being centered and optionally normalized. 18 | } 19 | \description{ 20 | For each pixel subtracts the mean of the raster layer and optionally divide by its standard deviation. 21 | } 22 | \examples{ 23 | library(terra) 24 | ## Load example data 25 | 26 | ## Normalization: Center and Scale 27 | rlogo_center_norm <- normImage(rlogo) 28 | hist(rlogo_center_norm) 29 | 30 | ## Centering 31 | rlogo_center <- normImage(rlogo, norm = FALSE) 32 | } 33 | -------------------------------------------------------------------------------- /man/oneHotEncode.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/oneHotEncode.R 3 | \name{oneHotEncode} 4 | \alias{oneHotEncode} 5 | \title{One-hot encode a raster or vector} 6 | \usage{ 7 | oneHotEncode(img, classes, background = 0, foreground = 1, na.rm = FALSE, ...) 
8 | } 9 | \arguments{ 10 | \item{img}{SpatRaster or integer/numeric vector containing multiple classes} 11 | 12 | \item{classes}{integer: vector of classes which should be extracted} 13 | 14 | \item{background}{integer: background value (default = 0)} 15 | 16 | \item{foreground}{integer: foreground value (default = 1)} 17 | 18 | \item{na.rm}{logical: if \code{TRUE}, \code{NA}s will be coerced to the \code{background} value.} 19 | 20 | \item{...}{further arguments passed to \link[terra]{writeRaster}. Ignored if img is not a SpatRaster, but a numeric/integer vector or matrix} 21 | } 22 | \value{ 23 | A SpatRaster with as many layers as there are classes. 24 | Pixels matching the class of interest are set to 1, background values by default are set to 0 (see background argument) 25 | } 26 | \description{ 27 | Splits a categorical raster layer (or a vector) into a multilayer raster (or matrix). 28 | } 29 | \examples{ 30 | \donttest{ 31 | sc <- unsuperClass(rlogo, nClasses = 3) 32 | 33 | ## one-hot encode 34 | sc_oneHot <- oneHotEncode(sc$map, classes = c(1,2,3)) 35 | 36 | ## check results 37 | sc_oneHot 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /man/panSharpen.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/panSharpen.R 3 | \name{panSharpen} 4 | \alias{panSharpen} 5 | \title{Pan Sharpen Imagery / Image Fusion} 6 | \usage{ 7 | panSharpen(img, pan, r, g, b, pc = 1, method = "brovey", norm = TRUE) 8 | } 9 | \arguments{ 10 | \item{img}{SpatRaster. Coarse resolution multispectral image} 11 | 12 | \item{pan}{SpatRaster. High resolution image, typically panchromatic.} 13 | 14 | \item{r}{Character or Integer. Red band in \code{img}. Only relevant if \code{method!='pca'}} 15 | 16 | \item{g}{Character or Integer. Green band in \code{img}. Only relevant if \code{method!='pca'}} 17 | 18 | \item{b}{Character or Integer. Blue band in \code{img}. Only relevant if \code{method!='pca'}} 19 | 20 | \item{pc}{Integer. Only relevant if \code{method = 'pca'}. Which principal component to replace. Usually this should be the first component (default). Only if the first component is dominated by something other than brightness, it might be worth trying the second component.} 21 | 22 | \item{method}{Character. Choose method from c("pca", "ihs", "brovey").} 23 | 24 | \item{norm}{Logical. Rescale pan image to match the 1st PC. Only relevant if \code{method = 'pca'}. If \code{TRUE} only min and max are matched to the 1st PC. If \code{FALSE} pan will be histogram matched to the 1st PC.} 25 | } 26 | \value{ 27 | pan-sharpened SpatRaster 28 | } 29 | \description{ 30 | provides different methods for pan sharpening a coarse resolution (typically multispectral) image with 31 | a higher resolution panchromatic image. Values of the panchromatic and multispectral images must be of the same scale (e.g. from 0:1, or all DNs from 0:255) 32 | } 33 | \details{ 34 | Pan sharpening options: 35 | \itemize{ 36 | \item{\code{method='pca'}: Performs a PCA using \link{rasterPCA}. The first component is then swapped for the pan band and the PCA is rotated backwards.} 37 | \item{\code{method='ihs'}: Performs a color space transform to Intensity-Hue-Saturation space, swaps intensity for the histogram matched pan and does the backwards transformation.} 38 | \item{\code{method='brovey'}: Performs Brovey reweighting. 
Pan and img must be at the same value scale (e.g. 0:1, or 0:255) otherwise you'll end up with psychedelic colors.} 39 | } 40 | } 41 | \examples{ 42 | library(terra) 43 | library(ggplot2) 44 | 45 | ## Fake panchromatic image (30m resolution covering 46 | ## the visible range (integral from blue to red)) 47 | pan <- sum(lsat[[1:3]]) 48 | ggR(pan, stretch = "lin") 49 | 50 | ## Fake coarse resolution image (150m spatial resolution) 51 | lowResImg <- aggregate(lsat, 5) 52 | 53 | 54 | ## Brovey pan sharpening 55 | lowResImg_pan <- panSharpen(lowResImg, pan, r = 3, g = 2, b = 1, method = "brovey") 56 | lowResImg_pan 57 | ## Plot 58 | ggRGB(lowResImg, stretch = "lin") + ggtitle("Original") 59 | ggRGB(lowResImg_pan, stretch="lin") + ggtitle("Pansharpened (Brovey)") 60 | 61 | } 62 | -------------------------------------------------------------------------------- /man/pifMatch.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pifMatch.R 3 | \name{pifMatch} 4 | \alias{pifMatch} 5 | \title{Pseudo-Invariant Features based Image Matching} 6 | \usage{ 7 | pifMatch( 8 | img, 9 | ref, 10 | method = "cor", 11 | quantile = 0.95, 12 | returnPifMap = TRUE, 13 | returnSimMap = TRUE, 14 | returnModels = FALSE 15 | ) 16 | } 17 | \arguments{ 18 | \item{img}{SpatRaster. Image to be adjusted.} 19 | 20 | \item{ref}{SpatRaster. Reference image.} 21 | 22 | \item{method}{Method to calculate pixel similarity. Options: Euclidean distance ('ed'), spectral angle ('sam') or Pearson correlation coefficient ('cor').} 23 | 24 | \item{quantile}{Numeric. Threshold quantile used to identify PIFs} 25 | 26 | \item{returnPifMap}{Logical. Return a binary raster map of pixels which were identified as pseudo-invariant features.} 27 | 28 | \item{returnSimMap}{Logical. Return the similarity map as well} 29 | 30 | \item{returnModels}{Logical. Return the linear models along with the adjusted image.} 31 | } 32 | \value{ 33 | Returns a List with the adjusted image and intermediate products (if requested). 34 | \itemize{ 35 | \item \code{img}: the adjusted image 36 | \item \code{simMap}: pixel-wise similarity map (if \code{returnSimMap = TRUE}) 37 | \item \code{pifMap}: binary map of pixels selected as pseudo-invariant features (if \code{returnPifMap = TRUE}) 38 | \item \code{models}: list of linear models; one per layer (if \code{returnModels = TRUE}) 39 | } 40 | } 41 | \description{ 42 | Match one scene to another based on linear regression of pseudo-invariant features (PIF). 43 | } 44 | \details{ 45 | The function consists of three main steps: 46 | First, it calculates pixel-wise similarity between the two rasters and identifies pseudo-invariant pixels based on 47 | a similarity threshold. 48 | In the second step the values of the pseudo-invariant pixels are regressed against each other in a linear model for each layer. 49 | Finally the linear models are applied to all pixels in the \code{img}, thereby matching it to the reference scene. 50 | 51 | Pixel-wise similarity can be calculated using one of three methods: Euclidean distance (\code{method = "ed"}), spectral angle (\code{"sam"}) or Pearson's correlation coefficient (\code{"cor"}). 52 | The threshold is defined as a similarity quantile. Setting \code{quantile=0.95} will select all pixels with a similarity above the 95\% quantile as pseudo-invariant features. 
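As a rough sketch of the thresholding idea (not the exact internal code of pifMatch; \code{sim} is assumed to be a single-layer SpatRaster of pixel similarities):
\preformatted{
thresh <- quantile(terra::values(sim), probs = 0.95, na.rm = TRUE)
pifMask <- sim > thresh   # binary mask, analogous to the returned pifMap
}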
53 | 54 | Model fitting is performed with simple linear models (\code{\link[stats]{lm}}); fitting one model per layer. 55 | } 56 | \examples{ 57 | library(terra) 58 | 59 | 60 | ## Create fake example data 61 | ## In practice this would be an image from another acquisition date 62 | lsat_b <- log(lsat) 63 | 64 | ## Run pifMatch and return similarity layer, invariant features mask and models 65 | lsat_b_adj <- pifMatch(lsat_b, lsat, returnPifMap = TRUE, 66 | returnSimMap = TRUE, returnModels = TRUE) 67 | \donttest{ 68 | ## Pixelwise similarity 69 | ggR(lsat_b_adj$simMap, geom_raster = TRUE) 70 | 71 | ## Pseudo-invariant feature mask 72 | ggR(lsat_b_adj$pifMap) 73 | 74 | ## Histograms of changes 75 | par(mfrow=c(1,3)) 76 | hist(lsat_b[[1]], main = "lsat_b") 77 | hist(lsat[[1]], main = "reference") 78 | hist(lsat_b_adj$img[[1]], main = "lsat_b adjusted") 79 | 80 | ## Model summary for first band 81 | summary(lsat_b_adj$models[[1]]) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /man/pipe.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils-pipe.R 3 | \name{\%>\%} 4 | \alias{\%>\%} 5 | \title{Pipe operator} 6 | \usage{ 7 | lhs \%>\% rhs 8 | } 9 | \arguments{ 10 | \item{lhs}{A value or the magrittr placeholder.} 11 | 12 | \item{rhs}{A function call using the magrittr semantics.} 13 | } 14 | \value{ 15 | The result of calling `rhs(lhs)`. 16 | } 17 | \description{ 18 | See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details. 19 | } 20 | \keyword{internal} 21 | -------------------------------------------------------------------------------- /man/predict.unsuperClass.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/unsuperClass.R 3 | \name{predict.unsuperClass} 4 | \alias{predict.unsuperClass} 5 | \title{Predict a raster map based on an unsuperClass model fit.} 6 | \usage{ 7 | \method{predict}{unsuperClass}(object, img, output = "classes", ...) 8 | } 9 | \arguments{ 10 | \item{object}{unsuperClass object} 11 | 12 | \item{img}{Raster object. Layernames must correspond to the layernames used to train the unsuperClass model, i.e. layernames in the original raster image.} 13 | 14 | \item{output}{Character. Either 'classes' (kmeans class; default) or 'distances' (euclidean distance to each cluster center).} 15 | 16 | \item{...}{further arguments to be passed to \link[terra]{writeRaster}, e.g. filename} 17 | } 18 | \value{ 19 | Returns a raster with the k-means classes or distances (depending on \code{output}), based on the model passed via \code{object}. 20 | } 21 | \description{ 22 | applies a kmeans cluster model to all pixels of a raster. 23 | Useful if you want to apply a kmeans model of scene A to scene B. 24 | } 25 | \examples{ 26 | ## Load training data 27 | 28 | ## Perform unsupervised classification 29 | uc <- unsuperClass(rlogo, nClasses = 10) 30 | 31 | ## Apply the model to another raster 32 | map <- predict(uc, rlogo) 33 | } 34 | -------------------------------------------------------------------------------- /man/rasterCVA.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rasterCVA.R 3 | \name{rasterCVA} 4 | \alias{rasterCVA} 5 | \title{Change Vector Analysis} 6 | \usage{ 7 | rasterCVA(x, y, tmf = NULL, nct = NULL, ...) 
8 | } 9 | \arguments{ 10 | \item{x}{SpatRaster with two layers. This will be the reference/origin for the change calculations. Both rasters (x and y) need to correspond to each other, i.e. same resolution, extent and origin.} 11 | 12 | \item{y}{SpatRaster with two layers. Both rasters (x and y) need to correspond to each other, i.e. same resolution, extent and origin.} 13 | 14 | \item{tmf}{Numeric. Threshold median factor (optional). Used to calculate a threshold magnitude for which pixels are considered stable, i.e. no change. Calculated as \code{tmf * mean(magnitude[magnitude > 0])}.} 15 | 16 | \item{nct}{Numeric. No-change threshold (optional). Alternative to \code{tmf}. Sets an absolute threshold. Change magnitudes below \code{nct} are considered stable and set to NA.} 17 | 18 | \item{...}{further arguments passed to writeRaster} 19 | } 20 | \value{ 21 | Returns a SpatRaster with two layers: change vector angle and change vector magnitude 22 | } 23 | \description{ 24 | Calculates angle and magnitude of change vectors. 25 | Dimensionality is limited to two bands per image. 26 | } 27 | \details{ 28 | Change Vector Analysis (CVA) is used to identify spectral changes between two scenes of the same area which were acquired at different times. 29 | CVA is limited to two bands per image. For each pixel it calculates the change vector in the two-dimensional spectral space. 30 | For example for a given pixel in image A and B for the red and nir band the change vector is calculated for the coordinate pairs: (red_A | nir_A) and (red_B | nir_B). 31 | 32 | The coordinate system is defined by the order of the input bands: the first band defines the x-axis and the second band the y-axis, respectively. 33 | Angles are returned *in degrees* beginning with 0 degrees pointing 'north', i.e. the y-axis, i.e. the second band. 34 | } 35 | \examples{ 36 | library(terra) 37 | pca <- rasterPCA(lsat)$map 38 | 39 | ## Do change vector analysis 40 | cva <- rasterCVA(pca[[1:2]], pca[[3:4]]) 41 | cva 42 | } 43 | -------------------------------------------------------------------------------- /man/rasterEntropy.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rasterEntropy.R 3 | \name{rasterEntropy} 4 | \alias{rasterEntropy} 5 | \title{Multi-layer Pixel Entropy} 6 | \usage{ 7 | rasterEntropy(img, ...) 8 | } 9 | \arguments{ 10 | \item{img}{SpatRaster} 11 | 12 | \item{...}{additional arguments passed to writeRaster} 13 | } 14 | \value{ 15 | SpatRaster "entropy" 16 | } 17 | \description{ 18 | Shannon entropy is calculated for each pixel based on its layer values. 19 | To be used with categorical / integer valued rasters. 20 | } 21 | \details{ 22 | Entropy is calculated as -sum(p log(p)); p being the class frequency per pixel. 23 | } 24 | \examples{ 25 | re <- rasterEntropy(rlogo) 26 | ggR(re, geom_raster = TRUE) 27 | } 28 | -------------------------------------------------------------------------------- /man/rasterPCA.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rasterPCA.R 3 | \name{rasterPCA} 4 | \alias{rasterPCA} 5 | \title{Principal Component Analysis for Rasters} 6 | \usage{ 7 | rasterPCA( 8 | img, 9 | nSamples = NULL, 10 | nComp = nlyr(img), 11 | spca = FALSE, 12 | maskCheck = TRUE, 13 | ... 
14 | ) 15 | } 16 | \arguments{ 17 | \item{img}{SpatRaster.} 18 | 19 | \item{nSamples}{Integer or NULL. Number of pixels to sample for PCA fitting. If NULL, all pixels will be used.} 20 | 21 | \item{nComp}{Integer. Number of PCA components to return.} 22 | 23 | \item{spca}{Logical. If \code{TRUE}, perform standardized PCA. Corresponds to centered and scaled input image. This is usually beneficial for equal weighting of all layers. (\code{FALSE} by default)} 24 | 25 | \item{maskCheck}{Logical. Masks all pixels which have at least one NA (default TRUE is recommended but introduces a slow-down, see Details when it is wise to disable maskCheck). 26 | Takes effect only if nSamples is NULL.} 27 | 28 | \item{...}{further arguments to be passed to \link[terra]{writeRaster}, e.g. filename.} 29 | } 30 | \value{ 31 | Returns a named list containing the PCA model object ($model) and a SpatRaster with the principal component layers ($map). 32 | } 33 | \description{ 34 | Calculates R-mode PCA for SpatRasters and returns a SpatRaster with multiple layers of PCA scores. 35 | } 36 | \details{ 37 | Internally rasterPCA relies on the use of \link[stats]{princomp} (R-mode PCA). If nSamples is given the PCA will be calculated 38 | based on a random sample of pixels and then predicted for the full raster. If nSamples is NULL then the covariance matrix will be calculated 39 | first and will then be used to calculate princomp and predict the full raster. The latter is more precise, since it considers all pixels, 40 | however, it may be slower than calculating the PCA only on a subset of pixels. 41 | 42 | Pixels with missing values in one or more bands will be set to NA. The built-in check for such pixels can lead to a slow-down of rasterPCA. 43 | However, if you make sure or know beforehand that all pixels have either only valid values or only NAs throughout all layers you can disable this check 44 | by setting maskCheck=FALSE which speeds up the computation. 45 | 46 | Standardised PCA (SPCA) can be useful if imagery or bands of different dynamic ranges are combined. SPCA uses the correlation matrix instead of the covariance matrix, which 47 | has the same effect as using normalised bands of unit variance. 48 | } 49 | \examples{ 50 | library(ggplot2) 51 | library(reshape2) 52 | ggRGB(rlogo, 1,2,3) 53 | 54 | ## Run PCA 55 | set.seed(25) 56 | rpc <- rasterPCA(rlogo) 57 | rpc 58 | 59 | ## Model parameters: 60 | summary(rpc$model) 61 | loadings(rpc$model) 62 | 63 | ggRGB(rpc$map,1,2,3, stretch="lin", q=0) 64 | if(require(gridExtra)){ 65 | plots <- lapply(1:3, function(x) ggR(rpc$map, x, geom_raster = TRUE)) 66 | grid.arrange(plots[[1]],plots[[2]], plots[[3]], ncol=2) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /man/readEE.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/readEE.R 3 | \name{readEE} 4 | \alias{readEE} 5 | \title{Tidy import tool for EarthExplorer .csv export files} 6 | \usage{ 7 | readEE(x) 8 | } 9 | \arguments{ 10 | \item{x}{Character or list. One or more paths to EarthExplorer export files.} 11 | } 12 | \value{ 13 | data.frame 14 | } 15 | \description{ 16 | Imports and tidies CSV files exported from EarthExplorer into data.frames and annotates missing fields. 17 | } 18 | \details{ 19 | The \href{https://earthexplorer.usgs.gov/}{EarthExplorer} CSV file can be produced from the search results page. 
Above the results click on 'export results' and select 'comma (,) delimited'. 20 | 21 | Note that only a subset of columns which was deemed interesting is imported. Please contact the maintainer if you think an omitted column should be included. 22 | } 23 | \examples{ 24 | library(ggplot2) 25 | ee <- readEE(system.file("external/EarthExplorer_LS8.txt", package = "RStoolbox")) 26 | 27 | ## Scenes with cloud cover < 20\% 28 | ee[ee$Cloud.Cover < 20,] 29 | 30 | ## Available time-series 31 | ggplot(ee) + 32 | geom_segment(aes(x = Date, xend = Date, y = 0, yend = 100 - Cloud.Cover, 33 | col = as.factor(Year))) + 34 | scale_y_continuous(name = "Scene quality (\% clear sky)") 35 | 36 | } 37 | -------------------------------------------------------------------------------- /man/readMeta.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/readMeta.R 3 | \name{readMeta} 4 | \alias{readMeta} 5 | \title{Read Landsat MTL metadata files} 6 | \usage{ 7 | readMeta(file, raw = FALSE) 8 | } 9 | \arguments{ 10 | \item{file}{path to Landsat MTL file (...MTL.txt)} 11 | 12 | \item{raw}{Logical. If \code{TRUE} the full raw metadata will be returned as a list. If \code{FALSE} (the default) all important metadata are homogenized into a standard format (ImageMetaData) and some information is added.} 13 | } 14 | \value{ 15 | Object of class ImageMetaData 16 | } 17 | \description{ 18 | Reads metadata and deals with legacy versions of Landsat metadata files and where possible adds missing information (radiometric gain and offset, earth-sun distance). 19 | } 20 | \examples{ 21 | ## Example metadata file (MTL) 22 | mtlFile <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package="RStoolbox") 23 | 24 | ## Read metadata 25 | metaData <- readMeta(mtlFile) 26 | 27 | ## Summary 28 | summary(metaData) 29 | 30 | } 31 | -------------------------------------------------------------------------------- /man/readSLI.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/readSLI.R 3 | \name{readSLI} 4 | \alias{readSLI} 5 | \title{Read ENVI spectral libraries} 6 | \usage{ 7 | readSLI(path) 8 | } 9 | \arguments{ 10 | \item{path}{Path to spectral library file with ending .sli.} 11 | } 12 | \value{ 13 | The spectral libraries are read into a data.frame. The first column contains the wavelengths and the remaining columns contain the spectra. 14 | } 15 | \description{ 16 | read/write support for ENVI spectral libraries 17 | } 18 | \details{ 19 | ENVI spectral libraries consist of a binary data file (.sli) and a corresponding header (.hdr, or .sli.hdr) file. 
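For illustration only (the actual lookup inside readSLI may differ), both header naming conventions can be checked for a given .sli path:
\preformatted{
sliFile <- system.file("external/vegSpec.sli", package = "RStoolbox")
hdrCandidates <- c(paste0(sliFile, ".hdr"),
                   paste0(tools::file_path_sans_ext(sliFile), ".hdr"))
hdrCandidates[file.exists(hdrCandidates)]
}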
20 | } 21 | \examples{ 22 | 23 | ## Example data 24 | sliFile <- system.file("external/vegSpec.sli", package="RStoolbox") 25 | sliTmpFile <- paste0(tempdir(),"/vegetationSpectra.sli") 26 | 27 | ## Read spectral library 28 | sli <- readSLI(sliFile) 29 | head(sli) 30 | plot(sli[,1:2], col = "orange", type = "l") 31 | lines(sli[,c(1,3)], col = "green") 32 | 33 | ## Write to binary spectral library 34 | writeSLI(sli, path = sliTmpFile) 35 | } 36 | \seealso{ 37 | \code{\link{writeSLI}} 38 | } 39 | -------------------------------------------------------------------------------- /man/rescaleImage.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rescaleImage.R 3 | \name{rescaleImage} 4 | \alias{rescaleImage} 5 | \title{Linear Image Rescaling} 6 | \usage{ 7 | rescaleImage(x, y, xmin, xmax, ymin, ymax, forceMinMax = FALSE, ...) 8 | } 9 | \arguments{ 10 | \item{x}{SpatRaster or numeric vector. Image to normalise.} 11 | 12 | \item{y}{SpatRaster or numeric vector. Reference image. Optional. Used to extract min and max values if ymin or ymax are missing.} 13 | 14 | \item{xmin}{Numeric. Min value of x. Either a single value or one value per layer in x. If xmin is not provided it will be extracted from x.} 15 | 16 | \item{xmax}{Numeric. Max value of x. Either a single value or one value per layer in x. If xmax is not provided it will be extracted from x.} 17 | 18 | \item{ymin}{Numeric. Min value of y. Either a single value or one value per layer in x. If ymin is not provided it will be extracted from y.} 19 | 20 | \item{ymax}{Numeric. Max value of y. Either a single value or one value per layer in x. If ymax is not provided it will be extracted from y.} 21 | 22 | \item{forceMinMax}{Logical. Forces update of min and max data slots in x or y.} 23 | 24 | \item{...}{additional arguments passed to \code{\link[terra:writeRaster]{terra::writeRaster()}}} 25 | } 26 | \value{ 27 | Returns a SpatRaster of the same dimensions as the input raster \code{x} but shifted and stretched to the new limits. 28 | } 29 | \description{ 30 | performs linear shifts of value ranges either to match min/max of another image (\code{y}) 31 | or to any other min and max value (\code{ymin} and \code{ymax}). 32 | } 33 | \details{ 34 | Providing \code{xmin} and \code{xmax} values manually can be useful if the raster contains a variable of a known, fixed value range, 35 | e.g. NDVI from -1 to 1 but the actual pixel values don't encompass this entire range. 36 | By providing \code{xmin = -1} and \code{xmax = 1} the values can be rescaled to any other range, 37 | e.g. 1 to 100 while comparability to other rescaled NDVI scenes is retained. 
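The underlying mapping is the usual linear rescaling new = (x - xmin) / (xmax - xmin) * (ymax - ymin) + ymin. A small numeric sketch with a plain vector (the same logic applies to rasters):
\preformatted{
x <- c(-1, 0, 0.5, 1)                                       # e.g. NDVI-like values
(x - (-1)) / (1 - (-1)) * (100 - 1) + 1                     # manual rescale from [-1,1] to [1,100]
rescaleImage(x, xmin = -1, xmax = 1, ymin = 1, ymax = 100)  # should give the same result
}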
38 | } 39 | \examples{ 40 | lsat2 <- lsat - 1000 41 | lsat2 42 | 43 | ## Rescale lsat2 to match original lsat value range 44 | lsat2_rescaled <- rescaleImage(lsat2, lsat) 45 | lsat2_rescaled 46 | 47 | ## Rescale lsat2 to value range [0,1] 48 | lsat2_unity <- rescaleImage(lsat2, ymin = 0, ymax = 1) 49 | lsat2_unity 50 | } 51 | \seealso{ 52 | \link[=histMatch]{histMatch} 53 | } 54 | -------------------------------------------------------------------------------- /man/rlogo.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/RStoolbox-package.R 3 | \docType{data} 4 | \name{rlogo} 5 | \alias{rlogo} 6 | \title{Rlogo as SpatRaster} 7 | \usage{ 8 | rlogo 9 | } 10 | \description{ 11 | Tiny example of raster data used to run examples. 12 | } 13 | \examples{ 14 | ggRGB(rlogo,r = 1,g = 2,b = 3) 15 | } 16 | \keyword{datasets} 17 | -------------------------------------------------------------------------------- /man/rsOpts.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rsOpts.R 3 | \name{rsOpts} 4 | \alias{rsOpts} 5 | \title{Set global options for RStoolbox} 6 | \usage{ 7 | rsOpts(verbose = NULL, idxdb = NULL) 8 | } 9 | \arguments{ 10 | \item{verbose}{Logical. If \code{TRUE} many functions will print status messages about the current processing step. By default verbose mode is disabled.} 11 | 12 | \item{idxdb}{List. The list contains the formal calculation of spectral indices. Modify this list to pipe your own spectral index through the internal C++ calculation of RStoolbox.} 13 | } 14 | \value{ 15 | No return, just a setter for the verbosity and the index-database of the RStoolbox package. For the latter, see the example of RStoolbox::spectralIndices() 16 | } 17 | \description{ 18 | shortcut to options(RStoolbox.*) 19 | } 20 | \examples{ 21 | rsOpts(verbose=TRUE) 22 | 23 | } 24 | -------------------------------------------------------------------------------- /man/sam.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/sam.R 3 | \name{sam} 4 | \alias{sam} 5 | \title{Spectral Angle Mapper} 6 | \usage{ 7 | sam(img, em, angles = FALSE, ...) 8 | } 9 | \arguments{ 10 | \item{img}{SpatRaster. Remote sensing imagery.} 11 | 12 | \item{em}{Matrix or data.frame with endmembers. Each row should contain the endmember spectrum of a class, i.e. columns correspond to bands in \code{img}. It is recommended to set the rownames to class names.} 13 | 14 | \item{angles}{Logical. If \code{TRUE} a SpatRaster containing one layer per endmember will be returned containing the spectral angles.} 15 | 16 | \item{...}{further arguments to be passed to \code{\link[terra]{writeRaster}}} 17 | } 18 | \value{ 19 | SpatRaster 20 | If \code{angles = FALSE} a single Layer will be returned in which each pixel is assigned to the closest endmember class (integer pixel values correspond to row order of \code{em}). 21 | } 22 | \description{ 23 | Calculates the angle in spectral space between pixels and a set of reference spectra (endmembers) for image classification based on spectral similarity. 24 | } 25 | \details{ 26 | For each pixel the spectral angle mapper calculates the angle between the vector defined by the pixel values and each endmember vector. 
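As a plain-R sketch of that angle for a single pixel spectrum p and endmember spectrum e (hypothetical values, not the internal C++ code):
\preformatted{
p <- c(0.1, 0.2, 0.3)
e <- c(0.3, 0.2, 0.1)
acos(sum(p * e) / (sqrt(sum(p^2)) * sqrt(sum(e^2))))   # spectral angle in radians (~0.775)
}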
The result of this is 27 | one raster layer for each endmember containing the spectral angle. The smaller the spectral angle the more similar a pixel is to a given endmember class. 28 | In a second step one can then go ahead and enforce thresholds of maximum angles or simply classify each pixel to the most similar endmember. 29 | } 30 | \examples{ 31 | library(terra) 32 | library(ggplot2) 33 | 34 | ## Sample endmember spectra 35 | ## First location is water, second is open agricultural vegetation 36 | pts <- data.frame(x = c(624720, 627480), y = c(-414690, -411090)) 37 | endmembers <- extract(lsat, pts) 38 | rownames(endmembers) <- c("water", "vegetation") 39 | 40 | ## Calculate spectral angles 41 | lsat_sam <- sam(lsat, endmembers, angles = TRUE) 42 | plot(lsat_sam) 43 | 44 | ## Classify based on minimum angle 45 | lsat_sam <- sam(lsat, endmembers, angles = FALSE) 46 | 47 | ggR(lsat_sam, forceCat = TRUE, geom_raster=TRUE) + 48 | scale_fill_manual(values = c("blue", "green"), labels = c("water", "vegetation")) 49 | } 50 | -------------------------------------------------------------------------------- /man/saveRSTBX.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/saveRSTBX.R 3 | \name{saveRSTBX} 4 | \alias{saveRSTBX} 5 | \alias{readRSTBX} 6 | \title{Save and Read RStoolbox Classification Results} 7 | \usage{ 8 | saveRSTBX(x, filename, format = "raster", ...) 9 | 10 | readRSTBX(filename) 11 | } 12 | \arguments{ 13 | \item{x}{RStoolbox object of classes c("fCover", "rasterPCA", "superClass", "unsuperClass")} 14 | 15 | \item{filename}{Character. Path and filename. Any file extension will be ignored.} 16 | 17 | \item{format}{Character. Driver to use for the raster file} 18 | 19 | \item{...}{further arguments passed to writeRaster} 20 | } 21 | \value{ 22 | The output of saveRSTBX will be at least two files written to disk: 23 | a) an .rds file containing the object itself and 24 | b) the raster file (depending on the driver you choose this can be more than two files). 25 | } 26 | \description{ 27 | Saves objects of classes unsuperClass, superClass, rasterPCA and fCover to 28 | file. Useful to archive the fitted models. 29 | } 30 | \section{Functions}{ 31 | \itemize{ 32 | \item \code{saveRSTBX()}: Save RStoolbox object to file 33 | 34 | \item \code{readRSTBX()}: Read files saved with saveRSTBX 35 | 36 | }} 37 | \note{ 38 | All files must be kept in the same directory to read the full object back into R 39 | by means of readRSTBX. You can move them to another location but you'll have to move *all* of them 40 | (just like you would with Shapefiles). In case the raster file(s) is missing, readRSTBX will still 41 | return the object but the raster will be missing. 42 | 43 | saveRSTBX and readRSTBX are convenience wrappers around saveRDS, readRDS. This means 44 | you can read all files created this way also with base functionality as long as you don't move your files. 45 | This is because x$map is a SpatRaster object and hence contains only a static link to the file on disk. 46 | } 47 | \examples{ 48 | \dontrun{ 49 | input <- rlogo 50 | ## Create filename 51 | file <- paste0(tempdir(), "/test", runif(1)) 52 | ## Run PCA 53 | rpc <- rasterPCA(input, nSample = 100) 54 | ## Save object 55 | saveRSTBX(rpc, filename=file) 56 | ## Which files were written? 
57 | list.files(tempdir(), pattern = basename(file)) 58 | ## Re-read files 59 | re_rpc <- readRSTBX(file) 60 | ## Remove files 61 | file.remove(list.files(tempdir(), pattern = basename(file), full = TRUE)) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /man/sen2.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/RStoolbox-package.R 3 | \docType{data} 4 | \name{sen2} 5 | \alias{sen2} 6 | \title{Sentinel 2 MSI L2A Scene} 7 | \usage{ 8 | sen2 9 | } 10 | \description{ 11 | Contains all 13 bands, already converted to spectral reflectance. 12 | } 13 | \examples{ 14 | ggRGB(sen2, r=4, g=3, b=2, stretch = "lin") 15 | } 16 | \keyword{datasets} 17 | -------------------------------------------------------------------------------- /man/srtm.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/RStoolbox-package.R 3 | \docType{data} 4 | \name{srtm} 5 | \alias{srtm} 6 | \title{SRTM Digital Elevation Model} 7 | \usage{ 8 | srtm 9 | } 10 | \description{ 11 | DEM for the Landsat example area taken from SRTM v3 tile: s04_w050_1arc_v3.tif 12 | } 13 | \examples{ 14 | ggR(srtm) 15 | } 16 | \keyword{datasets} 17 | -------------------------------------------------------------------------------- /man/srtm_sen2.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/RStoolbox-package.R 3 | \docType{data} 4 | \name{srtm_sen2} 5 | \alias{srtm_sen2} 6 | \title{SRTM scene for the sen2 example scene} 7 | \usage{ 8 | srtm_sen2 9 | } 10 | \description{ 11 | DEM for the Sentinel 2 example area taken from SRTM v4 12 | } 13 | \examples{ 14 | ggR(srtm_sen2) 15 | } 16 | \keyword{datasets} 17 | -------------------------------------------------------------------------------- /man/stackMeta.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/stackMeta.R 3 | \name{stackMeta} 4 | \alias{stackMeta} 5 | \title{Import separate Landsat files into single stack} 6 | \usage{ 7 | stackMeta(file, quantity = "all", category = "image", allResolutions = FALSE) 8 | } 9 | \arguments{ 10 | \item{file}{Character. Path to Landsat MTL metadata (*_MTL.txt) file or a Landsat CDR xml metadata file (*.xml).} 11 | 12 | \item{quantity}{Character vector. Which quantity should be returned. Options: digital numbers ('dn'), top of atmosphere reflectance ('tre'), at surface reflectance ('sre'), brightness temperature ('bt'), spectral index ('index'), all quantities ('all').} 13 | 14 | \item{category}{Character vector. Which category of data to return. Options 'image': image data, 'pan': panchromatic image, 'index': multiband indices, 'qa': quality flag bands, 'all': all categories.} 15 | 16 | \item{allResolutions}{Logical. If \code{TRUE} a list will be returned with length = unique spatial resolutions. 17 | This argument was introduced to maintain backward compatibility and will be switched to TRUE in an upcoming release. Please base all new code on terra.} 18 | } 19 | \value{ 20 | Returns one single SpatRaster comprising all requested bands. 21 | If \code{allResolutions = TRUE} *and* there are different resolution layers (e.g. 
a 15m panchromatic band along with 30m imagery) a list of RasterStacks will be returned. 22 | } 23 | \description{ 24 | Reads Landsat MTL or XML metadata files and loads single Landsat Tiffs into a SpatRaster. 25 | Be aware that by default stackMeta() does NOT import panchromatic bands nor thermal bands with resolutions != 30m. 26 | } 27 | \note{ 28 | Be aware that by default stackMeta() does NOT import panchromatic bands nor thermal bands with resolutions != 30m. Use the allResolutions argument to import all layers. 29 | Note that nowadays the USGS uses cubic convolution to resample the TIR bands to 30m resolution. 30 | } 31 | \examples{ 32 | ## Example metadata file (MTL) 33 | mtlFile <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package="RStoolbox") 34 | 35 | ## Read metadata 36 | metaData <- readMeta(mtlFile) 37 | summary(metaData) 38 | 39 | ## Load rasters based on metadata file 40 | lsat <- stackMeta(mtlFile) 41 | lsat 42 | } 43 | -------------------------------------------------------------------------------- /man/tasseledCap.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tasseledCap.R 3 | \name{tasseledCap} 4 | \alias{tasseledCap} 5 | \title{Tasseled Cap Transformation} 6 | \usage{ 7 | tasseledCap(img, sat, ...) 8 | } 9 | \arguments{ 10 | \item{img}{SpatRaster. Input image. Band order must correspond to sensor specifications (see Details and Examples)} 11 | 12 | \item{sat}{Character. Sensor; one of: c("Landsat4TM", "Landsat5TM", "Landsat7ETM", "Landsat8OLI", "MODIS", "QuickBird", "Spot5", "RapidEye"). Case is irrelevant.} 13 | 14 | \item{...}{Further arguments passed to writeRaster.} 15 | } 16 | \value{ 17 | Returns a SpatRaster with the three bands: brightness, greenness, and (soil) wetness. 18 | } 19 | \description{ 20 | Calculates brightness, greenness and wetness from multispectral imagery. 21 | Currently implemented: Landsat 4 TM, Landsat 5 TM, Landsat 7ETM+, Landsat 8 OLI, MODIS, QuickBird, Spot5 and RapidEye. 22 | } 23 | \details{ 24 | Currently implemented: Landsat 4 TM, Landsat 5 TM, Landsat 7ETM+, Landsat 8 OLI, MODIS, QuickBird, Spot5, RapidEye. 25 | Input data must be in top of atmosphere reflectance. 26 | Moreover, bands must be provided in ascending order as listed in the table below. 27 | Irrelevant bands, such as Landsat Thermal Bands or QuickBird/Spot5 Panchromatic Bands must be omitted. 28 | Required bands are: 29 | \tabular{rrrl}{ 30 | sat \tab bands \tab coefficients \tab data unit\cr 31 | Landsat4TM \tab 1,2,3,4,5,7 \tab Crist 1985 \tab reflectance \cr 32 | Landsat5TM \tab 1,2,3,4,5,7 \tab Crist 1985 \tab reflectance \cr 33 | Landsat7ETM \tab 1,2,3,4,5,7 \tab Huang 2002 \tab reflectance \cr 34 | Landsat8OLI \tab 2,3,4,5,6,7 \tab Baig 2014 \tab reflectance \cr 35 | MODIS \tab 1,2,3,4,5,6,7 \tab Lobser 2007 \tab reflectance \cr 36 | QuickBird \tab 2,3,4,5 \tab Yarbrough 2005 \tab reflectance \cr 37 | Spot5 \tab 2,3,4,5 \tab Ivits 2008 \tab reflectance \cr 38 | RapidEye \tab 1,2,3,4,5 \tab Schoenert 2014 \tab reflectance \cr 39 | } 40 | } 41 | \examples{ 42 | library(terra) 43 | 44 | ## Run tasseled cap (exclude thermal band 6) 45 | lsat_tc <- tasseledCap(lsat[[c(1:5,7)]], sat = "Landsat5TM") 46 | lsat_tc 47 | plot(lsat_tc) 48 | } 49 | \references{ 50 | Crist (1985) "A TM Tasseled Cap Equivalent Transformation for Reflectance Factor Data." Remote Sensing of Environment 17 (3): 301-306 51 | 52 | Huang et al. 
(2002) "Derivation of a Tasselled Cap Transformation Based on Landsat 7 At-Satellite Reflectance." International Journal of Remote Sensing 23 (8): 1741-1748 53 | 54 | Baig et al. (2014) "Derivation of a Tasselled Cap Transformation Based on Landsat 8 At-Satellite Reflectance." Remote Sensing Letters 5 (5): 423-431. 55 | 56 | Lobser et al. (2007) "MODIS Tasselled Cap: Land Cover Characteristics Expressed through Transformed MODIS Data." International Journal of Remote Sensing 28 (22): 5079-5101. 57 | 58 | Yarbrough et al. (2005) "QuickBird 2 tasseled cap transform coefficients: a comparison of derivation methods." Pecora 16 Global Priorities in Land Remote Sensing: 23-27. 59 | 60 | Ivits et al. (2008) "Orthogonal transformation of segmented SPOT5 images." Photogrammetric Engineering & Remote Sensing 74 (11): 1351-1364. 61 | 62 | Schoenert et al. (2014) "Derivation of tasseled cap coefficients for RapidEye data." Earth Resources and Environmental Remote Sensing/GIS Applications V (9245): 92450Qs. 63 | } 64 | -------------------------------------------------------------------------------- /man/topCor.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/topCor.R 3 | \name{topCor} 4 | \alias{topCor} 5 | \title{Topographic Illumination Correction} 6 | \usage{ 7 | topCor( 8 | img, 9 | dem, 10 | metaData, 11 | solarAngles = c(), 12 | method = "C", 13 | stratImg = NULL, 14 | nStrat = 5, 15 | illu, 16 | ... 17 | ) 18 | } 19 | \arguments{ 20 | \item{img}{SpatRaster. Imagery to correct} 21 | 22 | \item{dem}{SpatRaster. Either a digital elevation model as a RasterLayer or a RasterStack/Brick with pre-calculated slope and aspect (see \link[terra]{terrain}) in which case the layers must be named 'slope' and 'aspect'. 23 | Must have the same dimensions as \code{img}.} 24 | 25 | \item{metaData}{Character, ImageMetaData. Either a path to a Landsat meta-data file (MTL) or an ImageMetaData object (see \link{readMeta})} 26 | 27 | \item{solarAngles}{Numeric vector containing sun azimuth and sun zenith (in radians and in that order). Not needed if metaData is provided} 28 | 29 | \item{method}{Character. One of c("cos", "avgcos", "minnaert", "C", "stat", "illu"). Choosing 'illu' will return only the local illumination map.} 30 | 31 | \item{stratImg}{RasterLayer or SpatRaster to define strata, e.g. NDVI. Or the string 'slope' in which case stratification will be on \code{nStrat} slope classes. Only relevant if \code{method = 'minnaert'}.} 32 | 33 | \item{nStrat}{Integer. Number of bins or quantiles to stratify by. If a bin has less than 50 samples it will be merged with the next bin. Only relevant if \code{method = 'minnaert'}.} 34 | 35 | \item{illu}{SpatRaster. Optional pre-calculated ilumination map. Run topCor with method="illu" to calculate an ilumination map} 36 | 37 | \item{...}{arguments passed to \code{\link[terra]{writeRaster}}} 38 | } 39 | \value{ 40 | SpatRaster 41 | } 42 | \description{ 43 | account and correct for changes in illumination due to terrain elevation. 44 | } 45 | \details{ 46 | For detailed discussion of the various approaches please see Riano et al. (2003). 47 | 48 | The minnaert correction can be stratified for different landcover characteristics. If \code{stratImg = 'slope'} the analysis is stratified by the slope, 49 | i.e. the slope values are divided into \code{nStrat} classes and the correction coefficient k is calculated and applied separately for each slope class. 
50 | An alternative could be to stratify by a vegetation index in which case an additional raster layer has to be provided via the \code{stratImg} argument. 51 | } 52 | \examples{ 53 | ## Load example data 54 | metaData <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package="RStoolbox") 55 | metaData <- readMeta(metaData) 56 | 57 | ## Minnaert correction, solar angles from metaData 58 | lsat_minnaert <- topCor(lsat, dem = srtm, metaData = metaData, method = "minnaert") 59 | 60 | ## C correction, solar angles provided manually 61 | lsat_C <- topCor(lsat, dem = srtm, solarAngles = c(1.081533, 0.7023922), method = "C") 62 | 63 | } 64 | \references{ 65 | Riano et al. (2003) Assessment of different topographic correction in Landsat-TM data for mapping vegetation types. IEEE Transactions on Geoscience and Remote Sensing. 66 | } 67 | -------------------------------------------------------------------------------- /man/unsuperClass.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/unsuperClass.R 3 | \name{unsuperClass} 4 | \alias{unsuperClass} 5 | \title{Unsupervised Classification} 6 | \usage{ 7 | unsuperClass( 8 | img, 9 | nSamples = 10000, 10 | nClasses = 5, 11 | nStarts = 25, 12 | nIter = 100, 13 | norm = FALSE, 14 | clusterMap = TRUE, 15 | algorithm = "Hartigan-Wong", 16 | output = "classes", 17 | ... 18 | ) 19 | } 20 | \arguments{ 21 | \item{img}{SpatRaster.} 22 | 23 | \item{nSamples}{Integer. Number of random samples to draw to fit cluster map. Only relevant if clusterMap = TRUE.} 24 | 25 | \item{nClasses}{Integer. Number of classes.} 26 | 27 | \item{nStarts}{Integer. Number of random starts for kmeans algorithm.} 28 | 29 | \item{nIter}{Integer. Maximal number of iterations allowed.} 30 | 31 | \item{norm}{Logical. If \code{TRUE} will normalize img first using \link{normImage}. Normalizing is beneficial if your predictors have different scales.} 32 | 33 | \item{clusterMap}{Logical. Fit kmeans model to a random subset of the img (see Details).} 34 | 35 | \item{algorithm}{Character. \link[stats]{kmeans} algorithm. One of c("Hartigan-Wong", "Lloyd", "MacQueen")} 36 | 37 | \item{output}{Character. Either 'classes' (kmeans class; default) or 'distances' (euclidean distance to each cluster center).} 38 | 39 | \item{...}{further arguments to be passed to \link[terra]{writeRaster}, e.g. filename} 40 | } 41 | \value{ 42 | Returns an RStoolbox::unsuperClass object, which is a list containing the kmeans model ($model) and the raster map ($map). 43 | For output = "classes", $map contains a SpatRaster with discrete classes (kmeans clusters); for output = "distances" $map contains 44 | a SpatRaster, with `nClasses` layers, where each layer maps the euclidean distance to the corresponding class centroid. 45 | } 46 | \description{ 47 | Unsupervised clustering of SpatRaster data using kmeans clustering 48 | } 49 | \details{ 50 | Clustering is done using \code{\link[stats]{kmeans}}. This can be done for all pixels of the image (\code{clusterMap=FALSE}), however this can be slow and is 51 | not memory safe. Therefore if you have large raster data (> memory), as is typically the case with remote sensing imagery it is advisable to choose clusterMap=TRUE (the default). 52 | This means that a kmeans cluster model is calculated based on a random subset of pixels (\code{nSamples}). 
Then the distance of *all* pixels to the cluster centers 53 | is calculated in a stepwise fashion using \code{\link[terra]{predict}}. Class assignment is based on minimum Euclidean distance to the cluster centers. 54 | 55 | The solution of the kmeans algorithm often depends on the initial configuration of class centers which is chosen randomly. 56 | Therefore, kmeans is usually run with multiple random starting configurations in order to find a convergent solution. 57 | The \code{nStarts} argument allows you to specify how many random starts are conducted. 58 | } 59 | \examples{ 60 | \dontrun{ 61 | library(terra) 62 | input <- rlogo 63 | 64 | ## Plot 65 | olpar <- par(no.readonly = TRUE) # back-up par 66 | par(mfrow=c(1,2)) 67 | plotRGB(input) 68 | 69 | ## Run classification 70 | set.seed(25) 71 | unC <- unsuperClass(input, nSamples = 100, nClasses = 5, nStarts = 5) 72 | unC 73 | 74 | ## Plots 75 | colors <- rainbow(5) 76 | plot(unC$map, col = colors, legend = FALSE, axes = FALSE, box = FALSE) 77 | legend(1,1, legend = paste0("C",1:5), fill = colors, title = "Classes", horiz = TRUE, bty = "n") 78 | 79 | ## Return the distance of each pixel to each class centroid 80 | unC <- unsuperClass(input, nSamples = 100, nClasses = 3, output = "distances") 81 | unC 82 | 83 | ggR(unC$map, 1:3, geom_raster = TRUE) 84 | 85 | par(olpar) # reset par 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /man/validateMap.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/validateMap.R 3 | \name{validateMap} 4 | \alias{validateMap} 5 | \title{Map accuracy assessment} 6 | \usage{ 7 | validateMap( 8 | map, 9 | valData, 10 | responseCol, 11 | nSamplesV = 500, 12 | mode = "classification", 13 | classMapping = NULL 14 | ) 15 | } 16 | \arguments{ 17 | \item{map}{SpatRaster. The classified map.} 18 | 19 | \item{valData}{sf object with validation data (POLYGONs or POINTs).} 20 | 21 | \item{responseCol}{Character. Column containing the validation data in attribute table of \code{valData}.} 22 | 23 | \item{nSamplesV}{Integer. Number of pixels to sample for validation (only applies to polygons).} 24 | 25 | \item{mode}{Character. Either 'classification' or 'regression'.} 26 | 27 | \item{classMapping}{optional data.frame with columns \code{'class'} and \code{'classID'} defining the mapping from raster integers to class names.} 28 | } 29 | \value{ 30 | Returns a structured list including the performance and confusion matrix of the validated map. 31 | } 32 | \description{ 33 | validates a map from a classification or regression model. This can be useful to update the accuracy assessment after filtering, e.g. for a minimum mapping unit. 
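For example, the returned list can be inspected with \code{str()} (element names depend on \code{mode}; \code{val} below stands for a hypothetical result of validateMap):
\preformatted{
str(val, max.level = 1)
}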
34 | } 35 | \examples{ 36 | library(caret) 37 | library(terra) 38 | 39 | ## Training data 40 | poly <- readRDS(system.file("external/trainingPolygons_lsat.rds", package="RStoolbox")) 41 | 42 | ## Split training data into training and validation sets (50\%-50\%) 43 | splitIn <- createDataPartition(poly$class, p = .5)[[1]] 44 | train <- poly[splitIn,] 45 | val <- poly[-splitIn,] 46 | 47 | ## Classify (deliberately poorly) 48 | sc <- superClass(lsat, trainData = train, responseCol = "class", nSamples = 50, model = "mlc") 49 | 50 | ## Polish map with majority filter 51 | 52 | polishedMap <- focal(sc$map, matrix(1,3,3), fun = modal) 53 | 54 | ## Validation 55 | ## Before filtering 56 | val0 <- validateMap(sc$map, valData = val, responseCol = "class", 57 | classMapping = sc$classMapping) 58 | ## After filtering 59 | val1 <- validateMap(polishedMap, valData = val, responseCol = "class", 60 | classMapping = sc$classMapping) 61 | } 62 | -------------------------------------------------------------------------------- /man/writeSLI.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/readSLI.R 3 | \name{writeSLI} 4 | \alias{writeSLI} 5 | \title{Write ENVI spectral libraries} 6 | \usage{ 7 | writeSLI( 8 | x, 9 | path, 10 | wavl.units = "Micrometers", 11 | scaleF = 1, 12 | mode = "bin", 13 | endian = .Platform$endian 14 | ) 15 | } 16 | \arguments{ 17 | \item{x}{data.frame with first column containing wavelengths and all other columns containing spectra.} 18 | 19 | \item{path}{path to spectral library file to be created.} 20 | 21 | \item{wavl.units}{wavelength units. Defaults to Micrometers. Nanometers is another typical option.} 22 | 23 | \item{scaleF}{optional reflectance scaling factor. Defaults to 1.} 24 | 25 | \item{mode}{character string specifying output file type. Must be one of \code{"bin"} for binary .sli files or \code{"ASCII"} for ASCII ENVI plot files.} 26 | 27 | \item{endian}{character. Optional. By default the endian is determined based on the platform, but can be forced manually by setting it to either "little" or "big".} 28 | } 29 | \value{ 30 | Does not return anything; writes the SLI file directly to the location specified by the \code{path} parameter. 31 | } 32 | \description{ 33 | Writes binary ENVI spectral library files (sli) with accompanying header (.sli.hdr) files OR ASCII spectral library files in ENVI format. 34 | } 35 | \details{ 36 | ENVI spectral libraries with ending .sli are binary arrays with spectra saved in rows. 37 | } 38 | \examples{ 39 | 40 | ## Example data 41 | sliFile <- system.file("external/vegSpec.sli", package="RStoolbox") 42 | sliTmpFile <- paste0(tempdir(),"/vegetationSpectra.sli") 43 | 44 | ## Read spectral library 45 | sli <- readSLI(sliFile) 46 | head(sli) 47 | plot(sli[,1:2], col = "orange", type = "l") 48 | lines(sli[,c(1,3)], col = "green") 49 | 50 | ## Write to binary spectral library 51 | writeSLI(sli, path = sliTmpFile) 52 | } 53 | \seealso{ 54 | \code{\link{readSLI}} 55 | } 56 | -------------------------------------------------------------------------------- /prep-release/makeRelease.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd ${HOME}/eclipseWorkspace/RStoolbox || exit 3 | 4 | ## Check for uncommitted changes 5 | if [[ -n $(git status -s) ]] 6 | then 7 | echo "Uncommitted changes. Clean up first." 
8 | git status 9 | exit 1 10 | fi 11 | 12 | 13 | ## Begin build and check 14 | git checkout master 15 | datScr=$(git log -1 --format=%ct data-raw/) 16 | datSys=$(git log -1 --format=%ct R/sysdata.rda) 17 | if [ $(( datScr > datSys )) -eq 1 ] 18 | then 19 | echo -e "\n**********************************************************" 20 | echo "Generate sysdata *****************************************" 21 | echo "**********************************************************" 22 | Rscript data-raw/generate_sysdata.R 23 | git commit -a -m "Automatic commit: Document & Update sysdata" 24 | else 25 | echo 'R/sysdata.rda is already up-to-date' 26 | fi 27 | 28 | echo -e "\n**********************************************************" 29 | echo "Document and install RStoolbox ***************************" 30 | echo "**********************************************************" 31 | Rscript -e "library(devtools); library(methods); document(); install()" 32 | 33 | 34 | ## Re-build example data 35 | tmstr=$(git log -1 --format=%ct data/ inst/external/trainingPoints_rlogo.rds inst/external/landsat/) 36 | texmpl=$(git log example-data -1 --format=%ct) 37 | if [ $(( tmstr > texmpl )) -eq 0 ] 38 | then 39 | echo -e "\n**********************************************************" 40 | echo "Generate example data ************************************" 41 | echo "**********************************************************" 42 | git checkout example-data 43 | Rscript data-raw/generate_data.R 44 | git commit -a -m "Automatic commit: Update example data (landsat, rlogo, srtm, lsat)" 45 | 46 | ## Back to master 47 | git checkout master 48 | git checkout example-data data/rlogo.rda data/srtm.rda data/lsat.rda inst/external/landsat inst/external/trainingPolygons_lsat.rds 49 | Rscript -e "library(devtools); library(methods); document()" 50 | git commit -a -m "Automatic commit: Pull example data from branch example-data" 51 | else 52 | echo "Example data already up-to-date." 53 | fi 54 | 55 | # ## Website () 56 | # echo -e "\n**********************************************************" 57 | # echo "Build website documentation ******************************" 58 | # echo "**********************************************************" 59 | # git checkout gh-pages 60 | # Rscript rstbx-docu/build_docu.R 61 | # git commit -a -m "Automatic commit: Update gh-pages package documentation" 62 | # git checkout master 63 | 64 | echo -e "\n**********************************************************" 65 | echo "R CMD check **********************************************" 66 | echo "**********************************************************" 67 | Rscript -e "library(devtools); library(methods); check_win_release(); check_win_oldrelease(); check_win_devel(); check()" 68 | #Valgrind times out --> run locally 69 | #Rscript -e "library(rhub); library(methods); check(platform='debian-gcc-release', valgrind = TRUE)" &> ${HOME}/RHub_RStoolbox_check_with_valgrind.log 70 | Rscript -e "library(rhub); library(methods); check(platform=c('linux-x86_64-rocker-gcc-san'))" 71 | ## Check on MAC 72 | #Rscript -e "library(methods); library(rhub); check(platform='macos-highsierra-release-cran')" 73 | 74 | Rscript -e "library(revdepcheck); revdep_check(num_workers = 4)" 75 | 76 | 77 | cd .. 
78 | R CMD build RStoolbox 79 | R CMD check $(ls RStoolbox*tar.gz | tail -n1) -o /tmp --run-donttest --as-cran --use-valgrind 80 | rm RStoolbox_0*tar.gz 81 | 82 | 83 | -------------------------------------------------------------------------------- /src/Makevars: -------------------------------------------------------------------------------- 1 | PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) 2 | -------------------------------------------------------------------------------- /src/Makevars.win: -------------------------------------------------------------------------------- 1 | 2 | PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) 3 | -------------------------------------------------------------------------------- /src/classQA.cpp: -------------------------------------------------------------------------------- 1 | #include <Rcpp.h> 2 | using namespace Rcpp; 3 | 4 | // [[Rcpp::export]] 5 | IntegerVector classQA(NumericVector& x, NumericMatrix rcl){ 6 | int xs = x.size(); 7 | int rclr = rcl.nrow(); 8 | IntegerVector out(xs, NA_INTEGER); 9 | for(int r = 0; r < rclr; r++){ 10 | for(int j = 0; j < xs; j++){ 11 | if(rcl(r,0) == x[j]) out[j] = rcl(r,1); 12 | } 13 | } 14 | return out; 15 | } 16 | -------------------------------------------------------------------------------- /src/entropy.cpp: -------------------------------------------------------------------------------- 1 | #include <Rcpp.h> 2 | using namespace Rcpp; 3 | // [[Rcpp::export]] 4 | NumericVector entropyCpp(NumericMatrix& x){ 5 | int i, nr = x.nrow(); 6 | int nl = x.ncol(); 7 | NumericVector p, ptx, out(nr); 8 | 9 | for(i=0; i < nr; ++i){ 10 | // FREQUENCY TABLE 11 | NumericVector tab = unique(x(i,_)); 12 | NumericVector ts(tab.size()); 13 | for(int c = 0; c < tab.size(); c++){ 14 | for(int j = 0; j < nl; j++){ 15 | if(x(i,j) == tab[c]) ts[c]++; 16 | } 17 | } 18 | // ENTROPY 19 | p = ts / nl; 20 | ptx = ifelse(p == 0, 0.0, p * log(p)); 21 | out[i] = -1.0 * sum(ptx); 22 | } 23 | return out; 24 | } 25 | 26 | -------------------------------------------------------------------------------- /src/gainOffsetRescale.cpp: -------------------------------------------------------------------------------- 1 | #include <Rcpp.h> 2 | using namespace Rcpp; 3 | // [[Rcpp::export]] 4 | NumericMatrix gainOffsetRescale( NumericMatrix x, NumericVector g, NumericVector o, LogicalVector clamp){ 5 | int nl = x.ncol(); 6 | int nr = x.nrow(); 7 | for(int i = 0 ; i < nl ; i++){ 8 | for(int p = 0; p < nr; p++){ 9 | double db = x(p,i) * g[i] + o[i]; 10 | if(clamp[0] && db < 0.0) db = 0.0; 11 | if(clamp[1] && db > 1.0) db = 1.0; 12 | x(p,i) = db; 13 | } 14 | } 15 | return x; 16 | } 17 | -------------------------------------------------------------------------------- /src/init.c: -------------------------------------------------------------------------------- 1 | #include <R.h> 2 | #include <Rinternals.h> 3 | #include <stdlib.h> // for NULL 4 | #include <R_ext/Rdynload.h> 5 | 6 | /* FIXME: 7 | Check these declarations against the C/Fortran source code. 
8 | */ 9 | 10 | /* .Call calls */ 11 | extern SEXP _RStoolbox_classQA(SEXP, SEXP); 12 | extern SEXP _RStoolbox_entropyCpp(SEXP); 13 | extern SEXP _RStoolbox_gainOffsetRescale(SEXP, SEXP, SEXP, SEXP); 14 | extern SEXP _RStoolbox_nnls_solver(SEXP, SEXP, SEXP, SEXP); 15 | extern SEXP _RStoolbox_normImageCpp(SEXP, SEXP, SEXP); 16 | extern SEXP _RStoolbox_oneHotCpp(SEXP, SEXP, SEXP, SEXP, SEXP); 17 | extern SEXP _RStoolbox_predictMlcCpp(SEXP, SEXP, SEXP); 18 | extern SEXP _RStoolbox_predKmeansCpp(SEXP, SEXP, SEXP); 19 | extern SEXP _RStoolbox_pwSimilarityCpp(SEXP, SEXP, SEXP); 20 | extern SEXP _RStoolbox_rescaleImageCpp(SEXP, SEXP, SEXP, SEXP); 21 | extern SEXP _RStoolbox_specSimC(SEXP, SEXP); 22 | extern SEXP _RStoolbox_spectralIndicesCpp(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 23 | extern SEXP _RStoolbox_availableRAMCpp(SEXP); 24 | 25 | static const R_CallMethodDef CallEntries[] = { 26 | {"_RStoolbox_classQA", (DL_FUNC) &_RStoolbox_classQA, 2}, 27 | {"_RStoolbox_entropyCpp", (DL_FUNC) &_RStoolbox_entropyCpp, 1}, 28 | {"_RStoolbox_gainOffsetRescale", (DL_FUNC) &_RStoolbox_gainOffsetRescale, 4}, 29 | {"_RStoolbox_nnls_solver", (DL_FUNC) &_RStoolbox_nnls_solver, 4}, 30 | {"_RStoolbox_normImageCpp", (DL_FUNC) &_RStoolbox_normImageCpp, 3}, 31 | {"_RStoolbox_oneHotCpp", (DL_FUNC) &_RStoolbox_oneHotCpp, 5}, 32 | {"_RStoolbox_predictMlcCpp", (DL_FUNC) &_RStoolbox_predictMlcCpp, 3}, 33 | {"_RStoolbox_predKmeansCpp", (DL_FUNC) &_RStoolbox_predKmeansCpp, 3}, 34 | {"_RStoolbox_pwSimilarityCpp", (DL_FUNC) &_RStoolbox_pwSimilarityCpp, 3}, 35 | {"_RStoolbox_rescaleImageCpp", (DL_FUNC) &_RStoolbox_rescaleImageCpp, 4}, 36 | {"_RStoolbox_specSimC", (DL_FUNC) &_RStoolbox_specSimC, 2}, 37 | {"_RStoolbox_spectralIndicesCpp", (DL_FUNC) &_RStoolbox_spectralIndicesCpp, 24}, 38 | {"_RStoolbox_availableRAMCpp", (DL_FUNC) &_RStoolbox_availableRAMCpp, 1}, 39 | {NULL, NULL, 0} 40 | }; 41 | 42 | void R_init_RStoolbox(DllInfo *dll) 43 | { 44 | R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); 45 | R_useDynamicSymbols(dll, FALSE); 46 | } 47 | -------------------------------------------------------------------------------- /src/memory.cpp: -------------------------------------------------------------------------------- 1 | // Robert Hijmans with improvements by Ben Fasoli 2 | // https://github.com/rspatial/raster/pull/175 3 | 4 | #ifdef _WIN32 5 | #include 6 | #elif __linux__ 7 | #include 8 | #elif __APPLE__ 9 | #include 10 | #include 11 | #include 12 | #include 13 | #endif 14 | 15 | // [[Rcpp::export]] 16 | double availableRAMCpp(double ram) { 17 | // return available RAM 18 | #ifdef _WIN32 19 | MEMORYSTATUSEX statex; 20 | statex.dwLength = sizeof(statex); 21 | GlobalMemoryStatusEx(&statex); 22 | ram = statex.ullAvailPhys; 23 | #elif __linux__ 24 | // source available memory from /proc/meminfo 25 | // default to searching for MemAvailable field (kernel versions >= 3.14) 26 | FILE *fp = popen("awk '/MemAvailable/ {print $2}' /proc/meminfo", "r"); 27 | if (fp == NULL) { 28 | return ram; 29 | } 30 | double ramkb; 31 | int ok = fscanf(fp, "%lf", &ramkb); // returned in kB 32 | pclose(fp); 33 | if (ok && (ramkb > 0)) { 34 | return ramkb * 1000.; 35 | } 36 | 37 | // fallback to estimating memory from other fields if MemAvailable not found 38 | FILE *fp2 = popen("awk -v low=$(grep low /proc/zoneinfo | awk '{k+=$2}END{print k}') '{a[$1]=$2}END{print 
a[\"MemFree:\"]+a[\"Active(file):\"]+a[\"Inactive(file):\"]+a[\"SReclaimable:\"]-(12*low);}' /proc/meminfo", "r"); 39 | if (fp2 == NULL) { 40 | return ram; 41 | } 42 | ok = fscanf(fp2, "%lf", &ramkb); // returned in kB 43 | pclose(fp2); 44 | if (ramkb > 0) { 45 | return ramkb * 1000.; 46 | } 47 | #elif __APPLE__ 48 | vm_size_t page_size; 49 | mach_port_t mach_port; 50 | mach_msg_type_number_t count; 51 | vm_statistics64_data_t vm_stats; 52 | 53 | mach_port = mach_host_self(); 54 | count = sizeof(vm_stats) / sizeof(natural_t); 55 | if (KERN_SUCCESS == host_page_size(mach_port, &page_size) && 56 | KERN_SUCCESS == host_statistics64(mach_port, HOST_VM_INFO, 57 | (host_info64_t)&vm_stats, &count)) { 58 | long long free_memory = ((int64_t)vm_stats.free_count + 59 | (int64_t)vm_stats.inactive_count) * (int64_t)page_size; 60 | ram = free_memory; 61 | //https://stackoverflow.com/questions/63166/how-to-determine-cpu-and-memory-consumption-from-inside-a-process 62 | } 63 | #endif 64 | 65 | return ram; 66 | } 67 | -------------------------------------------------------------------------------- /src/memory.h: -------------------------------------------------------------------------------- 1 | double availableRAM(); -------------------------------------------------------------------------------- /src/nnls_solver.cpp: -------------------------------------------------------------------------------- 1 | //[[Rcpp::depends(RcppArmadillo)]] 2 | #include 3 | #include 4 | 5 | using namespace Rcpp; 6 | using namespace std; 7 | //using namespace arma; //included for simple mat. multiplication 8 | 9 | //[[Rcpp::export]] 10 | arma::mat nnls_solver(arma::mat x, arma::mat A, int iterate = 400, float tolerance = 0.000001){ 11 | int A_nbands = A.n_cols; 12 | int b_nbands = x.n_cols; 13 | if( A_nbands != b_nbands) { // catch false inputs 14 | stop("A and b do not have equal column lengths."); 15 | } 16 | 17 | int A_nEM = A.n_rows; 18 | int b_npix = x.n_rows; 19 | arma::mat sol(b_npix, A_nEM+1); 20 | 21 | for(int i = 0; i < b_npix; i++){ // parallelization with clusterR possible with this framework? 
--> test 22 | 23 | //vec b_vpix = b.row(i); 24 | 25 | arma::vec xv(A_nEM), xstore(A_nEM); 26 | xv.fill(0); 27 | xstore.fill(-9999); 28 | arma::vec xdiff = xv - xstore; 29 | 30 | // switching to arma here for nice matrix multiplication 31 | arma::vec nab = -A * x.row(i).t(); // negative A * b 32 | arma::mat ata = A * A.t(); // A * transposed A 33 | 34 | double temporary; 35 | int j = 0; 36 | 37 | //execute solving loop 38 | while(j < iterate && max(abs(xdiff)) > tolerance) { 39 | xstore = xv; 40 | 41 | for (int k = 0; k < A_nEM; k++) { 42 | 43 | temporary = xv[k] - nab[k] / ata(k,k); 44 | if (temporary < 0){ 45 | temporary = 0; 46 | } 47 | 48 | if (temporary != xv[k]){ 49 | nab += ((temporary - xv[k]) * ata.row(k).t()); 50 | } 51 | 52 | xv[k] = temporary; 53 | } 54 | xdiff = xv-xstore; 55 | ++j; 56 | } 57 | 58 | //predict values 59 | arma::mat prob = xv.t(); 60 | arma::mat pred = prob * A; 61 | 62 | //calculate RMSE 63 | arma::mat ppdiff = pred.row(0) - x.row(i); 64 | float rmsem = mean(mean(pow(ppdiff, 2))); 65 | float rmse = sqrt(rmsem); 66 | 67 | arma::mat ret(1, (A_nEM+1)); 68 | 69 | for(int f = 0; f < A_nEM; f++) { 70 | ret(0,f) = prob(0,f); 71 | } 72 | 73 | //fill 74 | ret(0,A_nEM) = rmse; 75 | sol.row(i) = ret; //xv.t(); 76 | } 77 | return(sol); //mat 78 | } 79 | -------------------------------------------------------------------------------- /src/normImage.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace Rcpp; 3 | // [[Rcpp::export]] 4 | NumericMatrix normImageCpp(NumericMatrix& x, NumericVector& M, NumericVector& S){ 5 | int nc = x.ncol(); 6 | for(int i = 0; i < nc; ++i){ 7 | x(_,i) = (x(_,i) - M[i]) / S[i]; 8 | } 9 | return x; 10 | } 11 | -------------------------------------------------------------------------------- /src/oneHot.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace Rcpp; 3 | 4 | // @param x NumericVector. Data passed from rasterLayer 5 | // @param classes NumericVector. 
Classes to be one hot encoded 6 | // @param bg int background value 7 | // @param fg int foreground value 8 | // @param na_rm bool: if na_rm=TRUE, NAs will be set to background value 9 | // @keywords internal 10 | // [[Rcpp::export]] 11 | IntegerMatrix oneHotCpp(NumericVector & x, NumericVector & classes, int bg, int fg, bool na_rm) { 12 | const int nr=x.size(); 13 | const int nc=classes.size(); 14 | IntegerMatrix out(nr,nc); 15 | if(bg != 0) std::fill( out.begin(), out.end(), bg ) ; 16 | IntegerVector na_temp(nc,NA_INTEGER); 17 | 18 | for (int i = 0; i < nr; i++) { 19 | if(!na_rm && ISNAN(x(i))) { 20 | out(i,_) = na_temp; 21 | } else { 22 | for(int c = 0; c < nc; c++) { 23 | if (x(i) == classes(c)) { 24 | out(i, c) = fg; 25 | break ; 26 | } 27 | } 28 | } 29 | } 30 | 31 | return out; 32 | } 33 | -------------------------------------------------------------------------------- /src/predictKmeans.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace Rcpp; 3 | 4 | // @title Predict kmeans based on assignment to the nearest cluster center 5 | // @param x Matrix 6 | // @param centers Matrix: kmeans cluster centers 7 | // @param returnDistance boolean: return distance to cluster centers 8 | // @keywords internal 9 | // [[Rcpp::export]] 10 | NumericMatrix predKmeansCpp(NumericMatrix& x, NumericMatrix& centers, const bool returnDistance = false ){ 11 | int ncent = centers.nrow(); 12 | int nr = x.nrow(); 13 | NumericMatrix out = no_init(nr, 1); 14 | // when returning classes we don't need to store all distances 15 | // hence we can keep `dist` small, i.e. 1 row only 16 | NumericMatrix dist = no_init(((nr-1) * returnDistance) + 1, ncent); 17 | 18 | if (!returnDistance) { 19 | std::fill(out.begin(), out.end(), R_NaReal); 20 | } 21 | std::fill(dist.begin(), dist.end(), R_NaReal); 22 | 23 | for(int i = 0; i < nr; i++) { 24 | if(all(!Rcpp::is_na(x.row(i)))){ 25 | 26 | for(int c = 0; c < ncent; c++){ 27 | NumericVector d = centers(c,_) - x(i,_); 28 | dist(i * returnDistance, c) = sqrt(sum(d * d)); 29 | } 30 | 31 | if(!returnDistance) { 32 | out(i,0) = which_min(dist.row(i * returnDistance)) + 1.0; 33 | } 34 | } 35 | } 36 | 37 | if(returnDistance) { 38 | return dist; 39 | } else { 40 | return out; 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/predictMlc.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace Rcpp; 3 | 4 | // [[Rcpp::export]] 5 | arma::mat predictMlcCpp(NumericMatrix newdata, List model, int nclasses){ 6 | int ns = newdata.nrow(); 7 | arma::mat out(ns, nclasses + 1, arma::fill::zeros); 8 | 9 | for(int c = 0; c < nclasses; c++){ 10 | List classMod = model[c]; 11 | NumericVector m = classMod["m"]; 12 | for(int s = 0; s < ns; s++){ 13 | NumericVector xm(m.size()); 14 | for(int i = 0; i < m.size(); i++){ 15 | xm[i] = newdata(s,i) - m[i] ; 16 | } 17 | arma::mat dum = arma::rowvec(xm) * as(classMod["I"]) * arma::colvec(xm); 18 | double dummy = dum(0,0); 19 | double deter = classMod["D"]; 20 | out(s,c + 1) = deter - dummy; 21 | } 22 | } 23 | 24 | arma::uword index; 25 | for(int s = 0; s < ns; s++){ 26 | out.submat(s, 1, s, nclasses).max(index); 27 | out(s,0) = index + 1; 28 | } 29 | 30 | return out; 31 | } 32 | -------------------------------------------------------------------------------- /src/pwSimilarity.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 
using namespace Rcpp; 3 | // [[Rcpp::export]] 4 | NumericVector pwSimilarityCpp(NumericMatrix& img, NumericMatrix& ref, int method){ 5 | int nr = img.nrow(); 6 | NumericVector out(nr), a, b; 7 | 8 | if(method == 1){ 9 | // Euclidean distance 10 | for(int i = 0; i < nr; i++){ 11 | out[i] = -1.0 * sqrt(sum(pow((ref(i,_) - img(i,_)), 2))); 12 | } 13 | } else if(method == 2){ 14 | // Spectral angle 15 | for(int i = 0; i < nr; i++){ 16 | out[i] = acos(sum(img(i,_)*ref(i,_))/sqrt(sum(pow(img(i,_), 2))*sum(pow(ref(i,_), 2)))); 17 | } 18 | } else if(method == 3){ 19 | // Correlation 20 | for(int i = 0; i < nr; i++){ 21 | a = img(i,_) - mean(img(i,_)); 22 | b = ref(i,_) - mean(ref(i,_)); 23 | out[i] = sum(a*b)/sqrt(sum(pow(a,2))*sum(pow(b,2))); 24 | } 25 | } 26 | return out; 27 | } 28 | -------------------------------------------------------------------------------- /src/rescaleImage.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace Rcpp; 3 | // [[Rcpp::export]] 4 | NumericMatrix rescaleImageCpp(NumericMatrix x, NumericVector scal, NumericVector xmin, NumericVector ymin){ 5 | LogicalVector finite = is_finite(scal) & is_finite(xmin) & is_finite(ymin); 6 | for(int i = 0; i < scal.size(); i++){ 7 | // 8 | if(!finite[i]) { 9 | for(int r = 0; r < x.nrow(); r++) { 10 | x(r,i) = NA_REAL; 11 | } 12 | } else { 13 | x(_, i) = (x(_, i) - xmin[i]) * scal[i] + ymin[i]; 14 | } 15 | } 16 | return x; 17 | } 18 | -------------------------------------------------------------------------------- /src/sam.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace Rcpp; 3 | // [[Rcpp::export]] 4 | NumericMatrix specSimC(NumericMatrix& x, NumericMatrix& em) { 5 | int ns = x.nrow(); 6 | int nend = em.nrow(); 7 | NumericMatrix out(ns, nend); 8 | for(int j = 0; j < nend; ++j){ 9 | double normEM = sum(pow(em(j,_),2)); 10 | for(int i = 0; i < ns; ++i){ 11 | out(i,j) = acos(sum(x(i,_) * em(j,_)) / sqrt(sum(pow(x(i,_),2)) * normEM)) ; 12 | } 13 | } 14 | return out; 15 | } 16 | 17 | 18 | // Alternative Armadillo version. 
same speed as Rcpp only 19 | /* 20 | cppFunction(" 21 | arma::mat specSimCx(arma::mat& x, arma::mat& em) { 22 | int ns = x.n_rows; 23 | int nend = em.n_rows; 24 | arma::mat out(ns, nend); 25 | out.zeros(); 26 | arma::mat normEM = sum(pow(em,2),1); 27 | arma::mat normT = sum(pow(x,2),1); 28 | 29 | for(int j = 0; j < nend; ++j){ 30 | for(int i = 0; i < ns; ++i){ 31 | out(i,j) = acos(accu(x.row(i) % em.row(j)) / sqrt(normT(i,0) * normEM(j,0))) ; 32 | } 33 | } 34 | 35 | return out; 36 | } 37 | ", depends = "RcppArmadillo") 38 | */ 39 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | library(RStoolbox) 3 | 4 | Sys.setenv("R_TESTS" = "") ## needed to pass R CMD check: https://github.com/hadley/testthat/issues/144 5 | 6 | test_check("RStoolbox") 7 | -------------------------------------------------------------------------------- /tests/testthat/helper-testUtils.R: -------------------------------------------------------------------------------- 1 | .th_naCount <- function(x) { 2 | if (inherits(x, "SpatRaster")) { 3 | return(as.numeric(global(is.na(x), "sum"))) 4 | } 5 | t(global(is.na(x), sum)) 6 | } 7 | 8 | .th_minmax <- function(x, names = FALSE) { 9 | if (inherits(x, "SpatRaster")) { 10 | mm <- minmax(x) 11 | if(!names) dimnames(mm) <- NULL 12 | } else { 13 | mm <- rbind(min(values(x)), max(values(x))) 14 | if(names) colnames(mm) <- names(x) 15 | } 16 | return(mm) 17 | } 18 | -------------------------------------------------------------------------------- /tests/testthat/test-classifyQA.R: -------------------------------------------------------------------------------- 1 | context("classifyQA") 2 | 3 | set.seed(1) 4 | qa <- rast(ncol = 128, nrow=128, val = 1:2^14) 5 | # TODO: add real LS8 example data 6 | 7 | test_that("returned classes and number of layers", { 8 | ## QA classes 9 | expect_is(qacs <- classifyQA(img = qa), "SpatRaster") 10 | expect_equal(names(qacs), "QAclass") 11 | expect_true(all(unique(qacs)$QAclass %in% 1:5)) 12 | 13 | ## Single category 14 | expect_is(qacs <- classifyQA(img = qa, type = "cirrus"), "SpatRaster") 15 | expect_equal(unique(qacs)$QAclass , 3) 16 | 17 | ## Confidence levels 18 | ## All categories 19 | expect_is(qacs_conf <- classifyQA(img = qa, confLayers = TRUE), "list") 20 | expect_equal(names(qacs_conf), c("cloud", "cirrus", "snow", "water")) 21 | expect_true(all(sapply(qacs_conf, function(layer) all(values(layer) %in% c(NA, 1:3))))) 22 | 23 | ## Single category 24 | expect_is(qacs_conf <- classifyQA(img = qa, type = "water", confLayers = TRUE), "list") 25 | expect_equal(names(qacs_conf), "water") 26 | expect_true(all(sapply(qacs_conf, function(layer) all(values(layer) %in% c(NA, 1:3))))) 27 | 28 | }) 29 | -------------------------------------------------------------------------------- /tests/testthat/test-cloudMask.R: -------------------------------------------------------------------------------- 1 | context("cloudMask and cloudShadowMask" ) 2 | 3 | library(terra) 4 | 5 | test_that("cloud and shadow masking works", { 6 | expect_is(cldmsk <- cloudMask(lsat, blue = 1, tir = 6), "SpatRaster") 7 | expect_is(cldmsk_final <- cloudMask(cldmsk, threshold = 0.1, buffer = 5), "SpatRaster") 8 | expect_equivalent(names(cldmsk), c("CMASK", "NDTCI")) 9 | expect_equivalent(names(cldmsk_final), c("CMASK", "NDTCI")) 10 | expect_is(shadow <- cloudShadowMask(lsat, cldmsk_final, shiftEstimate = c(-16, -6)), "SpatRaster") 
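# Illustrative sketch, not part of the original test: assuming CMASK and the shadow layer
# are binary 1/NA masks (1 = flagged), flagged pixels could be blanked with terra::mask(), e.g.
# lsat_cloudfree <- mask(lsat, cldmsk_final[["CMASK"]], maskvalues = 1)
# lsat_cloudfree <- mask(lsat_cloudfree, shadow, maskvalues = 1)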
11 | expect_is(c(lsat, cldmsk_final, shadow), "SpatRaster", label = "img, cloud and shadow rasters do not fit to each other") 12 | }) 13 | 14 | 15 | 16 | ## TODO: How to unit test interactive components? -------------------------------------------------------------------------------- /tests/testthat/test-coregisterImages.R: -------------------------------------------------------------------------------- 1 | context("coregisterImages") 2 | 3 | loff <- shift(lsat, 30, 60) 4 | 5 | test_that("works and finds correct shift", { 6 | expect_is(lcor <- coregisterImages(loff, lsat, nSamples = 100, reportStats= TRUE), "list") 7 | expect_equivalent(lcor$bestShift, c(x=-30, y=-60)) 8 | expect_equivalent(ext(lcor$coregImg) , ext(lsat)) 9 | expect_equal(vapply(lcor, class, character(1)), c(MI="data.frame", jointHist="list", bestShift = "data.frame", coregImg = "SpatRaster")) 10 | }) 11 | 12 | -------------------------------------------------------------------------------- /tests/testthat/test-entropy.R: -------------------------------------------------------------------------------- 1 | context("Entropy") 2 | 3 | entro <- function(x){ 4 | p <- table(x)/length(x) 5 | -sum(p*log(p)) 6 | } 7 | 8 | test_that("entropyCpp is correct", 9 | expect_equal(rasterEntropy(rlogo), { 10 | x <- app(rlogo, entro) 11 | names(x)<-"entropy"; 12 | x 13 | }) 14 | ) 15 | 16 | 17 | -------------------------------------------------------------------------------- /tests/testthat/test-estimateHaze.R: -------------------------------------------------------------------------------- 1 | context("estimateHaze") 2 | 3 | mtlFile <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package="RStoolbox") 4 | 5 | test_that("all hazeBand specifications work", { 6 | hb <- list(single = 2, contiguous = c(1:3), noncontiguous = c(1,3), noncontiguous2 = c(2,4)) 7 | for(i in seq_along(hb)){ 8 | expect_is(hdn <- estimateHaze(lsat, hazeBands = hb[[i]], plot = FALSE), "numeric") 9 | expect_identical(length(hdn), length(hb[[i]])) 10 | expect_is(hdn2 <- estimateHaze(lsat, hazeBands = names(lsat)[hb[[i]]], plot = FALSE), "numeric") 11 | expect_identical(hdn, hdn2) 12 | expect_equal(names(hdn), names(lsat)[hb[[i]]]) 13 | expect_is(hdn <- estimateHaze(lsat, hazeBands = hb[[i]], maxSlope = FALSE, plot = FALSE), "numeric") 14 | } 15 | expect_error(estimateHaze(lsat), "specify the band") 16 | }) 17 | 18 | 19 | vals <- unlist(Map(rep, 1:20, c(1:20)^2)) 20 | tera <- rast(vals = vals, ncol = 1, nrow = length(vals)) 21 | vals[1]<-NA 22 | 23 | test_that("correct haze values are found (and deals with NA)", { 24 | expect_is(hdn <- estimateHaze(tera, hazeBands = 1, darkProp = .02, maxSlope = FALSE, plot = FALSE), "numeric") 25 | expect_equal(hdn, c(lyr.1 = 5)) 26 | expect_is(hdn <- estimateHaze(tera, hazeBands = 1, darkProp = .02, maxSlope = TRUE, plot = FALSE), "numeric") 27 | expect_equal(hdn, c(lyr.1 = 4)) 28 | }) -------------------------------------------------------------------------------- /tests/testthat/test-fCover.R: -------------------------------------------------------------------------------- 1 | context("fCover") 2 | 3 | set.seed(42) 4 | suppressPackageStartupMessages(library(terra)) 5 | 6 | lc <- unsuperClass(lsat, nSamples = 50, nClass=3)$map 7 | modis <- aggregate(lsat, 9) 8 | 9 | for(cl in 1:2) { 10 | if (!identical(Sys.getenv("NOT_CRAN"), "true") && cl == 2) next 11 | 12 | test_that(sprintf("works for %s classes(s)",cl), { 13 | expect_is(fc <- fCover( 14 | classImage = lc , 15 | predImage = modis, 16 | classes=1:cl, 17 | model="lm", 
18 | trControl = trainControl(method = "cv", number = 2), 19 | nSample = 30, 20 | tuneLength=1 21 | ), c("fCover", "RStoolbox")) 22 | expect_equal(nlyr(fc$map), cl) 23 | }) 24 | 25 | } 26 | 27 | test_that("errors and warnings are thrown",{ 28 | expect_error(fCover( 29 | classImage = lc , 30 | predImage = modis, 31 | classes = 4, 32 | model="rf", 33 | nSample = 50, 34 | tuneLength=1 35 | ),"One or more classes are not represented in the sampled pixels") 36 | }) -------------------------------------------------------------------------------- /tests/testthat/test-gainOffsetRescale.R: -------------------------------------------------------------------------------- 1 | context("gainOffsetRescaleCpp") 2 | suppressPackageStartupMessages(library(terra)) 3 | r <- rast(vals = 1, ncol = 2, nrow = 2) 4 | r <- c(r,r*2) 5 | r[[1]][1:4] <- c(NA, -100, 100, 0) 6 | gain <- c(.1,.2) 7 | offset <- c(.1,.4) 8 | clamp <- list(c(TRUE,FALSE), c(FALSE,TRUE), c(TRUE, TRUE), c(FALSE, FALSE)) 9 | expected <-list(c(NA, .8, 0, .8, 10.1, .8, 0.1, .8), 10 | c(NA, .8, -9.9, .8, 1, .8, 0.1, .8), 11 | c(NA, .8, 0, .8, 1, .8, 0.1, .8), 12 | c(NA, .8, -9.9, .8, 10.1, .8, 0.1, .8) 13 | ) 14 | 15 | test_that("NA, clamping, general",{ 16 | for(i in seq_along(clamp)){ 17 | out <- app(r, function(x) gainOffsetRescale(x, gain, offset, clamp[[i]])) 18 | expect_equal(as.vector(out[]), expected[[i]]) 19 | } 20 | }) -------------------------------------------------------------------------------- /tests/testthat/test-getMeta.R: -------------------------------------------------------------------------------- 1 | context("getMeta") 2 | mtlFile <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package="RStoolbox") 3 | meta <- readMeta(mtlFile) 4 | lsat_t <- stackMeta(mtlFile) 5 | 6 | test_that("Get integer scale factors",{ 7 | ## Vectors 8 | expect_is(gm <- getMeta(lsat_t, metaData = meta, what = "SCALE_FACTOR"), "numeric") 9 | expect_equal(length(gm), nlyr(lsat_t)) 10 | expect_is(gm <- getMeta(lsat_t, metaData = meta, what = "FILES"), "character") 11 | expect_is(gm <- getMeta(lsat_t, metaData = meta, what = "QUANTITY"), "character") 12 | expect_true(all(gm == "dn")) 13 | expect_is(gm <- getMeta(lsat_t, metaData = meta, what = "CATEGORY"), "character") 14 | expect_true(all(gm == "image")) 15 | 16 | ## Data.frames 17 | expect_is(gm <- getMeta("B6_dn", metaData = meta, what = "CALBT"), "data.frame") 18 | expect_is(gm <- getMeta(c("B1_dn", "B2_dn"), metaData = meta, what = "CALRAD"), "data.frame") 19 | ## Ordered? 
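## Illustration only (column names assumed, not asserted by the original test): CALRAD holds
## the per-band linear calibration, so DN converts to radiance as radiance = DN * gain + offset,
## roughly: rad_b1 <- lsat_t[["B1_dn"]] * gm["B1_dn", "gain"] + gm["B1_dn", "offset"]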
20 | expect_is(gm <- getMeta(lsat_t[[5:1]], metaData = meta, what = "CALRAD"), "data.frame") 21 | expect_equal(rownames(gm), names(lsat_t[[5:1]])) 22 | ## NA 23 | expect_error(gm <- getMeta(lsat_t, metaData = meta, what = "CALREF"), "not populated") 24 | 25 | }) 26 | 27 | -------------------------------------------------------------------------------- /tests/testthat/test-getValidation.R: -------------------------------------------------------------------------------- 1 | context("getValidation") 2 | suppressPackageStartupMessages(library("randomForest")) 3 | 4 | train <- readRDS(system.file("external/trainingPoints_rlogo.rds", package="RStoolbox")) 5 | train$num <- rnorm(nrow(train)) 6 | class <- superClass(rlogo, trainData = train, responseCol = "class", tuneLength = 1, trainPartition = 0.7, predict = FALSE) 7 | reg <- superClass(rlogo, trainData = train, responseCol = "num", tuneLength = 1, trainPartition = 0.7, predict = FALSE, mode = "regression") 8 | 9 | 10 | test_that("getValidation returns correct objects", { 11 | for(f in c("testset", "cv")){ 12 | expect_is(getValidation(class, from = f), "data.frame") 13 | expect_equal(nrow(getValidation(class, from = f)), 1L) 14 | expect_is(getValidation(class, metrics = "classwise", from = f), "data.frame") 15 | expect_equal(nrow(getValidation(class, metrics = "classwise", from = f)), 3) 16 | expect_is(getValidation(class, metrics = "confmat", from = f), "table") 17 | expect_is(getValidation(class, metrics = "caret", from = f), "confusionMatrix") 18 | } 19 | for(f in c("testset", "cv")){ 20 | expect_is(getValidation(reg, from = f), "data.frame") 21 | expect_equal(nrow(getValidation(reg, from = f)), 1L) 22 | } 23 | } 24 | ) -------------------------------------------------------------------------------- /tests/testthat/test-histMatch.R: -------------------------------------------------------------------------------- 1 | context("histMatch") 2 | 3 | library(terra) 4 | singLay <- histMatch(lsat[[1]], lsat[[2]]) 5 | multLay <- histMatch(lsat, sqrt(lsat)) 6 | 7 | test_that("histMatch RasterLayers and Stack/Bricks", { 8 | expect_is(singLay, "SpatRaster") 9 | expect_is(multLay, "SpatRaster") 10 | expect_equal(names(multLay), names(lsat)) 11 | expect_equal(names(singLay), names(lsat)[1]) 12 | }) 13 | -------------------------------------------------------------------------------- /tests/testthat/test-internalFunctions.R: -------------------------------------------------------------------------------- 1 | context("internal functions") 2 | 3 | terra <- rast(ncol = 5, nrow=5, vals = 1) 4 | 5 | sf <- readRDS(system.file("external/trainingPolygons_lsat.rds", package="RStoolbox")) 6 | 7 | test_that("Loaded as sf", { 8 | expect_is(sf, c("sf", "data.frame")) 9 | }) 10 | 11 | 12 | test_that(".canProcInMem says no", { 13 | expect_true(.canProcInMem(terra,1)) 14 | expect_false(.canProcInMem(terra,1e20)) 15 | }) -------------------------------------------------------------------------------- /tests/testthat/test-mesma.R: -------------------------------------------------------------------------------- 1 | context("multiple endmember spectral mixture analysis") 2 | 3 | # sets of endmembers 4 | em_sma <- as.matrix(data.frame(lsat[c(5294, 47916)])) 5 | rownames(em_sma) <- c("forest", "water") 6 | 7 | em_mesma_2 <- rbind( 8 | data.frame(lsat[c(4155, 17018, 53134, 69487, 83704)], class = "forest"), 9 | data.frame(lsat[c(22742, 25946, 38617, 59632, 67313)], class = "water") 10 | ) 11 | em_mesma_3 <- rbind( 12 | data.frame(lsat[c(4155, 17018, 53134, 69487, 83704)], class 
= "forest"), 13 | data.frame(lsat[c(22742, 25946, 38617, 59632, 67313)], class = "water"), 14 | data.frame(lsat[c(4330, 1762, 1278, 1357, 17414)], class = "shortgrown") 15 | ) 16 | 17 | props <- matrix(c(seq(0,1,.1), seq(1,0,-.1)),ncol=2) 18 | mat <- props %*% em_sma 19 | 20 | test_that("nnls_solver returns correct solutions",{ 21 | expect_equal(props, round(nnls_solver(x = mat, A = em_sma)[,c(1,2)], digits = 2)) 22 | } 23 | ) 24 | 25 | test_that("solver output class", { 26 | expect_is(solved <- nnls_solver(x = mat, A = em_sma)[,c(1,2)], "matrix") 27 | }) 28 | 29 | test_that("sma call using NNLS", { 30 | expect_is(solved <- mesma(lsat, em_sma, method = "NNLS"), "SpatRaster") 31 | expect_is(solved <- mesma(lsat, data.frame(em_sma), method = "NNLS"), "SpatRaster") 32 | }) 33 | 34 | test_that("method error", { 35 | expect_error(mesma(lsat, em_sma, method = "no-valid-method")) 36 | }) 37 | 38 | lsat_t <- lsat 39 | values(lsat_t)[c(1, 10, 100, 400, 200), c(3, 4, 5, 2, 7)] <- NA 40 | 41 | test_that("img NA handling", { 42 | expect_is(solved <- mesma(lsat_t, em_sma), "SpatRaster") 43 | }) 44 | 45 | emNA <- em_sma 46 | emNA[1,6] <- NA 47 | 48 | test_that("img NA handling", { 49 | expect_error(mesma(lsat_t, emNA)) 50 | }) 51 | 52 | test_that("mesma two classes", { 53 | probs <- expect_is(mesma(lsat, em_mesma_2), "SpatRaster") 54 | expect_equal(nlyr(probs), 3) 55 | expect_equal(names(probs), c("forest", "water", "RMSE")) 56 | expect_equal(sapply(c(1000, 2000, 3000), function(x) sum(probs[[1:2]][x])), c(1,1,1)) 57 | }) 58 | 59 | test_that("mesma n_models", { 60 | expect_warning(mesma(lsat, em_mesma_2, n_models = 10)) 61 | }) 62 | 63 | test_that("mesma sum_to_one", { 64 | probs <- expect_is(mesma(lsat, em_mesma_2, sum_to_one = F), "SpatRaster") 65 | expect_equal(round(sapply(c(1000, 2000, 3000), function(x) sum(probs[[1:2]][x])), 5), c(0.99841, 1.09785, 1.00944)) 66 | }) 67 | 68 | test_that("mesma three classes", { 69 | probs <- expect_is(mesma(lsat, em_mesma_3), "SpatRaster") 70 | expect_equal(nlyr(probs), 4) 71 | expect_equal(names(probs), c("forest", "water", "shortgrown", "RMSE")) 72 | }) 73 | 74 | -------------------------------------------------------------------------------- /tests/testthat/test-mlc.R: -------------------------------------------------------------------------------- 1 | context("mlc") 2 | suppressPackageStartupMessages(library(caret)) 3 | 4 | set.seed(1) 5 | mat <- matrix(rnorm(300), ncol = 3, nrow = 100) 6 | colnames(mat) <- letters[1:3] 7 | y <- sample(factor(c("a", "b")), 100, replace = TRUE) 8 | 9 | 10 | test_that("fit mlc",{ 11 | expect_is( mr <- mlc(mat,y), "list") 12 | expect_equal(names(mr), c("a", "b", "levels")) 13 | expect_equal(vapply(mr$a, length, numeric(1)), c(m=3,D=1,I=9)) 14 | }) 15 | 16 | test_that("predict mlc",{ 17 | mod <- train( mat, y, method = mlcCaret, trControl = trainControl(method = "none")) 18 | expect_is(pred <- predict.mlc(mod, mat), "factor") 19 | expect_equal(length(pred), nrow(mat)) 20 | expect_equal(levels(pred), c("a", "b")) 21 | expect_is(prob <- predict.mlc.prob(mod, mat), "matrix") 22 | expect_equal(nrow(prob), nrow(mat)) 23 | expect_equal(ncol(prob), 2) 24 | }) 25 | 26 | -------------------------------------------------------------------------------- /tests/testthat/test-multicore.R: -------------------------------------------------------------------------------- 1 | context("Multicore/Singlecore") 2 | library(terra) 3 | 4 | 5 | test_that(".paraRasterFun is equal to predict, calc, overlay, Both single and multicore.", { 6 | skip_on_cran() 
7 | for (clusterType in c('PSOCK', 'FORK')){ 8 | if(Sys.info()[["sysname"]] != "Linux" && clusterType == "FORK") next 9 | r <- rast(ncol=10,nrow=10, vals=1:100) 10 | r <- c(r, r^2) 11 | names(r) <- c("red", "nir") 12 | m <- lm(red~nir, data = as.data.frame(r)) 13 | f <- function(a,b){a-b} 14 | 15 | for(i in 1:2){ 16 | expect_equal(.paraRasterFun(r, rasterFun = terra::predict, args = list(model = m)), terra::predict(r, m), label = paste("predict:", "singlecore")) 17 | expect_equal(.paraRasterFun(r, rasterFun = app, args = list(fun = sum)), app(r, fun = sum), label = paste("calc:", "singlecore")) 18 | } 19 | } 20 | }) 21 | 22 | 23 | test_that(".parXapply family returns identical results to ?pply family. Both single and multicore.", { 24 | skip_on_cran() 25 | for (clusterType in c('PSOCK', 'FORK')){ 26 | if(Sys.info()[["sysname"]] != "Linux" && clusterType == "FORK") next 27 | lis <- lapply(1:5, rnorm) 28 | mat <- matrix(1:100,10,10) 29 | for(i in 1:2){ 30 | expect_equal(.parXapply(X = lis, XFUN ="lapply", FUN=sum, envir = environment()), lapply(lis, sum), label = paste("lapply:", "singlecore")) 31 | expect_equal(.parXapply(X = lis, XFUN ="sapply", FUN=sum, envir = environment()), sapply(lis, sum), label = paste("sapply:", "singlecore")) 32 | expect_equal(.parXapply(X = mat, XFUN ="apply", 1, FUN=sum, envir = environment()), apply(mat, MAR =1, sum), label = paste("aapply:", "singlecore")) 33 | } 34 | } 35 | }) 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /tests/testthat/test-normImage.R: -------------------------------------------------------------------------------- 1 | context("normImage") 2 | 3 | library(terra) 4 | lsat_t <- lsat 5 | 6 | for(mem in c(TRUE, FALSE)){ 7 | test_that("normImage for single or multiple layers", { 8 | ## Multiple layers 9 | expect_is(nlsat <- normImage(lsat, norm = TRUE), "SpatRaster") 10 | expect_true(all(round(colMeans(nlsat[]), 5)==0)) 11 | 12 | ## Single layer 13 | expect_is(nlsat <- normImage(lsat[[1]], norm = TRUE), "SpatRaster") 14 | expect_equal(round(mean(nlsat[]), 5),0) 15 | } 16 | ) 17 | } 18 | 19 | test_that("terra inputs work", { 20 | expect_is(nlsat <- normImage(lsat, norm = TRUE), "SpatRaster") 21 | expect_true(all(round(colMeans(nlsat[]), 5)==0)) 22 | }) 23 | 24 | lsat_t[1, 1] <- NA 25 | lsat_t[[2]][2] <- NA 26 | test_that("normImage with NAs",{ 27 | expect_is(nlsat <- normImage(lsat_t, norm = TRUE), "SpatRaster") 28 | expect_true(all(is.na(nlsat[1]))) 29 | expect_equal(as.vector(is.na(nlsat[2])), c(F,T,rep(F,5))) 30 | }) 31 | 32 | 33 | 34 | test_that("normImage.cpp works", { 35 | m <- as.matrix(lsat_t[1:5]) 36 | cm <- colMeans(m, na.rm = TRUE) 37 | cs <- apply(m, 2, sd, na.rm = TRUE) 38 | 39 | expect_is(cmat <- normImageCpp(m, cm, cs), "matrix") 40 | expect_true(all(round(colMeans(cmat, na.rm = T), 10)==0)) 41 | expect_true(all(round(apply(cmat, 2, sd, na.rm = T), 10)==1)) 42 | expect_equal(sum(is.na(cmat[1,])), 7) 43 | expect_equal(sum(is.na(cmat[2,])), 1) 44 | expect_equivalent(normImageCpp(m, cm, cs), scale(m, T, T)) 45 | 46 | }) 47 | -------------------------------------------------------------------------------- /tests/testthat/test-oneHotEncode.R: -------------------------------------------------------------------------------- 1 | context("oneHotEncode") 2 | library(terra) 3 | r <- rast(vals = c(1,2,0,1,NA,-1), ncol = 2, nrow = 3) 4 | 5 | result_with_na <- structure(c(1L, 0L, 0L, 1L, NA, 0L, 0L, 1L, 0L, 0L, NA, 0L), .Dim = c(6L, 2L), .Dimnames = list(NULL, c("c_1", "c_2"))) 6 | 
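# Illustration (not part of the original tests): the expected matrices are simply per-class
# indicator columns of the cell values; the na.rm = FALSE case can be reproduced in base R:
v <- c(1, 2, 0, 1, NA, -1)
sapply(c(1, 2), function(cl) as.integer(v == cl))  # matches result_with_na up to column names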
result_without_na <- result_without_na_bgNA <- structure(c(1L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L), .Dim = c(6L, 2L), .Dimnames = list(NULL, c("c_1", "c_2"))) 7 | result_without_na_bgNA[result_without_na_bgNA == 0] <- NA 8 | 9 | test_that("oneHotEncode raster input", { 10 | expect_is(oh <- oneHotEncode(r, classes = c(1,2), na.rm = TRUE), "SpatRaster", info = "check class is SpatRaster") 11 | expect_equal(nlyr(oh), 2, info = "check number of layers") 12 | expect_equal(oh[], result_without_na, info = "check return values") 13 | expect_equal(oneHotEncode(r[], classes = c(1,2), na.rm = FALSE), result_with_na, info = "with na.rm=TRUE") 14 | expect_equal(as.numeric(oneHotEncode(r[], classes = 1, na.rm = TRUE)), result_without_na[,1], info = "for one class only") 15 | expect_equal(oneHotEncode(r, 1:2, background = NA, na.rm = TRUE)[], result_without_na_bgNA) 16 | expect_equal(oneHotEncode(r, 1:2, foreground = 7, na.rm = TRUE)[], result_without_na*7) 17 | }) 18 | 19 | test_that("oneHotEncode vector input", { 20 | expect_is(oh <- oneHotEncode(r[], classes = c(1,2), na.rm = TRUE), "matrix") 21 | expect_equal(oneHotEncode(r[], classes = c(1,2), na.rm = FALSE), result_with_na) 22 | expect_equal(oneHotEncode(r[], classes = 1, na.rm = TRUE), result_without_na[,1,drop = FALSE]) 23 | }) 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /tests/testthat/test-panSharpen.R: -------------------------------------------------------------------------------- 1 | context("panSharpen") 2 | 3 | library(terra) 4 | 5 | test_that("panSharpen methods", { 6 | suppressWarnings({ 7 | agg <- aggregate(lsat, 10) 8 | pan <- sum(lsat[[1:3]]) 9 | meth <- c("brovey", "ihs", "pca") 10 | panList <- lapply(meth, function(m) panSharpen(img = agg, pan = pan, r = 3, g = 2, b = 1, method = m)) 11 | nlayers <- c(3,3,7) 12 | names(nlayers) <- names(panList) <- meth 13 | 14 | for(m in meth) expect_is(panList[[m]], "SpatRaster") 15 | for(m in meth) expect_equal(names(panList[[m]]), paste0(names(agg)[1:nlayers[m]], "_pan"), info = m) # tests inmplicitly for correct number of layers 16 | for(m in meth) expect_equal(res(panList[[m]]), res(pan), info = m) 17 | }) 18 | }) -------------------------------------------------------------------------------- /tests/testthat/test-pifMatch.R: -------------------------------------------------------------------------------- 1 | context("pifMatch") 2 | library(terra) 3 | 4 | lsat_b <- log(lsat) 5 | 6 | for(m in c("cor", "sam", "ed")) { 7 | test_that("pifMatch return classes", { 8 | expect_is(lb <- pifMatch(lsat_b, lsat, method = m, returnPifMap = TRUE, returnSimMap = TRUE, returnModels = TRUE), "list", info = sprintf("method=%s", m)) 9 | expect_equal(names(lb), c("img", "simMap", "pifMap", "models")) 10 | expect_is(lb$models$B1_dn, "lm") 11 | expect_true(all(vapply(lb[2:3],inherits, logical(1), "SpatRaster"))) 12 | expect_is(lb$img, "list") 13 | 14 | }) 15 | } 16 | 17 | test_that("error messages", { 18 | expect_error(lb <- pifMatch(lsat_b, lsat, method = "ok", returnPifMap = TRUE, returnSimMap = TRUE, returnModels = TRUE), "method must be one of") 19 | }) 20 | -------------------------------------------------------------------------------- /tests/testthat/test-rasterCVA.R: -------------------------------------------------------------------------------- 1 | context("rasterCVA") 2 | 3 | r <- rast(val = 0, ncol = 2, nrow = 10) 4 | r1 <- r2 <- c(r, r) 5 | s <- 4 6 | x <- c(0,s,s,s,0,-s,-s,-s, NA, 0) 7 | y <- c(s,s,0,-s,-s,-s,0,s, 0, NA) 8 | r2[[1]][] <- 
c(x, x + sign(x)*2) 9 | r2[[2]][] <- c(y, y + sign(y)*2) 10 | 11 | expectedDf <- as.matrix(data.frame(angle = c(0,45,90,135,180,225,270,315,NA,NA), 12 | magnitude = c(rep(c(s, sqrt(2*s^2)), 4), NA,NA, rep(c((2+s), sqrt(2*(2+s)^2)), 4), NA,NA))) 13 | 14 | test_that("angles and magnitudes are correct (incl. NA treatment)", { 15 | skip_on_cran() 16 | expect_is(cva <- rasterCVA(r1,r2, tmf = 0), "SpatRaster") 17 | expect_equal(cva[], expectedDf) 18 | }) 19 | 20 | 21 | test_that("angles and magnitudes are correct (incl. NA treatment)", { 22 | skip_on_cran() 23 | expect_is(cva <- rasterCVA(r1,r2), "SpatRaster") 24 | expect_is(cva <- rasterCVA(r1,r2, nct = 0.7), "SpatRaster") 25 | expect_is(cva <- rasterCVA(r1,r2, tmf = 0.7), "SpatRaster") 26 | tmpfile <- tempfile(fileext=".tif") 27 | expect_is(cva <- rasterCVA(r1,r2, nct = 0.7, filename=tmpfile), "SpatRaster") 28 | expect_true(file.remove(tmpfile)) 29 | 30 | }) -------------------------------------------------------------------------------- /tests/testthat/test-rasterPCA.R: -------------------------------------------------------------------------------- 1 | context("rasterPCA") 2 | 3 | library(terra) 4 | 5 | lsat_t <- crop(lsat, ext(lsat)*.2) 6 | ld <- as.data.frame(lsat_t) 7 | 8 | for(spc in c(FALSE, TRUE)) { 9 | test_that(paste("stats::princomp(covMat(raster)) == stats::princomp(sample) with spca=",spc), { 10 | expect_s3_class(r <- rasterPCA(lsat_t, nSamples = NULL, spca = spc), c("RStoolbox", "rasterPCA")) 11 | expect_s3_class(rs <- rasterPCA(lsat_t, nSamples = ncell(lsat_t), spca = spc), c("RStoolbox", "rasterPCA")) 12 | expect_equal(abs(unclass(rs$model$loadings)), abs(unclass(r$model$loadings))) 13 | expect_equal(abs(r$map[]), abs(rs$map[]), tolerance = 1e-03) 14 | }) 15 | 16 | } 17 | 18 | lsat_t[[1]][100:200] <- NA 19 | lsat_t[400:500] <- NA 20 | G <- expand.grid(smpl=c(TRUE,FALSE), spc = c(TRUE, FALSE)) 21 | for(i in seq_len(nrow(G))){ 22 | spc <- G[i,"spc"] 23 | smpl <- if(G[i,"smpl"]) ncell(lsat_t) else NULL 24 | test_that(paste("rasterPCA NA handling; spca =",spc, "; nSamples =", deparse(smpl)), { 25 | suppressWarnings({ 26 | expect_s3_class(r <- rasterPCA(lsat_t, nSamples = smpl, spca = spc), c("RStoolbox", "rasterPCA")) 27 | expect_true(all(is.na(r$map[c(100:200,400:500)]))) 28 | expect_false(any(is.na(r$map[1:99]))) 29 | }) 30 | }) 31 | } -------------------------------------------------------------------------------- /tests/testthat/test-readEE.R: -------------------------------------------------------------------------------- 1 | context("readEE") 2 | 3 | exfile <- system.file("external/EarthExplorer_LS8.txt", package = "RStoolbox") 4 | files <- list.files("testdata/earthexplorer", full = TRUE) 5 | 6 | test_that("returned classes", { 7 | skip_on_cran() 8 | expect_is(ee <- readEE(files[1]), "data.frame") 9 | expect_is(ee <- readEE(files), "data.frame") 10 | expect_true(all(is.na(ee$Download.Link))) 11 | expect_is(ee <- readEE(exfile), "data.frame") 12 | expect_true(all(is.na(ee$Browse.Link))) 13 | }) 14 | -------------------------------------------------------------------------------- /tests/testthat/test-readMeta.R: -------------------------------------------------------------------------------- 1 | context("readMeta") 2 | mfil <- list.files("testdata/metadata", full = TRUE) 3 | for(f in mfil) { 4 | test_that(paste("readMeta and summary(readMeta)", basename(f)), { 5 | expect_s3_class(m <- readMeta(f), c("RStoolbox", "ImageMetaData")) 6 | expect_s3_class(m$ACQUISITION_DATE, c("POSIXlt", "POSIXt")) 7 | 
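# Note (illustration only): the pattern below accepts band names such as "B1_dn", "B10_toa"
# or "B5_sre" and rejects anything else, e.g.
# grepl("^B1?[0-9]_(dn|toa|sre){1}$", c("B1_dn", "B10_toa", "B5_sre", "B1_rad"))
# #> TRUE TRUE TRUE FALSE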
expect_true(all(grepl("^B1?[0-9]_(dn|toa|sre){1}$", m$DATA$BANDS[1:4]))) 8 | expect_output( summary(m), "Scene:") 9 | expect_is(m <- readMeta(f, raw = TRUE), "list") 10 | expect_gte(length(names(m)), 2) 11 | }) 12 | } 13 | -------------------------------------------------------------------------------- /tests/testthat/test-rescaleImage.R: -------------------------------------------------------------------------------- 1 | context("rescaleImage") 2 | library(terra) 3 | lsat_t <- lsat 4 | lsat2 <- lsat_t - 1000 5 | lsat2r <- rescaleImage(lsat2, lsat_t) 6 | lsat2u <- rescaleImage(lsat2, ymin = 0.5, ymax = 1.6) 7 | 8 | mm <- matrix(c(0.5, 1.6), ncol= nlyr(lsat_t), nrow = 2) 9 | colnames(mm) <- names(lsat_t) 10 | rownames(mm) <- c("min", "max") 11 | 12 | test_that("rescales to proper limits", { 13 | skip_on_cran() 14 | expect_equal(lsat_t[], lsat2r[], values = TRUE) 15 | expect_equal(minmax(lsat2u), mm) 16 | expect_equal(lsat2u[[1]][], rescaleImage(lsat_t[[1]], ymin = 0.5, ymax = 1.6)[], values = TRUE) 17 | }) 18 | 19 | 20 | test_that("deals with missing values and single valued layers and returns NAs", { 21 | skip_on_cran() 22 | lsat2[[1]][] <- 1 23 | suppressWarnings(lsat2[[2]][] <- NA) 24 | suppressWarnings(lsat2[[3]][] <- Inf) 25 | lsat2[[4]][,1:100] <- NA 26 | lsat2[[5]][,1:100] <- Inf 27 | expect_warning(lsaResc <- rescaleImage(lsat2, ymin = 0, ymax = 1), "no value range.*B1_dn*") 28 | expect_equal(.th_naCount(lsaResc[[1]]), ncell(lsat_t)) # single values 29 | expect_equal(.th_naCount(lsaResc[[2]]), ncell(lsat_t)) # NAs 30 | expect_equal(.th_naCount(lsaResc[[3]]), ncell(lsat_t)) # Infinites 31 | expect_equal(.th_minmax(lsaResc[[4]]), matrix(0:1.,ncol=1)) ## Ignores NAs 32 | expect_false(any(!is.na(lsaResc[[4]][,1:100]))) 33 | expect_equal(.th_naCount(lsaResc[[5]]), ncell(lsat_t)) # Partial Infinites -> NA 34 | }) 35 | -------------------------------------------------------------------------------- /tests/testthat/test-rsOpts.R: -------------------------------------------------------------------------------- 1 | context("rsOpts & .vMessage") 2 | 3 | test_that("setting global options",{ 4 | expect_false(options("RStoolbox.verbose")[[1]]) 5 | expect_silent(.vMessage("MIC CHECK 12")) 6 | expect_silent(rsOpts(verbose = TRUE)) 7 | expect_true(options("RStoolbox.verbose")[[1]]) 8 | expect_message(.vMessage("MIC CHECK 12"), "MIC CHECK 12") 9 | expect_silent(rsOpts(verbose = FALSE)) 10 | }) 11 | 12 | -------------------------------------------------------------------------------- /tests/testthat/test-sam.R: -------------------------------------------------------------------------------- 1 | context("spectral angle mapper") 2 | 3 | lsat_t <- lsat 4 | lsat_t[1] <- NA 5 | lsat_t[2, 1] <- NA 6 | pts <- data.frame(x = c(624720, 627480), y = c(-414690, -411090)) 7 | endmembers <- extract(lsat_t, pts) 8 | rownames(endmembers) <- c("water", "vegetation") 9 | 10 | test_that("returns correct classes and deals with NA",{ 11 | expect_is(ls <- sam(lsat_t, endmembers, angles = TRUE), "SpatRaster") 12 | expect_equal(names(ls), c("water_sa", "vegetation_sa")) 13 | expect_true(all(is.na(ls[1]))) 14 | expect_true(all(is.na(ls[1,1]))) 15 | expect_is(ls <- sam(lsat_t, endmembers, angles = FALSE), "SpatRaster") 16 | expect_equal(names(ls), "class") 17 | expect_true(is.na(ls[1])) 18 | expect_true(is.na(ls[2,1])) 19 | 20 | } 21 | ) 22 | 23 | sem_mat <- endmembers[1,,drop=FALSE] 24 | sem_vec <- endmembers[1,] 25 | sem_df <- as.data.frame(endmembers) 26 | 27 | test_that("endmember class", { 28 | expect_is(ls <- 
sam(lsat_t, sem_mat, angles = TRUE), "SpatRaster") 29 | expect_equal(nlyr(ls), 1) 30 | expect_is(ls <- sam(lsat_t, sem_vec, angles = TRUE), "SpatRaster") 31 | expect_is(ls <- sam(lsat_t, sem_df, angles = TRUE), "SpatRaster") 32 | expect_equal(nlyr(ls), 2) 33 | expect_is(ls <- sam(lsat_t, sem_df, angles = FALSE), "SpatRaster") 34 | expect_error(ls <- sam(lsat_t, sem_vec, angles = FALSE), "only one class") 35 | }) 36 | -------------------------------------------------------------------------------- /tests/testthat/test-saveReadRSTBX.R: -------------------------------------------------------------------------------- 1 | context("save and read RSTBX objects") 2 | library(terra) 3 | 4 | ## Create RSTBX object 5 | train <- readRDS(system.file("external/trainingPoints_rlogo.rds", package="RStoolbox")) 6 | sc <- superClass(rlogo, train, tuneLength = 1, resp="class") 7 | 8 | ## Save and re-import 9 | outbase <- paste0(tempdir(),"/test-RSTOOLBOX-sc") 10 | saveRSTBX(sc, outbase , overwrite = TRUE) 11 | sc_re <- readRSTBX(paste0(outbase, ".rds")) 12 | womap <- setdiff(names(sc), "map") 13 | 14 | test_that("export and import works",{ 15 | expect_is(sc_re, c("RStoolbox", "superClass")) 16 | expect_equal(sc[womap], sc_re[womap]) 17 | expect_equal(values(sc_re$map), values(sc$map)) 18 | }) -------------------------------------------------------------------------------- /tests/testthat/test-sli.R: -------------------------------------------------------------------------------- 1 | context("readSLI & writeSLI") 2 | sliFile <- system.file("external/vegSpec.sli", package="RStoolbox") 3 | sliTmpFile <- paste0(tempdir(),"/spectralLibraryFile", Sys.getpid(),".sli") 4 | 5 | test_that("read and write are compatible", { 6 | expect_is(sli <- readSLI(sliFile), "data.frame") 7 | 8 | expect_equal(names(sli), c("wavelength", "veg_stressed", "veg_vital")) 9 | expect_equal(nrow(sli), 2151) 10 | 11 | for(mode in c("bin", "ASCII")) { 12 | for(endian in c("little", "big")) { 13 | if(mode=="ASCII" && endian=="big") next 14 | expect_silent(writeSLI(sli, sliTmpFile, wavl.units = "Nanometers", mode = mode, endian = endian)) 15 | 16 | sliR <- readSLI(sliTmpFile) 17 | expect_equal(names(sli), c("wavelength", "veg_stressed", "veg_vital")) 18 | expect_equal(nrow(sli), 2151) 19 | expect_equal(sli, sliR) 20 | 21 | if(mode=="bin") { 22 | file.rename(paste0(sliTmpFile, ".hdr"), gsub(".sli", ".hdr", sliTmpFile)) 23 | expect_is(readSLI(sliTmpFile) , "data.frame") 24 | } 25 | 26 | file.remove(list.files(tempdir(), basename(sliTmpFile), full = TRUE)) 27 | } 28 | } 29 | }) 30 | 31 | 32 | test_that("spectra labels are parsed correctly", { 33 | skip_on_cran() 34 | for(s in letters[1:4]) { 35 | expect_is(sli <- readSLI(paste0("testdata/sli/", s, ".sli")), "data.frame", info = paste0("failed: ", s, ".sli")) 36 | expect_equal(colnames(sli), c("wavelength", "spectrum_a", "spectrum_b")) 37 | expect_equal(unlist(sli[1,]), c(wavelength = 350, spectrum_a = 0.008958003153785, spectrum_b = 0.00883699393591312 )) 38 | } 39 | }) 40 | -------------------------------------------------------------------------------- /tests/testthat/test-spectralIndices.R: -------------------------------------------------------------------------------- 1 | context("spectralIndices") 2 | 3 | library(terra) 4 | 5 | ## Create test data-sets 6 | vals <- c(-1, 0, 0.5, 1, 2, NA) 7 | vals <- expand.grid(vals,vals) 8 | r <- ml <- rast(ncol= 6, nrow = 6) 9 | r[] <- vals[,1] 10 | r <- c(r,r) 11 | r[[2]]<-vals[,2] 12 | names(r) <- c("L1", "L2") 13 | ml[] <- 1 14 | ml[,2] <- 10 15 | 
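# ml is the mask layer used by the maskLayer/maskValue tests further down: column 1 keeps
# the value 1, column 2 gets 10 (masked via maskValue = 10) and column 3 gets NA
# (masked via maskValue = NA).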
ml[,3] <- NA 16 | names(ml) <- "henryMaske" 17 | 18 | r <- rast(r) 19 | ml <- rast(ml) 20 | test_that("errors and warnings are emitted", { 21 | expect_error(spectralIndices(r, red = 1, indices = "NDVI"), "you must specify \\*all\\* required bands") 22 | expect_error(spectralIndices(r, red = 1), "you must specify \\*all\\* required bands") 23 | expect_warning(spectralIndices(r, red = 1, nir = 2, indices = c("NDVI", "EVI")), "not specified: blue") 24 | expect_warning(spectralIndices(r, red = 1, nir = 2, blue = 1, index = c("NDVI", "EVI")), "Skipping EVI") 25 | expect_warning(spectralIndices(r, red = 1, nir = 2, blue = 1, index = "EVI", skipRefCheck = TRUE), "raster has no values") 26 | expect_error(spectralIndices(r, red = 1, nir = 2, blue = 1, maskLayer = FALSE, index = "ndvi", skipRefCheck = TRUE), "maskLayer must be") 27 | expect_error(spectralIndices(r, red = 1, nir = 2, blue = 1, maskLayer = "reginaHalmich", index = "ndvi", skipRefCheck = TRUE), "is not a layer") 28 | }) 29 | 30 | m_cfg <- list(ml, 3, "henryMaske") 31 | nadf <- data.frame("NDVI" = rep(NA_real_, 6)) 32 | 33 | # Suppresswarnings in the next section, we are still dealing with na data, guess raster does not throw a warning 34 | # with a valid raster even if every value is na 35 | test_that("maskLayer is considered regardless of input class", { 36 | for(i in 1:3){ 37 | expect_s4_class(nd <- suppressWarnings({spectralIndices(c(r,ml), red = 1, nir=2, indices = "NDVI", maskLayer = m_cfg[[i]], maskValue = 10)}), "SpatRaster") 38 | expect_equal(nd[,2], nadf) 39 | expect_s4_class(nd <- suppressWarnings({spectralIndices(c(r,ml), red = 1, nir=2, indices = "NDVI", maskLayer = m_cfg[[i]], maskValue = NA)}), "SpatRaster") 40 | expect_equal(nd[,3], nadf) 41 | } 42 | }) 43 | 44 | # Rebuild those test for landsat data 45 | test_that("returned classes", { 46 | vi <- list( 47 | spectralIndices(lsat, red = 4, nir = 5, indices = "NDVI"), 48 | spectralIndices(lsat, red = "B4_dn", nir = 5, indices = "NDVI"), 49 | spectralIndices(lsat, red = "B4_dn", nir = "B5_dn", indices = "NDVI"), 50 | spectralIndices(lsat, red = 4, nir = 5, indices = c("NDVI", "DVI", "MSAVI2")) 51 | ) 52 | ## Check numeric, mixed and character band indices 53 | expect_equal(vi[[1]], vi[[2]], info = "numeric vs. mixed band indexes") 54 | expect_equal(vi[[1]], vi[[3]], info = "numeric vs. 
character band indexes") 55 | 56 | ## Check layer numbers and names 57 | expect_equal(nlyr(vi[[1]]), 1) 58 | expect_identical(names(vi[[1]]), "NDVI") 59 | expect_identical(nlyr(suppressWarnings(spectralIndices(r, red = 1, nir = 2, indices = c("NDVI", "EVI")))), 1) 60 | expect_identical(nlyr(vi[[4]]), 3, info = "nlayers: 3 indices NDVI, MSAVI2, DVI") 61 | expect_identical(names(vi[[4]]), c("NDVI", "DVI", "MSAVI2"), info = "names: 3 indices NDVI, MSAVI2, DVI") 62 | 63 | ## Check index values 64 | ## NDVI like 65 | expect_lte(max(range(vi[[1]][], na.rm = TRUE)), 1) 66 | expect_gte(min(range(vi[[1]][], na.rm = TRUE)), -1) 67 | }) 68 | 69 | test_that("excercise all indices", { 70 | expect_is(sp <- spectralIndices(lsat, blue = 1, green=2, redEdge1=1, redEdge2=2, redEdge3=3, red=3, nir=4, swir2=5, swir3=7, 71 | coefs = list(L=0.4,s=0.3,swir2ccc=30,swir2coc=140), scaleFactor=255), "SpatRaster") 72 | }) 73 | 74 | 75 | 76 | idxdb <- getOption("RStoolbox.idxdb") 77 | cdb <- c( 78 | idxdb, 79 | CUSTOM = list(list(c("Mueller2024", "Super custom index"), function(red) {red * 0})), 80 | CUSTOM2 = list(list(c("Mueller2024", "Super custom index"), function(swir1) {swir1 - swir1})) 81 | ) 82 | rsOpts(idxdb = cdb) 83 | 84 | test_that("custom spectral index",{ 85 | expect_equal( 86 | unique(values(spectralIndices(lsat, red = 3, indices = "CUSTOM"))), 87 | as.matrix(data.frame(CUSTOM=0))) 88 | expect_equal( 89 | unique(values(spectralIndices(lsat, swir1 = 5, indices = "CUSTOM2"))), 90 | as.matrix(data.frame(CUSTOM2=0)) 91 | ) 92 | }) 93 | 94 | 95 | 96 | ## Check for duplicate indices 97 | #tmat <- do.call(rbind, lapply(1:ncol(k), function(i){colSums(k[,i]-k, na.rm = T)==0})) 98 | #colnames(tmat) <- rownames(tmat) <- colnames(k) 99 | 100 | -------------------------------------------------------------------------------- /tests/testthat/test-stackMeta.R: -------------------------------------------------------------------------------- 1 | context("stackMeta") 2 | 3 | mtlFile <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package="RStoolbox") 4 | test_that("stackMeta with exampleData", { 5 | expect_s4_class(st <- stackMeta(mtlFile), "SpatRaster") 6 | expect_s4_class(stackMeta(readMeta(mtlFile)), "SpatRaster") 7 | expect_true(all(grepl("B[1-7]_dn", names(st)))) 8 | }) 9 | -------------------------------------------------------------------------------- /tests/testthat/test-tasseledCap.R: -------------------------------------------------------------------------------- 1 | context("tasseledCap") 2 | 3 | test_that("basic function",{ 4 | for(sat in c("Landsat4TM", "Landsat5TM", "Landsat7ETM", "Landsat8OLI")) { 5 | expect_is(lsat_tc <- tasseledCap(lsat[[c(1:5, 7)]], sat = sat), "SpatRaster") 6 | } 7 | expect_is(lsat_tc <- tasseledCap(lsat, sat = "MODIS"), "SpatRaster") 8 | expect_error(tc <- tasseledCap(lsat[[1:4]], sat = "MODIS"), "Number of layers does not match") 9 | expect_error(tc <- tasseledCap(lsat[[1:4]], sat = "xkcd"), "Sensor not implemented") 10 | }) 11 | 12 | -------------------------------------------------------------------------------- /tests/testthat/test-topCor.R: -------------------------------------------------------------------------------- 1 | context("topCor") 2 | suppressPackageStartupMessages(library(terra)) 3 | metaData <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package="RStoolbox") 4 | metaData <- readMeta(metaData) 5 | lsat_t <- stackMeta(metaData) 6 | 7 | ## Minnaert correction, solar angles from metaData 8 | test_that("basic functioning", { 9 | 
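# Background sketch (not part of the original test): the simplest method exercised here is the
# cosine correction, which rescales each band by the ratio of the solar-zenith cosine to the
# cosine of the local illumination angle, roughly
#   L_corrected <- L * cos(solar_zenith) / cos(illumination)
# the illumination layer itself is what method = "illu" returns below.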
suppressWarnings({ 10 | mths <- if (identical(Sys.getenv("NOT_CRAN"), "true")) c("cos", "avgcos", "C", "stat", "illu") else "cos" 11 | for(method in mths){ 12 | expect_is(tc <- topCor(lsat_t, dem = srtm, metaData = metaData, method = method), "SpatRaster") 13 | expect_equal(names(tc), if(method!="illu") names(lsat_t) else "illu") 14 | } 15 | skip_on_cran() 16 | for(method in mths){ 17 | expect_is(tc <- topCor(lsat_t, dem = srtm, metaData = metaData, method = method, filename = .terraTmpFile()), "SpatRaster") 18 | expect_equal(names(tc), if(method!="illu") names(lsat_t) else "illu") 19 | } 20 | expect_is(tc2 <- topCor(lsat_t, dem = srtm, metaData = metaData, method = "minnaert", stratImg='slope', nStrat = 5), "list") 21 | expect_is(tc3 <- topCor(lsat_t, dem = srtm, solarAngles = c(1.081533, 0.7023922), method = "C"), "SpatRaster") 22 | }) 23 | 24 | }) 25 | -------------------------------------------------------------------------------- /tests/testthat/test-unsuperClass.R: -------------------------------------------------------------------------------- 1 | context("unsuperClass") 2 | library(terra) 3 | 4 | ## Set-up test data 5 | lsatNA <- lsat 6 | lsatNA[20:40, ] <- NA 7 | lsatNA2 <- lsat 8 | lsatNA2 <- terra::writeRaster(lsatNA2, .terraTmpFile()) 9 | values(lsatNA2)[is.na(values(lsatNA2))] <- 20 10 | 11 | ## Tiny raster bug caused superClass to fail when predictions were written to .grd file 12 | test_that("unsuperClass and NA",{ 13 | for(cm in c(TRUE, FALSE)) { 14 | expect_is(sc <- unsuperClass(lsat, nClasses = 2, clusterMap = cm), "unsuperClass") 15 | expect_is(scNA <- unsuperClass(lsatNA, nClasses = 2, clusterMap = cm), "unsuperClass") 16 | expect_true(all(is.na(scNA$map[20:40,]))) 17 | expect_is(scNA <- unsuperClass(lsatNA2, nClasses = 2, filename = .terraTmpFile(), clusterMap = cm), "unsuperClass") 18 | expect_equal(sort(unique(scNA$map[])), c(1,2)) 19 | } 20 | }) 21 | 22 | test_that("terra inputs",{ 23 | expect_is(sc <- unsuperClass(lsat, nClasses = 2), "unsuperClass") 24 | }) 25 | 26 | ## kmeans prediction function only 27 | mat <- matrix(1:20, by = TRUE, nrow = 5, ncol=4) 28 | cents <- mat[c(1,3),] 29 | dists <- apply(cents, 1, function(ce) { apply(mat, 1, function(x) { 30 | sqrt(sum((x - ce)^2)) 31 | } ) }) 32 | 33 | 34 | test_that("kmeans predictions",{ 35 | expect_equal(predKmeansCpp(mat, cents), matrix(c(1,1,2,2,2), ncol = 1)) 36 | expect_equal(predKmeansCpp(mat, cents,TRUE), dists) 37 | mat[1] <- NA 38 | dists[1,] <- NA 39 | expect_equal(predKmeansCpp(mat, cents), matrix(c(NA,1,2,2,2), ncol = 1)) 40 | expect_equal(predKmeansCpp(mat, cents, TRUE), dists) 41 | }) 42 | 43 | 44 | ## pretty print 45 | test_that("printing method", { 46 | skip_on_cran() 47 | expect_output(print(unsuperClass(lsat, nClasses = 2)), "unsuperClass results") 48 | }) 49 | 50 | 51 | ## algortithm warning 52 | test_that("kmeans fail detection", { 53 | skip_on_cran() 54 | set.seed(1) 55 | expect_warning(unsuperClass(lsat, nSamples = ncell(lsat), nStarts = 1, nClasses = 30), "doesn't converge properly") 56 | }) 57 | 58 | ## Helper for symlink-proof filename checking 59 | ## Added to fix CI on gh-actions 60 | slp_bn <- function(path, tmp = basename(tempdir())) { 61 | tail(strsplit(path, tmp)[[1]],1) 62 | } 63 | 64 | ## Predict S3 method 65 | test_that("predict.unSuperClass", { 66 | skip_on_cran() 67 | uc <- unsuperClass(lsat, nSamples = ncell(lsat), nClasses = 2) 68 | expect_s4_class(pred <- predict(uc, lsat), "SpatRaster") 69 | expect_equal(sum((uc$map - pred)[]), 0) 70 | 71 | tmpFile <- tempfile(fileext = 
".grd") 72 | pred <- predict(uc, lsat, filename = tmpFile ) 73 | expect_false(inMemory(pred)) 74 | expect_equal(basename(sources(pred)), basename(tmpFile)) 75 | file.remove(tmpFile, gsub("grd", "gri", tmpFile)) 76 | }) 77 | -------------------------------------------------------------------------------- /tests/testthat/test-validateMap.R: -------------------------------------------------------------------------------- 1 | context("validateMap") 2 | lsat_t <- lsat 3 | lsat_t <- lsat_t[[1:4]] 4 | 5 | ## Set-up test data 6 | set.seed(1) 7 | poly <- readRDS(system.file("external/trainingPolygons_lsat.rds", package="RStoolbox")) 8 | poly$classNum <- as.numeric(poly$class) 9 | 10 | sc <- superClass(lsat_t, trainData = poly, nSamples = 50, responseCol = "class", model = "mlc", trainPartition = 0.7, predict = TRUE) 11 | 12 | test_that("classification, without class mapping",{ 13 | val <- validateMap(sc$map, valData = poly, nSample =50, responseCol = "classNum", classMapping = NULL) 14 | expect_is(val, "mapValidation") 15 | expect_equal(lapply(val, "class"), list(performance="confusionMatrix",validationSet = "data.frame")) 16 | expect_equal(colnames(val$validationSet), c("reference", "prediction", "cell")) 17 | }) 18 | 19 | test_that("classification, with class mapping",{ 20 | skip_on_cran() 21 | val <- validateMap(sc$map, valData = poly, nSample = 50, responseCol = "class", classMapping = sc$classMapping) 22 | expect_is(val, "mapValidation") 23 | expect_output(print(val), "performance") 24 | expect_equal(lapply(val, "class"), list(performance="confusionMatrix",validationSet = "data.frame")) 25 | expect_equal(colnames(val$validationSet), c("reference", "prediction", "cell")) 26 | }) 27 | 28 | test_that("regression",{ 29 | skip_on_cran() 30 | val <- validateMap(sc$map, valData = poly, nSample = 50, mode = "regression", responseCol = "classNum") 31 | expect_is(val, "mapValidation") 32 | expect_equal(lapply(val, "class"), list(performance="numeric",validationSet = "data.frame")) 33 | expect_equal(colnames(val$validationSet), c("reference", "prediction", "cell")) 34 | expect_equal(names(val$performance)[1:2], c("RMSE", "Rsquared")) 35 | }) 36 | 37 | 38 | -------------------------------------------------------------------------------- /tests/testthat/testdata/earthexplorer/EE_LANDSAT_5.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/tests/testthat/testdata/earthexplorer/EE_LANDSAT_5.txt -------------------------------------------------------------------------------- /tests/testthat/testdata/earthexplorer/EE_LANDSAT_7.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/tests/testthat/testdata/earthexplorer/EE_LANDSAT_7.txt -------------------------------------------------------------------------------- /tests/testthat/testdata/earthexplorer/EE_LANDSAT_8.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/tests/testthat/testdata/earthexplorer/EE_LANDSAT_8.txt -------------------------------------------------------------------------------- /tests/testthat/testdata/earthexplorer/EE_LANDSAT_NEWFORMAT.csv: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/tests/testthat/testdata/earthexplorer/EE_LANDSAT_NEWFORMAT.csv -------------------------------------------------------------------------------- /tests/testthat/testdata/sli/a.sli: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/tests/testthat/testdata/sli/a.sli -------------------------------------------------------------------------------- /tests/testthat/testdata/sli/a.sli.hdr: -------------------------------------------------------------------------------- 1 | ENVI 2 | description = { 3 | ENVI SpecLib created using RStoolbox for R [Sat Jul 20 10:50:27 2019]} 4 | samples = 2 5 | lines = 2 6 | bands = 1 7 | header offset = 0 8 | file type = ENVI Spectral Library 9 | data type = 5 10 | interleave = bsq 11 | sensor type = Unknown 12 | byte order = 0 13 | wavelength units = Nanometers 14 | reflectance scale factor = 1 15 | z plot range = {0.00,1} 16 | z plot titles = {Wavelength, Reflectance} 17 | band names = { 18 | Spectral Library} 19 | spectra names = { 20 | spectrum_a, 21 | spectrum_b} 22 | wavelength = { 23 | 350, 351} 24 | -------------------------------------------------------------------------------- /tests/testthat/testdata/sli/b.sli: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/tests/testthat/testdata/sli/b.sli -------------------------------------------------------------------------------- /tests/testthat/testdata/sli/b.sli.hdr: -------------------------------------------------------------------------------- 1 | ENVI 2 | description = { 3 | ENVI SpecLib created using RStoolbox for R [Sat Jul 20 10:50:50 2019]} 4 | samples = 2 5 | lines = 2 6 | bands = 1 7 | header offset = 0 8 | file type = ENVI Spectral Library 9 | data type = 5 10 | interleave = bsq 11 | sensor type = Unknown 12 | byte order = 0 13 | wavelength units = Nanometers 14 | reflectance scale factor = 1 15 | z plot range = {0.00,1} 16 | z plot titles = {Wavelength, Reflectance} 17 | band names = { 18 | Spectral Library} 19 | spectra names = { 20 | spectrum_a, spectrum_b} 21 | wavelength = { 22 | 350, 351} 23 | -------------------------------------------------------------------------------- /tests/testthat/testdata/sli/c.sli: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/tests/testthat/testdata/sli/c.sli -------------------------------------------------------------------------------- /tests/testthat/testdata/sli/c.sli.hdr: -------------------------------------------------------------------------------- 1 | ENVI 2 | description = { 3 | ENVI SpecLib created using RStoolbox for R [Sat Jul 20 10:51:03 2019]} 4 | samples = 2 5 | lines = 2 6 | bands = 1 7 | header offset = 0 8 | file type = ENVI Spectral Library 9 | data type = 5 10 | interleave = bsq 11 | sensor type = Unknown 12 | byte order = 0 13 | wavelength units = Nanometers 14 | reflectance scale factor = 1 15 | z plot range = {0.00,1} 16 | z plot titles = {Wavelength, Reflectance} 17 | band names = { 18 | Spectral Library} 19 | spectra names = { spectrum_a, spectrum_b} 20 | wavelength = { 21 | 350, 351} 22 | -------------------------------------------------------------------------------- 
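The a, b and c header fixtures above (and the d fixture that follows) differ only in how the brace-delimited "spectra names" field is wrapped across lines, which is exactly the variation the SLI tests exercise. Below is a minimal, self-contained R sketch of one way such a wrapped field could be collected and split; it is not the parser RStoolbox actually uses, and the helper name read_hdr_field is invented for illustration.

# Hedged sketch: gather a brace-delimited ENVI header field that may span
# one or several lines, then split it into its comma-separated entries.
read_hdr_field <- function(hdrFile, field = "spectra names") {
  lns   <- readLines(hdrFile)
  start <- grep(paste0("^", field, "\\s*="), lns)[1]
  if (is.na(start)) return(character(0))
  end   <- start + which(grepl("\\}", lns[start:length(lns)]))[1] - 1
  block <- paste(lns[start:end], collapse = " ")
  block <- sub(".*\\{", "", block)   # drop everything up to the opening brace
  block <- sub("\\}.*", "", block)   # drop the closing brace and trailing text
  trimws(strsplit(block, ",")[[1]])
}
# All four header variants should give c("spectrum_a", "spectrum_b"), e.g.
# read_hdr_field("tests/testthat/testdata/sli/c.sli.hdr")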
/tests/testthat/testdata/sli/d.sli: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bleutner/RStoolbox/3f8f05b1af57d355b1d525976c5782d3d7d2b66c/tests/testthat/testdata/sli/d.sli -------------------------------------------------------------------------------- /tests/testthat/testdata/sli/d.sli.hdr: -------------------------------------------------------------------------------- 1 | ENVI 2 | description = { 3 | ENVI SpecLib created using RStoolbox for R [Sat Jul 20 10:50:27 2019]} 4 | samples = 2 5 | lines = 2 6 | bands = 1 7 | header offset = 0 8 | file type = ENVI Spectral Library 9 | data type = 5 10 | interleave = bsq 11 | sensor type = Unknown 12 | byte order = 0 13 | wavelength units = Nanometers 14 | reflectance scale factor = 1 15 | z plot range = {0.00,1} 16 | z plot titles = {Wavelength, Reflectance} 17 | band names = { 18 | Spectral Library} 19 | spectra names = { spectrum_a, 20 | spectrum_b} 21 | wavelength = { 22 | 350, 351} 23 | --------------------------------------------------------------------------------
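As a closing usage note, all four fixtures are expected to load into the same two-spectrum library via RStoolbox::readSLI(); the assumed column layout below (a wavelength column followed by one column per spectrum, named after the spectra) is inferred from the header contents above and is not quoted from test-sli.R.

library(RStoolbox)
# Paths are relative to the package source tree; adjust to your checkout.
slis <- file.path("tests/testthat/testdata/sli", c("a.sli", "b.sli", "c.sli", "d.sli"))
libs <- lapply(slis, readSLI)
# Each library should contain wavelengths 350 and 351 nm plus the spectra
# "spectrum_a" and "spectrum_b", no matter how the header lines were wrapped.
sapply(libs, function(s) all(c("spectrum_a", "spectrum_b") %in% colnames(s)))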