├── .gitignore ├── .travis.yml ├── README.md ├── build.bash ├── document.bash ├── drat.sh ├── pkg ├── DESCRIPTION ├── NAMESPACE ├── NEWS ├── R │ ├── data_checks.R │ ├── oystercatcher.R │ ├── parse_outfile.R │ ├── read_icv.R │ ├── read_tcf.R │ ├── read_tdf.R │ ├── rtrim-pkg.R │ ├── skylark.R │ ├── trim.R │ ├── trim_estimate.R │ ├── trim_gof.R │ ├── trim_heatmap.R │ ├── trim_index.R │ ├── trim_overall.R │ ├── trim_post.R │ ├── trim_refine.R │ ├── trim_smooth.R │ ├── trim_wald.R │ ├── trim_workhorse.R │ └── utils.R ├── data │ ├── oystercatcher.RData │ ├── skylark.RData │ └── skylark2.RData ├── tests │ ├── testthat.R │ └── testthat │ │ ├── outfiles │ │ ├── skylark-1a.out │ │ ├── skylark-1a.tcf │ │ ├── skylark-1b.out │ │ ├── skylark-1b.tcf │ │ ├── skylark-1c.out │ │ ├── skylark-1c.tcf │ │ ├── skylark-1d.out │ │ ├── skylark-1d.tcf │ │ ├── skylark-1e.out │ │ ├── skylark-1e.tcf │ │ ├── skylark-1f.out │ │ ├── skylark-1f.tcf │ │ ├── skylark-2a.out │ │ ├── skylark-2a.tcf │ │ ├── skylark-2b.out │ │ ├── skylark-2b.tcf │ │ ├── skylark-3a.out │ │ ├── skylark-3a.tcf │ │ ├── skylark-4a.out │ │ ├── skylark-4a.tcf │ │ ├── skylark-4b.out │ │ ├── skylark-4b.tcf │ │ ├── skylark-4c.out │ │ ├── skylark-4c.tcf │ │ ├── skylark-4d.out │ │ ├── skylark-4d.tcf │ │ ├── skylark-4e.out │ │ ├── skylark-4e.tcf │ │ ├── skylark-4f.out │ │ ├── skylark-4f.tcf │ │ ├── skylark-minimal.tcf │ │ ├── skylark-model2-f.csv │ │ ├── skylark-x1.ocv │ │ ├── skylark-x1.out │ │ ├── skylark-x1.tcf │ │ ├── skylark-x2.out │ │ ├── skylark-x2.tcf │ │ ├── skylark-x3.out │ │ ├── skylark-x3.tcf │ │ ├── skylark.dat │ │ ├── skylark_wt.dat │ │ ├── skylark_yr.dat │ │ └── test_skylark_1a_to_2b.R │ │ ├── test_checks.R │ │ ├── test_errors.R │ │ ├── test_tcf.R │ │ ├── test_tdf.R │ │ ├── test_trim.R │ │ ├── test_utils.R │ │ └── testdata │ │ └── 131183.RData └── vignettes │ ├── Skylark_example.Rmd │ ├── TRIM_methods_v2.pdf │ ├── TRIM_methods_v2.pdf.asis │ ├── UIndex_Oystercatcher_output.RData │ ├── rtrim_2_extensions.Rmd │ ├── rtrim_confidence_intervals.Rmd │ ├── rtrim_for_TRIM_users.Rmd │ └── taming_overdispersion.Rmd ├── roxygen.R └── test.r /.gitignore: -------------------------------------------------------------------------------- 1 | # History files 2 | .Rhistory 3 | .Rapp.history 4 | # Example code in package build process 5 | *-Ex.R 6 | # RStudio files 7 | .Rproj.user/ 8 | pkg/.Rbuildignore 9 | rtrim.Rproj 10 | # produced vignette files 11 | pkg/vignettes/*.html 12 | pkg/vignettes/*.R 13 | # pkg/vignettes/*.pdf 14 | .Rproj.user 15 | pkg/man/*.Rd 16 | output/* 17 | *.gz 18 | .DS_Store 19 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | 2 | # travis config 3 | 4 | sudo: required 5 | dist: trusty 6 | 7 | language: r 8 | sudo: false 9 | cache: packages 10 | 11 | before_install: 12 | - R -e "install.packages(c('devtools','roxygen2','testthat'))" 13 | - R -e "devtools::install_deps('./pkg')" 14 | - R -e "devtools::document('./pkg')" 15 | - cd ./pkg 16 | 17 | r_packages: 18 | - covr 19 | - rmarkdown 20 | - R.rsp 21 | 22 | 23 | after_success: 24 | - Rscript -e 'library(covr);coveralls()' 25 | 26 | notifications: 27 | email: 28 | on_success: change 29 | on_failure: change 30 | 31 | 32 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build 
Status](https://travis-ci.org/markvanderloo/rtrim.svg)](https://travis-ci.org/markvanderloo/rtrim)
2 | [![Coverage Status](https://coveralls.io/repos/github/markvanderloo/rtrim/badge.svg?branch=master)](https://coveralls.io/github/markvanderloo/rtrim?branch=master)
3 | [![CRAN](http://www.r-pkg.org/badges/version/rtrim)](http://cran.r-project.org/web/packages/rtrim)
4 | [![Downloads](http://cranlogs.r-pkg.org/badges/rtrim)](http://cran.r-project.org/package=rtrim/) [![Mentioned in Awesome Official Statistics ](https://awesome.re/mentioned-badge.svg)](http://www.awesomeofficialstatistics.org)
5 |
6 |
7 | # rtrim
8 | Reimplementation of [TRIM](https://www.cbs.nl/en-gb/society/nature-and-environment/indices-and-trends--trim--) for R
9 |
10 | ### Installation and getting started
11 |
12 | To install `rtrim`, issue the following command at the R console.
13 | ```r
14 | install.packages("rtrim")
15 | ```
16 |
17 | To get started, we recommend that users first work through the introductory vignette. This can be opened as follows.
18 | ```r
19 | library(rtrim)
20 | vignette("rtrim_for_TRIM_users")
21 | ```
22 |
23 | An extended introduction showing all options can be opened as follows:
24 | ```r
25 | library(rtrim)
26 | vignette("Skylark_example")
27 | ```
28 | After working through one of these vignettes, we advise users to have a look at the help file of `rtrim`'s main function by typing `?trim`. Also, to get a feel for how the package works, one can browse through the help files by following the links under the `see also` sections in each help file.
29 |
30 | ### Release planning
31 |
32 | Version 1.0.2 will be uploaded to CRAN on 31 March 2017.
33 |
34 | See the [NEWS](pkg/NEWS) file for a list of updates.
35 |
36 |
37 | ### Install the development version
38 |
39 | Sometimes we release a beta version for public testing.
40 |
41 | **Current Beta Version:** 1.0.2
42 |
43 |
44 | Beta versions can be installed as follows.
45 |
46 | 1. If you are a Windows user, first install [Rtools](https://cran.r-project.org/bin/windows/Rtools/).
47 | 2. Open an R session, and issue the following commands.
48 | ```r
49 | # we need the 'drat' package to install the review version:
50 | if(!require(drat)) install.packages('drat')
51 | # we tell R where to find the review version (on github)
52 | drat::addRepo('markvanderloo')
53 | # install as usual
54 | install.packages('rtrim',type='source')
55 | ```
56 | This procedure is only necessary for the prerelease version. Once the package is on CRAN, neither `Rtools` nor `drat` will be required for installation.
57 |
58 |
59 |
60 | ### Note
61 |
62 | The current package has been tested at two institutes and we have compared
63 | numerical outcomes of hundreds of production jobs with outcomes of the original
64 | TRIM software. We find that most results are reproducible to about `1E-4` or
65 | better. Nevertheless, since this software has not been tried and tested for
66 | over 25 years like TRIM has, you can still expect to bump into a rare bug or
67 | difference.
68 |
69 | We would really appreciate it if you let us know using the following instructions.
70 |
71 |
72 | ### Telling us what works and what doesn't
73 |
74 | We'd love to hear your findings. It would be great if you could do this by adding
75 | issues to our [issues page](https://github.com/markvanderloo/rtrim/issues). You will
76 | need to create a GitHub account for that. If you cannot or do not want to create one,
77 | send an e-mail to `rtrim AT cbs dot nl`.
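If you do report a problem, a small self-contained R script helps us reproduce it quickly. A hypothetical sketch (using only the bundled `skylark` data) could look like this:
```r
library(rtrim)
data(skylark)
# replace this call with the one that misbehaves for you
z <- trim(count ~ site + time, data=skylark, model=2)
summary(z)
```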
78 | 79 | When reporting bugs, it helps us a lot if you can create an as small as possible example 80 | that demonstrates the error. Some good general guidelines for reporting bugs are given [here](https://sifterapp.com/blog/2012/08/tips-for-effectively-reporting-bugs-and-issues/) 81 | 82 | 83 | 84 | 85 | 86 | 87 | -------------------------------------------------------------------------------- /build.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Bash script to build the rtrim R package 4 | # version 1: Mark van der Loo 5 | # version 2: Patrick Bogaart 6 | # - added generation of install script 7 | 8 | R=R 9 | CHECKARG="--as-cran" 10 | while [ $# -gt 0 ] ; do 11 | case "$1" in 12 | -dev) 13 | R=Rdev 14 | shift 1 ;; 15 | *) 16 | CHECKARG="$CHECKARG $1" 17 | shift 1 ;; 18 | esac 19 | done 20 | 21 | echo "######## Removing building information..." 22 | rm -rf output 23 | 24 | 25 | echo "######## Generate documentation..." 26 | $R -q -f roxygen.R 27 | 28 | echo "######## Building package in output..." 29 | mkdir output 30 | cd output 31 | $R CMD build ../pkg 32 | echo "######## Testing package with $CHECKARG ..." 33 | for x in *.tar.gz 34 | do 35 | $R CMD check $CHECKARG $x 36 | done 37 | 38 | echo "######## Creating installation script..." 39 | TARGET=install.R 40 | PRE="install.packages(\"" 41 | TGZ=$(eval ls -1 *.tar.gz | head -1) 42 | POST="\", repos=NULL, type=\"source\")" 43 | echo "# R script to install rtrim package from source" > $TARGET 44 | echo $PRE$TGZ$POST >> $TARGET 45 | 46 | echo "**BUILT USING $R" 47 | $R --version 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /document.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cp ./build/DESCRIPTION ./pkg 4 | R -f roxygen.R 5 | R CMD Rd2pdf --force --no-preview -o manual.pdf ./pkg 6 | 7 | 8 | -------------------------------------------------------------------------------- /drat.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/Rscript 2 | 3 | suppressPackageStartupMessages({ 4 | if (!require("drat")) stop("drat not installed") 5 | if (!require("docopt")) stop("docopt not installed") 6 | }) 7 | 8 | 9 | "Usage: drat.sh [commit] [--pkg FILE] [--dratrepo FOLDER] 10 | 11 | commit commit after insert? 
12 | --pkg FILE The tarball to insert in the drat repo (by default the tarball in ./output) 13 | --dratrepo FOLDER path to root of drat repo [default: ../drat] 14 | " -> doc 15 | 16 | opt <- docopt(doc) 17 | 18 | stopifnot(file.exists(opt$dratrepo)) 19 | 20 | pkg <- opt$pkg 21 | if ( is.null(pkg) ){ 22 | pkg <- dir("output/",pattern = ".*tar\\.gz",full.names = TRUE) 23 | } 24 | 25 | if (!file.exists(pkg)){ 26 | stop(sprintf("%s not found",pkg)) 27 | } 28 | 29 | drat::insertPackage(pkg, repodir=opt$dratrepo, commit=opt$commit) 30 | 31 | cat(sprintf("Inserted %s into %s %s\n" 32 | , pkg 33 | , opt$dratrepo 34 | , if(opt$commit) "and committed" else "" 35 | )) 36 | -------------------------------------------------------------------------------- /pkg/DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: rtrim 2 | Title: Trends and Indices for Monitoring Data 3 | Version: 2.3.0 4 | Date: 2024-06-21 5 | Authors@R: c( 6 | person("Patrick","Bogaart", 7 | email="rtrim@cbs.nl", 8 | role=c("aut","cre"), 9 | comment=c(ORCID="0000-0002-8612-1289")), 10 | person("Mark", "van der Loo", role="aut"), 11 | person("Jeroen","Pannekoek", role="aut"), 12 | person("Statistics Netherlands", role="cph")) 13 | Description: The TRIM model is widely used for estimating growth and decline of 14 | animal populations based on (possibly sparsely available) count data. The 15 | current package is a reimplementation of the original TRIM software developed 16 | at Statistics Netherlands by Jeroen Pannekoek. 17 | See 18 | for more information about TRIM. 19 | URL: https://github.com/SNStatComp/rtrim 20 | BugReports: https://github.com/SNStatComp/rtrim/issues 21 | LazyLoad: yes 22 | LazyData: no 23 | License: EUPL 24 | Type: Package 25 | Imports: methods, utils, stats, graphics, grDevices 26 | Suggests: 27 | testthat, 28 | knitr, 29 | rmarkdown, 30 | R.rsp 31 | RoxygenNote: 7.3.1 32 | Encoding: UTF-8 33 | VignetteBuilder: knitr, R.rsp 34 | -------------------------------------------------------------------------------- /pkg/NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(check_observations,character) 4 | S3method(check_observations,data.frame) 5 | S3method(check_observations,trimcommand) 6 | S3method(coef,trim) 7 | S3method(confint,trim) 8 | S3method(gof,trim) 9 | S3method(plot,trim.index) 10 | S3method(plot,trim.overall) 11 | S3method(plot,trim.results) 12 | S3method(plot,trim.smooth) 13 | S3method(plot,trim.totals) 14 | S3method(print,count.summary) 15 | S3method(print,trim) 16 | S3method(print,trim.gof) 17 | S3method(print,trim.overall) 18 | S3method(print,trim.summary) 19 | S3method(print,trim.wald) 20 | S3method(print,trimcommand) 21 | S3method(read_icv,character) 22 | S3method(read_icv,trimcommand) 23 | S3method(read_tdf,character) 24 | S3method(read_tdf,trimcommand) 25 | S3method(summary,trim) 26 | S3method(summary,trimbatch) 27 | S3method(trim,data.frame) 28 | S3method(trim,formula) 29 | S3method(trim,trimcommand) 30 | S3method(vcov,trim) 31 | S3method(wald,trim) 32 | export(check_observations) 33 | export(count_summary) 34 | export(gof) 35 | export(heatmap) 36 | export(index) 37 | export(now_what) 38 | export(overall) 39 | export(overdispersion) 40 | export(read_tcf) 41 | export(read_tdf) 42 | export(results) 43 | export(serial_correlation) 44 | export(set_trim_verbose) 45 | export(totals) 46 | export(trendlines) 47 | export(trim) 48 | export(trimcommand) 
49 | export(wald) 50 | import(methods) 51 | importFrom(grDevices,adjustcolor) 52 | importFrom(grDevices,col2rgb) 53 | importFrom(grDevices,gray) 54 | importFrom(grDevices,hcl) 55 | importFrom(grDevices,rgb) 56 | importFrom(graphics,abline) 57 | importFrom(graphics,legend) 58 | importFrom(graphics,lines) 59 | importFrom(graphics,par) 60 | importFrom(graphics,plot) 61 | importFrom(graphics,points) 62 | importFrom(graphics,polygon) 63 | importFrom(graphics,rasterImage) 64 | importFrom(graphics,rect) 65 | importFrom(graphics,segments) 66 | importFrom(graphics,title) 67 | importFrom(stats,pchisq) 68 | importFrom(stats,pt) 69 | importFrom(stats,qchisq) 70 | importFrom(stats,qgamma) 71 | importFrom(stats,qnorm) 72 | importFrom(stats,qt) 73 | importFrom(stats,quantile) 74 | importFrom(stats,setNames) 75 | importFrom(stats,time) 76 | importFrom(utils,capture.output) 77 | importFrom(utils,head) 78 | importFrom(utils,packageVersion) 79 | importFrom(utils,read.table) 80 | importFrom(utils,str) 81 | importFrom(utils,tail) 82 | -------------------------------------------------------------------------------- /pkg/NEWS: -------------------------------------------------------------------------------- 1 | version 2.3.0 2 | - Updated the FAQ 3 | - Fixed an issue regarding incomplete covariates 4 | - Added scaled index in addition to "formal" 5 | - Added 'long' output in index() and totals() 6 | - Added Trendlines() to process overall() output 7 | - Fixed a bug in autodelete() 8 | - Removed spurious option "imputed" in plot.trim.overall() *) 9 | - Fixed a bug that resulted in negative lower bounds for confidence intervals 10 | in extreme cases (huge overdispersion) *) 11 | - Fixed issue reporting wrong package version *) 12 | - Fixed documentation of plot.trim.overall which erroneously mentioned that for 13 | each time point confidence intervals are plotted rather than standard errors *) 14 | 15 | *) Thanks for Tomas Telensky for pointing at these issues. 16 | 17 | version 2.1.1 18 | - Fixed incompatibility with R 4.0 19 | 20 | version 2.0.7 21 | - Improved error messages for invalid beta values 22 | - Improved the use of actual year numbers as change points 23 | - Improved documentation for overall() function 24 | 25 | version 2.0.6 26 | - Removed spurious output from index() function 27 | - Fixed bug in totals() which prevented both obs=TRUE and level!=NULL 28 | 29 | version 2.0.5 30 | - Fixed bug causing non-zero standard errors for the base year index if this isn't the first year (thanks to Tomas Telensky) 31 | 32 | version 2.0 33 | - Many major updates (monthly data; stratified trim; advanced visualizations); See the vignette "trim 2 extensions". 
34 | 35 | version 1.3.1 36 | - Fixed bug concerning covariates 37 | - Fixed typos and minor edits in vignettes (thanks to Martin Poot) 38 | 39 | version 1.3.0 40 | - Integration of experimental monthly version 41 | 42 | version 1.2.0 43 | - Introduced time-window-based indexing 44 | - Automatically converts non-factor covariates to factors 45 | - Fixes a bug occuring when empty sites are combined with covariates (thanks to Marnix de Zeeuw) 46 | - Speed improvements 47 | - Minor bug fixes 48 | 49 | version 1.1.5 50 | - Now generates an error when the computed overdispersion is 0, and a warning when overdispersion < 1 (thanks to Oscar Ramírez for proving a data set where this happened) 51 | 52 | version 1.1.4 53 | - Fixed a bug caused by auto-removal of empty factorial sites (thanks to Tomás Murray) 54 | 55 | version 1.1.3 56 | - Fixed a bug resulting in wrong p-values output by the overall() function (thanks to Anna Marie Holand for reporting). 57 | 58 | version 1.1.2 59 | - Fixed problem when to many years have no or 0 counts, such that the expected value becomes effectively 0 as well. This case now generates an error message “Zero expected value” (thanks to Arco van Strien for proving this case) 60 | 61 | version 1.1.1 62 | - Fixed problem during stepwise regression when the algorithm wants to remove the first changepoint (thanks to Asaf Tsoar) 63 | - Fixed computation of standard errors when model==1 (thanks to Asaf Tsoar) 64 | 65 | version 1.1.0 66 | - Added backward compatibility option in overall() to match erroneous overall trend interpretation in original TRIM (thans for Lars Petterson to pointing out the differences in trend interpretation) 67 | 68 | version 1.0.2 69 | - Limit on regression parameters now equal to original TRIM (thanks to Lars Petterson) 70 | 71 | version 1.0.1 72 | - initial CRAN release 73 | -------------------------------------------------------------------------------- /pkg/R/oystercatcher.R: -------------------------------------------------------------------------------- 1 | #' @name oystercatcher 2 | #' @title Oystercatcher population data 3 | #' 4 | #' @usage data(oystercatcher) 5 | #' 6 | #' @description 7 | #' 8 | #' A sample data set for demonstation of monthly counts. 9 | #' 10 | #' The \bold{\code{oystercatcher}} data set looks as follows. 11 | #' 12 | #' \tabular{lll}{ 13 | #' \bold{Column} \tab \bold{Type} \tab \bold{Description}\cr 14 | #' \code{site} \tab \code{integer} \tab Site number\cr 15 | #' \code{year} \tab \code{integer} \tab Year\cr 16 | #' \code{month} \tab \code{integer} \tab Month\cr 17 | #' \code{count} \tab \code{integer} \tab Counted oystercatchers 18 | #' } 19 | #' 20 | #' @docType data 21 | #' @format \code{.RData} 22 | #' 23 | NULL 24 | -------------------------------------------------------------------------------- /pkg/R/read_icv.R: -------------------------------------------------------------------------------- 1 | # Functions to read the covariance matrix in case of iterative use of rtrim 2 | 3 | 4 | read_icv <- function(x,...){ 5 | UseMethod("read_icv") 6 | } 7 | 8 | #' @export 9 | read_icv.character <- function(x, J=0, ...){ 10 | icv_read(filenm=x, J=J) 11 | } 12 | 13 | #' @export 14 | read_icv.trimcommand <- function(x,...){ 15 | basename <- strsplit(x$file,"\\.")[[1]][1] 16 | filenm <- paste0(basename,".ICV") 17 | icv_read(filenm, x$ntimes) 18 | } 19 | 20 | 21 | # Workhorse reader 22 | icv_read <- function(filenm, J=0) 23 | { 24 | # the covariance matrices might differ in size from site to site due 25 | # to missing data. 
So, use autodetection 26 | covin <- list() 27 | lines = readLines(filenm) 28 | nsite <- 0 29 | while (length(lines)>0) { 30 | snif = read.table(textConnection(lines), nrows=1) 31 | J = ncol(snif) 32 | stopifnot(J>0) 33 | df <- read.table(textConnection(lines), nrows=J) 34 | V <- unname(as.matrix(df)) 35 | nsite <- nsite+1 36 | covin[[nsite]] <- V 37 | lines <- lines[-(1:J)] 38 | } 39 | covin 40 | } 41 | 42 | # Workhorse reader 43 | # icv_read <- function(filenm, J=0) 44 | # { 45 | # m = unname(as.matrix(read.table(filenm))) 46 | # if (J>0) { # perform a check 47 | # stopifnot(ncol(m)==J) # COVIN should have J columns 48 | # stopifnot(nrow(m)%%J == 0) #... and I*J rows 49 | # } else J <- ncol(m) # autodetect 50 | # nsite <- nrow(m) %/% J 51 | # covin <- vector("list", nsite) 52 | # idx = 1:J 53 | # for (i in 1:nsite) { 54 | # covin[[i]] <- m[idx, ] 55 | # idx = idx + J 56 | # } 57 | # covin 58 | # } -------------------------------------------------------------------------------- /pkg/R/read_tdf.R: -------------------------------------------------------------------------------- 1 | 2 | #' Read TRIM data files 3 | #' 4 | #' Read data files intended for the original TRIM programme. 5 | #' 6 | #' @section The TRIM data file format: 7 | #' 8 | #' TRIM input data is stored in a \code{ASCII} encoded file where headerless columns 9 | #' are separated by one or more spaces. Below are the columns as \code{read_tdf} expects 10 | #' them. 11 | #' 12 | #' \tabular{lll}{ 13 | #' \bold{Variable} \tab\bold{status} \tab \bold{R type}\cr 14 | #' \code{site} \tab requiered \tab \code{integer}\cr 15 | #' \code{time} \tab required \tab \code{integer}\cr 16 | #' \code{count} \tab required \tab \code{numeric}\cr 17 | #' \code{weight} \tab optional \tab \code{numeric}\cr 18 | #' \code{}\tab optional\tab \code{integer}\cr 19 | #' \code{...}\tab\tab\cr 20 | #' \code{}\tab optional\tab \code{integer}\cr 21 | #' } 22 | #' 23 | #' 24 | #' @param x a filename or a \code{\link{trimcommand}} object 25 | #' @param missing \code{[integer]} missing value indicator. 26 | #' Missing values are translated to \code{\link[base]{NA}}. 27 | #' @param weight \code{[logical]} indicate presence of a weight column 28 | #' @param ncovars \code{[logical]} The number of covariates in the file 29 | #' @param labels \code{[character]} (optional) specify labels for the covariates. 30 | #' Defaults to \code{cov} (\code{i=1,2,...,ncovars}) if none are specified. 31 | #' @param ... (unused) 32 | #' 33 | #' @return A \code{data.frame}. 
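#' @section Example file:
#' A hypothetical data file with four records (columns \code{site}, \code{time},
#' \code{count}, and a missing count coded as \code{-1}) would look like this:
#' \preformatted{
#' 1 1 12
#' 1 2 -1
#' 2 1 4
#' 2 2 6
#' }
#' It could be read with \code{read_tdf("counts.dat")}, where \code{"counts.dat"}
#' is a placeholder file name.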
34 | #' 35 | #' @family modelspec 36 | #' @export 37 | read_tdf <- function(x,...){ 38 | UseMethod("read_tdf") 39 | } 40 | 41 | #' @rdname read_tdf 42 | #' @export 43 | read_tdf.character <- function(x, missing = -1, weight = FALSE, ncovars=0, labels=character(0),...){ 44 | tdfread(file=x, missing=missing, weight=weight,ncovars=ncovars, labels=labels) 45 | } 46 | 47 | 48 | #' @rdname read_tdf 49 | #' @export 50 | read_tdf.trimcommand <- function(x,...){ 51 | tdfread(x$file, missing = x$missing, weight = x$weight, ncovars = x$ncovars, labels=x$labels) 52 | } 53 | 54 | 55 | # workhorse function for the S3 interfaces 56 | tdfread <- function(file, missing, weight, ncovars, labels) { 57 | # First check if the file does indeed exist 58 | if (!file.exists(file)) stop(sprintf("Could not find trim data file %s",file), call.=FALSE) 59 | 60 | if ( ncovars > 0 && length(labels) == 0 ){ 61 | labels <- paste0("cov",seq_len(ncovars)) 62 | } else if ( ncovars != length(labels)) { 63 | stop(sprintf("Length of 'labels' (%d) unequal to 'ncovars' (%d)",length(labels),ncovars)) 64 | } 65 | 66 | colclasses <- c(site = "integer", time = "integer", count="numeric") 67 | if (weight) colclasses['weight'] <- "numeric" 68 | # add labels and names for covariates 69 | colclasses <- c(colclasses, setNames(rep("integer",ncovars), labels)) 70 | 71 | 72 | # by default, one or more blanks (space, tab) are used as separators 73 | tab <- tryCatch( 74 | read.table(file, header=FALSE, colClasses=colclasses, col.names = names(colclasses)) 75 | , error=function(e) snifreport(file, colclasses)) 76 | if (nrow(tab)==0) stop(sprintf("file \"%s\" appears to be empty", file)) 77 | if (nrow(tab) > 0) tab[tab == missing] <- NA 78 | tab 79 | } 80 | 81 | 82 | snifreport <- function(file, colclasses){ 83 | if (!file.exists(file)) stop(sprintf("Could not find file %s",file)) 84 | ncl <- length(colclasses) 85 | lns <- readLines(file,n=5) 86 | if (length(lns)==0) stop(sprintf("file \"%s\" appears to be empty", file)) 87 | cls <- paste(paste0(names(colclasses),"<",colclasses,">"),collapse=" ") 88 | msg <- sprintf("\n\rExpected %s columns: %s\nStart of file looks like this:\n",ncl,cls) 89 | msg <- paste0(msg,paste(sprintf("\r%s\n",lns), collapse="")) 90 | stop(msg, call.=FALSE) 91 | } 92 | 93 | #' Compute a summary of counts 94 | #' 95 | #' 96 | #' Summarize counts over a trim input dataset. Sites without counts are removed 97 | #' before any counting takes place (since these will not be used when calling 98 | #' \code{\link{trim}}). For the remaining records, the total number of 99 | #' zero-counts, positive counts, total number of observed counts and the total 100 | #' number of missings are reported. 101 | #' 102 | #' @param x A \code{data.frame} with annual counts per site. 103 | #' @param eps \code{[numeric]} Numbers smaller then \code{eps} are treated a zero. 104 | #' @param site_col \code{[character|numeric]} index of the column containing the site ID's 105 | #' @param year_col \code{[character|numeric]} index of the column containing the year 106 | #' @param count_col \code{[character|numeric]} index of the column containing the counts 107 | #' 108 | #' @return A \code{list} of class \code{count.summary} containing individual names. 
109 | #' @export 110 | #' @examples 111 | #' data(skylark) 112 | #' count_summary(skylark) 113 | #' 114 | #' s <- count_summary(skylark) 115 | #' s$zero_counts # obtain number of zero counts 116 | count_summary <- function(x, count_col="count", site_col="site", year_col="year", eps=1e-8){ 117 | 118 | site_count <- tapply(X = x[,count_col], INDEX = x[site_col], FUN=sum, na.rm=TRUE) 119 | ii <- abs(site_count) < eps 120 | sites_wout_counts <- character(0) 121 | if (any(ii)){ 122 | sites_wout_counts <- names(site_count[ii]) 123 | x <- x[!x[,site_col] %in% sites_wout_counts,,drop=FALSE] 124 | } 125 | 126 | cnt <- x[,count_col] 127 | L <- list( 128 | sites = length(unique(x[,site_col])) 129 | , sites_without_counts = sites_wout_counts 130 | , zero_counts = sum(cnt0, na.rm=TRUE) 132 | , total_observed = sum(!is.na(cnt)) 133 | , missing_counts = sum(is.na(cnt)) 134 | ) 135 | L$total_counts <- with(L, total_observed + missing_counts) 136 | structure(L, class=c("count.summary","list")) 137 | } 138 | 139 | #' print a count summary 140 | #' 141 | #' @param x An R object 142 | #' @param ... unused 143 | #' 144 | #' @export 145 | #' @keywords internal 146 | print.count.summary <- function(x,...){ 147 | printf("Total number of sites %8d\n", x$sites) 148 | printf("Sites without positive counts (%d): %s\n" 149 | , length(x$sites_without_counts) 150 | , paste(x$sites_without_counts,collapse=", ") 151 | ) 152 | printf("Number of observed zero counts %8d\n",x$zero_counts) 153 | printf("Number of observed positive counts %8d\n",x$positive_counts) 154 | printf("Total number of observed counts %8d\n",x$total_observed) 155 | printf("Number of missing counts %8d\n",x$missing_counts) 156 | printf("Total number of counts %8d\n",x$total_counts) 157 | 158 | } 159 | 160 | 161 | 162 | 163 | 164 | -------------------------------------------------------------------------------- /pkg/R/rtrim-pkg.R: -------------------------------------------------------------------------------- 1 | #' 2 | #' Trend and Indices for Monitoring Data 3 | #' 4 | #' The TRIM model is used to estimate species populations based on frequent 5 | #' (annual) counts at a varying collection of sites. The model is able to take 6 | #' account of missing data by imputing prior to estimation of population totals. 7 | #' The current package is a complete re-implementation of the \code{Delphi} 8 | #' based 9 | #' \href{https://www.cbs.nl/en-gb/society/nature-and-environment/indices-and-trends--trim--}{TRIM} 10 | #' software developed at Statistics Netherlands by Jeroen Pannekoek. 11 | #' 12 | #' @section Getting started: 13 | #' 14 | #' Several vignettes have been written to document the `rtrim` package. 15 | #' 16 | #' For everybody: 17 | #' \itemize{ 18 | #' \item\href{../doc/Skylark_example.html}{rtrim by example} 19 | #' \item\href{../doc/rtrim_2_extensions.html}{rtrim 2 extensions} 20 | #' \item\href{../doc/FAQ.html}{Frequently Asked Questions} 21 | #' } 22 | #' 23 | #' For users of the original Windows TRIM software: 24 | #' \itemize{ 25 | #' \item\href{../doc/rtrim_for_TRIM_users.html}{rtrim for TRIM users} 26 | #' } 27 | #' 28 | #' For users who would like to have more insight what is going on under the hood: 29 | #' \itemize{ 30 | #' \item\href{../doc/TRIM_methods_v2.pdf}{Models and statistical methods in rtrim} (PDF), 31 | #' \item\href{../doc/TRIM_confidence_intervals.html}{rtrim confidence intervals} 32 | #' \item\href{../doc/taming_overdispersion.html}{Taming overdispersion}. 33 | #' } 34 | #' Enjoy! 
35 | #' The rtrim team of Statistics Netherlands 36 | #' 37 | # #' @name rtrim-package 38 | # #' @docType package 39 | #' @aliases rtrim-package 40 | "_PACKAGE" 41 | #' @import methods 42 | #' @importFrom utils read.table head tail str capture.output packageVersion 43 | #' @importFrom grDevices gray rgb hcl adjustcolor col2rgb 44 | #' @importFrom graphics lines plot points polygon segments title abline legend par rasterImage rect 45 | #' @importFrom stats pchisq qchisq pt qt qnorm time setNames quantile qgamma 46 | #' 47 | {} 48 | 49 | .onLoad <- function(libname, pkgname){ 50 | options(trim_verbose=FALSE) 51 | } 52 | 53 | .onAttach <- function(libname, pkgname){ 54 | packageStartupMessage("Welcome to ", pkgname, " version ", packageVersion(pkgname), "; Type ?`rtrim-package` to get started.") 55 | } 56 | -------------------------------------------------------------------------------- /pkg/R/skylark.R: -------------------------------------------------------------------------------- 1 | #' @name skylark 2 | #' @aliases skylark2 3 | #' @title Skylark population data 4 | #' 5 | #' @usage data(skylark); data(skylark2) 6 | #' 7 | #' @description 8 | #' 9 | #' The Skylark dataset that was included with the original TRIM software. 10 | #' 11 | #' The dataset can be loaded in two forms. The \bold{\code{skylark}} dataset is 12 | #' exactly equal to the data set in the original TRIM software: 13 | #' 14 | #' \tabular{lll}{ 15 | #' \bold{Column} \tab \bold{Type} \tab \bold{Description}\cr 16 | #' \code{site} \tab \code{integer} \tab Site number\cr 17 | #' \code{time} \tab \code{integer} \tab Time point coded as integer sequence\cr 18 | #' \code{count} \tab \code{numeric} \tab Counted skylarks\cr 19 | #' \code{Habitat} \tab \code{integer} \tab Habitat type (1, 2)\cr 20 | #' \code{Deposition} \tab \code{integer} \tab Deposition type (1, 2, 3, 4) 21 | #' } 22 | #' 23 | #' The current implementation is more flexible and allows time points to be coded as years 24 | #' and covariates as factors. The \bold{\code{skylark2}} data set looks as follows. 25 | #' 26 | #' \tabular{lll}{ 27 | #' \bold{Column} \tab \bold{Type} \tab \bold{Description}\cr 28 | #' \code{site} \tab \code{factor} \tab Site number\cr 29 | #' \code{year} \tab \code{integer} \tab Time point coded as year\cr 30 | #' \code{count} \tab \code{integer} \tab Counted skylarks\cr 31 | #' \code{Habitat} \tab \code{factor} \tab Habithat type (\code{dunes}, \code{heath})\cr 32 | #' \code{Deposition} \tab \code{integer} \tab Deposition type (1, 2, 3, 4)\cr 33 | #' \code{Weight} \tab \code{numeric} \tab Site weight 34 | #' } 35 | #' 36 | #' 37 | #' 38 | #' @docType data 39 | #' @format \code{.RData} 40 | #' 41 | NULL 42 | -------------------------------------------------------------------------------- /pkg/R/trim_estimate.R: -------------------------------------------------------------------------------- 1 | #' TRIM estimation function 2 | #' 3 | #' @param count a numerical vector of count data. 4 | #' @param site an integer/numerical/character/factor vector of site identifiers for each count data point 5 | #' @param year an integer/numerical vector time points for each count data point. 6 | #' @param month an optional integer/character/factor vector of months for each count data point. 7 | #' @param weights an optional numerical vector of weights. 
8 | #' @param covars an optional data frame with covariates
9 | #' @param model a model type selector (1, 2 or 3)
10 | #' @param changepoints a numerical vector of change points (only for Model 2)
11 | #' @param overdisp a flag indicating whether overdispersion has to be taken into account.
12 | #' @param serialcor a flag indicating whether autocorrelation has to be taken into account.
13 | #' @param autodelete a flag indicating auto-deletion of changepoints with too few observations.
14 | #' @param stepwise a flag indicating stepwise refinement of changepoints is to be used.
15 | #' @param covin a list of variance-covariance matrices; one per pseudo-site.
16 | #' @param verbose flag to enable additional output during a single run.
17 | #'
18 | #' @return a list of class \code{trim} that contains all output, statistics, etc.
19 | #' Usually this information is retrieved by a set of postprocessing functions.
20 | #'
21 | #' @keywords internal
22 | trim_estimate <- function(count, site, year, month, weights, covars
23 | , model, changepoints, overdisp, serialcor
24 | , autodelete, stepwise, covin, verbose=FALSE, ...)
25 | {
26 | call <- sys.call(1)
27 | saved_verbosity <- getOption("trim_verbose")
28 | if (verbose) options(trim_verbose=TRUE)
29 |
30 | # kick out missing/zero sites
31 | tot_count <- tapply(count, site, function(x) sum(x>0, na.rm=TRUE)) # Count total observations per site
32 | full_sites <- names(tot_count)[tot_count>0]
33 | empty_sites <- names(tot_count)[tot_count==0]
34 | nkickout <- length(empty_sites)
35 |
36 | if (nkickout>0) {
37 | msg <- sprintf("Removed %d %s without positive observations: (%s)", nkickout,
38 | ifelse(nkickout==1, "site","sites"), paste0(empty_sites, collapse=", "))
39 | warning(msg)
40 | idx <- site %in% full_sites
41 | count <- count[idx]
42 | site <- site[idx]
43 | if (is.factor(site)) site <- droplevels(site)
44 | year <- year[idx]
45 | if(!is.null(month)) month <- month[idx]
46 | if (!is.null(weights)) weights <- weights[idx]
47 | # Don't forget to adjust the covariates as well!
48 | if (nrow(covars)>0) covars <- covars[idx, ,drop=FALSE] # prevent data.frame -> vector degradation!
49 | }
50 |
51 | # Handle "auto" changepoints
52 | if (model==2 && is.character(changepoints)) {
53 | changepoints <- tolower(changepoints) # Allow changepoints="All"
54 | if (changepoints %in% c("all","auto")) {
55 | if (changepoints == "auto") stepwise=TRUE
56 | J <- length(unique(year))
57 | changepoints <- 1 : (J-1)
58 | }
59 | }
60 |
61 | if (isTRUE(stepwise)) {
62 | if (model != 2) stop("Stepwise refinement requires model 2", call.=FALSE)
63 | if (length(changepoints)<2) stop("Stepwise refinement requires >1 changepoints.", call.=FALSE)
64 | }
65 |
66 | # Now allowed
67 | #if (isTRUE(serialcor) && !is.null(month)) {
68 | # stop("serialcor=TRUE not allowed when using monthly data", call.=FALSE)
69 | #}
70 |
71 | t1 <- Sys.time()
72 | if (isTRUE(stepwise)) {
73 | m <- trim_refine(count, site, year, month, weights, covars,
74 | model, changepoints, overdisp, serialcor, autodelete, stepwise, covin, ...)
75 | } else {
76 | # data input checks: throw error if not enough counts available.
77 | if (model == 2 && length(changepoints)>0 && autodelete){
78 | changepoints <- autodelete(count=count, time=year
79 | , changepoints = changepoints, covars=covars)
80 | } else if (model == 2){
81 | assert_plt_model(count = count, time = year
82 | , changepoints = changepoints, covars = covars)
83 |
84 | } else if (model == 3) {
85 | assert_sufficient_counts(count=count, index=list(year=year))
86 | if (!is.null(month)) assert_sufficient_counts(count=count, index=list(month=month))
87 | assert_covariate_counts(count=count, time=year, covars=covars, timename="year")
88 | }
89 |
90 | # compute actual model
91 | m <- trim_workhorse(count, site, year, month, weights, covars,
92 | model, changepoints, overdisp, serialcor, autodelete, stepwise, covin, ...)
93 | }
94 |
95 | t2 <- Sys.time()
96 | m$dt <- difftime(t2,t1)
97 | rprintf("Running trim took %8.4f %s\n",m$dt,attr(m$dt,"units"))
98 | if (verbose) options(trim_verbose=saved_verbosity)
99 | m$call <- call
100 | m
101 | }
102 |
--------------------------------------------------------------------------------
/pkg/R/trim_gof.R:
--------------------------------------------------------------------------------
1 | # ######################################################### Goodness of fit ####
2 |
3 | # The goodness-of-fit of the model is assessed using three statistics:
4 | # Chi-squared, Likelihood Ratio and Akaike Information Content.
5 |
6 | # ============================================================= Computation ====
7 |
8 | # Here we define `gof' as an S3 generic function
9 | #' Extract TRIM goodness-of-fit information.
10 | #'
11 | #' \code{\link{trim}} computes three goodness-of-fit measures:
12 | #' \itemize{
13 | #' \item Chi-squared
14 | #' \item Likelihood ratio
15 | #' \item Akaike Information content
16 | #' }
17 | #'
18 | #' @param x an object of class \code{\link{trim}} (as returned by \code{\link{trim}})
19 | #'
20 | #' @return a list of type "trim.gof", containing elements \code{chi2}, \code{LR}
21 | #' and \code{AIC}, for Chi-squared, Likelihood Ratio and Akaike information content,
22 | #' respectively.
23 | #' @export
24 | #'
25 | #' @family analyses
26 | #' @examples
27 | #' data(skylark)
28 | #' z <- trim(count ~ site + time, data=skylark, model=2)
29 | #' # prettyprint GOF information
30 | #' gof(z)
31 | #'
32 | #' # get individual elements, e.g. p-value
33 | #' L <- gof(z)
34 | #' LR_p <- L$LR$p # get p-value for likelihood ratio
35 | #'
36 | gof <- function(x) UseMethod("gof")
37 |
38 | # Here is a simple wrapper function for TRIM output lists.
39 | #' @export
40 | #' @rdname gof
41 | gof.trim <- function(x) {
42 | stopifnot(class(x)=="trim")
43 | gof_numeric(x$f, x$mu, x$alpha, x$beta)
44 | }
45 |
46 | # Here is the workhorse function
47 |
48 | gof_numeric <- function(f, mu, alpha, beta) {
49 | observed <- is.finite(f)
50 |
51 | # The $\chi^2$ (Chi-square) statistic is given by
52 | # \begin{equation}
53 | # \chi^2 = \sum_{ij}\frac{(f_{i,j}-\mu_{i,j})^2}{\mu_{i,j}}
54 | # \end{equation}
55 | # where the summation is over the observed $i,j$'s only.
56 | # Significance is assessed by comparing against a $\chi^2$ distribution with
57 | # $df$ degrees of freedom, equal to the number of observations
58 | # minus the total number of parameters involved, i.e.\
59 | # $df = n_f - n_\alpha - n_\beta$.
60 | chi2 <- sum((f-mu)^2/mu, na.rm=TRUE) 61 | df <- sum(observed) - length(alpha) - length(beta) 62 | p <- 1 - pchisq(chi2, df=df) 63 | chi2 <- list(chi2=chi2, df=df, p=p) # store in a list 64 | 65 | # Similarly, the \emph{Likelihood ratio} (LR) is computed as 66 | # \begin{equation} 67 | # \operatorname{LR} = 2\sum_{ij}f_{ij} \log\frac{f_{i,j}}{\mu_{i,j}} \label{LR} 68 | # \end{equation} 69 | # and again compared against a $\chi^2$ distribution. 70 | LR <- 2 * sum(f * log(f / mu), na.rm=TRUE) 71 | df <- sum(observed) - length(alpha) - length(beta) 72 | p <- 1 - pchisq(LR, df=df) 73 | LR <- list(LR=LR, df=df, p=p) 74 | 75 | # The Akaike Information Content (AIC) is related to the LR as: 76 | AIC <- LR$LR - 2*LR$df 77 | 78 | # Output all goodness-of-fit measures in a single list 79 | structure(list(chi2=chi2, LR=LR, AIC=AIC), class="trim.gof") 80 | } 81 | 82 | # ================================================================ Printing ==== 83 | 84 | # A simple printing function is provided that mimics TRIM for Windows output. 85 | 86 | #' Print method for \code{trim.gof} 87 | #' 88 | #' @export 89 | #' @param x a \code{trim.gof} object 90 | #' @keywords internal 91 | print.trim.gof <- function(x,...) { 92 | # print welcome message 93 | cat(sprintf("Goodness of fit:\n")) 94 | 95 | # print $\chi^2$ results 96 | with(x$chi2, 97 | printf("%24s = %.2f, df=%d, p=%.4f\n", "Chi-square", chi2, df, p)) 98 | 99 | # idem, Likelihood ratio 100 | with(x$LR, 101 | printf("%24s = %.2f, df=%d, p=%.4f\n", "Likelihood Ratio", LR, df, p)) 102 | 103 | # idem, Akaike Information Content 104 | with(x, 105 | printf("%24s = %.2f\n", "AIC (up to a constant)", AIC)) 106 | } 107 | -------------------------------------------------------------------------------- /pkg/R/trim_heatmap.R: -------------------------------------------------------------------------------- 1 | .colorize <- function(m, pal1, pal2=NULL, idx2=NULL, log=FALSE, na.col) { 2 | # m : matrix 3 | # pal1 : palette 4 | # pal2 : optional second palette 5 | # idx2 : logical matrix of grid cells where pal2 should be used 6 | # log : flag to set log-transformation of values in m 7 | # na.col : color to use for NA cells 8 | 9 | # Internal functions 10 | 11 | .normalize <- function(x) { 12 | minx <- min(x, na.rm=TRUE) 13 | maxx <- max(x, na.rm=TRUE) 14 | (x-minx) / (maxx-minx) 15 | } 16 | 17 | # (next lines are obsolete: 0 will be painted white) 18 | # # test if all data points are >0 iff log=TRUE 19 | # if (log) { 20 | # nzero <- sum(m==0, na.rm=TRUE) 21 | # if (nzero>0) { 22 | # msg <- sprintf("Can't make a heat map because %d zero elements; consider setting log=FALSE.", nzero) 23 | # warning(msg) 24 | # # stop(msg, call.=FALSE) 25 | # } 26 | # } 27 | 28 | rgb.pal.1 <- col2rgb(pal1) / 255.0 29 | variant <- ifelse(is.null(pal2), 1, 2) # 1: only pal1; 2: pal1+pal2 30 | if (variant==2) { 31 | rgb.pal.2 <- col2rgb(pal2) / 255.0 32 | stopifnot(!is.null(idx2)) 33 | } 34 | if (inherits(na.col, "character")) na.col <- col2rgb(na.col)/255.0 35 | ncolor <- length(pal1) 36 | nr <- nrow(m) 37 | nc <- ncol(m) 38 | img <- array(0, c(nr, nc, 3L)) 39 | 40 | # determine special values 41 | if (log) { 42 | na_idx <- is.na(m) 43 | zero_idx <- is.finite(m) & m==0 44 | ok_idx <- is.finite(m) & m>0 45 | } else { 46 | na_idx <- is.na(m) 47 | ok_idx <- is.finite(m) 48 | } 49 | 50 | # transform 51 | if (log) { 52 | m[zero_idx] <- NA 53 | m <- log10(m) 54 | } 55 | 56 | idx1 <- is.finite(m) 57 | 58 | # try to uniformly distribute colors. Probably there's only one largest value. 
59 | eps <- 1e-7 60 | im <- 1L + as.integer((ncolor-eps) * .normalize(m)) 61 | for (i in 1:3) { 62 | layer <- matrix(na.col[i], nr, nc) 63 | if (variant==1) { 64 | layer[idx1] <- rgb.pal.1[i, im[ok_idx]] 65 | } else { 66 | layer[idx1] <- rgb.pal.1[i, im[ok_idx]] 67 | layer[idx2] <- rgb.pal.2[i, im[idx2]] 68 | } 69 | # mark zeros as WHITE 70 | if (log) { 71 | layer[zero_idx] <- 1.0 # white 72 | } 73 | img[ , ,i] <- layer 74 | } 75 | img 76 | } 77 | 78 | 79 | # heatmap <- function(x) UseMethod("heatmap") 80 | 81 | 82 | #' Plot a heatmap representation of observed and/or imputed counts. 83 | #' 84 | #' This function organizes the observed and/or imputed counts into a matrix where 85 | #' rows represent sites and columns represent time points. 86 | #' A bitmap image is constructed in which each pixel corresponds to an element of this matrix. 87 | #' Each pixel is colored according the correspondong count status, and the type of heatmap plot requested ('data', 'imputed' or 'fitted'). 88 | #' 89 | #' The 'imputed' heatmap uses the most elaborate color scheme: 90 | #' Site/time combinations that are observed are colored red, the higher the count, the darker the red. 91 | #' Site/time combinations that are imputed are colored blue, the higher the estimate, the darker the blue. 92 | #' 93 | #' For the 'data' heatmap, missing site/time combinations are colored gray. 94 | #' 95 | #' For the 'fitted' heatmap, all site/time combinations are colored blue. 96 | #' 97 | #' By default, all counts are log-transformed prior to colorization, and observed counts of 0 are indicates as white pixels. 98 | #' 99 | #' @param z output of a call to \code{\link{trim}}. 100 | #' @param what the type of heatmap to be plotted: 'data' (default), 'imputed' or 'fitted'. 101 | #' @param log flag to indicate whether the count should be log-transformed first. 102 | #' @param xlab x-axis label. The default value "auto" will evaluate to either "Year" or "Time point" 103 | #' @param ylab y-axis label 104 | #' @param ... other parameters to be passed to \code{\link[graphics]{plot}} 105 | #' 106 | #' @export 107 | #' @family graphical post-processing 108 | #' 109 | #' @examples 110 | #' data(skylark2) 111 | #' z <- trim(count ~ site + year, data=skylark2, model=3) 112 | #' heatmap(z,"imputed") 113 | #' 114 | heatmap <- function(z, what=c("data","imputed","fitted"), log=TRUE, xlab="auto", ylab="Site #", ...) 
{ 115 | # Create an RGB heatmap image 116 | 117 | # Define the color palettes to use: RColorBrewer "Reds" (9) and "Blues" (9) 118 | reds <- c("#FFF5F0", "#FEE0D2", "#FCBBA1", "#FC9272", "#FB6A4A", "#EF3B2C", "#CB181D", "#A50F15", "#67000D") 119 | blues <- c("#F7FBFF", "#DEEBF7", "#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5", "#08519C", "#08306B") 120 | 121 | na.col=gray(0.85) 122 | 123 | # In case of monthly data, convert the 3D arrays to 2D matrices 124 | I <- z$nsite 125 | J <- z$nyear 126 | M <- z$nmonth 127 | if (M > 1) { 128 | obs <- matrix( aperm(z$f, c(1,3,2)), I, J*M) 129 | imp <- matrix( aperm(z$imputed, c(1,3,2)), I, J*M) 130 | fit <- matrix( aperm(z$mu, c(1,3,2)), I, J*M) 131 | } else { 132 | obs <- z$f 133 | imp <- z$imputed 134 | fit <- z$mu 135 | } 136 | 137 | what <- match.arg(what) 138 | if (what=="data") { 139 | img <- .colorize(obs, reds, log=log, na.col=na.col) 140 | } else if (what=="imputed") { 141 | img <- .colorize(imp, reds, blues, is.na(obs), log=log, na.col=na.col) 142 | } else if (what=="fitted") { 143 | img <- .colorize(fit, blues, log=log, na.col=na.col) 144 | } else stop("Can't happen.") 145 | 146 | # and plot it 147 | nc <- dim(img)[2] # number of columns 148 | xx = c(0, nc)+0.5 149 | xx = c(min(z$time.id)-0.5, max(z$time.id)+0.5) 150 | # if (nc != J) stop("Problem with # of time points") 151 | # if (nc != length(z$time.id)) stop("Problem with # of time points") 152 | # xx <- range(z$time.id) + 0.5 153 | if (xlab=="auto") xlab <- ifelse(z$time.id[1]==1, "Time point", "Year") 154 | 155 | nr <- dim(img)[1] # number of rows ... 156 | yy = c(0, nr)+0.5 157 | 158 | plot(xx,yy, type='n', ylim=rev(yy), xlab=xlab, ylab=ylab, ...) 159 | rasterImage(img, xx[1], yy[1], xx[2], yy[2], interpolate=FALSE) 160 | 161 | # Draw grid for monthly only 162 | if (M>1) { 163 | xgrid <- (xx[1]+1) : (xx[2]-1) 164 | ygrid1 <- rep(yy[1], length(xgrid)) 165 | ygrid2 <- rep(yy[2], length(xgrid)) 166 | segments(xgrid,ygrid1, xgrid,ygrid2, col="black", lwd=1) 167 | } 168 | rect(xx[1], yy[1], xx[2], yy[2], border="black") 169 | } 170 | -------------------------------------------------------------------------------- /pkg/R/trim_refine.R: -------------------------------------------------------------------------------- 1 | # ##################################################### Stepwise refinement #### 2 | # 3 | # #TODO: use premove/penter in trim() calls 4 | 5 | # =============================================== Main refinement prodecure ==== 6 | 7 | #' TRIM stepwise refinement 8 | #' 9 | #' @param count a numerical vector of count data. 10 | #' @param site a vector (numerical or factor) of site identifiers for each count data point. 11 | #' @param year a numerical vector of annual time points for each count data point. 12 | #' @param month an optional numerical vector of monthly time points. 13 | #' @param weights an optional numerical vector of weights. 14 | #' @param covars an optional list of covariates. 15 | #' @param model a model type selector. 16 | #' @param changepoints a numerical vector change points (only for Model 2) 17 | #' 18 | #' @param premove threshold probability for removal of parameters. 19 | #' @param penter threshold probability for re-introduction of parameters. 20 | #' 21 | #' @return a list of class \code{trim}, that opcontains all output, statistiscs, etc. 
22 | #' Usually this information is retrieved by a set of postprocessing functions 23 | #' 24 | #' @keywords internal 25 | trim_refine <- function(count, site, year, month, weights, covars, 26 | model, changepoints, ..., premove=0.2, penter=0.15) 27 | { 28 | org_cp = changepoints 29 | ncp <- length(org_cp) 30 | active <- rep(TRUE, ncp) # Keeps track which original changepoints are active or not 31 | 32 | # Always start with an estimation using all proposed changepoints 33 | cur_cp <- org_cp 34 | z <- trim_workhorse(count, site, year, month, weights, covars, 35 | model, changepoints=org_cp, ...) 36 | 37 | # # Hack: remove all except the first changepoints 38 | # n <- length(org_cp) 39 | # active[2:n] <- FALSE 40 | # cur_cp <- org_cp[active] 41 | # z <- trim_workhorse(count, site.id, year, covars, model, serialcor, overdisp, cur_cp) 42 | 43 | for (iter in 1:100) { 44 | # Phase 1: can one of the changepoints be removed? 45 | if (sum(active)>0) { 46 | W <- wald(z) 47 | max_p = max(W$dslope$p) 48 | if (max_p > premove) { 49 | i = which.max(W$dslope$p) 50 | del_cp <- cur_cp[i] 51 | del_p <- max_p 52 | # remove from original changepoints 53 | i = which(org_cp == del_cp) 54 | active[i] = FALSE 55 | removed <- TRUE 56 | # report 57 | rprintf("\n>>> Deleted changepoint %d (p = %.4f); %d changepoint(s) left. <<<\n\n", del_cp, del_p, sum(active)) 58 | # Collapse model 2 to 1? 59 | if (sum(active)==0) { 60 | rprintf(">>> Collapsing to Model 1 <<<\n") 61 | # browser() 62 | } 63 | } else removed <- FALSE 64 | } else removed <- FALSE 65 | 66 | # If a changepoint has been removed, we'll need to re-estimate the model 67 | if (removed) { 68 | cur_cp = org_cp[active] # collapes to numeric(0) if no active changepoints, as intended 69 | z <- trim_workhorse(count, site, year, month, weights, covars, 70 | model, changepoints=cur_cp, ...) 
71 | } 72 | 73 | # Phase 2: try to re-insert previously removed changepoints 74 | alpha <- z$alpha 75 | beta <- z$beta 76 | nacp <- sum(active) # Number of active changpoints 77 | beta <- matrix(z$beta, nacp) # vector matrix; columns are covariates 78 | p <- numeric(ncp) 79 | for (i in 1:ncp) if (active[i]==FALSE) { 80 | # deleted changepoints 81 | num.active.before <- ifelse(i==1, 0, sum(active[1:(i-1)])) 82 | num.active.after <- ifelse(i==ncp, 0, sum(active[(i+1):ncp])) 83 | # cast beta in a matrix with beta0 in first column, covars in other columns 84 | if (num.active.before==0) { 85 | beta_t <- rbind(0.0, beta) # add no-trend top row 86 | # stop("Should never happen") 87 | } else if (num.active.after==0) { 88 | beta_last <- beta[num.active.before, ,drop=FALSE] # last before test position; corresponds to Jeroen's '0' 89 | beta_t <- rbind(beta, beta_last) 90 | } else { 91 | beta1 <- beta[1:num.active.before, ,drop=FALSE] 92 | beta_last <- beta[num.active.before, ,drop=FALSE] 93 | beta2 <- beta[(nacp-num.active.after+1):nacp, ,drop=FALSE] 94 | beta_t <- rbind(beta1, beta_last, beta2) 95 | } 96 | beta_t <- matrix(beta_t) # Reshape into column vector 97 | active_t <- active # Create list of current (test) changepoints 98 | active_t[i] <- TRUE 99 | cp_t <- org_cp[active_t] 100 | idx <- c(rep(FALSE,num.active.before), TRUE, rep(FALSE, num.active.after)) 101 | p[i] <- Score(z, alpha, beta_t, cp_t, idx) 102 | } else p[i] = 1.0 # active changepoints 103 | 104 | # A changepoint is re-inserted if the minimum signficance is lower than a 105 | # specified threshold 106 | min_p <- min(p) 107 | if (min_p < penter) { 108 | i <- which.min(p) 109 | active[i] <- TRUE 110 | ins_cp <- org_cp[i] 111 | rprintf("\n>>> Re-inserted changepoint %d (p=%.4f); now %d changepoints. <<<\n\n", ins_cp, min_p, sum(active)) 112 | added <- TRUE 113 | } else added <- FALSE 114 | 115 | # If a changepoint has been re-inserted, we'll need to re-estimate the model 116 | if (added) { 117 | cur_cp <- org_cp[active] 118 | z <- trim_workhorse(count, site, year, month, weights, covars, 119 | model, changepoints=cur_cp, ...) 120 | } 121 | 122 | # Finished refinement? 
123 | if (removed==FALSE & added==FALSE) { 124 | rprintf("Stepwise refinement ready.\n") 125 | break 126 | } 127 | } 128 | 129 | # Return last estimated model 130 | z 131 | } 132 | 133 | # ======================================================= Score computation ==== 134 | 135 | Score <- function(z, alpha, beta, changepoints, index) { 136 | # Unpack TRIM variables 137 | f <- z$f 138 | rho <- ifelse(is.null(z$rho), 0.0, z$rho) 139 | sig2 <- ifelse(is.null(z$sig2), 1.0, z$sig2) 140 | covars <- z$covars 141 | cvmat <- z$cvmat 142 | nsite <- z$nsite 143 | ntime <- z$ntime 144 | if (is.null(z$wt)) { 145 | wt <- matrix(1.0, nsite, ntime) 146 | } else { 147 | wt <- z$wt 148 | } 149 | 150 | ncp <- length(changepoints) 151 | ncovar <- length(covars) 152 | 153 | # Define covar aux info 154 | if (ncovar>0) { 155 | nclass <- numeric(ncovar) 156 | for (i in 1:ncovar) { 157 | if (is.factor(covars[[i]])) { 158 | nclass[i] <- nlevels(covars[[i]] ) 159 | } else { 160 | nclass[i] <- max(covars[[i]]) 161 | } 162 | } 163 | } 164 | 165 | # Define B0, the global B for model 2 166 | B0 <- matrix(0, ntime, ncp) 167 | for (i in 1:ncp) { 168 | cp1 <- changepoints[i] 169 | cp2 <- ifelse(i1) B0[1:(cp1-1), i] <- 0 171 | B0[cp1:cp2, i] <- 0:(cp2-cp1) 172 | if (cp20) { 177 | cvmask <- list() 178 | for (cv in 1:ncovar) { 179 | cvmask[[cv]] = list() 180 | for (cls in 2:nclass[cv]) { 181 | cvmask[[cv]][[cls]] <- list() 182 | for (i in 1:nsite) { 183 | cvmask[[cv]][[cls]][[i]] <- which(cvmat[[cv]][i, ]!=cls) 184 | } 185 | } 186 | } 187 | } 188 | 189 | # Global R 190 | if (rho==0.0) { 191 | Rg <- diag(1, ntime) # default (no autocorrelation) value 192 | } else { 193 | Rg <- rho ^ abs(row(diag(ntime)) - col(diag(ntime))) 194 | } 195 | 196 | # Compute score matrix 197 | U_b <- 0 198 | i_b <- 0 199 | for (i in 1:nsite) { 200 | # Select observations 201 | f_i <- f[i, ] 202 | observed <- is.finite(f_i) 203 | f_i <- f_i[observed] 204 | 205 | # Define B 206 | B <- B0 207 | if (ncovar>0) { # add a copy of $B$ for each covar class 208 | for (cv in ncovar) { 209 | for (cls in 2:nclass[cv]) { 210 | Btmp <- B0 211 | mask <- cvmask[[cv]][[cls]][[i]] 212 | if (length(mask)>0) Btmp[mask, ] = 0 213 | B <- cbind(B, Btmp) 214 | } 215 | } 216 | } 217 | # Compute mu 218 | mu = exp(alpha[i] + B %*% beta) / wt[i, ] 219 | mu_i = mu[observed] 220 | 221 | d_mu_i <- diag(mu_i, length(mu_i)) # Length argument guarantees diag creation 222 | 223 | # Compute $V$ and $\Omega$ 224 | if (rho==0.0 && sig2==1.0) { # ML 225 | V_i <- sig2 * d_mu_i 226 | } else { # GEE 227 | idx <- which(observed) 228 | R_i <- Rg[idx,idx] 229 | V_i <- sig2 * sqrt(d_mu_i) %*% R_i %*% sqrt(d_mu_i) 230 | } 231 | V_inv <- solve(V_i) 232 | 233 | Omega <- d_mu_i %*% V_inv %*% d_mu_i 234 | 235 | B_i = B[observed, ,drop=FALSE] # recyle index for covariates 236 | U_b <- U_b + t(B_i) %*% d_mu_i %*% V_inv %*% (f_i - mu_i) 237 | 238 | nobs_i = length(f_i) 239 | ones <- matrix(1, nobs_i, 1) 240 | d_i <- as.numeric(t(ones) %*% Omega %*% ones) 241 | i_b <- i_b - t(B_i) %*% (Omega - (Omega %*% ones %*% t(ones) %*% Omega) / d_i) %*% B_i 242 | } 243 | V <- solve(-i_b) 244 | S <- t(U_b) %*% V %*% U_b 245 | 246 | df <- length(beta) / length(changepoints) # Number of beta-blocks (baseline+covariates) 247 | p <- 1 - pchisq(S, df=df) 248 | p 249 | } 250 | -------------------------------------------------------------------------------- /pkg/R/trim_smooth.R: -------------------------------------------------------------------------------- 1 | run_kalman <- function(x,y,Q11, h=1, smooth=FALSE) { 2 | # 
setup 3 | a00 <- matrix(0, 2,1) 4 | P00 <- 10000*max(y) * diag(2) 5 | Q <- matrix(c(Q11,0,0,0), 2,2) 6 | T <- matrix(c(2,1,-1,0), 2,2) # Transition matrix 7 | z <- matrix(c(1,0), 2,1) # observation coefficients 8 | 9 | 10 | N <- length(y) 11 | nu <- numeric(N) 12 | f <- numeric(N) 13 | 14 | if (length(h)==1) h <- rep(h, N) 15 | 16 | ap <- a <- array(0, c(2,1,N)) # State vector (history) 17 | Pp <- P <- array(0, c(2,2,N)) # Variance-covariance matrix (history) 18 | 19 | for (i in 1:N) { 20 | # 1. Predict 21 | aprev <- if (i==1) a00 else a[,,i-1] 22 | Pprev <- if (i==1) P00 else P[,,i-1] 23 | ap[,,i] <- T %*% aprev # D.7 24 | Pp[,,i] <- T %*% Pprev %*% t(T) + Q # D.8 25 | # 2. Innovations 26 | nu[i] <- y[i] - t(z) %*% ap[,,i] 27 | f[i] <- t(z) %*% Pp[,,i] %*% z + h[i] 28 | # 3. Update 29 | a[,,i] <- ap[,,i] + Pp[,,i] %*% z %*% nu[i] / f[i] # D.12 30 | P[,,i] <- Pp[,,i] - Pp[,,i] %*% z %*% t(z) %*% Pp[,,i] / f[i] # D.13 31 | } 32 | 33 | # Compute likelihood 34 | Ns <- ceiling(0.15*N) # Skip first time steps 35 | idx <- (Ns+1) : N 36 | sigma2 <- mean(nu[idx]^2/f[idx]) # D.24 37 | loglik <- sum(log(sigma2 * f[idx])) # D.25 38 | 39 | out <- list( 40 | rawdata = list(x=x, y=y, se=sqrt(h)), 41 | yy_filter = a[1,1,], 42 | sigma2=sigma2, loglik=loglik, 43 | internal = list(Q=Q, a=a, P=P, Pp=Pp)) 44 | 45 | # Optional smoothing 46 | if (smooth) { 47 | as <- array(0, c(2,1,N)) # Smoothed state vector history 48 | Ps <- array(0, c(2,2,N)) # Smoothed variance matrix history 49 | Pstar <- array(0, c(2,2,N)) 50 | 51 | as[,,N] <- a[,,N] 52 | Ps[,,N] <- P[,,N] 53 | for (i in (N-1) : 1) { 54 | Pstar[,,i] <- P[,,i] %*% t(T) %*% solve(Pp[,,i+1]) # D.16 55 | as[,,i] <- a[,,i] + (Pstar[,,i] %*% (as[,,i+1] - T %*% a[,,i])) # D.14 56 | Ps[,,i] <- P[,,i] + Pstar[,,i] %*% (Ps[,,i+1] - Pp[,,i+1]) %*% t(Pstar[,,i]) # D.15 57 | # Note: the last Pp above is not required (could be just P) 58 | } 59 | # out$as <- as 60 | out$smoothed <- list(x=x, y=as[1,1,], se=sqrt(Ps[1,1, ]*sigma2)) 61 | out$internal$Ps <- Ps # required for lagdiff 62 | out$internal$Pstar <- Pstar # idem 63 | } 64 | out 65 | } 66 | 67 | fit_kalman <- function(x,y,h, optimize=TRUE) { 68 | f <- function(Q, x,y,h) { 69 | out <- run_kalman(x,y, Q,h, smooth=FALSE) 70 | out$loglik 71 | } 72 | if (optimize) { 73 | Qhi <- 1 74 | for (i in 1:100) { 75 | print(i) 76 | Qopt <- optimize(f, c(0,Qhi), x=x,y=y,h=h)$minimum 77 | if (Qopt < 0.8*Qhi) { 78 | cat(sprintf("*** Qopt = %.3f %.3f ***\n", Qopt, Qhi)) 79 | break 80 | } 81 | Qhi <- Qhi * 2 82 | } 83 | } else Qopt <- 1 84 | out <- run_kalman(x,y, Qopt, h, smooth=TRUE) 85 | out 86 | } 87 | 88 | smooth <- function(x, which=c("imputed","fitted")) { 89 | stopifnot(class(x)=="trim") 90 | which = match.arg(which) 91 | 92 | # extract vars from TRIM output 93 | years <- x$time.id 94 | tt_mod <- x$tt_mod 95 | tt_imp <- x$tt_imp 96 | var_tt_mod <- x$var_tt_mod 97 | var_tt_imp <- x$var_tt_imp 98 | J <- ntime <- x$ntime 99 | 100 | if (which=="imputed") { 101 | tt <- tt_imp 102 | var_tt <- var_tt_imp 103 | src = "imputed" 104 | } else if (which=="fitted") { 105 | tt <- tt_mod 106 | var_tt <- var_tt_mod 107 | src = "fitted" 108 | } 109 | 110 | J = length(tt) 111 | out <- fit_kalman(years, tt, diag(var_tt), optimize=TRUE) 112 | #out <- fit_kalman(years, tt, 1, optimize=FALSE) 113 | out$type="normal" 114 | out$tt = tt 115 | out$err = sqrt(diag(var_tt)) 116 | out$internal <- NULL # remove trendspotter internal info 117 | structure(out, class="trim.smooth") 118 | } 119 | 120 | 
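# Example usage (a hypothetical sketch; note that smooth() is an internal helper
# and is not exported via the package NAMESPACE, so outside the package it is
# only reachable as rtrim:::smooth()):
#
#   data(skylark2)
#   z <- trim(count ~ site + year, data=skylark2, model=3)
#   s <- smooth(z, which="imputed") # Kalman-smoothed imputed yearly totals
#   plot(s)                         # dispatches to plot.trim.smooth() below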
#--------------------------------------------------------------------- Plot ---- 121 | 122 | #' Plot smoothed trend 123 | #' 124 | #' Creates a plot of the smoothed trend and its 95\% confidence band, together 125 | #' with the underlying time totals and their 95\% confidence intervals. 126 | #' 127 | #' @param x An object of class \code{trim.smooth} (returned by \code{smooth}) 128 | #' @param imputed \code{[logical]} Toggle to show imputed counts 129 | #' @param ... Further options passed to \code{\link[graphics]{plot}} 130 | #' 131 | #' @family analyses 132 | #' 133 | #' @examples 134 | #' data(skylark) 135 | #' m <- trim(count ~ site + time, data=skylark, model=2) 136 | #' plot(smooth(m)) 137 | #' 138 | #' @export 139 | plot.trim.smooth <- function(x, imputed=TRUE, ...) { 140 | trend <- x 141 | title <- if (is.null(list(...)$main)){ 142 | attr(trend, "title") 143 | } else { 144 | list(...)$main 145 | } 146 | 147 | make_cband <- function(x) { 148 | ylo <- x$y - x$se * 1.96 149 | yhi <- x$y + x$se * 1.96 150 | xconf <- c(x$x, rev(x$x)) 151 | yconf <- c(ylo, rev(yhi)) 152 | x$cb <- cbind(xconf, yconf) 153 | x 154 | } 155 | 156 | # Convert SE to confidence band 157 | trend$rawdata <- make_cband(trend$rawdata) 158 | trend$smoothed <- make_cband(trend$smoothed) 159 | 160 | xrange <- range(trend$rawdata$x) 161 | yrange1 <- range(trend$rawdata$cb[,2]) 162 | yrange2 <- range(trend$smoothed$cb[,2]) 163 | yrange <- range(0.0, yrange1, yrange2) # include y=0 ! 164 | 165 | # Now plot layer-by-layer (using ColorBrewer colors) 166 | cbred <- rgb(228,26,28, maxColorValue = 255) 167 | cbblue <- rgb(55,126,184, maxColorValue = 255) 168 | plot(xrange, yrange, type='n', xlab="Year", ylab="Count", las=1, main=title,...) 169 | # raw data in red; trend in blue 170 | polygon(trend$rawdata$cb, col=adjustcolor(cbred, 0.2), lty=0) 171 | polygon(trend$smoothed$cb, col=adjustcolor(cbblue, 0.2), lty=0) 172 | 173 | points(trend$rawdata$x, trend$rawdata$y, col=cbred, pch=16) 174 | lines(trend$smoothed$x, trend$smoothed$y, col=cbblue, lwd=2) 175 | trend 176 | } 177 | -------------------------------------------------------------------------------- /pkg/R/utils.R: -------------------------------------------------------------------------------- 1 | 2 | #' Set verbosity of trim model functions 3 | #' 4 | #' Control how much output \code{\link{trim}} writes to the screen while 5 | #' fitting the model. By default, \code{trim} only returns the output 6 | #' and does not write any progress to the screen. After calling 7 | #' \code{set_trim_verbose(TRUE)}, \code{trim} will write information 8 | #' about running iterations and convergence to the screen during optimization. 9 | #' 10 | #' 11 | #' 12 | #' @param verbose \code{[logical]} toggle verbosity. \code{TRUE} means: be 13 | #' verbose, \code{FALSE} means be quiet (this is the default). 14 | #' @family modelspec 15 | #' @export 16 | set_trim_verbose <- function(verbose=FALSE){ 17 | stopifnot(isTRUE(verbose)|!isTRUE(verbose)) 18 | options(trim_verbose=verbose) 19 | } 20 | 21 | # Convenience function for console output during runs 22 | rprintf <- function(fmt,...) { if(getOption("trim_verbose")) cat(sprintf(fmt,...)) } 23 | 24 | # Similar, but for object/summary printing 25 | printf <- function(fmt,...)
{cat(sprintf(fmt,...))} 26 | 27 | -------------------------------------------------------------------------------- /pkg/data/oystercatcher.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SNStatComp/rtrim/a0dbbb5cf3987625d0e5562d8b383d7a01cc7b6f/pkg/data/oystercatcher.RData -------------------------------------------------------------------------------- /pkg/data/skylark.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SNStatComp/rtrim/a0dbbb5cf3987625d0e5562d8b383d7a01cc7b6f/pkg/data/skylark.RData -------------------------------------------------------------------------------- /pkg/data/skylark2.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SNStatComp/rtrim/a0dbbb5cf3987625d0e5562d8b383d7a01cc7b6f/pkg/data/skylark2.RData -------------------------------------------------------------------------------- /pkg/tests/testthat.R: -------------------------------------------------------------------------------- 1 | if( require("testthat") ) test_check("rtrim") 2 | 3 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1a.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 4-7-2016 15:06:11 5 | 6 | Title : skylark-1a 7 | 8 | Comment: Example 1; without overdispersion or autocorrelation" 9 | 10 | The following 5 variables have been read from file: 11 | F:\TRIM\TRIM_manual_demo\skylark.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. HABITAT number of values: 2 17 | 5. COV2 number of values: 4 18 | 19 | Number of sites without positive counts (removed) 0 20 | 21 | Number of observed zero counts 0 22 | Number of observed positive counts 202 23 | Total number of observed counts 202 24 | Number of missing counts 238 25 | Total number of counts 440 26 | 27 | Total count 2536.0 28 | 29 | Sites containing more than 10% of the total count 30 | Site Number Observed Total % 31 | 3 431.0 17.0 32 | 37 266.0 10.5 33 | 40 624.0 24.6 34 | 35 | Time Point Averages 36 | TimePoint Observations Average Index 37 | 1 25 8.52 1.00 38 | 2 20 8.10 0.95 39 | 3 30 10.90 1.28 40 | 4 30 11.27 1.32 41 | 5 28 12.75 1.50 42 | 6 29 14.66 1.72 43 | 7 22 17.00 2.00 44 | 8 18 18.89 2.22 45 | 46 | RESULTS FOR MODEL: Effects for Each Time Point 47 | ---------------------------------------------- 48 | 49 | 50 | ESTIMATION METHOD = Maximum Likelihood 51 | 52 | 53 | Total time used: 5.08 seconds 54 | 55 | GOODNESS OF FIT 56 | Chi-square 188.14, df 140, p 0.0041 57 | Likelihood Ratio 184.98, df 140, p 0.0065 58 | AIC (up to a constant) -95.02 59 | 60 | WALD-TEST FOR SIGNIFICANCE OF DEVIATIONS FROM LINEAR TREND 61 | Wald-Test 20.29, df 6, p 0.0025 62 | 63 | PARAMETER ESTIMATES 64 | 65 | Parameters for Each Time Point 66 | 67 | Time Additive std.err. Multiplicative std.err. 68 | 1 0 1 69 | 2 -0.3430 0.1086 0.7096 0.0771 70 | 3 -0.1732 0.0927 0.8410 0.0780 71 | 4 -0.1875 0.0932 0.8290 0.0773 72 | 5 -0.0853 0.0915 0.9182 0.0840 73 | 6 0.0213 0.0902 1.0216 0.0922 74 | 7 0.0953 0.0924 1.1000 0.1016 75 | 8 0.1712 0.0940 1.1867 0.1115 76 | 77 | Linear Trend + Deviations for Each Time 78 | 79 | Additive std.err. Multiplicative std.err. 
80 | Slope 0.0485 0.0107 1.0497 0.0112 81 | 82 | Time Deviations 83 | 1 0.2325 0.0539 1.2617 0.0680 84 | 2 -0.1591 0.0670 0.8529 0.0571 85 | 3 -0.0378 0.0541 0.9629 0.0521 86 | 4 -0.1006 0.0539 0.9043 0.0488 87 | 5 -0.0469 0.0505 0.9542 0.0481 88 | 6 0.0112 0.0466 1.0113 0.0472 89 | 7 0.0366 0.0453 1.0373 0.0470 90 | 8 0.0640 0.0437 1.0661 0.0466 91 | 92 | Time INDICES 93 | Time Model std.err. Imputed std.err. 94 | 1 1 1 95 | 2 0.7096 0.0771 0.7096 0.0771 96 | 3 0.8410 0.0780 0.8410 0.0780 97 | 4 0.8290 0.0773 0.8290 0.0773 98 | 5 0.9182 0.0840 0.9182 0.0840 99 | 6 1.0216 0.0922 1.0216 0.0922 100 | 7 1.1000 0.1016 1.1000 0.1016 101 | 8 1.1867 0.1115 1.1867 0.1115 102 | 103 | TIME TOTALS 104 | Time Model std.err. Imputed std.err. 105 | 1 511 38 511 38 106 | 2 362 31 362 31 107 | 3 429 26 429 26 108 | 4 423 25 423 25 109 | 5 469 27 469 27 110 | 6 522 27 522 27 111 | 7 562 32 562 32 112 | 8 606 36 606 36 113 | 114 | OVERALL SLOPE MODEL: Moderate increase (p<0.01) ** 115 | Additive std.err. Multiplicative std.err. 116 | 0.0485 0.0107 1.0497 0.0112 117 | 118 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 119 | Additive std.err. Multiplicative std.err. 120 | 0.0485 0.0107 1.0497 0.0112 121 | 122 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1a.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark.dat 2 | TITLE skylark-1a 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT Absent 11 | 12 | comment Example 1; without overdispersion or autocorrelation" 13 | weighting off 14 | overdisp off 15 | serialcor off 16 | model 3 17 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1b.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 4-7-2016 15:06:52 5 | 6 | Title : skylark-1b 7 | 8 | Comment: Example 1; overdispersion only 9 | 10 | The following 5 variables have been read from file: 11 | F:\TRIM\TRIM_manual_demo\skylark.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. HABITAT number of values: 2 17 | 5. 
COV2 number of values: 4 18 | 19 | Number of sites without positive counts (removed) 0 20 | 21 | Number of observed zero counts 0 22 | Number of observed positive counts 202 23 | Total number of observed counts 202 24 | Number of missing counts 238 25 | Total number of counts 440 26 | 27 | Total count 2536.0 28 | 29 | Sites containing more than 10% of the total count 30 | Site Number Observed Total % 31 | 3 431.0 17.0 32 | 37 266.0 10.5 33 | 40 624.0 24.6 34 | 35 | Time Point Averages 36 | TimePoint Observations Average Index 37 | 1 25 8.52 1.00 38 | 2 20 8.10 0.95 39 | 3 30 10.90 1.28 40 | 4 30 11.27 1.32 41 | 5 28 12.75 1.50 42 | 6 29 14.66 1.72 43 | 7 22 17.00 2.00 44 | 8 18 18.89 2.22 45 | 46 | RESULTS FOR MODEL: Effects for Each Time Point 47 | ---------------------------------------------- 48 | 49 | 50 | ESTIMATION METHOD = Generalised Estimating Equations 51 | 52 | 53 | Total time used: 5.38 seconds 54 | 55 | Estimated Overdispersion = 1.344 56 | 57 | GOODNESS OF FIT 58 | Chi-square 188.14, df 140, p 0.0041 59 | Likelihood Ratio 184.98, df 140, p 0.0065 60 | AIC (up to a constant) -95.02 61 | 62 | WALD-TEST FOR SIGNIFICANCE OF DEVIATIONS FROM LINEAR TREND 63 | Wald-Test 15.10, df 6, p 0.0195 64 | 65 | PARAMETER ESTIMATES 66 | 67 | Parameters for Each Time Point 68 | 69 | Time Additive std.err. Multiplicative std.err. 70 | 1 0 1 71 | 2 -0.3430 0.1259 0.7096 0.0894 72 | 3 -0.1732 0.1075 0.8410 0.0904 73 | 4 -0.1875 0.1080 0.8290 0.0896 74 | 5 -0.0853 0.1061 0.9182 0.0974 75 | 6 0.0213 0.1046 1.0216 0.1069 76 | 7 0.0953 0.1071 1.1000 0.1178 77 | 8 0.1712 0.1089 1.1867 0.1293 78 | 79 | Linear Trend + Deviations for Each Time 80 | 81 | Additive std.err. Multiplicative std.err. 82 | Slope 0.0485 0.0124 1.0497 0.0130 83 | 84 | Time Deviations 85 | 1 0.2325 0.0625 1.2617 0.0789 86 | 2 -0.1591 0.0777 0.8529 0.0662 87 | 3 -0.0378 0.0627 0.9629 0.0604 88 | 4 -0.1006 0.0625 0.9043 0.0565 89 | 5 -0.0469 0.0585 0.9542 0.0558 90 | 6 0.0112 0.0541 1.0113 0.0547 91 | 7 0.0366 0.0525 1.0373 0.0544 92 | 8 0.0640 0.0507 1.0661 0.0540 93 | 94 | Time INDICES 95 | Time Model std.err. Imputed std.err. 96 | 1 1 1 97 | 2 0.7096 0.0894 0.7096 0.0894 98 | 3 0.8410 0.0904 0.8410 0.0904 99 | 4 0.8290 0.0896 0.8290 0.0896 100 | 5 0.9182 0.0974 0.9182 0.0974 101 | 6 1.0216 0.1069 1.0216 0.1069 102 | 7 1.1000 0.1178 1.1000 0.1178 103 | 8 1.1867 0.1293 1.1867 0.1293 104 | 105 | TIME TOTALS 106 | Time Model std.err. Imputed std.err. 107 | 1 511 45 511 45 108 | 2 362 35 362 35 109 | 3 429 30 429 30 110 | 4 423 28 423 28 111 | 5 469 31 469 31 112 | 6 522 32 522 32 113 | 7 562 37 562 37 114 | 8 606 42 606 42 115 | 116 | OVERALL SLOPE MODEL: Moderate increase (p<0.01) ** 117 | Additive std.err. Multiplicative std.err. 118 | 0.0485 0.0124 1.0497 0.0130 119 | 120 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 121 | Additive std.err. Multiplicative std.err. 
122 | 0.0485 0.0124 1.0497 0.0130 123 | 124 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1b.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark.dat 2 | TITLE skylark-1b 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT Absent 11 | 12 | comment Example 1; overdispersion only 13 | weighting off 14 | overdisp on 15 | serialcor off 16 | model 3 17 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1c.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 4-7-2016 15:07:34 5 | 6 | Title : skylark-1c 7 | 8 | Comment: Example 1; both overdispersion and serialcorrelation 9 | 10 | The following 5 variables have been read from file: 11 | F:\TRIM\TRIM_manual_demo\skylark.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. HABITAT number of values: 2 17 | 5. COV2 number of values: 4 18 | 19 | Number of sites without positive counts (removed) 0 20 | 21 | Number of observed zero counts 0 22 | Number of observed positive counts 202 23 | Total number of observed counts 202 24 | Number of missing counts 238 25 | Total number of counts 440 26 | 27 | Total count 2536.0 28 | 29 | Sites containing more than 10% of the total count 30 | Site Number Observed Total % 31 | 3 431.0 17.0 32 | 37 266.0 10.5 33 | 40 624.0 24.6 34 | 35 | Time Point Averages 36 | TimePoint Observations Average Index 37 | 1 25 8.52 1.00 38 | 2 20 8.10 0.95 39 | 3 30 10.90 1.28 40 | 4 30 11.27 1.32 41 | 5 28 12.75 1.50 42 | 6 29 14.66 1.72 43 | 7 22 17.00 2.00 44 | 8 18 18.89 2.22 45 | 46 | RESULTS FOR MODEL: Effects for Each Time Point 47 | ---------------------------------------------- 48 | 49 | 50 | ESTIMATION METHOD = Generalised Estimating Equations 51 | 52 | 53 | Total time used: 2.84 seconds 54 | 55 | Estimated Overdispersion = 1.367 56 | Estimated Serial Correlation = 0.302 57 | 58 | GOODNESS OF FIT 59 | Chi-square 191.40, df 140, p 0.0026 60 | Likelihood Ratio 194.80, df 140, p 0.0015 61 | AIC (up to a constant) -85.20 62 | 63 | WALD-TEST FOR SIGNIFICANCE OF DEVIATIONS FROM LINEAR TREND 64 | Wald-Test 16.22, df 6, p 0.0126 65 | 66 | PARAMETER ESTIMATES 67 | 68 | Parameters for Each Time Point 69 | 70 | Time Additive std.err. Multiplicative std.err. 71 | 1 0 1 72 | 2 -0.3202 0.1055 0.7260 0.0766 73 | 3 -0.1687 0.1054 0.8448 0.0891 74 | 4 -0.1897 0.1083 0.8272 0.0896 75 | 5 -0.0824 0.1070 0.9209 0.0986 76 | 6 0.0208 0.1059 1.0210 0.1081 77 | 7 0.0997 0.1082 1.1048 0.1196 78 | 8 0.1558 0.1108 1.1686 0.1295 79 | 80 | Linear Trend + Deviations for Each Time 81 | 82 | Additive std.err. Multiplicative std.err. 83 | Slope 0.0460 0.0142 1.0471 0.0149 84 | 85 | Time Deviations 86 | 1 0.2217 0.0582 1.2481 0.0727 87 | 2 -0.1446 0.0660 0.8654 0.0571 88 | 3 -0.0391 0.0575 0.9617 0.0553 89 | 4 -0.1061 0.0588 0.8994 0.0529 90 | 5 -0.0448 0.0546 0.9561 0.0522 91 | 6 0.0124 0.0488 1.0124 0.0495 92 | 7 0.0452 0.0452 1.0463 0.0473 93 | 8 0.0553 0.0495 1.0569 0.0523 94 | 95 | Time INDICES 96 | Time Model std.err. Imputed std.err. 
97 | 1 1 1 98 | 2 0.7260 0.0766 0.7201 0.0766 99 | 3 0.8448 0.0891 0.8454 0.0893 100 | 4 0.8272 0.0896 0.8314 0.0901 101 | 5 0.9209 0.0986 0.9221 0.0989 102 | 6 1.0210 0.1081 1.0251 0.1086 103 | 7 1.1048 0.1196 1.1082 0.1201 104 | 8 1.1686 0.1295 1.1828 0.1308 105 | 106 | TIME TOTALS 107 | Time Model std.err. Imputed std.err. 108 | 1 509 45 509 45 109 | 2 370 35 366 35 110 | 3 430 29 430 29 111 | 4 421 28 423 28 112 | 5 469 31 469 31 113 | 6 520 32 521 32 114 | 7 563 37 564 37 115 | 8 595 42 601 42 116 | 117 | OVERALL SLOPE MODEL: Moderate increase (p<0.01) ** 118 | Additive std.err. Multiplicative std.err. 119 | 0.0460 0.0142 1.0471 0.0149 120 | 121 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 122 | Additive std.err. Multiplicative std.err. 123 | 0.0478 0.0142 1.0489 0.0149 124 | 125 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1c.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark.dat 2 | TITLE skylark-1c 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT Absent 11 | 12 | comment Example 1; both overdispersion and serialcorrelation 13 | weighting off 14 | overdisp on 15 | serialcor on 16 | model 3 17 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1d.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 4-7-2016 15:08:28 5 | 6 | Title : skylark-1d 7 | 8 | Comment: Example 1; using linear trend model 9 | 10 | The following 5 variables have been read from file: 11 | F:\TRIM\TRIM_manual_demo\skylark.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. HABITAT number of values: 2 17 | 5. COV2 number of values: 4 18 | 19 | Number of sites without positive counts (removed) 0 20 | 21 | Number of observed zero counts 0 22 | Number of observed positive counts 202 23 | Total number of observed counts 202 24 | Number of missing counts 238 25 | Total number of counts 440 26 | 27 | Total count 2536.0 28 | 29 | Sites containing more than 10% of the total count 30 | Site Number Observed Total % 31 | 3 431.0 17.0 32 | 37 266.0 10.5 33 | 40 624.0 24.6 34 | 35 | Time Point Averages 36 | TimePoint Observations Average Index 37 | 1 25 8.52 1.00 38 | 2 20 8.10 0.95 39 | 3 30 10.90 1.28 40 | 4 30 11.27 1.32 41 | 5 28 12.75 1.50 42 | 6 29 14.66 1.72 43 | 7 22 17.00 2.00 44 | 8 18 18.89 2.22 45 | 46 | RESULTS FOR MODEL: Linear Trend 47 | --------------------------------- 48 | 49 | 50 | ESTIMATION METHOD = Generalised Estimating Equations 51 | 52 | 53 | Total time used: 3.23 seconds 54 | 55 | Estimated Overdispersion = 1.439 56 | Estimated Serial Correlation = 0.281 57 | 58 | GOODNESS OF FIT 59 | Chi-square 210.09, df 146, p 0.0004 60 | Likelihood Ratio 175.28, df 146, p 0.0495 61 | AIC (up to a constant) -116.72 62 | 63 | WALD-TEST FOR SIGNIFICANCE OF SLOPE PARAMETER 64 | Wald-Test 11.69, df 1, p 0.0006 65 | 66 | PARAMETER ESTIMATES 67 | 68 | Additive std.err. Multiplicative std.err. 69 | Slope 0.0484 0.0142 1.0496 0.0149 70 | 71 | Time INDICES 72 | Time Model std.err. Imputed std.err. 
73 | 1 1 1 74 | 2 1.0496 0.0149 0.8948 0.0410 75 | 3 1.1017 0.0312 0.9777 0.0601 76 | 4 1.1563 0.0491 0.9790 0.0678 77 | 5 1.2137 0.0687 1.0698 0.0800 78 | 6 1.2738 0.0902 1.1727 0.0945 79 | 7 1.3370 0.1136 1.2527 0.1094 80 | 8 1.4033 0.1391 1.3226 0.1283 81 | 82 | TIME TOTALS 83 | Time Model std.err. Imputed std.err. 84 | 1 404 26 443 28 85 | 2 424 23 396 26 86 | 3 445 19 433 26 87 | 4 468 17 434 26 88 | 5 491 17 474 26 89 | 6 515 20 519 28 90 | 7 541 25 555 30 91 | 8 567 32 586 35 92 | 93 | OVERALL SLOPE MODEL: Moderate increase (p<0.01) ** 94 | Additive std.err. Multiplicative std.err. 95 | 0.0484 0.0142 1.0496 0.0149 96 | 97 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 98 | Additive std.err. Multiplicative std.err. 99 | 0.0509 0.0139 1.0522 0.0146 100 | 101 | 102 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1d.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark.dat 2 | TITLE skylark-1d 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | END 9 | MISSING 999 10 | WEIGHT Absent 11 | 12 | comment Example 1; using linear trend model 13 | weighting off 14 | overdisp on 15 | serialcor on 16 | model 2 17 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1e.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 22-8-2016 14:38:50 5 | 6 | Title : skylark-1e 7 | 8 | The following 5 variables have been read from file: 9 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark.dat 10 | 11 | 1. Site number of values: 55 12 | 2. Time number of values: 8 13 | 3. Count missing = 999 14 | 4. HABITAT number of values: 2 15 | 5. COV2 number of values: 4 16 | 17 | Number of sites without positive counts (removed) 0 18 | 19 | Number of observed zero counts 0 20 | Number of observed positive counts 202 21 | Total number of observed counts 202 22 | Number of missing counts 238 23 | Total number of counts 440 24 | 25 | Total count 2536.0 26 | 27 | Sites containing more than 10% of the total count 28 | Site Number Observed Total % 29 | 3 431.0 17.0 30 | 37 266.0 10.5 31 | 40 624.0 24.6 32 | 33 | Time Point Averages 34 | TimePoint Observations Average Index 35 | 1 25 8.52 1.00 36 | 2 20 8.10 0.95 37 | 3 30 10.90 1.28 38 | 4 30 11.27 1.32 39 | 5 28 12.75 1.50 40 | 6 29 14.66 1.72 41 | 7 22 17.00 2.00 42 | 8 18 18.89 2.22 43 | 44 | RESULTS FOR MODEL: Linear Trend 45 | -------------------------------- 46 | Changes in Slope at Timepoints 47 | 1 2 3 4 5 6 7 48 | 49 | ESTIMATION METHOD = Generalised Estimating Equations 50 | 51 | 52 | Total time used: 3.44 seconds 53 | 54 | Estimated Overdispersion = 1.367 55 | Estimated Serial Correlation = 0.302 56 | 57 | GOODNESS OF FIT 58 | Chi-square 191.40, df 140, p 0.0026 59 | Likelihood Ratio 194.80, df 140, p 0.0015 60 | AIC (up to a constant) -85.20 61 | 62 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 63 | Changepoint Wald-Test df p 64 | 1 9.22 1 0.0024 65 | 2 6.85 1 0.0089 66 | 3 1.44 1 0.2298 67 | 4 1.03 1 0.3107 68 | 5 0.00 1 0.9735 69 | 6 0.04 1 0.8358 70 | 7 0.03 1 0.8519 71 | 72 | PARAMETER ESTIMATES 73 | 74 | Slope for Time Intervals 75 | from upto Additive std.err. Multiplicative std.err. 
76 | 1 2 -0.3202 0.1055 0.7260 0.0766 77 | 2 3 0.1515 0.1033 1.1636 0.1202 78 | 3 4 -0.0210 0.0773 0.9792 0.0757 79 | 4 5 0.1072 0.0754 1.1132 0.0840 80 | 5 6 0.1032 0.0721 1.1087 0.0799 81 | 6 7 0.0789 0.0721 1.0821 0.0780 82 | 7 8 0.0561 0.0770 1.0577 0.0815 83 | 84 | Time INDICES 85 | Time Model std.err. Imputed std.err. 86 | 1 1 1 87 | 2 0.7260 0.0766 0.7201 0.0766 88 | 3 0.8448 0.0891 0.8454 0.0893 89 | 4 0.8272 0.0896 0.8314 0.0901 90 | 5 0.9209 0.0986 0.9221 0.0989 91 | 6 1.0210 0.1081 1.0250 0.1086 92 | 7 1.1048 0.1196 1.1082 0.1201 93 | 8 1.1686 0.1295 1.1828 0.1308 94 | 95 | TIME TOTALS 96 | Time Model std.err. Imputed std.err. 97 | 1 509 45 509 45 98 | 2 370 35 366 35 99 | 3 430 29 430 29 100 | 4 421 28 423 28 101 | 5 469 31 469 31 102 | 6 520 32 521 32 103 | 7 563 37 564 37 104 | 8 595 42 601 42 105 | 106 | OVERALL SLOPE MODEL: Moderate increase (p<0.01) ** 107 | Additive std.err. Multiplicative std.err. 108 | 0.0460 0.0142 1.0471 0.0149 109 | 110 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 111 | Additive std.err. Multiplicative std.err. 112 | 0.0478 0.0142 1.0489 0.0149 113 | 114 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1e.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark.dat 2 | TITLE skylark-1e 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT Absent 11 | 12 | model 2 13 | weighting off 14 | overdisp on 15 | serialcor on 16 | changepoints 1 2 3 4 5 6 7 17 | run 18 | 19 | 20 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1f.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 25-8-2016 17:07:14 5 | 6 | Title : skylark-1f 7 | 8 | The following 5 variables have been read from file: 9 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark.dat 10 | 11 | 1. Site number of values: 55 12 | 2. Time number of values: 8 13 | 3. Count missing = 999 14 | 4. HABITAT number of values: 2 15 | 5. 
COV2 number of values: 4 16 | 17 | Number of sites without positive counts (removed) 0 18 | 19 | Number of observed zero counts 0 20 | Number of observed positive counts 202 21 | Total number of observed counts 202 22 | Number of missing counts 238 23 | Total number of counts 440 24 | 25 | Total count 2536.0 26 | 27 | Sites containing more than 10% of the total count 28 | Site Number Observed Total % 29 | 3 431.0 17.0 30 | 37 266.0 10.5 31 | 40 624.0 24.6 32 | 33 | Time Point Averages 34 | TimePoint Observations Average Index 35 | 1 25 8.52 1.00 36 | 2 20 8.10 0.95 37 | 3 30 10.90 1.28 38 | 4 30 11.27 1.32 39 | 5 28 12.75 1.50 40 | 6 29 14.66 1.72 41 | 7 22 17.00 2.00 42 | 8 18 18.89 2.22 43 | 44 | RESULTS FOR MODEL: Linear Trend 45 | -------------------------------- 46 | Changes in Slope at Timepoints 47 | 1 2 48 | 49 | ESTIMATION METHOD = Generalised Estimating Equations 50 | 51 | 52 | Total time used: 0.96 seconds 53 | 54 | Estimated Overdispersion = 1.334 55 | Estimated Serial Correlation = 0.305 56 | 57 | GOODNESS OF FIT 58 | Chi-square 193.36, df 145, p 0.0045 59 | Likelihood Ratio 191.81, df 145, p 0.0056 60 | AIC (up to a constant) -98.19 61 | 62 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 63 | Changepoint Wald-Test df p 64 | 1 11.08 1 0.0009 65 | 2 14.90 1 0.0001 66 | 67 | PARAMETER ESTIMATES 68 | 69 | Slope for Time Intervals 70 | from upto Additive std.err. Multiplicative std.err. 71 | 1 2 -0.3029 0.0910 0.7387 0.0672 72 | 2 8 0.0757 0.0157 1.0787 0.0169 73 | 74 | Time INDICES 75 | Time Model std.err. Imputed std.err. 76 | 1 1 1 77 | 2 0.7387 0.0672 0.7276 0.0686 78 | 3 0.7968 0.0687 0.8295 0.0774 79 | 4 0.8595 0.0723 0.8353 0.0794 80 | 5 0.9272 0.0787 0.9193 0.0875 81 | 6 1.0001 0.0885 1.0150 0.0977 82 | 7 1.0788 0.1020 1.0945 0.1084 83 | 8 1.1637 0.1195 1.1776 0.1230 84 | 85 | TIME TOTALS 86 | Time Model std.err. Imputed std.err. 87 | 1 513 43 511 43 88 | 2 379 23 372 25 89 | 3 409 20 424 24 90 | 4 441 18 427 24 91 | 5 476 17 470 25 92 | 6 513 19 519 27 93 | 7 553 25 559 30 94 | 8 597 34 602 36 95 | 96 | OVERALL SLOPE MODEL: Moderate increase (p<0.01) ** 97 | Additive std.err. Multiplicative std.err. 98 | 0.0442 0.0134 1.0452 0.0140 99 | 100 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 101 | Additive std.err. Multiplicative std.err. 102 | 0.0463 0.0135 1.0474 0.0141 103 | 104 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-1f.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark.dat 2 | TITLE skylark-1f 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT Absent 11 | 12 | model 2 13 | weighting off 14 | overdisp on 15 | serialcor on 16 | changepoints 1 2 17 | run 18 | 19 | 20 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-2a.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 25-8-2016 17:58:48 5 | 6 | Title : skylark-2a 7 | 8 | The following 5 variables have been read from file: 9 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark.dat 10 | 11 | 1. Site number of values: 55 12 | 2. Time number of values: 8 13 | 3. Count missing = 999 14 | 4. HABITAT number of values: 2 15 | 5. 
COV2 number of values: 4 16 | 17 | Number of sites without positive counts (removed) 0 18 | 19 | Number of observed zero counts 0 20 | Number of observed positive counts 202 21 | Total number of observed counts 202 22 | Number of missing counts 238 23 | Total number of counts 440 24 | 25 | Total count 2536.0 26 | 27 | Sites containing more than 10% of the total count 28 | Site Number Observed Total % 29 | 3 431.0 17.0 30 | 37 266.0 10.5 31 | 40 624.0 24.6 32 | 33 | Time Point Averages 34 | TimePoint Observations Average Index 35 | 1 25 8.52 1.00 36 | 2 20 8.10 0.95 37 | 3 30 10.90 1.28 38 | 4 30 11.27 1.32 39 | 5 28 12.75 1.50 40 | 6 29 14.66 1.72 41 | 7 22 17.00 2.00 42 | 8 18 18.89 2.22 43 | 44 | RESULTS FOR MODEL: Linear Trend 45 | -------------------------------- 46 | Effects of covariate(s) 47 | HABITAT 48 | Changes in Slope at Timepoints 49 | 1 2 3 4 5 6 7 50 | 51 | ESTIMATION METHOD = Generalised Estimating Equations 52 | 53 | 54 | Total time used: 4.03 seconds 55 | 56 | Estimated Overdispersion = 1.162 57 | Estimated Serial Correlation = 0.227 58 | 59 | GOODNESS OF FIT 60 | Chi-square 154.50, df 133, p 0.0979 61 | Likelihood Ratio 159.64, df 133, p 0.0575 62 | AIC (up to a constant) -106.36 63 | 64 | WALD-TEST FOR SIGNIFICANCE OF COVARIATES 65 | Covariate Wald-Test df p 66 | HABITAT 21.55 7 0.0030 67 | 68 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 69 | Changepoint Wald-Test df p 70 | 1 10.27 2 0.0059 71 | 2 9.18 2 0.0102 72 | 3 3.08 2 0.2143 73 | 4 1.54 2 0.4637 74 | 5 1.64 2 0.4413 75 | 6 0.89 2 0.6419 76 | 7 0.01 2 0.9927 77 | 78 | PARAMETER ESTIMATES 79 | 80 | Effects of Covariates on Slope for Time Intervals 81 | from upto 82 | 1 2 83 | Additive std.err. Multiplicative std.err. 84 | Constant -0.2165 0.1991 0.8053 0.1604 85 | 86 | Covariate 1 87 | ------------ 88 | Category 2 -0.1445 0.2324 0.8655 0.2011 89 | 90 | from upto 91 | 2 3 92 | Additive std.err. Multiplicative std.err. 93 | Constant -0.1616 0.2207 0.8508 0.1878 94 | 95 | Covariate 1 96 | ------------ 97 | Category 2 0.4216 0.2480 1.5244 0.3781 98 | 99 | from upto 100 | 3 4 101 | Additive std.err. Multiplicative std.err. 102 | Constant -0.1201 0.2195 0.8869 0.1946 103 | 104 | Covariate 1 105 | ------------ 106 | Category 2 0.1094 0.2336 1.1156 0.2606 107 | 108 | from upto 109 | 4 5 110 | Additive std.err. Multiplicative std.err. 111 | Constant -0.2410 0.2260 0.7859 0.1776 112 | 113 | Covariate 1 114 | ------------ 115 | Category 2 0.3882 0.2389 1.4744 0.3522 116 | 117 | from upto 118 | 5 6 119 | Additive std.err. Multiplicative std.err. 120 | Constant 0.2179 0.2249 1.2434 0.2797 121 | 122 | Covariate 1 123 | ------------ 124 | Category 2 -0.1298 0.2366 0.8783 0.2078 125 | 126 | from upto 127 | 6 7 128 | Additive std.err. Multiplicative std.err. 129 | Constant -0.1153 0.2180 0.8911 0.1943 130 | 131 | Covariate 1 132 | ------------ 133 | Category 2 0.2139 0.2301 1.2385 0.2850 134 | 135 | from upto 136 | 7 8 137 | Additive std.err. Multiplicative std.err. 138 | Constant -0.0849 0.2330 0.9186 0.2140 139 | 140 | Covariate 1 141 | ------------ 142 | Category 2 0.1720 0.2459 1.1876 0.2920 143 | 144 | 145 | Time INDICES 146 | Time Model std.err. Imputed std.err. 147 | 1 1 1 148 | 2 0.7281 0.0751 0.7234 0.0751 149 | 3 0.8411 0.0846 0.8422 0.0848 150 | 4 0.8119 0.0835 0.8145 0.0838 151 | 5 0.8757 0.0886 0.8765 0.0888 152 | 6 0.9771 0.0987 0.9792 0.0990 153 | 7 1.0420 0.1068 1.0433 0.1071 154 | 8 1.1106 0.1155 1.1219 0.1164 155 | 156 | TIME TOTALS 157 | Time Model std.err. Imputed std.err. 
158 | 1 526 44 526 44 159 | 2 383 36 380 36 160 | 3 443 30 443 30 161 | 4 427 28 428 28 162 | 5 461 29 461 29 163 | 6 514 29 515 29 164 | 7 548 34 548 34 165 | 8 585 38 590 38 166 | 167 | OVERALL SLOPE MODEL: Moderate increase (p<0.01) ** 168 | Additive std.err. Multiplicative std.err. 169 | 0.0363 0.0134 1.0370 0.0139 170 | 171 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 172 | Additive std.err. Multiplicative std.err. 173 | 0.0376 0.0134 1.0384 0.0140 174 | 175 | 176 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-2a.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark.dat 2 | TITLE skylark-2a 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT Absent 11 | 12 | model 2 13 | covariates 1 14 | weighting off 15 | overdisp on 16 | serialcor on 17 | changepoints 1 2 3 4 5 6 7 18 | 19 | 20 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-2b.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 6-9-2016 9:25:20 5 | 6 | Title : skylark-2b: 7 | 8 | The following 5 variables have been read from file: 9 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark.dat 10 | 11 | 1. Site number of values: 55 12 | 2. Time number of values: 8 13 | 3. Count missing = 999 14 | 4. HABITAT number of values: 2 15 | 5. COV2 number of values: 4 16 | 17 | Number of sites without positive counts (removed) 0 18 | 19 | Number of observed zero counts 0 20 | Number of observed positive counts 202 21 | Total number of observed counts 202 22 | Number of missing counts 238 23 | Total number of counts 440 24 | 25 | Total count 2536.0 26 | 27 | Sites containing more than 10% of the total count 28 | Site Number Observed Total % 29 | 3 431.0 17.0 30 | 37 266.0 10.5 31 | 40 624.0 24.6 32 | 33 | Time Point Averages 34 | TimePoint Observations Average Index 35 | 1 25 8.52 1.00 36 | 2 20 8.10 0.95 37 | 3 30 10.90 1.28 38 | 4 30 11.27 1.32 39 | 5 28 12.75 1.50 40 | 6 29 14.66 1.72 41 | 7 22 17.00 2.00 42 | 8 18 18.89 2.22 43 | 44 | RESULTS FOR MODEL: Effects for Each Time Point 45 | ---------------------------------------------- 46 | Effects of covariate(s) 47 | HABITAT 48 | 49 | 50 | ESTIMATION METHOD = Generalised Estimating Equations 51 | 52 | 53 | Total time used: 1.36 seconds 54 | 55 | Estimated Overdispersion = 1.162 56 | Estimated Serial Correlation = 0.227 57 | 58 | GOODNESS OF FIT 59 | Chi-square 154.50, df 133, p 0.0979 60 | Likelihood Ratio 159.64, df 133, p 0.0575 61 | AIC (up to a constant) -106.36 62 | 63 | WALD-TEST FOR SIGNIFICANCE OF COVARIATES 64 | Covariate Wald-Test df p 65 | HABITAT 21.55 7 0.0030 66 | 67 | PARAMETER ESTIMATES 68 | 69 | Additive std.err. Multiplicative std.err. 
70 | Constant 71 | -------- 72 | Time 1 0.0000 0.0000 1.0000 0.0000 73 | 2 -0.2165 0.1991 0.8053 0.1604 74 | 3 -0.3781 0.2303 0.6851 0.1578 75 | 4 -0.4982 0.2437 0.6076 0.1481 76 | 5 -0.7392 0.2565 0.4775 0.1225 77 | 6 -0.5213 0.2409 0.5938 0.1430 78 | 7 -0.6366 0.2634 0.5291 0.1393 79 | 8 -0.7215 0.2652 0.4860 0.1289 80 | 81 | Covariate 1 82 | ------------ 83 | Category 2 84 | Time 1 0.0000 0.0000 1.0000 0.0000 85 | 2 -0.1445 0.2324 0.8655 0.2011 86 | 3 0.2771 0.2553 1.3194 0.3369 87 | 4 0.3865 0.2681 1.4718 0.3946 88 | 5 0.7747 0.2790 2.1700 0.6053 89 | 6 0.6449 0.2643 1.9058 0.5036 90 | 7 0.8588 0.2856 2.3603 0.6742 91 | 8 1.0308 0.2883 2.8032 0.8081 92 | 93 | 94 | Time INDICES 95 | Time Model std.err. Imputed std.err. 96 | 1 1 1 97 | 2 0.7281 0.0751 0.7234 0.0751 98 | 3 0.8411 0.0846 0.8422 0.0848 99 | 4 0.8119 0.0835 0.8145 0.0838 100 | 5 0.8757 0.0886 0.8765 0.0888 101 | 6 0.9771 0.0987 0.9792 0.0990 102 | 7 1.0420 0.1068 1.0433 0.1071 103 | 8 1.1106 0.1155 1.1219 0.1164 104 | 105 | TIME TOTALS 106 | Time Model std.err. Imputed std.err. 107 | 1 526 44 526 44 108 | 2 383 36 380 36 109 | 3 443 30 443 30 110 | 4 427 28 428 28 111 | 5 461 29 461 29 112 | 6 514 29 515 29 113 | 7 548 34 548 34 114 | 8 585 38 590 38 115 | 116 | OVERALL SLOPE MODEL: Moderate increase (p<0.01) ** 117 | Additive std.err. Multiplicative std.err. 118 | 0.0363 0.0134 1.0370 0.0139 119 | 120 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 121 | Additive std.err. Multiplicative std.err. 122 | 0.0376 0.0134 1.0384 0.0140 123 | 124 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-2b.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark.dat 2 | TITLE skylark-2b: model 3 with covariate 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT Absent 11 | 12 | model 3 13 | covariates 1 14 | weighting off 15 | overdisp on 16 | serialcor on 17 | run 18 | 19 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-3a.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 12-9-2016 11:41:51 5 | 6 | Title : skylark-3: 7 | 8 | The following 5 variables have been read from file: 9 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark.dat 10 | 11 | 1. Site number of values: 55 12 | 2. Time number of values: 8 13 | 3. Count missing = 999 14 | 4. HABITAT number of values: 2 15 | 5. 
COV2 number of values: 4 16 | 17 | Number of sites without positive counts (removed) 0 18 | 19 | Number of observed zero counts 0 20 | Number of observed positive counts 202 21 | Total number of observed counts 202 22 | Number of missing counts 238 23 | Total number of counts 440 24 | 25 | Total count 2536.0 26 | 27 | Sites containing more than 10% of the total count 28 | Site Number Observed Total % 29 | 3 431.0 17.0 30 | 37 266.0 10.5 31 | 40 624.0 24.6 32 | 33 | Time Point Averages 34 | TimePoint Observations Average Index 35 | 1 25 8.52 1.00 36 | 2 20 8.10 0.95 37 | 3 30 10.90 1.28 38 | 4 30 11.27 1.32 39 | 5 28 12.75 1.50 40 | 6 29 14.66 1.72 41 | 7 22 17.00 2.00 42 | 8 18 18.89 2.22 43 | 44 | RESULTS FOR MODEL: Linear Trend 45 | -------------------------------- 46 | Effects of covariate(s) 47 | HABITAT 48 | Changes in Slope at Timepoints 49 | 1 2 3 4 5 6 7 50 | 51 | ESTIMATION METHOD = Generalised Estimating Equations 52 | 53 | 54 | Estimated Overdispersion = 1.162 55 | Estimated Serial Correlation = 0.227 56 | 57 | GOODNESS OF FIT 58 | Chi-square 154.50, df 133, p 0.0979 59 | Likelihood Ratio 159.64, df 133, p 0.0575 60 | AIC (up to a constant) -106.36 61 | 62 | WALD-TEST FOR SIGNIFICANCE OF COVARIATES 63 | Covariate Wald-Test df p 64 | HABITAT 21.55 7 0.0030 65 | 66 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 67 | Changepoint Wald-Test df p 68 | 1 10.27 2 0.0059 69 | 2 9.18 2 0.0102 70 | 3 3.08 2 0.2143 71 | 4 1.54 2 0.4637 72 | 5 1.64 2 0.4413 73 | 6 0.89 2 0.6419 74 | 7 0.01 2 0.9927 75 | 76 | STEPWISE SELECTION OF CHANGEPOINTS 77 | Deleted ChangePoint 7 Significance to delete 0.9927 78 | Deleted ChangePoint 6 Significance to delete 0.5368 79 | Deleted ChangePoint 5 Significance to delete 0.6867 80 | Deleted ChangePoint 4 Significance to delete 0.4639 81 | Deleted ChangePoint 3 Significance to delete 0.3822 82 | 83 | Remaining Changepoints at time: 84 | 1 2 85 | 86 | Total time used: 10.73 seconds 87 | 88 | Estimated Overdispersion = 1.126 89 | Estimated Serial Correlation = 0.228 90 | 91 | GOODNESS OF FIT 92 | Chi-square 161.09, df 143, p 0.1431 93 | Likelihood Ratio 160.76, df 143, p 0.1471 94 | AIC (up to a constant) -125.24 95 | 96 | WALD-TEST FOR SIGNIFICANCE OF COVARIATES 97 | Covariate Wald-Test df p 98 | HABITAT 18.51 2 0.0001 99 | 100 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 101 | Changepoint Wald-Test df p 102 | 1 10.99 2 0.0041 103 | 2 14.65 2 0.0007 104 | 105 | PARAMETER ESTIMATES 106 | 107 | Effects of Covariates on Slope for Time Intervals 108 | from upto 109 | 1 2 110 | Additive std.err. Multiplicative std.err. 111 | Constant -0.2691 0.1823 0.7641 0.1393 112 | 113 | Covariate 1 114 | ------------ 115 | Category 2 -0.0204 0.2068 0.9798 0.2026 116 | 117 | from upto 118 | 2 8 119 | Additive std.err. Multiplicative std.err. 120 | Constant -0.0776 0.0411 0.9254 0.0380 121 | 122 | Covariate 1 123 | ------------ 124 | Category 2 0.1749 0.0437 1.1911 0.0521 125 | 126 | 127 | Time INDICES 128 | Time Model std.err. Imputed std.err. 129 | 1 1 1 130 | 2 0.7531 0.0655 0.7373 0.0666 131 | 3 0.7916 0.0648 0.8304 0.0729 132 | 4 0.8369 0.0670 0.8179 0.0731 133 | 5 0.8895 0.0720 0.8859 0.0798 134 | 6 0.9500 0.0800 0.9628 0.0881 135 | 7 1.0189 0.0910 1.0269 0.0964 136 | 8 1.0969 0.1053 1.1098 0.1088 137 | 138 | TIME TOTALS 139 | Time Model std.err. Imputed std.err. 
140 | 1 532 43 530 43 141 | 2 401 24 391 26 142 | 3 421 20 440 24 143 | 4 446 16 433 23 144 | 5 474 15 470 23 145 | 6 506 17 510 24 146 | 7 542 22 544 27 147 | 8 584 29 588 32 148 | 149 | OVERALL SLOPE MODEL: Moderate increase (p<0.05) * 150 | Additive std.err. Multiplicative std.err. 151 | 0.0329 0.0127 1.0335 0.0131 152 | 153 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 154 | Additive std.err. Multiplicative std.err. 155 | 0.0346 0.0127 1.0352 0.0132 156 | 157 | 158 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-3a.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark.dat 2 | TITLE skylark-3a 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT Absent 11 | 12 | comment skylark-3a: Model 2 with covariates and stepwise refinement 13 | model 2 14 | covariates 1 15 | weighting off 16 | overdisp on 17 | serialcor on 18 | changepoints 1 2 3 4 5 6 7 19 | stepwise on 20 | run 21 | 22 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4a.out: -------------------------------------------------------------------------------- 1 | TRIM 3.70 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 3-10-2016 11:52:50 5 | 6 | Title : skylark-4a 7 | 8 | Comment: Simplest model 2 with weights 9 | 10 | The following 6 variables have been read from file: 11 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark_wt.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. weight 17 | 5. HABITAT number of values: 2 18 | 6. COV2 number of values: 4 19 | 20 | Number of observed zero counts 0 21 | Number of observed positive counts 202 22 | Total number of observed counts 202 23 | Number of missing counts 238 24 | Total number of counts 440 25 | 26 | Total count 2536.0 27 | 28 | Sites containing more than 10% of the total count 29 | Site Number Observed Total % 30 | 3 431.0 17.0 31 | 37 266.0 10.5 32 | 40 624.0 24.6 33 | 34 | Time Point Averages 35 | Weighted Weighted 36 | TimePoint Observations Average Index Average Index 37 | 1 25 8.52 1.00 30.12 1.00 38 | 2 20 8.10 0.95 27.00 0.90 39 | 3 30 10.90 1.28 24.10 0.80 40 | 4 30 11.27 1.32 23.87 0.79 41 | 5 28 12.75 1.50 23.36 0.78 42 | 6 29 14.66 1.72 36.38 1.21 43 | 7 22 17.00 2.00 31.73 1.05 44 | 8 18 18.89 2.22 35.39 1.17 45 | 46 | RESULTS FOR MODEL: Linear Trend 47 | --------------------------------- 48 | 49 | 50 | WEIGHTING = On 51 | 52 | ESTIMATION METHOD = Maximum Likelihood 53 | 54 | 55 | Total time used: 0.63 seconds 56 | 57 | GOODNESS OF FIT 58 | Chi-square 210.53, df 146, p 0.0000 59 | Likelihood Ratio 204.63, df 146, p 0.0000 60 | AIC (up to a constant) -87.37 61 | 62 | WALD-TEST FOR SIGNIFICANCE OF SLOPE PARAMETER 63 | Wald-Test 27.60, df 1, p 0.0000 64 | 65 | PARAMETER ESTIMATES 66 | 67 | Additive std.err. Multiplicative std.err. 68 | Slope 0.0548 0.0104 1.0564 0.0110 69 | 70 | Time INDICES 71 | Time Model std.err. Imputed std.err. 72 | 1 1 1 73 | 2 1.0564 0.0110 0.9195 0.0588 74 | 3 1.1159 0.0233 0.9549 0.0619 75 | 4 1.1788 0.0369 0.9662 0.0653 76 | 5 1.2452 0.0520 0.9518 0.0708 77 | 6 1.3154 0.0686 1.0744 0.0824 78 | 7 1.3895 0.0870 1.1139 0.0887 79 | 8 1.4678 0.1072 1.1316 0.1006 80 | 81 | TIME TOTALS 82 | Time Model std.err. Imputed std.err. 
83 | 1 1116 73 1344 88 84 | 2 1179 70 1236 87 85 | 3 1246 69 1284 89 86 | 4 1316 70 1299 92 87 | 5 1390 74 1280 96 88 | 6 1469 81 1444 104 89 | 7 1551 91 1498 108 90 | 8 1639 105 1521 121 91 | 92 | OVERALL SLOPE MODEL (with intercept) 93 | Additive std.err. Multiplicative std.err. 94 | 0.0548 0.0104 1.0564 0.0110 95 | 96 | OVERALL SLOPE IMPUTED (with intercept, recommended) 97 | Additive std.err. Multiplicative std.err. 98 | 0.0257 0.0113 1.0261 0.0116 99 | 100 | OVERALL SLOPE MODEL (through base time point) 101 | Additive std.err. Multiplicative std.err. 102 | 0.0548 0.0104 1.0564 0.0110 103 | 104 | OVERALL SLOPE IMPUTED (through base time point) 105 | Additive std.err. Multiplicative std.err. 106 | 0.0100 0.0126 1.0100 0.0127 107 | 108 | 109 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4a.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark_wt.dat 2 | TITLE skylark-4a 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT present 11 | 12 | comment Simplest model 2 with weights 13 | model 2 14 | weighting on 15 | overdisp off 16 | serialcor off 17 | run 18 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4b.out: -------------------------------------------------------------------------------- 1 | TRIM 3.70 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 3-10-2016 11:53:53 5 | 6 | Title : skylark-4b 7 | 8 | Comment: Now with preset changepoints 9 | 10 | The following 6 variables have been read from file: 11 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark_wt.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. weight 17 | 5. HABITAT number of values: 2 18 | 6. COV2 number of values: 4 19 | 20 | Number of observed zero counts 0 21 | Number of observed positive counts 202 22 | Total number of observed counts 202 23 | Number of missing counts 238 24 | Total number of counts 440 25 | 26 | Total count 2536.0 27 | 28 | Sites containing more than 10% of the total count 29 | Site Number Observed Total % 30 | 3 431.0 17.0 31 | 37 266.0 10.5 32 | 40 624.0 24.6 33 | 34 | Time Point Averages 35 | Weighted Weighted 36 | TimePoint Observations Average Index Average Index 37 | 1 25 8.52 1.00 30.12 1.00 38 | 2 20 8.10 0.95 27.00 0.90 39 | 3 30 10.90 1.28 24.10 0.80 40 | 4 30 11.27 1.32 23.87 0.79 41 | 5 28 12.75 1.50 23.36 0.78 42 | 6 29 14.66 1.72 36.38 1.21 43 | 7 22 17.00 2.00 31.73 1.05 44 | 8 18 18.89 2.22 35.39 1.17 45 | 46 | RESULTS FOR MODEL: Linear Trend 47 | -------------------------------- 48 | Changes in Slope at Timepoints 49 | 1 2 50 | 51 | WEIGHTING = On 52 | 53 | ESTIMATION METHOD = Maximum Likelihood 54 | 55 | 56 | Total time used: 0.72 seconds 57 | 58 | GOODNESS OF FIT 59 | Chi-square 190.45, df 145, p 0.0000 60 | Likelihood Ratio 186.75, df 145, p 0.0000 61 | AIC (up to a constant) -103.25 62 | 63 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 64 | Changepoint Wald-Test df p 65 | 1 13.44 1 0.0002 66 | 2 18.63 1 0.0000 67 | 68 | PARAMETER ESTIMATES 69 | 70 | Slope for Time Intervals 71 | from upto Additive std.err. Multiplicative std.err. 72 | 1 2 -0.3108 0.0848 0.7329 0.0621 73 | 2 8 0.0798 0.0120 1.0831 0.0130 74 | 75 | Time INDICES 76 | Time Model std.err. Imputed std.err. 
77 | 1 1 1 78 | 2 0.7329 0.0621 0.7530 0.0721 79 | 3 0.7938 0.0639 0.7947 0.0739 80 | 4 0.8597 0.0669 0.8109 0.0748 81 | 5 0.9311 0.0716 0.8059 0.0762 82 | 6 1.0084 0.0786 0.9241 0.0860 83 | 7 1.0922 0.0881 0.9706 0.0898 84 | 8 1.1829 0.1007 1.0027 0.0984 85 | 86 | TIME TOTALS 87 | Time Model std.err. Imputed std.err. 88 | 1 1424 119 1533 127 89 | 2 1044 68 1154 83 90 | 3 1130 67 1218 85 91 | 4 1224 67 1243 88 92 | 5 1326 70 1235 93 93 | 6 1436 77 1417 101 94 | 7 1556 88 1488 106 95 | 8 1685 105 1537 121 96 | 97 | OVERALL SLOPE MODEL (with intercept) 98 | Additive std.err. Multiplicative std.err. 99 | 0.0472 0.0103 1.0484 0.0108 100 | 101 | OVERALL SLOPE IMPUTED (with intercept, recommended) 102 | Additive std.err. Multiplicative std.err. 103 | 0.0206 0.0114 1.0209 0.0116 104 | 105 | OVERALL SLOPE MODEL (through base time point) 106 | Additive std.err. Multiplicative std.err. 107 | 0.0017 0.0156 1.0017 0.0156 108 | 109 | OVERALL SLOPE IMPUTED (through base time point) 110 | Additive std.err. Multiplicative std.err. 111 | -0.0199 0.0163 0.9803 0.0160 112 | 113 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4b.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark_wt.dat 2 | TITLE skylark-4b 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT present 11 | 12 | comment Now with preset changepoints 13 | model 2 14 | changepoints 1 2 15 | weighting on 16 | overdisp off 17 | serialcor off 18 | run 19 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4c.out: -------------------------------------------------------------------------------- 1 | TRIM 3.70 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 3-10-2016 11:54:14 5 | 6 | Title : skylark-4c 7 | 8 | Comment: Now with overdispersion 9 | 10 | The following 6 variables have been read from file: 11 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark_wt.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. weight 17 | 5. HABITAT number of values: 2 18 | 6. 
COV2 number of values: 4 19 | 20 | Number of observed zero counts 0 21 | Number of observed positive counts 202 22 | Total number of observed counts 202 23 | Number of missing counts 238 24 | Total number of counts 440 25 | 26 | Total count 2536.0 27 | 28 | Sites containing more than 10% of the total count 29 | Site Number Observed Total % 30 | 3 431.0 17.0 31 | 37 266.0 10.5 32 | 40 624.0 24.6 33 | 34 | Time Point Averages 35 | Weighted Weighted 36 | TimePoint Observations Average Index Average Index 37 | 1 25 8.52 1.00 30.12 1.00 38 | 2 20 8.10 0.95 27.00 0.90 39 | 3 30 10.90 1.28 24.10 0.80 40 | 4 30 11.27 1.32 23.87 0.79 41 | 5 28 12.75 1.50 23.36 0.78 42 | 6 29 14.66 1.72 36.38 1.21 43 | 7 22 17.00 2.00 31.73 1.05 44 | 8 18 18.89 2.22 35.39 1.17 45 | 46 | RESULTS FOR MODEL: Linear Trend 47 | -------------------------------- 48 | Changes in Slope at Timepoints 49 | 1 2 50 | 51 | WEIGHTING = On 52 | 53 | ESTIMATION METHOD = Generalised Estimating Equations 54 | 55 | 56 | Total time used: 0.72 seconds 57 | 58 | Estimated Overdispersion = 1.313 59 | 60 | GOODNESS OF FIT 61 | Chi-square 190.45, df 145, p 0.0000 62 | Likelihood Ratio 186.75, df 145, p 0.0000 63 | AIC (up to a constant) -103.25 64 | 65 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 66 | Changepoint Wald-Test df p 67 | 1 10.23 1 0.0014 68 | 2 14.18 1 0.0002 69 | 70 | PARAMETER ESTIMATES 71 | 72 | Slope for Time Intervals 73 | from upto Additive std.err. Multiplicative std.err. 74 | 1 2 -0.3108 0.0972 0.7329 0.0712 75 | 2 8 0.0798 0.0138 1.0831 0.0149 76 | 77 | Time INDICES 78 | Time Model std.err. Imputed std.err. 79 | 1 1 1 80 | 2 0.7329 0.0712 0.7530 0.0827 81 | 3 0.7938 0.0732 0.7947 0.0847 82 | 4 0.8597 0.0766 0.8109 0.0857 83 | 5 0.9311 0.0821 0.8059 0.0874 84 | 6 1.0084 0.0900 0.9241 0.0985 85 | 7 1.0922 0.1010 0.9706 0.1029 86 | 8 1.1829 0.1154 1.0027 0.1127 87 | 88 | TIME TOTALS 89 | Time Model std.err. Imputed std.err. 90 | 1 1424 136 1533 146 91 | 2 1044 78 1154 95 92 | 3 1130 77 1218 98 93 | 4 1224 77 1243 101 94 | 5 1326 80 1235 106 95 | 6 1436 88 1417 116 96 | 7 1556 101 1488 122 97 | 8 1685 120 1537 139 98 | 99 | OVERALL SLOPE MODEL (with intercept) 100 | Additive std.err. Multiplicative std.err. 101 | 0.0472 0.0118 1.0484 0.0123 102 | 103 | OVERALL SLOPE IMPUTED (with intercept, recommended) 104 | Additive std.err. Multiplicative std.err. 105 | 0.0206 0.0131 1.0209 0.0133 106 | 107 | OVERALL SLOPE MODEL (through base time point) 108 | Additive std.err. Multiplicative std.err. 109 | 0.0017 0.0179 1.0017 0.0179 110 | 111 | OVERALL SLOPE IMPUTED (through base time point) 112 | Additive std.err. Multiplicative std.err. 
113 | -0.0199 0.0187 0.9803 0.0183 114 | 115 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4c.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark_wt.dat 2 | TITLE skylark-4c 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT present 11 | 12 | comment Now with overdispersion 13 | model 2 14 | changepoints 1 2 15 | weighting on 16 | overdisp on 17 | serialcor off 18 | run 19 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4d.out: -------------------------------------------------------------------------------- 1 | TRIM 3.70 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 3-10-2016 11:54:35 5 | 6 | Title : skylark-4d 7 | 8 | Comment: Now with serial correlation 9 | 10 | The following 6 variables have been read from file: 11 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark_wt.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. weight 17 | 5. HABITAT number of values: 2 18 | 6. COV2 number of values: 4 19 | 20 | Number of observed zero counts 0 21 | Number of observed positive counts 202 22 | Total number of observed counts 202 23 | Number of missing counts 238 24 | Total number of counts 440 25 | 26 | Total count 2536.0 27 | 28 | Sites containing more than 10% of the total count 29 | Site Number Observed Total % 30 | 3 431.0 17.0 31 | 37 266.0 10.5 32 | 40 624.0 24.6 33 | 34 | Time Point Averages 35 | Weighted Weighted 36 | TimePoint Observations Average Index Average Index 37 | 1 25 8.52 1.00 30.12 1.00 38 | 2 20 8.10 0.95 27.00 0.90 39 | 3 30 10.90 1.28 24.10 0.80 40 | 4 30 11.27 1.32 23.87 0.79 41 | 5 28 12.75 1.50 23.36 0.78 42 | 6 29 14.66 1.72 36.38 1.21 43 | 7 22 17.00 2.00 31.73 1.05 44 | 8 18 18.89 2.22 35.39 1.17 45 | 46 | RESULTS FOR MODEL: Linear Trend 47 | -------------------------------- 48 | Changes in Slope at Timepoints 49 | 1 2 50 | 51 | WEIGHTING = On 52 | 53 | ESTIMATION METHOD = Generalised Estimating Equations 54 | 55 | 56 | Total time used: 1.21 seconds 57 | 58 | Estimated Serial Correlation = 0.305 59 | 60 | GOODNESS OF FIT 61 | Chi-square 193.36, df 145, p 0.0000 62 | Likelihood Ratio 191.81, df 145, p 0.0000 63 | AIC (up to a constant) -98.19 64 | 65 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 66 | Changepoint Wald-Test df p 67 | 1 14.77 1 0.0001 68 | 2 19.87 1 0.0000 69 | 70 | PARAMETER ESTIMATES 71 | 72 | Slope for Time Intervals 73 | from upto Additive std.err. Multiplicative std.err. 74 | 1 2 -0.3029 0.0788 0.7387 0.0582 75 | 2 8 0.0757 0.0136 1.0787 0.0146 76 | 77 | Time INDICES 78 | Time Model std.err. Imputed std.err. 79 | 1 1 1 80 | 2 0.7387 0.0582 0.7567 0.0643 81 | 3 0.7968 0.0595 0.7971 0.0690 82 | 4 0.8595 0.0626 0.8100 0.0707 83 | 5 0.9272 0.0682 0.8038 0.0731 84 | 6 1.0001 0.0766 0.9194 0.0834 85 | 7 1.0788 0.0883 0.9631 0.0889 86 | 8 1.1637 0.1035 0.9939 0.0995 87 | 88 | TIME TOTALS 89 | Time Model std.err. Imputed std.err. 90 | 1 1424 119 1536 126 91 | 2 1052 75 1162 87 92 | 3 1134 73 1224 88 93 | 4 1224 72 1244 89 94 | 5 1320 75 1234 94 95 | 6 1424 82 1412 102 96 | 7 1536 95 1479 109 97 | 8 1657 113 1526 125 98 | 99 | OVERALL SLOPE MODEL (with intercept) 100 | Additive std.err. Multiplicative std.err. 
101 | 0.0442 0.0116 1.0452 0.0121 102 | 103 | OVERALL SLOPE IMPUTED (with intercept, recommended) 104 | Additive std.err. Multiplicative std.err. 105 | 0.0188 0.0129 1.0190 0.0132 106 | 107 | OVERALL SLOPE MODEL (through base time point) 108 | Additive std.err. Multiplicative std.err. 109 | 0.0000 0.0153 1.0000 0.0153 110 | 111 | OVERALL SLOPE IMPUTED (through base time point) 112 | Additive std.err. Multiplicative std.err. 113 | -0.0209 0.0161 0.9793 0.0158 114 | 115 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4d.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark_wt.dat 2 | TITLE skylark-4d 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT present 11 | 12 | comment Now with serial correlation 13 | model 2 14 | changepoints 1 2 15 | weighting on 16 | overdisp off 17 | serialcor on 18 | run 19 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4e.out: -------------------------------------------------------------------------------- 1 | TRIM 3.70 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 3-10-2016 11:54:59 5 | 6 | Title : skylark-4e 7 | 8 | Comment: Now with both overdispersion and serial correlation 9 | 10 | The following 6 variables have been read from file: 11 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark_wt.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. weight 17 | 5. HABITAT number of values: 2 18 | 6. COV2 number of values: 4 19 | 20 | Number of observed zero counts 0 21 | Number of observed positive counts 202 22 | Total number of observed counts 202 23 | Number of missing counts 238 24 | Total number of counts 440 25 | 26 | Total count 2536.0 27 | 28 | Sites containing more than 10% of the total count 29 | Site Number Observed Total % 30 | 3 431.0 17.0 31 | 37 266.0 10.5 32 | 40 624.0 24.6 33 | 34 | Time Point Averages 35 | Weighted Weighted 36 | TimePoint Observations Average Index Average Index 37 | 1 25 8.52 1.00 30.12 1.00 38 | 2 20 8.10 0.95 27.00 0.90 39 | 3 30 10.90 1.28 24.10 0.80 40 | 4 30 11.27 1.32 23.87 0.79 41 | 5 28 12.75 1.50 23.36 0.78 42 | 6 29 14.66 1.72 36.38 1.21 43 | 7 22 17.00 2.00 31.73 1.05 44 | 8 18 18.89 2.22 35.39 1.17 45 | 46 | RESULTS FOR MODEL: Linear Trend 47 | -------------------------------- 48 | Changes in Slope at Timepoints 49 | 1 2 50 | 51 | WEIGHTING = On 52 | 53 | ESTIMATION METHOD = Generalised Estimating Equations 54 | 55 | 56 | Total time used: 1.29 seconds 57 | 58 | Estimated Overdispersion = 1.334 59 | Estimated Serial Correlation = 0.305 60 | 61 | GOODNESS OF FIT 62 | Chi-square 193.36, df 145, p 0.0000 63 | Likelihood Ratio 191.81, df 145, p 0.0000 64 | AIC (up to a constant) -98.19 65 | 66 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 67 | Changepoint Wald-Test df p 68 | 1 11.08 1 0.0009 69 | 2 14.90 1 0.0001 70 | 71 | PARAMETER ESTIMATES 72 | 73 | Slope for Time Intervals 74 | from upto Additive std.err. Multiplicative std.err. 75 | 1 2 -0.3029 0.0910 0.7387 0.0672 76 | 2 8 0.0757 0.0157 1.0787 0.0169 77 | 78 | Time INDICES 79 | Time Model std.err. Imputed std.err. 
80 | 1 1 1 81 | 2 0.7387 0.0672 0.7567 0.0743 82 | 3 0.7968 0.0687 0.7971 0.0797 83 | 4 0.8595 0.0723 0.8100 0.0816 84 | 5 0.9272 0.0787 0.8038 0.0844 85 | 6 1.0001 0.0885 0.9194 0.0963 86 | 7 1.0788 0.1020 0.9631 0.1027 87 | 8 1.1637 0.1195 0.9939 0.1149 88 | 89 | TIME TOTALS 90 | Time Model std.err. Imputed std.err. 91 | 1 1424 137 1536 145 92 | 2 1052 87 1162 100 93 | 3 1134 84 1224 101 94 | 4 1224 83 1244 103 95 | 5 1320 86 1234 108 96 | 6 1424 95 1412 118 97 | 7 1536 109 1479 126 98 | 8 1657 130 1526 144 99 | 100 | OVERALL SLOPE MODEL (with intercept) 101 | Additive std.err. Multiplicative std.err. 102 | 0.0442 0.0134 1.0452 0.0140 103 | 104 | OVERALL SLOPE IMPUTED (with intercept, recommended) 105 | Additive std.err. Multiplicative std.err. 106 | 0.0188 0.0150 1.0190 0.0152 107 | 108 | OVERALL SLOPE MODEL (through base time point) 109 | Additive std.err. Multiplicative std.err. 110 | 0.0000 0.0177 1.0000 0.0177 111 | 112 | OVERALL SLOPE IMPUTED (through base time point) 113 | Additive std.err. Multiplicative std.err. 114 | -0.0209 0.0186 0.9793 0.0182 115 | 116 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4e.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark_wt.dat 2 | TITLE skylark-4e 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT present 11 | 12 | comment Now with both overdispersion and serial correlation 13 | model 2 14 | changepoints 1 2 15 | weighting on 16 | overdisp on 17 | serialcor on 18 | run 19 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4f.out: -------------------------------------------------------------------------------- 1 | TRIM 3.70 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 3-10-2016 11:55:22 5 | 6 | Title : skylark-4f 7 | 8 | Comment: Now with automatic selection of changepoints 9 | 10 | The following 6 variables have been read from file: 11 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark_wt.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. weight 17 | 5. HABITAT number of values: 2 18 | 6. 
COV2 number of values: 4 19 | 20 | Number of observed zero counts 0 21 | Number of observed positive counts 202 22 | Total number of observed counts 202 23 | Number of missing counts 238 24 | Total number of counts 440 25 | 26 | Total count 2536.0 27 | 28 | Sites containing more than 10% of the total count 29 | Site Number Observed Total % 30 | 3 431.0 17.0 31 | 37 266.0 10.5 32 | 40 624.0 24.6 33 | 34 | Time Point Averages 35 | Weighted Weighted 36 | TimePoint Observations Average Index Average Index 37 | 1 25 8.52 1.00 30.12 1.00 38 | 2 20 8.10 0.95 27.00 0.90 39 | 3 30 10.90 1.28 24.10 0.80 40 | 4 30 11.27 1.32 23.87 0.79 41 | 5 28 12.75 1.50 23.36 0.78 42 | 6 29 14.66 1.72 36.38 1.21 43 | 7 22 17.00 2.00 31.73 1.05 44 | 8 18 18.89 2.22 35.39 1.17 45 | 46 | RESULTS FOR MODEL: Linear Trend 47 | -------------------------------- 48 | Changes in Slope at Timepoints 49 | 1 2 3 4 5 6 7 50 | 51 | WEIGHTING = On 52 | 53 | ESTIMATION METHOD = Generalised Estimating Equations 54 | 55 | 56 | Estimated Overdispersion = 1.367 57 | Estimated Serial Correlation = 0.302 58 | 59 | GOODNESS OF FIT 60 | Chi-square 191.40, df 140, p 0.0000 61 | Likelihood Ratio 194.80, df 140, p 0.0000 62 | AIC (up to a constant) -85.20 63 | 64 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 65 | Changepoint Wald-Test df p 66 | 1 9.22 1 0.0024 67 | 2 6.85 1 0.0089 68 | 3 1.44 1 0.2298 69 | 4 1.03 1 0.3107 70 | 5 0.00 1 0.9735 71 | 6 0.04 1 0.8358 72 | 7 0.03 1 0.8519 73 | 74 | STEPWISE SELECTION OF CHANGEPOINTS 75 | Deleted ChangePoint 5 Significance to delete 0.9735 76 | Deleted ChangePoint 7 Significance to delete 0.8511 77 | Deleted ChangePoint 6 Significance to delete 0.5817 78 | Deleted ChangePoint 4 Significance to delete 0.2462 79 | Deleted ChangePoint 3 Significance to delete 0.6041 80 | 81 | Remaining Changepoints at time: 82 | 1 2 83 | 84 | Total time used: 7.43 seconds 85 | 86 | Estimated Overdispersion = 1.334 87 | Estimated Serial Correlation = 0.305 88 | 89 | GOODNESS OF FIT 90 | Chi-square 193.36, df 145, p 0.0000 91 | Likelihood Ratio 191.81, df 145, p 0.0000 92 | AIC (up to a constant) -98.19 93 | 94 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 95 | Changepoint Wald-Test df p 96 | 1 11.08 1 0.0009 97 | 2 14.90 1 0.0001 98 | 99 | PARAMETER ESTIMATES 100 | 101 | Slope for Time Intervals 102 | from upto Additive std.err. Multiplicative std.err. 103 | 1 2 -0.3029 0.0910 0.7387 0.0672 104 | 2 8 0.0757 0.0157 1.0787 0.0169 105 | 106 | Time INDICES 107 | Time Model std.err. Imputed std.err. 108 | 1 1 1 109 | 2 0.7387 0.0672 0.7567 0.0743 110 | 3 0.7968 0.0687 0.7971 0.0797 111 | 4 0.8595 0.0723 0.8100 0.0816 112 | 5 0.9272 0.0787 0.8038 0.0844 113 | 6 1.0001 0.0885 0.9194 0.0963 114 | 7 1.0788 0.1020 0.9631 0.1027 115 | 8 1.1637 0.1195 0.9939 0.1149 116 | 117 | TIME TOTALS 118 | Time Model std.err. Imputed std.err. 119 | 1 1424 137 1536 145 120 | 2 1052 87 1162 100 121 | 3 1134 84 1224 101 122 | 4 1224 83 1244 103 123 | 5 1320 86 1234 108 124 | 6 1424 95 1412 118 125 | 7 1536 109 1479 126 126 | 8 1657 130 1526 144 127 | 128 | OVERALL SLOPE MODEL (with intercept) 129 | Additive std.err. Multiplicative std.err. 130 | 0.0442 0.0134 1.0452 0.0140 131 | 132 | OVERALL SLOPE IMPUTED (with intercept, recommended) 133 | Additive std.err. Multiplicative std.err. 134 | 0.0188 0.0150 1.0190 0.0152 135 | 136 | OVERALL SLOPE MODEL (through base time point) 137 | Additive std.err. Multiplicative std.err. 138 | 0.0000 0.0177 1.0000 0.0177 139 | 140 | OVERALL SLOPE IMPUTED (through base time point) 141 | Additive std.err. 
Multiplicative std.err. 142 | -0.0209 0.0186 0.9793 0.0182 143 | 144 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-4f.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark_wt.dat 2 | TITLE skylark-4f 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT present 11 | 12 | comment Now with automatic selection of changepoints 13 | model 2 14 | changepoints 1 2 3 4 5 6 7 15 | weighting on 16 | overdisp on 17 | serialcor on 18 | stepwise on 19 | run 20 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-minimal.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark.dat 2 | TITLE skylark-1a 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT Absent 11 | 12 | model 3 13 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-x1.ocv: -------------------------------------------------------------------------------- 1 | 1861.8241 276.2929 189.4899 147.6759 110.1104 69.6187 24.1249 -39.9638 2 | 276.2929 643.2048 393.8749 285.0148 191.5011 92.3230 -17.3878 -172.8159 3 | 189.4899 393.8749 597.4887 289.1726 171.7971 108.2057 51.7909 -21.1465 4 | 147.6759 285.0148 289.1726 594.1666 277.8361 168.5299 127.6078 98.2448 5 | 110.1104 191.5011 171.7971 277.8361 627.3867 308.5601 232.6469 240.1975 6 | 69.6187 92.3230 108.2057 168.5299 308.5601 716.6970 420.9176 420.4143 7 | 24.1249 -17.3878 51.7909 127.6078 232.6469 420.9176 886.7587 715.0285 8 | -39.9638 -172.8159 -21.1465 98.2448 240.1975 420.4143 715.0285 1273.7900 9 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-x1.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 4-10-2016 11:57:42 5 | 6 | Title : skylark-x1 7 | 8 | Comment: Variance-Covariance output test 9 | 10 | The following 5 variables have been read from file: 11 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. HABITAT number of values: 2 17 | 5. 
COV2 number of values: 4 18 | 19 | Number of sites without positive counts (removed) 0 20 | 21 | Number of observed zero counts 0 22 | Number of observed positive counts 202 23 | Total number of observed counts 202 24 | Number of missing counts 238 25 | Total number of counts 440 26 | 27 | Total count 2536.0 28 | 29 | Sites containing more than 10% of the total count 30 | Site Number Observed Total % 31 | 3 431.0 17.0 32 | 37 266.0 10.5 33 | 40 624.0 24.6 34 | 35 | Time Point Averages 36 | TimePoint Observations Average Index 37 | 1 25 8.52 1.00 38 | 2 20 8.10 0.95 39 | 3 30 10.90 1.28 40 | 4 30 11.27 1.32 41 | 5 28 12.75 1.50 42 | 6 29 14.66 1.72 43 | 7 22 17.00 2.00 44 | 8 18 18.89 2.22 45 | 46 | RESULTS FOR MODEL: Linear Trend 47 | -------------------------------- 48 | Changes in Slope at Timepoints 49 | 1 2 50 | 51 | ESTIMATION METHOD = Generalised Estimating Equations 52 | 53 | 54 | Total time used: 1.02 seconds 55 | 56 | Estimated Overdispersion = 1.334 57 | Estimated Serial Correlation = 0.305 58 | 59 | GOODNESS OF FIT 60 | Chi-square 193.36, df 145, p 0.0045 61 | Likelihood Ratio 191.81, df 145, p 0.0056 62 | AIC (up to a constant) -98.19 63 | 64 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 65 | Changepoint Wald-Test df p 66 | 1 11.08 1 0.0009 67 | 2 14.90 1 0.0001 68 | 69 | PARAMETER ESTIMATES 70 | 71 | Slope for Time Intervals 72 | from upto Additive std.err. Multiplicative std.err. 73 | 1 2 -0.3029 0.0910 0.7387 0.0672 74 | 2 8 0.0757 0.0157 1.0787 0.0169 75 | 76 | Time INDICES 77 | Time Model std.err. Imputed std.err. 78 | 1 1 1 79 | 2 0.7387 0.0672 0.7276 0.0686 80 | 3 0.7968 0.0687 0.8295 0.0774 81 | 4 0.8595 0.0723 0.8353 0.0794 82 | 5 0.9272 0.0787 0.9193 0.0875 83 | 6 1.0001 0.0885 1.0150 0.0977 84 | 7 1.0788 0.1020 1.0945 0.1084 85 | 8 1.1637 0.1195 1.1776 0.1230 86 | 87 | TIME TOTALS 88 | Time Model std.err. Imputed std.err. 89 | 1 513 43 511 43 90 | 2 379 23 372 25 91 | 3 409 20 424 24 92 | 4 441 18 427 24 93 | 5 476 17 470 25 94 | 6 513 19 519 27 95 | 7 553 25 559 30 96 | 8 597 34 602 36 97 | 98 | OVERALL SLOPE MODEL: Moderate increase (p<0.01) ** 99 | Additive std.err. Multiplicative std.err. 100 | 0.0442 0.0134 1.0452 0.0140 101 | 102 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 103 | Additive std.err. Multiplicative std.err. 104 | 0.0463 0.0135 1.0474 0.0141 105 | 106 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-x1.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark.dat 2 | TITLE skylark-x1 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT absent 11 | 12 | comment Variance-Covariance output test 13 | model 2 14 | changepoints 1 2 15 | overdisp on 16 | serialcor on 17 | run 18 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-x2.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 4-10-2016 17:40:30 5 | 6 | Title : skylark-x2 7 | 8 | Comment: Actual year test 9 | 10 | The following 5 variables have been read from file: 11 | Z:\Users\patrick\Work\CBS\TRIM\wine\skylark_yr.dat 12 | 13 | 1. Site number of values: 55 14 | 2. Time number of values: 8 15 | 3. Count missing = 999 16 | 4. HABITAT number of values: 2 17 | 5. 
COV2 number of values: 4 18 | 19 | Number of sites without positive counts (removed) 0 20 | 21 | Number of observed zero counts 0 22 | Number of observed positive counts 202 23 | Total number of observed counts 202 24 | Number of missing counts 238 25 | Total number of counts 440 26 | 27 | Total count 2536.0 28 | 29 | Sites containing more than 10% of the total count 30 | Site Number Observed Total % 31 | 3 431.0 17.0 32 | 37 266.0 10.5 33 | 40 624.0 24.6 34 | 35 | Time Point Averages 36 | TimePoint Observations Average Index 37 | 1984 25 8.52 1.00 38 | 1985 20 8.10 0.95 39 | 1986 30 10.90 1.28 40 | 1987 30 11.27 1.32 41 | 1988 28 12.75 1.50 42 | 1989 29 14.66 1.72 43 | 1990 22 17.00 2.00 44 | 1991 18 18.89 2.22 45 | 46 | RESULTS FOR MODEL: Linear Trend 47 | -------------------------------- 48 | Changes in Slope at Timepoints 49 | 1984 1985 50 | 51 | ESTIMATION METHOD = Generalised Estimating Equations 52 | 53 | 54 | Total time used: 1.07 seconds 55 | 56 | Estimated Overdispersion = 1.334 57 | Estimated Serial Correlation = 0.305 58 | 59 | GOODNESS OF FIT 60 | Chi-square 193.36, df 145, p 0.0045 61 | Likelihood Ratio 191.81, df 145, p 0.0056 62 | AIC (up to a constant) -98.19 63 | 64 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 65 | Changepoint Wald-Test df p 66 | 1984 11.08 1 0.0009 67 | 1985 14.90 1 0.0001 68 | 69 | PARAMETER ESTIMATES 70 | 71 | Slope for Time Intervals 72 | from upto Additive std.err. Multiplicative std.err. 73 | 1984 1985 -0.3029 0.0910 0.7387 0.0672 74 | 1985 1991 0.0757 0.0157 1.0787 0.0169 75 | 76 | Time INDICES 77 | Time Model std.err. Imputed std.err. 78 | 1984 1 1 79 | 1985 0.7387 0.0672 0.7276 0.0686 80 | 1986 0.7968 0.0687 0.8295 0.0774 81 | 1987 0.8595 0.0723 0.8353 0.0794 82 | 1988 0.9272 0.0787 0.9193 0.0875 83 | 1989 1.0001 0.0885 1.0150 0.0977 84 | 1990 1.0788 0.1020 1.0945 0.1084 85 | 1991 1.1637 0.1195 1.1776 0.1230 86 | 87 | TIME TOTALS 88 | Time Model std.err. Imputed std.err. 89 | 1984 513 43 511 43 90 | 1985 379 23 372 25 91 | 1986 409 20 424 24 92 | 1987 441 18 427 24 93 | 1988 476 17 470 25 94 | 1989 513 19 519 27 95 | 1990 553 25 559 30 96 | 1991 597 34 602 36 97 | 98 | OVERALL SLOPE MODEL: Moderate increase (p<0.01) ** 99 | Additive std.err. Multiplicative std.err. 100 | 0.0442 0.0134 1.0452 0.0140 101 | 102 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 103 | Additive std.err. Multiplicative std.err. 104 | 0.0463 0.0135 1.0474 0.0141 105 | 106 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-x2.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark_yr.dat 2 | TITLE skylark-x2 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT absent 11 | 12 | comment Actual year test 13 | model 2 14 | changepoints 1 2 15 | overdisp on 16 | serialcor on 17 | run 18 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-x3.out: -------------------------------------------------------------------------------- 1 | TRIM 3.61 : TRend analysis and Indices for Monitoring data 2 | STATISTICS NETHERLANDS 3 | 4 | Date/Time: 4-10-2016 20:56:06 5 | 6 | Title : skylark-x2 7 | 8 | Comment: Testing overall changepoints 9 | 10 | The following 5 variables have been read from file: 11 | Z:\Users\patrick\Work\CBS\TRIM\work_in_progress\skylark-x\skylark_yr.dat 12 | 13 | 1. Site number of values: 55 14 | 2. 
Time number of values: 8 15 | 3. Count missing = 999 16 | 4. HABITAT number of values: 2 17 | 5. COV2 number of values: 4 18 | 19 | Number of sites without positive counts (removed) 0 20 | 21 | Number of observed zero counts 0 22 | Number of observed positive counts 202 23 | Total number of observed counts 202 24 | Number of missing counts 238 25 | Total number of counts 440 26 | 27 | Total count 2536.0 28 | 29 | Sites containing more than 10% of the total count 30 | Site Number Observed Total % 31 | 3 431.0 17.0 32 | 37 266.0 10.5 33 | 40 624.0 24.6 34 | 35 | Time Point Averages 36 | TimePoint Observations Average Index 37 | 1984 25 8.52 1.00 38 | 1985 20 8.10 0.95 39 | 1986 30 10.90 1.28 40 | 1987 30 11.27 1.32 41 | 1988 28 12.75 1.50 42 | 1989 29 14.66 1.72 43 | 1990 22 17.00 2.00 44 | 1991 18 18.89 2.22 45 | 46 | RESULTS FOR MODEL: Linear Trend 47 | -------------------------------- 48 | Changes in Slope at Timepoints 49 | 1984 1985 50 | 51 | ESTIMATION METHOD = Generalised Estimating Equations 52 | 53 | 54 | Total time used: 1.01 seconds 55 | 56 | Estimated Overdispersion = 1.334 57 | Estimated Serial Correlation = 0.305 58 | 59 | GOODNESS OF FIT 60 | Chi-square 193.36, df 145, p 0.0045 61 | Likelihood Ratio 191.81, df 145, p 0.0056 62 | AIC (up to a constant) -98.19 63 | 64 | WALD-TEST FOR SIGNIFICANCE OF CHANGES IN SLOPE 65 | Changepoint Wald-Test df p 66 | 1984 11.08 1 0.0009 67 | 1985 14.90 1 0.0001 68 | 69 | PARAMETER ESTIMATES 70 | 71 | Slope for Time Intervals 72 | from upto Additive std.err. Multiplicative std.err. 73 | 1984 1985 -0.3029 0.0910 0.7387 0.0672 74 | 1985 1991 0.0757 0.0157 1.0787 0.0169 75 | 76 | Time INDICES 77 | Time Model std.err. Imputed std.err. 78 | 1984 1 1 79 | 1985 0.7387 0.0672 0.7276 0.0686 80 | 1986 0.7968 0.0687 0.8295 0.0774 81 | 1987 0.8595 0.0723 0.8353 0.0794 82 | 1988 0.9272 0.0787 0.9193 0.0875 83 | 1989 1.0001 0.0885 1.0150 0.0977 84 | 1990 1.0788 0.1020 1.0945 0.1084 85 | 1991 1.1637 0.1195 1.1776 0.1230 86 | 87 | TIME TOTALS 88 | Time Model std.err. Imputed std.err. 89 | 1984 513 43 511 43 90 | 1985 379 23 372 25 91 | 1986 409 20 424 24 92 | 1987 441 18 427 24 93 | 1988 476 17 470 25 94 | 1989 513 19 519 27 95 | 1990 553 25 559 30 96 | 1991 597 34 602 36 97 | 98 | OVERALL SLOPE MODEL: Moderate increase (p<0.01) ** 99 | Additive std.err. Multiplicative std.err. 100 | 0.0442 0.0134 1.0452 0.0140 101 | 102 | OVERALL SLOPE MODEL (with intercept) FOR TIME INTERVALS 103 | from upto Additive std.err. Multiplicative std.err. 104 | 1984 1985 -0.3029 0.0910 0.7387 0.0672 105 | 1985 1991 0.0757 0.0157 1.0787 0.0169 106 | 107 | OVERALL SLOPE IMPUTED+(recommended): Moderate increase (p<0.01) ** 108 | Additive std.err. Multiplicative std.err. 109 | 0.0463 0.0135 1.0474 0.0141 110 | 111 | OVERALL SLOPE IMPUTED (with intercept) FOR TIME INTERVALS 112 | from upto Additive std.err. Multiplicative std.err. 
113 | 1984 1985 -0.3180 0.0942 0.7276 0.0686 114 | 1985 1991 0.0783 0.0157 1.0815 0.0170 115 | 116 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark-x3.tcf: -------------------------------------------------------------------------------- 1 | FILE outfiles/skylark_yr.dat 2 | TITLE skylark-x2 3 | NTIMES 8 4 | NCOVARS 2 5 | LABELS 6 | Habitat 7 | Cov2 8 | End 9 | MISSING 999 10 | WEIGHT absent 11 | 12 | comment Testing overall changepoints 13 | model 2 14 | changepoints 1 2 15 | overdisp on 16 | serialcor on 17 | overallchangepoints 1 2 18 | run 19 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark.dat: -------------------------------------------------------------------------------- 1 | 1 1 11 2 2 2 | 1 2 8 2 2 3 | 1 3 5 2 2 4 | 1 4 4 2 2 5 | 1 5 10 2 2 6 | 1 6 7 2 2 7 | 1 7 999 2 2 8 | 1 8 999 2 2 9 | 2 1 15 2 4 10 | 2 2 9 2 4 11 | 2 3 7 2 4 12 | 2 4 8 2 4 13 | 2 5 6 2 4 14 | 2 6 21 2 4 15 | 2 7 999 2 4 16 | 2 8 999 2 4 17 | 3 1 41 2 4 18 | 3 2 29 2 4 19 | 3 3 36 2 4 20 | 3 4 37 2 4 21 | 3 5 49 2 4 22 | 3 6 74 2 4 23 | 3 7 68 2 4 24 | 3 8 97 2 4 25 | 4 1 13 2 4 26 | 4 2 11 2 4 27 | 4 3 6 2 4 28 | 4 4 11 2 4 29 | 4 5 17 2 4 30 | 4 6 22 2 4 31 | 4 7 28 2 4 32 | 4 8 36 2 4 33 | 5 1 999 1 3 34 | 5 2 999 1 3 35 | 5 3 1 1 3 36 | 5 4 999 1 3 37 | 5 5 999 1 3 38 | 5 6 1 1 3 39 | 5 7 999 1 3 40 | 5 8 999 1 3 41 | 6 1 15 1 2 42 | 6 2 16 1 2 43 | 6 3 14 1 2 44 | 6 4 12 1 2 45 | 6 5 12 1 2 46 | 6 6 13 1 2 47 | 6 7 12 1 2 48 | 6 8 11 1 2 49 | 7 1 11 2 3 50 | 7 2 9 2 3 51 | 7 3 7 2 3 52 | 7 4 4 2 3 53 | 7 5 5 2 3 54 | 7 6 7 2 3 55 | 7 7 10 2 3 56 | 7 8 10 2 3 57 | 8 1 1 1 3 58 | 8 2 999 1 3 59 | 8 3 999 1 3 60 | 8 4 999 1 3 61 | 8 5 999 1 3 62 | 8 6 999 1 3 63 | 8 7 999 1 3 64 | 8 8 999 1 3 65 | 9 1 4 1 1 66 | 9 2 999 1 1 67 | 9 3 999 1 1 68 | 9 4 999 1 1 69 | 9 5 999 1 1 70 | 9 6 999 1 1 71 | 9 7 999 1 1 72 | 9 8 999 1 1 73 | 10 1 1 2 3 74 | 10 2 999 2 3 75 | 10 3 999 2 3 76 | 10 4 999 2 3 77 | 10 5 999 2 3 78 | 10 6 999 2 3 79 | 10 7 999 2 3 80 | 10 8 999 2 3 81 | 11 1 23 2 3 82 | 11 2 16 2 3 83 | 11 3 40 2 3 84 | 11 4 35 2 3 85 | 11 5 28 2 3 86 | 11 6 21 2 3 87 | 11 7 31 2 3 88 | 11 8 18 2 3 89 | 12 1 25 2 4 90 | 12 2 12 2 4 91 | 12 3 11 2 4 92 | 12 4 12 2 4 93 | 12 5 15 2 4 94 | 12 6 17 2 4 95 | 12 7 999 2 4 96 | 12 8 999 2 4 97 | 13 1 999 2 3 98 | 13 2 999 2 3 99 | 13 3 2 2 3 100 | 13 4 2 2 3 101 | 13 5 999 2 3 102 | 13 6 1 2 3 103 | 13 7 1 2 3 104 | 13 8 1 2 3 105 | 14 1 4 2 3 106 | 14 2 4 2 3 107 | 14 3 4 2 3 108 | 14 4 7 2 3 109 | 14 5 6 2 3 110 | 14 6 8 2 3 111 | 14 7 8 2 3 112 | 14 8 8 2 3 113 | 15 1 7 2 2 114 | 15 2 999 2 2 115 | 15 3 5 2 2 116 | 15 4 4 2 2 117 | 15 5 5 2 2 118 | 15 6 999 2 2 119 | 15 7 999 2 2 120 | 15 8 999 2 2 121 | 16 1 999 2 1 122 | 16 2 999 2 1 123 | 16 3 1 2 1 124 | 16 4 1 2 1 125 | 16 5 1 2 1 126 | 16 6 999 2 1 127 | 16 7 999 2 1 128 | 16 8 999 2 1 129 | 17 1 2 1 2 130 | 17 2 999 1 2 131 | 17 3 999 1 2 132 | 17 4 1 1 2 133 | 17 5 999 1 2 134 | 17 6 999 1 2 135 | 17 7 999 1 2 136 | 17 8 999 1 2 137 | 18 1 1 1 2 138 | 18 2 999 1 2 139 | 18 3 999 1 2 140 | 18 4 999 1 2 141 | 18 5 999 1 2 142 | 18 6 999 1 2 143 | 18 7 999 1 2 144 | 18 8 999 1 2 145 | 19 1 1 1 3 146 | 19 2 999 1 3 147 | 19 3 999 1 3 148 | 19 4 999 1 3 149 | 19 5 999 1 3 150 | 19 6 999 1 3 151 | 19 7 999 1 3 152 | 19 8 999 1 3 153 | 20 1 5 1 3 154 | 20 2 3 1 3 155 | 20 3 999 1 3 156 | 20 4 999 1 3 157 | 20 5 999 1 3 158 | 20 6 999 1 3 159 | 20 7 999 1 3 160 | 20 8 999 1 3 
161 | 21 1 3 1 2 162 | 21 2 3 1 2 163 | 21 3 2 1 2 164 | 21 4 999 1 2 165 | 21 5 999 1 2 166 | 21 6 999 1 2 167 | 21 7 999 1 2 168 | 21 8 999 1 2 169 | 22 1 8 1 3 170 | 22 2 3 1 3 171 | 22 3 1 1 3 172 | 22 4 999 1 3 173 | 22 5 1 1 3 174 | 22 6 999 1 3 175 | 22 7 999 1 3 176 | 22 8 999 1 3 177 | 23 1 3 1 2 178 | 23 2 1 1 2 179 | 23 3 999 1 2 180 | 23 4 999 1 2 181 | 23 5 999 1 2 182 | 23 6 3 1 2 183 | 23 7 999 1 2 184 | 23 8 999 1 2 185 | 24 1 3 1 2 186 | 24 2 3 1 2 187 | 24 3 5 1 2 188 | 24 4 3 1 2 189 | 24 5 2 1 2 190 | 24 6 3 1 2 191 | 24 7 2 1 2 192 | 24 8 1 1 2 193 | 25 1 6 1 3 194 | 25 2 8 1 3 195 | 25 3 4 1 3 196 | 25 4 2 1 3 197 | 25 5 2 1 3 198 | 25 6 1 1 3 199 | 25 7 999 1 3 200 | 25 8 1 1 3 201 | 26 1 999 1 2 202 | 26 2 999 1 2 203 | 26 3 1 1 2 204 | 26 4 999 1 2 205 | 26 5 999 1 2 206 | 26 6 999 1 2 207 | 26 7 999 1 2 208 | 26 8 999 1 2 209 | 27 1 999 2 4 210 | 27 2 4 2 4 211 | 27 3 7 2 4 212 | 27 4 6 2 4 213 | 27 5 7 2 4 214 | 27 6 7 2 4 215 | 27 7 7 2 4 216 | 27 8 999 2 4 217 | 28 1 999 2 4 218 | 28 2 14 2 4 219 | 28 3 999 2 4 220 | 28 4 999 2 4 221 | 28 5 999 2 4 222 | 28 6 999 2 4 223 | 28 7 999 2 4 224 | 28 8 999 2 4 225 | 29 1 999 1 2 226 | 29 2 999 1 2 227 | 29 3 999 1 2 228 | 29 4 999 1 2 229 | 29 5 999 1 2 230 | 29 6 999 1 2 231 | 29 7 1 1 2 232 | 29 8 999 1 2 233 | 30 1 1 1 2 234 | 30 2 999 1 2 235 | 30 3 999 1 2 236 | 30 4 999 1 2 237 | 30 5 999 1 2 238 | 30 6 999 1 2 239 | 30 7 999 1 2 240 | 30 8 999 1 2 241 | 31 1 999 1 1 242 | 31 2 1 1 1 243 | 31 3 2 1 1 244 | 31 4 1 1 1 245 | 31 5 1 1 1 246 | 31 6 999 1 1 247 | 31 7 999 1 1 248 | 31 8 999 1 1 249 | 32 1 999 2 2 250 | 32 2 999 2 2 251 | 32 3 2 2 2 252 | 32 4 1 2 2 253 | 32 5 2 2 2 254 | 32 6 1 2 2 255 | 32 7 999 2 2 256 | 32 8 3 2 2 257 | 33 1 999 2 4 258 | 33 2 4 2 4 259 | 33 3 4 2 4 260 | 33 4 6 2 4 261 | 33 5 7 2 4 262 | 33 6 5 2 4 263 | 33 7 3 2 4 264 | 33 8 999 2 4 265 | 34 1 999 1 3 266 | 34 2 999 1 3 267 | 34 3 1 1 3 268 | 34 4 999 1 3 269 | 34 5 999 1 3 270 | 34 6 999 1 3 271 | 34 7 999 1 3 272 | 34 8 999 1 3 273 | 35 1 999 2 3 274 | 35 2 999 2 3 275 | 35 3 1 2 3 276 | 35 4 2 2 3 277 | 35 5 2 2 3 278 | 35 6 2 2 3 279 | 35 7 3 2 3 280 | 35 8 3 2 3 281 | 36 1 999 1 2 282 | 36 2 999 1 2 283 | 36 3 1 1 2 284 | 36 4 1 1 2 285 | 36 5 999 1 2 286 | 36 6 999 1 2 287 | 36 7 999 1 2 288 | 36 8 999 1 2 289 | 37 1 999 2 4 290 | 37 2 999 2 4 291 | 37 3 57 2 4 292 | 37 4 60 2 4 293 | 37 5 55 2 4 294 | 37 6 50 2 4 295 | 37 7 44 2 4 296 | 37 8 999 2 4 297 | 38 1 999 1 3 298 | 38 2 999 1 3 299 | 38 3 7 1 3 300 | 38 4 8 1 3 301 | 38 5 5 1 3 302 | 38 6 10 1 3 303 | 38 7 5 1 3 304 | 38 8 6 1 3 305 | 39 1 999 1 2 306 | 39 2 999 1 2 307 | 39 3 999 1 2 308 | 39 4 1 1 2 309 | 39 5 999 1 2 310 | 39 6 999 1 2 311 | 39 7 999 1 2 312 | 39 8 999 1 2 313 | 40 1 999 2 3 314 | 40 2 999 2 3 315 | 40 3 88 2 3 316 | 40 4 80 2 3 317 | 40 5 108 2 3 318 | 40 6 104 2 3 319 | 40 7 131 2 3 320 | 40 8 113 2 3 321 | 41 1 999 1 2 322 | 41 2 999 1 2 323 | 41 3 999 1 2 324 | 41 4 6 1 2 325 | 41 5 3 1 2 326 | 41 6 4 1 2 327 | 41 7 999 1 2 328 | 41 8 999 1 2 329 | 42 1 999 1 3 330 | 42 2 999 1 3 331 | 42 3 999 1 3 332 | 42 4 999 1 3 333 | 42 5 1 1 3 334 | 42 6 999 1 3 335 | 42 7 1 1 3 336 | 42 8 999 1 3 337 | 43 1 999 2 4 338 | 43 2 999 2 4 339 | 43 3 999 2 4 340 | 43 4 999 2 4 341 | 43 5 1 2 4 342 | 43 6 999 2 4 343 | 43 7 999 2 4 344 | 43 8 999 2 4 345 | 44 1 999 2 3 346 | 44 2 999 2 3 347 | 44 3 999 2 3 348 | 44 4 999 2 3 349 | 44 5 999 2 3 350 | 44 6 8 2 3 351 | 44 7 999 2 3 352 | 44 8 999 2 3 353 | 45 1 999 2 4 354 | 45 2 999 2 4 355 | 45 3 999 2 4 
356 | 45 4 16 2 4 357 | 45 5 999 2 4 358 | 45 6 999 2 4 359 | 45 7 999 2 4 360 | 45 8 999 2 4 361 | 46 1 999 1 3 362 | 46 2 999 1 3 363 | 46 3 999 1 3 364 | 46 4 3 1 3 365 | 46 5 4 1 3 366 | 46 6 2 1 3 367 | 46 7 1 1 3 368 | 46 8 2 1 3 369 | 47 1 7 1 4 370 | 47 2 4 1 4 371 | 47 3 4 1 4 372 | 47 4 3 1 4 373 | 47 5 1 1 4 374 | 47 6 2 1 4 375 | 47 7 4 1 4 376 | 47 8 4 1 4 377 | 48 1 999 2 3 378 | 48 2 999 2 3 379 | 48 3 999 2 3 380 | 48 4 999 2 3 381 | 48 5 999 2 3 382 | 48 6 999 2 3 383 | 48 7 1 2 3 384 | 48 8 999 2 3 385 | 49 1 2 2 3 386 | 49 2 999 2 3 387 | 49 3 999 2 3 388 | 49 4 999 2 3 389 | 49 5 999 2 3 390 | 49 6 999 2 3 391 | 49 7 2 2 3 392 | 49 8 2 2 3 393 | 50 1 999 1 4 394 | 50 2 999 1 4 395 | 50 3 999 1 4 396 | 50 4 999 1 4 397 | 50 5 999 1 4 398 | 50 6 20 1 4 399 | 50 7 999 1 4 400 | 50 8 999 1 4 401 | 51 1 999 1 3 402 | 51 2 999 1 3 403 | 51 3 999 1 3 404 | 51 4 999 1 3 405 | 51 5 999 1 3 406 | 51 6 1 1 3 407 | 51 7 999 1 3 408 | 51 8 999 1 3 409 | 52 1 999 1 3 410 | 52 2 999 1 3 411 | 52 3 999 1 3 412 | 52 4 999 1 3 413 | 52 5 999 1 3 414 | 52 6 9 1 3 415 | 52 7 10 1 3 416 | 52 8 8 1 3 417 | 53 1 999 2 3 418 | 53 2 999 2 3 419 | 53 3 999 2 3 420 | 53 4 999 2 3 421 | 53 5 999 2 3 422 | 53 6 999 2 3 423 | 53 7 1 2 3 424 | 53 8 999 2 3 425 | 54 1 999 2 4 426 | 54 2 999 2 4 427 | 54 3 999 2 4 428 | 54 4 999 2 4 429 | 54 5 999 2 4 430 | 54 6 999 2 4 431 | 54 7 999 2 4 432 | 54 8 16 2 4 433 | 55 1 999 1 1 434 | 55 2 999 1 1 435 | 55 3 1 1 1 436 | 55 4 1 1 1 437 | 55 5 1 1 1 438 | 55 6 1 1 1 439 | 55 7 999 1 1 440 | 55 8 999 1 1 441 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark_wt.dat: -------------------------------------------------------------------------------- 1 | 1 1 11 1 2 2 2 | 1 2 8 1 2 2 3 | 1 3 5 1 2 2 4 | 1 4 4 1 2 2 5 | 1 5 10 1 2 2 6 | 1 6 7 1 2 2 7 | 1 7 999 1 2 2 8 | 1 8 999 1 2 2 9 | 2 1 15 1 2 4 10 | 2 2 9 1 2 4 11 | 2 3 7 1 2 4 12 | 2 4 8 1 2 4 13 | 2 5 6 1 2 4 14 | 2 6 21 1 2 4 15 | 2 7 999 1 2 4 16 | 2 8 999 1 2 4 17 | 3 1 41 1 2 4 18 | 3 2 29 1 2 4 19 | 3 3 36 1 2 4 20 | 3 4 37 1 2 4 21 | 3 5 49 1 2 4 22 | 3 6 74 1 2 4 23 | 3 7 68 1 2 4 24 | 3 8 97 1 2 4 25 | 4 1 13 1 2 4 26 | 4 2 11 1 2 4 27 | 4 3 6 1 2 4 28 | 4 4 11 1 2 4 29 | 4 5 17 1 2 4 30 | 4 6 22 1 2 4 31 | 4 7 28 1 2 4 32 | 4 8 36 1 2 4 33 | 5 1 999 10 1 3 34 | 5 2 999 10 1 3 35 | 5 3 1 10 1 3 36 | 5 4 999 10 1 3 37 | 5 5 999 10 1 3 38 | 5 6 1 10 1 3 39 | 5 7 999 10 1 3 40 | 5 8 999 10 1 3 41 | 6 1 15 10 1 2 42 | 6 2 16 10 1 2 43 | 6 3 14 10 1 2 44 | 6 4 12 10 1 2 45 | 6 5 12 10 1 2 46 | 6 6 13 10 1 2 47 | 6 7 12 10 1 2 48 | 6 8 11 10 1 2 49 | 7 1 11 1 2 3 50 | 7 2 9 1 2 3 51 | 7 3 7 1 2 3 52 | 7 4 4 1 2 3 53 | 7 5 5 1 2 3 54 | 7 6 7 1 2 3 55 | 7 7 10 1 2 3 56 | 7 8 10 1 2 3 57 | 8 1 1 10 1 3 58 | 8 2 999 10 1 3 59 | 8 3 999 10 1 3 60 | 8 4 999 10 1 3 61 | 8 5 999 10 1 3 62 | 8 6 999 10 1 3 63 | 8 7 999 10 1 3 64 | 8 8 999 10 1 3 65 | 9 1 4 10 1 1 66 | 9 2 999 10 1 1 67 | 9 3 999 10 1 1 68 | 9 4 999 10 1 1 69 | 9 5 999 10 1 1 70 | 9 6 999 10 1 1 71 | 9 7 999 10 1 1 72 | 9 8 999 10 1 1 73 | 10 1 1 1 2 3 74 | 10 2 999 1 2 3 75 | 10 3 999 1 2 3 76 | 10 4 999 1 2 3 77 | 10 5 999 1 2 3 78 | 10 6 999 1 2 3 79 | 10 7 999 1 2 3 80 | 10 8 999 1 2 3 81 | 11 1 23 1 2 3 82 | 11 2 16 1 2 3 83 | 11 3 40 1 2 3 84 | 11 4 35 1 2 3 85 | 11 5 28 1 2 3 86 | 11 6 21 1 2 3 87 | 11 7 31 1 2 3 88 | 11 8 18 1 2 3 89 | 12 1 25 1 2 4 90 | 12 2 12 1 2 4 91 | 12 3 11 1 2 4 92 | 12 4 12 1 2 4 93 | 12 5 15 1 2 4 94 | 12 6 17 1 2 4 95 | 12 7 999 1 2 4 96 | 12 8 
999 1 2 4 97 | 13 1 999 1 2 3 98 | 13 2 999 1 2 3 99 | 13 3 2 1 2 3 100 | 13 4 2 1 2 3 101 | 13 5 999 1 2 3 102 | 13 6 1 1 2 3 103 | 13 7 1 1 2 3 104 | 13 8 1 1 2 3 105 | 14 1 4 1 2 3 106 | 14 2 4 1 2 3 107 | 14 3 4 1 2 3 108 | 14 4 7 1 2 3 109 | 14 5 6 1 2 3 110 | 14 6 8 1 2 3 111 | 14 7 8 1 2 3 112 | 14 8 8 1 2 3 113 | 15 1 7 1 2 2 114 | 15 2 999 1 2 2 115 | 15 3 5 1 2 2 116 | 15 4 4 1 2 2 117 | 15 5 5 1 2 2 118 | 15 6 999 1 2 2 119 | 15 7 999 1 2 2 120 | 15 8 999 1 2 2 121 | 16 1 999 1 2 1 122 | 16 2 999 1 2 1 123 | 16 3 1 1 2 1 124 | 16 4 1 1 2 1 125 | 16 5 1 1 2 1 126 | 16 6 999 1 2 1 127 | 16 7 999 1 2 1 128 | 16 8 999 1 2 1 129 | 17 1 2 10 1 2 130 | 17 2 999 10 1 2 131 | 17 3 999 10 1 2 132 | 17 4 1 10 1 2 133 | 17 5 999 10 1 2 134 | 17 6 999 10 1 2 135 | 17 7 999 10 1 2 136 | 17 8 999 10 1 2 137 | 18 1 1 10 1 2 138 | 18 2 999 10 1 2 139 | 18 3 999 10 1 2 140 | 18 4 999 10 1 2 141 | 18 5 999 10 1 2 142 | 18 6 999 10 1 2 143 | 18 7 999 10 1 2 144 | 18 8 999 10 1 2 145 | 19 1 1 10 1 3 146 | 19 2 999 10 1 3 147 | 19 3 999 10 1 3 148 | 19 4 999 10 1 3 149 | 19 5 999 10 1 3 150 | 19 6 999 10 1 3 151 | 19 7 999 10 1 3 152 | 19 8 999 10 1 3 153 | 20 1 5 10 1 3 154 | 20 2 3 10 1 3 155 | 20 3 999 10 1 3 156 | 20 4 999 10 1 3 157 | 20 5 999 10 1 3 158 | 20 6 999 10 1 3 159 | 20 7 999 10 1 3 160 | 20 8 999 10 1 3 161 | 21 1 3 10 1 2 162 | 21 2 3 10 1 2 163 | 21 3 2 10 1 2 164 | 21 4 999 10 1 2 165 | 21 5 999 10 1 2 166 | 21 6 999 10 1 2 167 | 21 7 999 10 1 2 168 | 21 8 999 10 1 2 169 | 22 1 8 10 1 3 170 | 22 2 3 10 1 3 171 | 22 3 1 10 1 3 172 | 22 4 999 10 1 3 173 | 22 5 1 10 1 3 174 | 22 6 999 10 1 3 175 | 22 7 999 10 1 3 176 | 22 8 999 10 1 3 177 | 23 1 3 10 1 2 178 | 23 2 1 10 1 2 179 | 23 3 999 10 1 2 180 | 23 4 999 10 1 2 181 | 23 5 999 10 1 2 182 | 23 6 3 10 1 2 183 | 23 7 999 10 1 2 184 | 23 8 999 10 1 2 185 | 24 1 3 10 1 2 186 | 24 2 3 10 1 2 187 | 24 3 5 10 1 2 188 | 24 4 3 10 1 2 189 | 24 5 2 10 1 2 190 | 24 6 3 10 1 2 191 | 24 7 2 10 1 2 192 | 24 8 1 10 1 2 193 | 25 1 6 10 1 3 194 | 25 2 8 10 1 3 195 | 25 3 4 10 1 3 196 | 25 4 2 10 1 3 197 | 25 5 2 10 1 3 198 | 25 6 1 10 1 3 199 | 25 7 999 10 1 3 200 | 25 8 1 10 1 3 201 | 26 1 999 10 1 2 202 | 26 2 999 10 1 2 203 | 26 3 1 10 1 2 204 | 26 4 999 10 1 2 205 | 26 5 999 10 1 2 206 | 26 6 999 10 1 2 207 | 26 7 999 10 1 2 208 | 26 8 999 10 1 2 209 | 27 1 999 1 2 4 210 | 27 2 4 1 2 4 211 | 27 3 7 1 2 4 212 | 27 4 6 1 2 4 213 | 27 5 7 1 2 4 214 | 27 6 7 1 2 4 215 | 27 7 7 1 2 4 216 | 27 8 999 1 2 4 217 | 28 1 999 1 2 4 218 | 28 2 14 1 2 4 219 | 28 3 999 1 2 4 220 | 28 4 999 1 2 4 221 | 28 5 999 1 2 4 222 | 28 6 999 1 2 4 223 | 28 7 999 1 2 4 224 | 28 8 999 1 2 4 225 | 29 1 999 10 1 2 226 | 29 2 999 10 1 2 227 | 29 3 999 10 1 2 228 | 29 4 999 10 1 2 229 | 29 5 999 10 1 2 230 | 29 6 999 10 1 2 231 | 29 7 1 10 1 2 232 | 29 8 999 10 1 2 233 | 30 1 1 10 1 2 234 | 30 2 999 10 1 2 235 | 30 3 999 10 1 2 236 | 30 4 999 10 1 2 237 | 30 5 999 10 1 2 238 | 30 6 999 10 1 2 239 | 30 7 999 10 1 2 240 | 30 8 999 10 1 2 241 | 31 1 999 10 1 1 242 | 31 2 1 10 1 1 243 | 31 3 2 10 1 1 244 | 31 4 1 10 1 1 245 | 31 5 1 10 1 1 246 | 31 6 999 10 1 1 247 | 31 7 999 10 1 1 248 | 31 8 999 10 1 1 249 | 32 1 999 1 2 2 250 | 32 2 999 1 2 2 251 | 32 3 2 1 2 2 252 | 32 4 1 1 2 2 253 | 32 5 2 1 2 2 254 | 32 6 1 1 2 2 255 | 32 7 999 1 2 2 256 | 32 8 3 1 2 2 257 | 33 1 999 1 2 4 258 | 33 2 4 1 2 4 259 | 33 3 4 1 2 4 260 | 33 4 6 1 2 4 261 | 33 5 7 1 2 4 262 | 33 6 5 1 2 4 263 | 33 7 3 1 2 4 264 | 33 8 999 1 2 4 265 | 34 1 999 10 1 3 266 | 34 2 999 10 1 3 267 | 34 3 1 10 1 3 
268 | 34 4 999 10 1 3 269 | 34 5 999 10 1 3 270 | 34 6 999 10 1 3 271 | 34 7 999 10 1 3 272 | 34 8 999 10 1 3 273 | 35 1 999 1 2 3 274 | 35 2 999 1 2 3 275 | 35 3 1 1 2 3 276 | 35 4 2 1 2 3 277 | 35 5 2 1 2 3 278 | 35 6 2 1 2 3 279 | 35 7 3 1 2 3 280 | 35 8 3 1 2 3 281 | 36 1 999 10 1 2 282 | 36 2 999 10 1 2 283 | 36 3 1 10 1 2 284 | 36 4 1 10 1 2 285 | 36 5 999 10 1 2 286 | 36 6 999 10 1 2 287 | 36 7 999 10 1 2 288 | 36 8 999 10 1 2 289 | 37 1 999 1 2 4 290 | 37 2 999 1 2 4 291 | 37 3 57 1 2 4 292 | 37 4 60 1 2 4 293 | 37 5 55 1 2 4 294 | 37 6 50 1 2 4 295 | 37 7 44 1 2 4 296 | 37 8 999 1 2 4 297 | 38 1 999 10 1 3 298 | 38 2 999 10 1 3 299 | 38 3 7 10 1 3 300 | 38 4 8 10 1 3 301 | 38 5 5 10 1 3 302 | 38 6 10 10 1 3 303 | 38 7 5 10 1 3 304 | 38 8 6 10 1 3 305 | 39 1 999 10 1 2 306 | 39 2 999 10 1 2 307 | 39 3 999 10 1 2 308 | 39 4 1 10 1 2 309 | 39 5 999 10 1 2 310 | 39 6 999 10 1 2 311 | 39 7 999 10 1 2 312 | 39 8 999 10 1 2 313 | 40 1 999 1 2 3 314 | 40 2 999 1 2 3 315 | 40 3 88 1 2 3 316 | 40 4 80 1 2 3 317 | 40 5 108 1 2 3 318 | 40 6 104 1 2 3 319 | 40 7 131 1 2 3 320 | 40 8 113 1 2 3 321 | 41 1 999 10 1 2 322 | 41 2 999 10 1 2 323 | 41 3 999 10 1 2 324 | 41 4 6 10 1 2 325 | 41 5 3 10 1 2 326 | 41 6 4 10 1 2 327 | 41 7 999 10 1 2 328 | 41 8 999 10 1 2 329 | 42 1 999 10 1 3 330 | 42 2 999 10 1 3 331 | 42 3 999 10 1 3 332 | 42 4 999 10 1 3 333 | 42 5 1 10 1 3 334 | 42 6 999 10 1 3 335 | 42 7 1 10 1 3 336 | 42 8 999 10 1 3 337 | 43 1 999 1 2 4 338 | 43 2 999 1 2 4 339 | 43 3 999 1 2 4 340 | 43 4 999 1 2 4 341 | 43 5 1 1 2 4 342 | 43 6 999 1 2 4 343 | 43 7 999 1 2 4 344 | 43 8 999 1 2 4 345 | 44 1 999 1 2 3 346 | 44 2 999 1 2 3 347 | 44 3 999 1 2 3 348 | 44 4 999 1 2 3 349 | 44 5 999 1 2 3 350 | 44 6 8 1 2 3 351 | 44 7 999 1 2 3 352 | 44 8 999 1 2 3 353 | 45 1 999 1 2 4 354 | 45 2 999 1 2 4 355 | 45 3 999 1 2 4 356 | 45 4 16 1 2 4 357 | 45 5 999 1 2 4 358 | 45 6 999 1 2 4 359 | 45 7 999 1 2 4 360 | 45 8 999 1 2 4 361 | 46 1 999 10 1 3 362 | 46 2 999 10 1 3 363 | 46 3 999 10 1 3 364 | 46 4 3 10 1 3 365 | 46 5 4 10 1 3 366 | 46 6 2 10 1 3 367 | 46 7 1 10 1 3 368 | 46 8 2 10 1 3 369 | 47 1 7 10 1 4 370 | 47 2 4 10 1 4 371 | 47 3 4 10 1 4 372 | 47 4 3 10 1 4 373 | 47 5 1 10 1 4 374 | 47 6 2 10 1 4 375 | 47 7 4 10 1 4 376 | 47 8 4 10 1 4 377 | 48 1 999 1 2 3 378 | 48 2 999 1 2 3 379 | 48 3 999 1 2 3 380 | 48 4 999 1 2 3 381 | 48 5 999 1 2 3 382 | 48 6 999 1 2 3 383 | 48 7 1 1 2 3 384 | 48 8 999 1 2 3 385 | 49 1 2 1 2 3 386 | 49 2 999 1 2 3 387 | 49 3 999 1 2 3 388 | 49 4 999 1 2 3 389 | 49 5 999 1 2 3 390 | 49 6 999 1 2 3 391 | 49 7 2 1 2 3 392 | 49 8 2 1 2 3 393 | 50 1 999 10 1 4 394 | 50 2 999 10 1 4 395 | 50 3 999 10 1 4 396 | 50 4 999 10 1 4 397 | 50 5 999 10 1 4 398 | 50 6 20 10 1 4 399 | 50 7 999 10 1 4 400 | 50 8 999 10 1 4 401 | 51 1 999 10 1 3 402 | 51 2 999 10 1 3 403 | 51 3 999 10 1 3 404 | 51 4 999 10 1 3 405 | 51 5 999 10 1 3 406 | 51 6 1 10 1 3 407 | 51 7 999 10 1 3 408 | 51 8 999 10 1 3 409 | 52 1 999 10 1 3 410 | 52 2 999 10 1 3 411 | 52 3 999 10 1 3 412 | 52 4 999 10 1 3 413 | 52 5 999 10 1 3 414 | 52 6 9 10 1 3 415 | 52 7 10 10 1 3 416 | 52 8 8 10 1 3 417 | 53 1 999 1 2 3 418 | 53 2 999 1 2 3 419 | 53 3 999 1 2 3 420 | 53 4 999 1 2 3 421 | 53 5 999 1 2 3 422 | 53 6 999 1 2 3 423 | 53 7 1 1 2 3 424 | 53 8 999 1 2 3 425 | 54 1 999 1 2 4 426 | 54 2 999 1 2 4 427 | 54 3 999 1 2 4 428 | 54 4 999 1 2 4 429 | 54 5 999 1 2 4 430 | 54 6 999 1 2 4 431 | 54 7 999 1 2 4 432 | 54 8 16 1 2 4 433 | 55 1 999 10 1 1 434 | 55 2 999 10 1 1 435 | 55 3 1 10 1 1 436 | 55 4 1 10 1 1 437 | 55 5 1 10 
1 1 438 | 55 6 1 10 1 1 439 | 55 7 999 10 1 1 440 | 55 8 999 10 1 1 441 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/skylark_yr.dat: -------------------------------------------------------------------------------- 1 | 1 1984 11 2 2 2 | 1 1985 8 2 2 3 | 1 1986 5 2 2 4 | 1 1987 4 2 2 5 | 1 1988 10 2 2 6 | 1 1989 7 2 2 7 | 1 1990 999 2 2 8 | 1 1991 999 2 2 9 | 2 1984 15 2 4 10 | 2 1985 9 2 4 11 | 2 1986 7 2 4 12 | 2 1987 8 2 4 13 | 2 1988 6 2 4 14 | 2 1989 21 2 4 15 | 2 1990 999 2 4 16 | 2 1991 999 2 4 17 | 3 1984 41 2 4 18 | 3 1985 29 2 4 19 | 3 1986 36 2 4 20 | 3 1987 37 2 4 21 | 3 1988 49 2 4 22 | 3 1989 74 2 4 23 | 3 1990 68 2 4 24 | 3 1991 97 2 4 25 | 4 1984 13 2 4 26 | 4 1985 11 2 4 27 | 4 1986 6 2 4 28 | 4 1987 11 2 4 29 | 4 1988 17 2 4 30 | 4 1989 22 2 4 31 | 4 1990 28 2 4 32 | 4 1991 36 2 4 33 | 5 1984 999 1 3 34 | 5 1985 999 1 3 35 | 5 1986 1 1 3 36 | 5 1987 999 1 3 37 | 5 1988 999 1 3 38 | 5 1989 1 1 3 39 | 5 1990 999 1 3 40 | 5 1991 999 1 3 41 | 6 1984 15 1 2 42 | 6 1985 16 1 2 43 | 6 1986 14 1 2 44 | 6 1987 12 1 2 45 | 6 1988 12 1 2 46 | 6 1989 13 1 2 47 | 6 1990 12 1 2 48 | 6 1991 11 1 2 49 | 7 1984 11 2 3 50 | 7 1985 9 2 3 51 | 7 1986 7 2 3 52 | 7 1987 4 2 3 53 | 7 1988 5 2 3 54 | 7 1989 7 2 3 55 | 7 1990 10 2 3 56 | 7 1991 10 2 3 57 | 8 1984 1 1 3 58 | 8 1985 999 1 3 59 | 8 1986 999 1 3 60 | 8 1987 999 1 3 61 | 8 1988 999 1 3 62 | 8 1989 999 1 3 63 | 8 1990 999 1 3 64 | 8 1991 999 1 3 65 | 9 1984 4 1 1 66 | 9 1985 999 1 1 67 | 9 1986 999 1 1 68 | 9 1987 999 1 1 69 | 9 1988 999 1 1 70 | 9 1989 999 1 1 71 | 9 1990 999 1 1 72 | 9 1991 999 1 1 73 | 10 1984 1 2 3 74 | 10 1985 999 2 3 75 | 10 1986 999 2 3 76 | 10 1987 999 2 3 77 | 10 1988 999 2 3 78 | 10 1989 999 2 3 79 | 10 1990 999 2 3 80 | 10 1991 999 2 3 81 | 11 1984 23 2 3 82 | 11 1985 16 2 3 83 | 11 1986 40 2 3 84 | 11 1987 35 2 3 85 | 11 1988 28 2 3 86 | 11 1989 21 2 3 87 | 11 1990 31 2 3 88 | 11 1991 18 2 3 89 | 12 1984 25 2 4 90 | 12 1985 12 2 4 91 | 12 1986 11 2 4 92 | 12 1987 12 2 4 93 | 12 1988 15 2 4 94 | 12 1989 17 2 4 95 | 12 1990 999 2 4 96 | 12 1991 999 2 4 97 | 13 1984 999 2 3 98 | 13 1985 999 2 3 99 | 13 1986 2 2 3 100 | 13 1987 2 2 3 101 | 13 1988 999 2 3 102 | 13 1989 1 2 3 103 | 13 1990 1 2 3 104 | 13 1991 1 2 3 105 | 14 1984 4 2 3 106 | 14 1985 4 2 3 107 | 14 1986 4 2 3 108 | 14 1987 7 2 3 109 | 14 1988 6 2 3 110 | 14 1989 8 2 3 111 | 14 1990 8 2 3 112 | 14 1991 8 2 3 113 | 15 1984 7 2 2 114 | 15 1985 999 2 2 115 | 15 1986 5 2 2 116 | 15 1987 4 2 2 117 | 15 1988 5 2 2 118 | 15 1989 999 2 2 119 | 15 1990 999 2 2 120 | 15 1991 999 2 2 121 | 16 1984 999 2 1 122 | 16 1985 999 2 1 123 | 16 1986 1 2 1 124 | 16 1987 1 2 1 125 | 16 1988 1 2 1 126 | 16 1989 999 2 1 127 | 16 1990 999 2 1 128 | 16 1991 999 2 1 129 | 17 1984 2 1 2 130 | 17 1985 999 1 2 131 | 17 1986 999 1 2 132 | 17 1987 1 1 2 133 | 17 1988 999 1 2 134 | 17 1989 999 1 2 135 | 17 1990 999 1 2 136 | 17 1991 999 1 2 137 | 18 1984 1 1 2 138 | 18 1985 999 1 2 139 | 18 1986 999 1 2 140 | 18 1987 999 1 2 141 | 18 1988 999 1 2 142 | 18 1989 999 1 2 143 | 18 1990 999 1 2 144 | 18 1991 999 1 2 145 | 19 1984 1 1 3 146 | 19 1985 999 1 3 147 | 19 1986 999 1 3 148 | 19 1987 999 1 3 149 | 19 1988 999 1 3 150 | 19 1989 999 1 3 151 | 19 1990 999 1 3 152 | 19 1991 999 1 3 153 | 20 1984 5 1 3 154 | 20 1985 3 1 3 155 | 20 1986 999 1 3 156 | 20 1987 999 1 3 157 | 20 1988 999 1 3 158 | 20 1989 999 1 3 159 | 20 1990 999 1 3 160 | 20 1991 999 1 3 161 | 21 1984 3 1 2 162 | 21 1985 3 1 2 163 | 21 1986 2 1 2 164 | 
21 1987 999 1 2 165 | 21 1988 999 1 2 166 | 21 1989 999 1 2 167 | 21 1990 999 1 2 168 | 21 1991 999 1 2 169 | 22 1984 8 1 3 170 | 22 1985 3 1 3 171 | 22 1986 1 1 3 172 | 22 1987 999 1 3 173 | 22 1988 1 1 3 174 | 22 1989 999 1 3 175 | 22 1990 999 1 3 176 | 22 1991 999 1 3 177 | 23 1984 3 1 2 178 | 23 1985 1 1 2 179 | 23 1986 999 1 2 180 | 23 1987 999 1 2 181 | 23 1988 999 1 2 182 | 23 1989 3 1 2 183 | 23 1990 999 1 2 184 | 23 1991 999 1 2 185 | 24 1984 3 1 2 186 | 24 1985 3 1 2 187 | 24 1986 5 1 2 188 | 24 1987 3 1 2 189 | 24 1988 2 1 2 190 | 24 1989 3 1 2 191 | 24 1990 2 1 2 192 | 24 1991 1 1 2 193 | 25 1984 6 1 3 194 | 25 1985 8 1 3 195 | 25 1986 4 1 3 196 | 25 1987 2 1 3 197 | 25 1988 2 1 3 198 | 25 1989 1 1 3 199 | 25 1990 999 1 3 200 | 25 1991 1 1 3 201 | 26 1984 999 1 2 202 | 26 1985 999 1 2 203 | 26 1986 1 1 2 204 | 26 1987 999 1 2 205 | 26 1988 999 1 2 206 | 26 1989 999 1 2 207 | 26 1990 999 1 2 208 | 26 1991 999 1 2 209 | 27 1984 999 2 4 210 | 27 1985 4 2 4 211 | 27 1986 7 2 4 212 | 27 1987 6 2 4 213 | 27 1988 7 2 4 214 | 27 1989 7 2 4 215 | 27 1990 7 2 4 216 | 27 1991 999 2 4 217 | 28 1984 999 2 4 218 | 28 1985 14 2 4 219 | 28 1986 999 2 4 220 | 28 1987 999 2 4 221 | 28 1988 999 2 4 222 | 28 1989 999 2 4 223 | 28 1990 999 2 4 224 | 28 1991 999 2 4 225 | 29 1984 999 1 2 226 | 29 1985 999 1 2 227 | 29 1986 999 1 2 228 | 29 1987 999 1 2 229 | 29 1988 999 1 2 230 | 29 1989 999 1 2 231 | 29 1990 1 1 2 232 | 29 1991 999 1 2 233 | 30 1984 1 1 2 234 | 30 1985 999 1 2 235 | 30 1986 999 1 2 236 | 30 1987 999 1 2 237 | 30 1988 999 1 2 238 | 30 1989 999 1 2 239 | 30 1990 999 1 2 240 | 30 1991 999 1 2 241 | 31 1984 999 1 1 242 | 31 1985 1 1 1 243 | 31 1986 2 1 1 244 | 31 1987 1 1 1 245 | 31 1988 1 1 1 246 | 31 1989 999 1 1 247 | 31 1990 999 1 1 248 | 31 1991 999 1 1 249 | 32 1984 999 2 2 250 | 32 1985 999 2 2 251 | 32 1986 2 2 2 252 | 32 1987 1 2 2 253 | 32 1988 2 2 2 254 | 32 1989 1 2 2 255 | 32 1990 999 2 2 256 | 32 1991 3 2 2 257 | 33 1984 999 2 4 258 | 33 1985 4 2 4 259 | 33 1986 4 2 4 260 | 33 1987 6 2 4 261 | 33 1988 7 2 4 262 | 33 1989 5 2 4 263 | 33 1990 3 2 4 264 | 33 1991 999 2 4 265 | 34 1984 999 1 3 266 | 34 1985 999 1 3 267 | 34 1986 1 1 3 268 | 34 1987 999 1 3 269 | 34 1988 999 1 3 270 | 34 1989 999 1 3 271 | 34 1990 999 1 3 272 | 34 1991 999 1 3 273 | 35 1984 999 2 3 274 | 35 1985 999 2 3 275 | 35 1986 1 2 3 276 | 35 1987 2 2 3 277 | 35 1988 2 2 3 278 | 35 1989 2 2 3 279 | 35 1990 3 2 3 280 | 35 1991 3 2 3 281 | 36 1984 999 1 2 282 | 36 1985 999 1 2 283 | 36 1986 1 1 2 284 | 36 1987 1 1 2 285 | 36 1988 999 1 2 286 | 36 1989 999 1 2 287 | 36 1990 999 1 2 288 | 36 1991 999 1 2 289 | 37 1984 999 2 4 290 | 37 1985 999 2 4 291 | 37 1986 57 2 4 292 | 37 1987 60 2 4 293 | 37 1988 55 2 4 294 | 37 1989 50 2 4 295 | 37 1990 44 2 4 296 | 37 1991 999 2 4 297 | 38 1984 999 1 3 298 | 38 1985 999 1 3 299 | 38 1986 7 1 3 300 | 38 1987 8 1 3 301 | 38 1988 5 1 3 302 | 38 1989 10 1 3 303 | 38 1990 5 1 3 304 | 38 1991 6 1 3 305 | 39 1984 999 1 2 306 | 39 1985 999 1 2 307 | 39 1986 999 1 2 308 | 39 1987 1 1 2 309 | 39 1988 999 1 2 310 | 39 1989 999 1 2 311 | 39 1990 999 1 2 312 | 39 1991 999 1 2 313 | 40 1984 999 2 3 314 | 40 1985 999 2 3 315 | 40 1986 88 2 3 316 | 40 1987 80 2 3 317 | 40 1988 108 2 3 318 | 40 1989 104 2 3 319 | 40 1990 131 2 3 320 | 40 1991 113 2 3 321 | 41 1984 999 1 2 322 | 41 1985 999 1 2 323 | 41 1986 999 1 2 324 | 41 1987 6 1 2 325 | 41 1988 3 1 2 326 | 41 1989 4 1 2 327 | 41 1990 999 1 2 328 | 41 1991 999 1 2 329 | 42 1984 999 1 3 330 | 42 1985 999 1 3 331 | 42 1986 999 1 3 
332 | 42 1987 999 1 3 333 | 42 1988 1 1 3 334 | 42 1989 999 1 3 335 | 42 1990 1 1 3 336 | 42 1991 999 1 3 337 | 43 1984 999 2 4 338 | 43 1985 999 2 4 339 | 43 1986 999 2 4 340 | 43 1987 999 2 4 341 | 43 1988 1 2 4 342 | 43 1989 999 2 4 343 | 43 1990 999 2 4 344 | 43 1991 999 2 4 345 | 44 1984 999 2 3 346 | 44 1985 999 2 3 347 | 44 1986 999 2 3 348 | 44 1987 999 2 3 349 | 44 1988 999 2 3 350 | 44 1989 8 2 3 351 | 44 1990 999 2 3 352 | 44 1991 999 2 3 353 | 45 1984 999 2 4 354 | 45 1985 999 2 4 355 | 45 1986 999 2 4 356 | 45 1987 16 2 4 357 | 45 1988 999 2 4 358 | 45 1989 999 2 4 359 | 45 1990 999 2 4 360 | 45 1991 999 2 4 361 | 46 1984 999 1 3 362 | 46 1985 999 1 3 363 | 46 1986 999 1 3 364 | 46 1987 3 1 3 365 | 46 1988 4 1 3 366 | 46 1989 2 1 3 367 | 46 1990 1 1 3 368 | 46 1991 2 1 3 369 | 47 1984 7 1 4 370 | 47 1985 4 1 4 371 | 47 1986 4 1 4 372 | 47 1987 3 1 4 373 | 47 1988 1 1 4 374 | 47 1989 2 1 4 375 | 47 1990 4 1 4 376 | 47 1991 4 1 4 377 | 48 1984 999 2 3 378 | 48 1985 999 2 3 379 | 48 1986 999 2 3 380 | 48 1987 999 2 3 381 | 48 1988 999 2 3 382 | 48 1989 999 2 3 383 | 48 1990 1 2 3 384 | 48 1991 999 2 3 385 | 49 1984 2 2 3 386 | 49 1985 999 2 3 387 | 49 1986 999 2 3 388 | 49 1987 999 2 3 389 | 49 1988 999 2 3 390 | 49 1989 999 2 3 391 | 49 1990 2 2 3 392 | 49 1991 2 2 3 393 | 50 1984 999 1 4 394 | 50 1985 999 1 4 395 | 50 1986 999 1 4 396 | 50 1987 999 1 4 397 | 50 1988 999 1 4 398 | 50 1989 20 1 4 399 | 50 1990 999 1 4 400 | 50 1991 999 1 4 401 | 51 1984 999 1 3 402 | 51 1985 999 1 3 403 | 51 1986 999 1 3 404 | 51 1987 999 1 3 405 | 51 1988 999 1 3 406 | 51 1989 1 1 3 407 | 51 1990 999 1 3 408 | 51 1991 999 1 3 409 | 52 1984 999 1 3 410 | 52 1985 999 1 3 411 | 52 1986 999 1 3 412 | 52 1987 999 1 3 413 | 52 1988 999 1 3 414 | 52 1989 9 1 3 415 | 52 1990 10 1 3 416 | 52 1991 8 1 3 417 | 53 1984 999 2 3 418 | 53 1985 999 2 3 419 | 53 1986 999 2 3 420 | 53 1987 999 2 3 421 | 53 1988 999 2 3 422 | 53 1989 999 2 3 423 | 53 1990 1 2 3 424 | 53 1991 999 2 3 425 | 54 1984 999 2 4 426 | 54 1985 999 2 4 427 | 54 1986 999 2 4 428 | 54 1987 999 2 4 429 | 54 1988 999 2 4 430 | 54 1989 999 2 4 431 | 54 1990 999 2 4 432 | 54 1991 16 2 4 433 | 55 1984 999 1 1 434 | 55 1985 999 1 1 435 | 55 1986 1 1 1 436 | 55 1987 1 1 1 437 | 55 1988 1 1 1 438 | 55 1989 1 1 1 439 | 55 1990 999 1 1 440 | 55 1991 999 1 1 441 | -------------------------------------------------------------------------------- /pkg/tests/testthat/outfiles/test_skylark_1a_to_2b.R: -------------------------------------------------------------------------------- 1 | rm(list=ls()) 2 | library(ggplot2) 3 | library(plyr) 4 | library(testthat) 5 | #library(rtrim) 6 | source("../../pkg/R/read_tcf.R") 7 | source("../../pkg/R/read_tdf.R") 8 | 9 | printf <- function(fmt,...) { cat(sprintf(fmt,...)) } 10 | 11 | source("trim_workhorse.R") 12 | source("trim_post.R") 13 | source("trim_wald.R") 14 | source("trim_gof.R") 15 | source("trim_index.R") 16 | source("trim_overall.R") 17 | 18 | source("out2test.R") 19 | 20 | jobs = c("1a","1b","1c","1d","1e","2a","2b") 21 | for (j in jobs) { 22 | 23 | job = sprintf("skylark-%s", j) 24 | 25 | # Load command file 26 | tcf <- read_tcf(sprintf("%s.tcf", job)) 27 | # Check for mandatory TCF elements 28 | if (is.na(tcf$file)) stop("No file in TCF") 29 | if (is.na(tcf$model)) stop ("No model specified in TCF file") 30 | 31 | # Read data and calc some stats. We store the results in a special TRIMdata object. 
32 | df <- read_tdf(tcf) 33 | dat <- list(df=df, # Data as read into the data.table 34 | # TODO: sites with only missings 35 | nzero = sum(df$count==0, na.rm=TRUE), # Number of observed zero counts 36 | npos = sum(df$count>0, na.rm=TRUE), # Number of observed positive counts 37 | nobs = sum(is.finite(df$count)), # Number of observed counts 38 | nmis = sum(is.na(df$count)), # Number of missing counts 39 | ncount = length(df$count), # Total number of counts 40 | totcount = sum(df$count, na.rm=TRUE), # Total count 41 | nsite = length(unique(df$site)), # Number of actual sites 42 | ntime = length(unique(df$time)), # Number of actual time points 43 | weight = tcf$weight, 44 | missing = tcf$missing, 45 | file = tcf$file) 46 | class(dat) <- "TRIMdata" 47 | 48 | # Do some checks on the datax 49 | if (tcf$ntimes != dat$ntime) { 50 | stop(sprintf("Data contains different number of time points (%d) than specified in TCF file (%d)", 51 | dat$ntime, tcf$ntimes)) 52 | } 53 | 54 | #=============================================================================== 55 | # Data 56 | #=============================================================================== 57 | 58 | target = read_out(sprintf("%s.out", job), tcf, debug=TRUE) 59 | 60 | print(summary(dat)) 61 | s = summary(dat) 62 | expect_equal(s$ncols, as.numeric(target$ncols)) 63 | expect_equal(s$nsite, as.numeric(target$nsite)) 64 | expect_equal(s$ntime, as.numeric(target$ntime)) 65 | expect_equal(s$missing, as.numeric(target$missing)) 66 | 67 | expect_equal(s$nzero, as.numeric(target$nzero)) 68 | expect_equal(s$npos, as.numeric(target$npos)) 69 | expect_equal(s$nobs, as.numeric(target$nobs)) 70 | expect_equal(s$nmis, as.numeric(target$nmis)) 71 | expect_equal(s$ncount, as.numeric(target$ncount)) 72 | expect_equal(s$totcount, as.numeric(target$totcount)) 73 | 74 | print(dominant_sites(dat)) 75 | dom = dominant_sites(dat) 76 | expect_equal(as.numeric(dom$sites$site), target$dom$sites$site.nr) 77 | expect_equal(dom$sites$total, target$dom$sites$obs.tot) 78 | expect_equal(dom$sites$percent, target$dom$sites$percent, tol=5e-2) 79 | 80 | avg = average(dat) 81 | print(avg) 82 | expect_equal(as.numeric(avg$time), target$avg$TimePoint) 83 | expect_equal(avg$observations, target$avg$Observations) 84 | expect_equal(avg$average, target$avg$Average, tol=5e-3) 85 | expect_equal(avg$index, target$avg$Index, tol=5e-3) 86 | 87 | #=============================================================================== 88 | # Run 89 | #=============================================================================== 90 | 91 | 92 | count <- dat$df$count 93 | 94 | # Convert site/time back to their original values to test our new TRIM workhorse 95 | # unfactor <- function(x) as.integer(levels(x))[x] 96 | # time <- unfactor(dat$df$time) 97 | # site <- unfactor(dat$df$site) 98 | time <- dat$df$time 99 | site <- dat$df$site 100 | covars = list() 101 | ncovar = length(tcf$covariates) 102 | if (ncovar>0) { 103 | for (i in 1:ncovar) { 104 | cv = tcf$covariates[i] # Covariate number 105 | name = tcf$labels[cv] # Covariate name 106 | cv.col = cv + ifelse(tcf$weight,4,3) # shift columns (site, time, count, [weight]) 107 | covars[[name]] = dat$df[[cv.col]] 108 | } 109 | } 110 | z <- trim_estimate(count,time,site, covars=covars, model=tcf$model, 111 | serialcor=tcf$serialcor, overdisp=tcf$overdisp, 112 | changepoints=tcf$changepoints) 113 | 114 | 115 | #=============================================================================== 116 | # test output 117 | 
#=============================================================================== 118 | 119 | fit = gof(z) 120 | print(fit) 121 | expect_equal(fit$chi2$chi2, as.numeric(target$gof$chi2[1]), tol=1e-2) 122 | expect_equal(fit$chi2$df, as.numeric(target$gof$chi2[2])) 123 | expect_equal(fit$chi2$p, as.numeric(target$gof$chi2[3]), tol=1e-4) 124 | expect_equal(fit$LR$LR, as.numeric(target$gof$LR[1]), tol=1e-2) 125 | expect_equal(fit$LR$df, as.numeric(target$gof$LR[2])) 126 | expect_equal(fit$LR$p, as.numeric(target$gof$LR[3]), tol=1e-4) 127 | expect_equal(fit$AIC, as.numeric(target$gof$AIC), tol=1e-2) 128 | 129 | W = wald(z) 130 | print(W) 131 | for (type in c("slope","dslope","deviations","covar")) { 132 | if (!is.null(W[[type]])) { 133 | expect_equal(W[[type]]$W, as.numeric(target$wald[[type]]$W), tol=1e-2) 134 | expect_equal(W[[type]]$p, as.numeric(target$wald[[type]]$p), tol=1e-4) 135 | } 136 | } 137 | 138 | xx = coef(z,"both") 139 | print(xx) 140 | yy = target$coefficients 141 | for (i in 1:ncol(xx)) if (is.numeric(xx[[i]])) { 142 | expect_true(max(abs(xx[[i]]-yy[, i]))<1e-4, info=sprintf("Coefficients, column %d", i)) 143 | } 144 | 145 | 146 | if (tcf$model==3 && z$ncovar==0) { 147 | # Test linear trend 148 | xx <- linear(z)$trend 149 | yy <- target$linear$trend 150 | for (col in 1:4) expect_equal(xx[1,col], yy[1,col], tol=5e-5) 151 | 152 | # ... and deviations 153 | 154 | xx = linear(z)$dev 155 | yy = target$linear$dev 156 | for (i in 1:ncol(xx)) { 157 | xcol = xx[[i]] 158 | ycol = yy[[i]] 159 | expect_true(max(abs(xcol-ycol))<6e-5, info=sprintf("Deviations, column %d", i)) 160 | #! Fails strict tol. Needs to loosen: 5e-5 --> 6e-5 161 | } 162 | } 163 | 164 | # Test time indices 165 | xx = index(z, which="both") 166 | print(xx) 167 | yy = target$time.idx 168 | for (i in 1:ncol(xx)) { 169 | xcol = xx[[i]] 170 | ycol = yy[[i]] 171 | if (all(is.finite(xcol))) 172 | expect_true(max(abs(xcol-ycol), na.rm=TRUE)<1e-4, info=sprintf("Time indices, column %d", i)) 173 | } 174 | 175 | # Test time totals 176 | xx <- totals(z, "both") 177 | yy = target$time.tot 178 | for (i in 1:ncol(xx)) { 179 | xcol = xx[[i]] 180 | ycol = yy[[i]] 181 | if (all(is.finite(xcol))) 182 | expect_true(max(abs(xcol-ycol), na.rm=TRUE)<1e-4, info=sprintf("Time totals, column %d", i)) 183 | } 184 | 185 | # Test overall slope (modelled) 186 | O = overall(z) 187 | print(O) 188 | xx = overall(z, "model")$coef 189 | yy = target$overall$mod$par 190 | for (col in 2:4) { 191 | expect_equal(xx[2,col], yy[1,col], tol=1e-4, info="overall slope (modelled)") 192 | } 193 | 194 | # Test overall slope (imputed) 195 | xx = overall(z, "imputed")$coef 196 | yy = target$overall$imp$par 197 | for (col in c(1,3)) { 198 | expect_equal(xx[2,col], yy[1,col], tol=1e-4, info="overall slope (imputed)") 199 | } 200 | 201 | } 202 | printf("\n *** All tests succeeded ***\n") -------------------------------------------------------------------------------- /pkg/tests/testthat/test_checks.R: -------------------------------------------------------------------------------- 1 | 2 | 3 | context("data checkers") 4 | 5 | test_that("basic assertions",{ 6 | expect_error(assert_positive(-1,"foo"),regexp = "foo") 7 | expect_error(assert_positive(0,"foo"),regexp = "foo") 8 | expect_true(assert_positive(1,"foo")) 9 | 10 | # expect_error(assert_increasing(c(3,1,4)) ) 11 | # expect_true(assert_increasing(1:2)) 12 | 13 | expect_error(assert_sufficient_counts(count=0:2,index = 1:3)) 14 | expect_true(assert_sufficient_counts(count=1:3,index=1:3)) 15 | }) 16 | 17 | 18 | 
test_that("check_observations",{ 19 | # model 3, no covariates 20 | d <- data.frame(year = 1:3, count=0:2) 21 | out <- check_observations(d,model=3) 22 | expect_false(out$sufficient) 23 | expect_equal(out$errors,list(year="1")) 24 | 25 | # model 3 with covariates 26 | d <- data.frame(count=rep(0:1,2), year=rep(1:2,each=2), cov = rep(1:2,2)) 27 | out <- check_observations(d, model=3, covars="cov") 28 | expect_false(out$sufficient) 29 | target = list(cov=data.frame(year=factor(1:2),cov=factor(c(1,1),levels=1:2))) 30 | expect_equal(out$errors, target) 31 | 32 | # model 2, no covariates 33 | d <- data.frame(year=1:10, count=c(rep(1,7), rep(0,3))) 34 | out <- check_observations(d, model=2, changepoints = c(4,7)) 35 | expect_false(out$sufficient) 36 | expect_equal(out$errors$changepoint, 7) 37 | 38 | # model 2, with covariates 39 | d <- data.frame( 40 | year=1:4 41 | , X = c("A","A","A","B") 42 | , count = c(1,1,1,0) 43 | ) 44 | # browser() 45 | out <- check_observations(d, model=2, covars="X", changepoints=1) 46 | expect_false(out$sufficient) 47 | expect_equal(out$errors$X, data.frame(changepoint=factor(1), X=factor("B",levels=c("A","B")) )) 48 | }) 49 | 50 | 51 | test_that("Sufficient data for piecewise linear trend model (Model 2)",{ 52 | 53 | ## no covariates 54 | d <- data.frame(count=rep(0:10,2), site=rep(1:2,11), time=rep(1:11,each=2)) 55 | expect_true(assert_plt_model(d$count, d$time, changepoints=c(1,4,10),covars=list())) 56 | expect_true(assert_plt_model(d$count, d$time, changepoints=integer(0),covars=list())) 57 | 58 | d$count[d$time>10] <- 0 59 | expect_error(assert_plt_model(d$count, d$time, changepoints=c(1,4,10),covars=list()),regexp="10") 60 | 61 | ## with covariates: 62 | d <- data.frame( 63 | count = c(0,1,7,3) 64 | , time = rep(1:2,each=2) 65 | , cov=c("a","b","a","a") 66 | ) 67 | cpts <- 1 68 | d$piece <- pieces_from_changepoints(d$time, cpts) 69 | expect_error(assert_plt_model(d$count,d$time,changepoints=1, covars=list(cov=d$cov))) 70 | 71 | # time : 1 2 3 72 | # count a: 2 7 0 73 | # count b: 1 3 2 74 | # chpts : 1 2 # Fails, because covar 'a' has no data for chpt 2 75 | d <- data.frame( 76 | time = c(1:3, 1:3), 77 | count = c(2,7,0, 1,3,2), 78 | cov = c(rep("a",3), rep("b",3))) 79 | expect_error(assert_plt_model(d$count, d$time, changepoints=c(1,2), covars=list(cov=d$cov))) 80 | }) 81 | 82 | 83 | test_that("Sufficient data for model 3 with covariates",{ 84 | 85 | d <- data.frame(count = rep(0:1,2), time = rep(1:2,each=2), cov = rep(1:2,2)) 86 | expect_error(assert_covariate_counts(d$count, d$time, d["cov"]),regexp="cov") 87 | d <- data.frame(count = rep(7:8,2),time = rep(1:2,each=2), cov = rep(1:2,2)) 88 | expect_true(assert_covariate_counts(d$count, d$time, d["cov"])) 89 | 90 | # with errors in two covariates 91 | d <- data.frame(count = rep(0:1,2), time = rep(1:2,each=2) 92 | , covA = rep(1:2,2) 93 | , covB = rep(letters[1:2],2)) 94 | expect_error(assert_covariate_counts(d$count, d$time, d[3:4]),regexp="covB") 95 | expect_true(assert_covariate_counts(d$count, d$time, data.frame())) 96 | }) 97 | 98 | 99 | context("Autodelete") 100 | 101 | test_that("autodelete w/o covariates",{ 102 | 103 | # case nothing to delete 104 | time <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) 105 | count <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1) 106 | cpts <- c( 4, 7 ) 107 | expect_equal(autodelete(count, time, cpts), cpts) 108 | 109 | # case something to delete 110 | time <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) 111 | count <- c(1, 1, 1, 1, 0, 0, 0, 1, 1, 1) 112 | cpts <- c( 4, 7 ) # cpt 7 is removed to provide 
data to cpt 4 113 | expect_equal(autodelete(count, time, cpts), 4) 114 | }) 115 | 116 | test_that("autodelete with covariates", { 117 | d <- data.frame( 118 | time = rep(1:10, times=2) 119 | , covar = rep(letters[1:2], each=10) 120 | , count = rep(1, 20) 121 | ) 122 | 123 | # case all fine 124 | # BUG: there is no real time 1 for class b 125 | expect_equal( 126 | autodelete(count = d$count, time = d$time, changepoints=c(4,7),covars=list(cov=d$covar)) 127 | , c(4,7) 128 | ) 129 | 130 | #case delete 7 131 | d$count[8:10] <- 0 132 | expect_equal( 133 | autodelete(count = d$count, time = d$time, changepoints=c(4,7),covars=list(cov=d$covar)) 134 | , 4 135 | ) 136 | # 137 | 138 | expect_equal( 139 | autodelete(count = d$count, time = d$time, changepoints=c(1,4,7),covars=list(cov=d$covar)) 140 | ,c(1, 4) 141 | ) 142 | 143 | }) 144 | -------------------------------------------------------------------------------- /pkg/tests/testthat/test_errors.R: -------------------------------------------------------------------------------- 1 | context("Errors") 2 | 3 | test_that("data issues", { 4 | # test duplicate counts 5 | df <- data.frame(site=1, year=c(1,2,2), count=1) 6 | expect_error(trim(count ~ site+year, data=df, model=3), 7 | "More than one observation given for at least one site/year combination.") 8 | }) 9 | 10 | context("formula interface errors") 11 | 12 | test_that("invalid formulas", { 13 | 14 | df <- data.frame(count=1, site=2, year=3, month=4, habitat=5, acid=5) 15 | 16 | expect_error(trim(count + onzin, df), "object .* not found") 17 | # expect_error(trim(count ~ site + year), "no data given", fixed=TRUE) 18 | # expect_error(trim(count ~ site + year, pi), "data should be a data frame", fixed=TRUE) 19 | # expect_error(trim(count ~ site, df), "model should have form 'count ~ site + year ...", fixed=TRUE) 20 | # expect_error(trim(count + site ~ year, df), "model should have form 'count ~ ...'", fixed=TRUE) 21 | # expect_error(trim(count ~ site - year, df), "Model contains unallowed operator: -", fixed=TRUE) 22 | # expect_error(trim(count ~ site + year + habitat : acid, df), "covariates should be included using the '+' operator (':' found)", fixed=TRUE) 23 | }) 24 | 25 | context("Warnings") 26 | 27 | test_that("empty sites", { 28 | A <- data.frame(site="A", year=1:2, count=c(1,1)) # filled site 29 | B <- data.frame(site="B", year=1:2, count=0) # empty site 30 | AB <- rbind(A,B) 31 | expect_warning(trim(count ~ site+year, data=AB, model=3), 32 | "Removed 1 site without positive observations: (B)", fixed=TRUE) 33 | }) -------------------------------------------------------------------------------- /pkg/tests/testthat/test_tcf.R: -------------------------------------------------------------------------------- 1 | 2 | context("Test tcf parsing") 3 | 4 | f <- tempfile() 5 | writeLines(" 6 | FILE F:\\TRIM\\Skylark.dat 7 | TITLE Skylark.dat 8 | NTIMES 8 9 | NCOVARS 2 10 | LABELS 11 | HABITAT 12 | Cov2 13 | END 14 | COMMENT Hello Bird 15 | MISSING -1 16 | WEIGHT Present 17 | WEIGHTING off 18 | SERIALCOR on 19 | OVERDISP on 20 | BASETIME 1 21 | MODEL 3 22 | COVARIATES 2 23 | AUTODELETE off 24 | OUTPUTFILES F 25 | RUN", con=f) 26 | 27 | 28 | test_that("read_tcf parses tcf files", { 29 | x <- read_tcf(f) 30 | expect_equal(x$file,"F:/TRIM/Skylark.dat") 31 | expect_equal(x$title,"Skylark.dat") 32 | expect_equal(x$ntimes,8L) 33 | expect_equal(x$ncovars,2L) 34 | expect_equal(x$labels,c("HABITAT","Cov2")) 35 | expect_equal(x$missing, -1L) 36 | expect_equal(x$weight,TRUE) 37 | expect_equal(x$weighting, FALSE) 
38 | expect_equal(x$comment,"Hello Bird") 39 | expect_equal(x$serialcor,TRUE) 40 | expect_equal(x$overdisp,TRUE) 41 | expect_equal(x$basetime,1L) 42 | expect_equal(x$model,3L) 43 | expect_equal(x$covariates,2L) 44 | expect_equal(x$changepoints,integer(0)) 45 | expect_equal(x$stepwise,FALSE) 46 | expect_equal(x$outputfiles,"F") 47 | expect_equal(x$autodelete,FALSE) 48 | }) 49 | 50 | 51 | 52 | 53 | f <- tempfile() 54 | writeLines(" 55 | FILE F:\\TRIM\\Skylark.dat 56 | TITLE Skylark.dat 57 | NTIMES 8 58 | NCOVARS 2 59 | LABELS 60 | HABITAT 61 | Cov2 62 | END 63 | COMMENT Hello Bird 64 | MISSING -1 65 | WEIGHT Present 66 | WEIGHTING off 67 | SERIALCOR on 68 | OVERDISP on 69 | BASETIME 1 70 | MODEL 3 71 | COVARIATES 2 72 | OUTPUTFILES F 73 | RUN 74 | MODEL 1 75 | OVERDISP off", con=f) 76 | 77 | test_that("parsing multi-model files",{ 78 | x <- read_tcf(f) 79 | expect_equal(length(x),2L) 80 | expect_equal(x[[2]]$model, 1L) 81 | expect_equal(x[[2]]$overdisp, FALSE) 82 | # crashtest 83 | capture.output(print(x)) 84 | }) 85 | 86 | 87 | 88 | f <- tempfile() 89 | writeLines(" 90 | FILE F:\\TRIM\\Skylark.dat 91 | TITLE Skylark.dat 92 | NTIMES 8 93 | NCOVARS 2 94 | LABELS 95 | HABITAT 96 | Cov2 97 | END 98 | COMMENT Hello Bird 99 | MISSING -1 100 | WEIGHT Present 101 | WEIGHTING off 102 | SERIALCOR on 103 | OVERDISP on 104 | BASETIME 1 105 | MODEL 3 106 | BEAVIS 7 107 | COVARIATES 2 108 | AUTODELETE off 109 | BUTTHEAD 110 | OUTPUTFILES F 111 | RUN", con=f) 112 | 113 | 114 | 115 | g <- tempfile() 116 | writeLines(" 117 | FILE F:\\TRIM\\Skylark.dat 118 | TITLE Skylark.dat 119 | NTIMES 8 120 | NCOVARS 2 121 | END 122 | HABITAT 123 | Cov2 124 | LABELS 125 | COMMENT Hello Bird 126 | MISSING -1 127 | WEIGHT Present 128 | WEIGHTING off 129 | SERIALCOR on 130 | OVERDISP on 131 | BASETIME 1 132 | MODEL 3 133 | COVARIATES 2 134 | AUTODELETE off 135 | OUTPUTFILES F 136 | RUN", con=g) 137 | 138 | 139 | test_that("warning on invalid keys",{ 140 | expect_warning(read_tcf(f),regex = "BEAVIS.*?BUTTHEAD") 141 | expect_error(read_tcf(g),regex="LABELS") 142 | }) 143 | 144 | 145 | 146 | -------------------------------------------------------------------------------- /pkg/tests/testthat/test_tdf.R: -------------------------------------------------------------------------------- 1 | testthat::context("Reading TRIM input files") 2 | 3 | 4 | test_that("reading happy flow",{ 5 | f <- tempfile() 6 | # happy flow 7 | writeLines("1 1992 186 1.0000 1 8 | 1 1993 60 1.0000 1 9 | 1 1994 39 1.0000 1 10 | 1 1995 3 1.0000 1 11 | 1 1996 3 1.0000 1 12 | 1 1997 0 1.0000 1 13 | 1 1998 0 1.0000 1 14 | 1 1999 0 1.0000 1 15 | 1 2000 0 1.0000 1 16 | 1 2001 -1 1.0000 1",con=f) 17 | 18 | dat <- read_tdf(x=f,missing=-1L, weight=TRUE, ncovars=1,labels="covar01") 19 | expect_equal(dat[1,], 20 | data.frame( 21 | site= 1 22 | ,time=1992 23 | ,count=186 24 | ,weight=1 25 | ,covar01=1 26 | )) 27 | expect_equal(dat[10,3],NA_integer_) 28 | tryCatch(unlink(f),error=function(e)cat(sprintf("Could not unlink temporary file %s",f))) 29 | }) 30 | 31 | 32 | test_that("reading with errors",{ 33 | f <- tempfile() 34 | writeLines("1 1992 186 1 35 | 1 1993 60 1.0000 1 36 | 1 1994 39 1.0000 1 37 | 1 1995 3 1.0000 1 38 | 1 1996 3 1.0000 1 39 | 1 1997 0 1.0000 1 40 | 1 1998 0 1.0000 1 41 | 1 1999 0 1.0000 1 42 | 1 2000 0 1.0000 1 43 | 1 2001 -1 1.0000 1",con=f) 44 | expect_error(read_tdf(x=f,weight=TRUE),regexp = "Expected 4 columns") 45 | }) 46 | 47 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- 
/pkg/tests/testthat/test_utils.R: -------------------------------------------------------------------------------- 1 | 2 | context("Options") 3 | test_that("options can be set",{ 4 | set_trim_verbose(TRUE) 5 | expect_true(getOption("trim_verbose")) 6 | set_trim_verbose(FALSE) 7 | expect_false(getOption("trim_verbose")) 8 | 9 | }) 10 | 11 | -------------------------------------------------------------------------------- /pkg/tests/testthat/testdata/131183.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SNStatComp/rtrim/a0dbbb5cf3987625d0e5562d8b383d7a01cc7b6f/pkg/tests/testthat/testdata/131183.RData -------------------------------------------------------------------------------- /pkg/vignettes/TRIM_methods_v2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SNStatComp/rtrim/a0dbbb5cf3987625d0e5562d8b383d7a01cc7b6f/pkg/vignettes/TRIM_methods_v2.pdf -------------------------------------------------------------------------------- /pkg/vignettes/TRIM_methods_v2.pdf.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{Models and statistical methods in rtrim} 2 | %\VignetteEngine{R.rsp::asis} 3 | %\VignetteKeyword{PDF} 4 | %\VignetteKeyword{vignette} 5 | %\VignetteKeyword{package} -------------------------------------------------------------------------------- /pkg/vignettes/UIndex_Oystercatcher_output.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SNStatComp/rtrim/a0dbbb5cf3987625d0e5562d8b383d7a01cc7b6f/pkg/vignettes/UIndex_Oystercatcher_output.RData -------------------------------------------------------------------------------- /pkg/vignettes/rtrim_for_TRIM_users.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "rtrim for TRIM3 users" 3 | author: Patrick Bogaart, Mark van der Loo and Jeroen Pannekoek 4 | date: "`r Sys.Date()`" 5 | output: 6 | rmarkdown::html_vignette: 7 | toc: true 8 | vignette: > 9 | %\VignetteIndexEntry{rtrim for TRIM3 users} 10 | %\VignetteEngine{knitr::rmarkdown} 11 | %\VignetteEncoding{UTF-8} 12 | --- 13 | 14 | ```{r, echo = FALSE} 15 | knitr::opts_chunk$set( 16 | collapse = TRUE, 17 | comment = "#>", 18 | fig.width = 7, 19 | fig.height = 5 20 | ) 21 | rm(list=ls()) 22 | ``` 23 | 24 | ## Introduction 25 | 26 | The `rtrim` package is a complete reimplementation of the [original 27 | TRIM](https://www.cbs.nl/en-gb/society/nature-and-environment/indices-and-trends--trim--) 28 | software developed by Jeroen Pannekoek and Arco van Strien from the 1990s 29 | onwards. This vignette provides a quick getting-started manual that demonstrates 30 | the R-based workflow for computing TRIM models. 31 | 32 | - An extensive introduction showing many of the options can be found in the [trim by example](Skylark_example.html) vignette. 33 | - To use legacy TRIM command files and TRIM data files, see the [section on tcf files](#tcf). 34 | 35 | TRIM was developed to estimate animal populations, based on repeated counts at various sites, while counts may be missing for certain sites at certain times. Estimation is based on a model-based imputation method. 36 | 37 | We assume that the reader is already familiar with the methodology behind TRIM, but in short: TRIM estimates a piecewise loglinear growth model to compute imputations.
There are three variants of this model, which differ in their basic assumptions. 38 | 39 | - **Model 1:** Populations vary across sites, but not over time. 40 | - **Model 2:** Populations vary across sites, but show the same growth everywhere. 41 | Growth rates are constant during specified time intervals. 42 | - **Model 3:** Similar, but time effects are independent for each time point. 43 | 44 | Note that both Model 1 and Model 3 can be seen as special cases of Model 2 (Model 1 is equivalent to Model 2 when the growth rates are set to zero; Model 3 is equivalent to Model 2 when growth rates are assumed to change at every time point). 45 | 46 | For each variant it is possible to include categorical covariates in the model, 47 | or to weight sites. Certain simplifying assumptions are made to keep 48 | computations tractable. A detailed description of the methodology can be found 49 | in the [original TRIM3 50 | manual](https://www.cbs.nl/-/media/imported/onze%20diensten/methoden/trim/documents/2006/13/trim3man.pdf). 51 | 52 | ## Computing TRIM models 53 | 54 | We are going to use the `skylark` dataset, which is included with the package. 55 | ```{r} 56 | library(rtrim) 57 | data(skylark) 58 | head(skylark,3) # inspect the dataset 59 | ``` 60 | Here, `skylark` is a regular R `data.frame`. 61 | 62 | The central function for computing TRIM models is called `trim`. Calling this function is very similar to calling basic R modeling functions like `lm`. Here, we compute TRIM model 2. 63 | 64 | ```{r} 65 | m1 <- trim(count ~ site + time, data=skylark, model=2) 66 | ``` 67 | Note that the data is passed to `trim` as an R data.frame. Information on which columns in the data frame represent the counts, the site IDs, etc. is encoded in the first argument, which is of the special type `formula`. 68 | Because site identifiers and time points are treated differently by the model, the order matters (see also [model specification](#modelspec)). 69 | 70 | Alternatively, one can just pass the data frame as argument 1, and explicitly tell `trim` in which columns the counts etc. are: 71 | ```{r} 72 | m1 <- trim(skylark, count_col="count", site_col="site", year_col="time", model=2) 73 | ``` 74 | Note that although the name `year_col` suggests that counts must be on an annual interval, this is not necessarily the case. 75 | 76 | The result is an object of class `trim`. Just like with objects of class `lm`, its various components can be extracted using specialized functions. Here are some examples. 77 | ```{r} 78 | summary(m1) # summarize the model 79 | ``` 80 | 81 | ```{r} 82 | totals(m1) # Return time-totals 83 | ``` 84 | 85 | ```{r} 86 | gof(m1) # Retrieve goodness-of-fit 87 | ``` 88 | 89 | ```{r} 90 | coefficients(m1) # Extract the coefficients 91 | ``` 92 | 93 | 94 | ```{r} 95 | plot(overall(m1)) # Plot with overall slope 96 | ``` 97 | 98 | These are just a few of the functions that can be used to analyse the model. See any of their help files for a complete list of links to all analysis functions. 99 | 100 | ## Model specification {#modelspec} 101 | 102 | The names of variables in the dataset are not important and neither is their order. However, since TRIM models 103 | are designed to estimate the number of counts at counting sites, the formula specifying the model 104 | has to satisfy certain rules. 105 | 106 | - The single variable at the left-hand side of the tilde must represent the counted numbers.
107 | - The **first variable** on the right-hand side of the tilde must represent the **site** variable. 108 | - The **second variable** on the right-hand side must represent the **time** identifier. 109 | - All other variables on the right-hand side are interpreted as covariates. 110 | 111 | For example, to use the variable `Habitat` as a covariate when analysing the `skylark` dataset (under model 2) one does the following. 112 | 113 | ```{r} 114 | m2 <- trim(count ~ site + time + Habitat, data=skylark, model=2) 115 | ``` 116 | 117 | It is also possible to apply weights by specifying a `weights` argument. 118 | The TRIM options `overdisp` (for overdispersion) and `serialcor` (for serial 119 | correlation) are simple `TRUE/FALSE` toggles. The breaks of 120 | the piecewise loglinear model can be specified with the `changepoints` option. 121 | The `trim` function will give an error when too few observations are present 122 | in a time segment, except when the `autodelete` option is set to `TRUE`. In that 123 | case time segments are combined until enough observations are present for a model 124 | to be estimated. See `?trim` for a precise description of all options. Below is an example where 125 | we specify the maximum number of changepoints and let `trim` delete change 126 | points where necessary. 127 | ```{r} 128 | m3 <- trim(count ~ site + time + Habitat, data=skylark, model=2 129 | , overdisp = TRUE, serialcor = TRUE, changepoints=1:7, autodelete=TRUE) 130 | m3$changepoints 131 | ``` 132 | In this case, no change points are deleted. 133 | 134 | In this example, the data set consists of 8 time points, so time points 1 to 7 are explicitly specified as change points. This notation, which requires the prior identification of the number of time points present within the data, can be replaced by the more convenient expression `changepoints="all"`. 135 | 136 | Alternatively the `stepwise` algorithm can be used. This algorithm removes changepoints 137 | when the slope does not change significantly from before to after a changepoint, yielding a 138 | simpler (more sparse) model. 139 | ```{r} 140 | m4 <- trim(count ~ site + time + Habitat, data=skylark, model=2 141 | , overdisp = TRUE, serialcor = TRUE, changepoints=1:7, stepwise = TRUE) 142 | m4$changepoints 143 | ``` 144 | 145 | Again, the explicit setting of initial changepoints can be replaced by the more convenient `changepoints="auto"`, which combines `changepoints="all"` with `stepwise=TRUE`. 146 | 147 | ## TRIM Command Files {#tcf} 148 | 149 | The original TRIM software can be controlled with text files containing a series of commands that specify both the location and format of the data, and the model (or models) to compute. Such TRIM command files (usually stored with the extension `.tcf`) should be considered legacy, but for backward compatibility they can be used from R. 150 | 151 | To try this, execute the code below to create a `tcf` file and a TRIM data file in the current 152 | working directory of R.
153 | ```{r} 154 | library(rtrim) 155 | tmp <- "FILE skylark.dat 156 | TITLE skylark-1d 157 | NTIMES 8 158 | NCOVARS 2 159 | LABELS 160 | Habitat 161 | Cov2 162 | END 163 | MISSING 999 164 | WEIGHT Absent 165 | COMMENT Example 1; using linear trend model 166 | WEIGHTING off 167 | OVERDISP on 168 | SERIALCOR on 169 | MODEL 2 170 | " 171 | write(tmp,file="skylark.tcf") 172 | data(skylark) 173 | skylark[is.na(skylark)] <- 999 174 | write.table(skylark,file="skylark.dat",col.names=FALSE,row.names=FALSE) 175 | ``` 176 | 177 | Executing a TRIM command file is as easy as reading the file using `read_tcf` and passing the result to `trim`. 178 | 179 | ```{r} 180 | tc <- read_tcf("skylark.tcf") 181 | m <- trim(tc) 182 | ``` 183 | The resulting `trim` object can be evaluated as described above. For example: 184 | ```{r} 185 | wald(m) 186 | ``` 187 | 188 | 189 | The object `tc`, resulting from `read_tcf`, is an object of class `trimcommand`. It stores all commands defined in the TRIM command file. Note that logical parameters such as `WEIGHT` are transformed to `logical` in R. 190 | ```{r} 191 | tc 192 | ``` 193 | 194 | 195 | **NOTE.** Be aware that R has its own present working directory. If relative paths (that is, file names not starting with the full path to their location) are used in the TRIM command file, R will interpret them as relative to the current working directory. 196 | 197 | ## TRIM data files 198 | 199 | TRIM data files are basically space-separated, tabular text files where the order and type of columns are fixed by a few parameters. Given such a specification, a file can be read with `read_tdf`. 200 | 201 | ## Utility functions 202 | 203 | An overview of count data can be obtained with the function `count_summary`. 204 | ```{r} 205 | data(skylark) 206 | count_summary(skylark) 207 | 208 | ``` 209 | The result is an overview similar to the one that used to be printed at the start of TRIM output files. 210 | 211 | The TRIM model can only be computed when sufficient data is present. With the function 212 | `check_observations` one can check if a certain model can be computed. 213 | Note the use of `year_col` to specify a non-default column name. 214 | ```{r} 215 | check_observations(skylark, model=2, year_col="time", changepoints=c(1,4)) 216 | ``` 217 | The result is a `list` with a boolean element `sufficient`. If `sufficient==FALSE`, the element `errors` 218 | contains a `data.frame` with the sites/times/covariates with insufficient counts. 219 | -------------------------------------------------------------------------------- /pkg/vignettes/taming_overdispersion.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Taming overdispersion" 3 | author: "Patrick Bogaart" 4 | date: "`r Sys.Date()`" 5 | output: 6 | rmarkdown::html_vignette: 7 | toc: true 8 | vignette: > 9 | %\VignetteIndexEntry{Taming overdispersion} 10 | %\VignetteEncoding{UTF-8} 11 | %\VignetteEngine{knitr::rmarkdown} 12 | --- 13 | 14 | ```{r, echo = FALSE} 15 | knitr::opts_chunk$set( 16 | collapse = TRUE, 17 | comment = "#>", 18 | fig.width = 7, 19 | fig.height = 5 20 | ) 21 | rm(list=ls()) 22 | ``` 23 | 24 | # Introduction 25 | 26 | For many species, count data is heavily skewed. 27 | This is especially the case for species which tend to flock or cluster together at one or more resting places, which may or may not change from year to year.
Especially when these places do change, it becomes difficult for `rtrim` (or any GLM) to model this correctly, because the episodic high counts are not captured well in a site factor, nor in a time-point factor. The result is that model deviations for these place/time combinations are large, which for several reasons may affect the computed overdispersion. To some extent, this effect will be real (because of the larger-than-expected variance), but there is also the risk of methodological artefacts, which we would like to avoid. 28 | 29 | This vignette aims at understanding the nature of huge overdispersion for these cases, and presents a number of mitigating approaches implemented in `rtrim`. 30 | 31 | Let's look at an example for the Oystercatcher data that comes with `rtrim`. 32 | First we plot the raw counts. 33 | 34 | ```{r} 35 | library(rtrim) 36 | data(oystercatcher) 37 | 38 | # Collect all raw count data 39 | ok <- is.finite(oystercatcher$count) & oystercatcher$count > 0 40 | count <- oystercatcher$count[ok] 41 | plot(count, type='p', pch=16, col="red", cex=0.4) 42 | ``` 43 | 44 | Sorting the counts helps to see the big picture. 45 | ```{r} 46 | count <- sort(count) 47 | plot(count/1000, type='p', pch=16, col="red", cex=0.4, las=TRUE, ylab="count (x1000)") 48 | ``` 49 | 50 | 51 | So it appears that a few site/year/month combinations hold the majority of all individuals. 52 | We can plot exactly this: 53 | ```{r} 54 | cum_count <- cumsum(sort(count, decreasing = TRUE)) # cumulative counts, largest first 55 | cum_pct <- 100 * cum_count / sum(count) # express as percentage of total 56 | n <- length(count) 57 | obs_pct <- 100 * (1:n)/n 58 | plot(obs_pct, cum_pct, type='n', xlab="Observations (%)", ylab="Cum. counts (%)", las=1) 59 | points(obs_pct, cum_pct, pch=16, cex=0.3, col="red") 60 | abline(a=100, b=-1, lty="dashed") 61 | grid() 62 | ``` 63 | 64 | In this case, we see the typical "Pareto principle": 20% of the data points represent 80% of the total counts. 65 | This is likely to have a strong impact on the estimated overdispersion. 66 | 67 | # Overdispersion for clustered observations: the problem 68 | 69 | Let's see how large the overdispersion actually is. Because using the full dataset may be a bit slow for use within a vignette, we create a second dataset using only the last 10 years, and the sites that have the best coverage (percentage of years and months that have positive counts). 70 | ```{r} 71 | oystercatcher2 <- subset(oystercatcher, year>=2005) 72 | 73 | calc_coverage <- function(x) 100*mean(is.finite(x) & x>0) 74 | coverage <- aggregate(count ~ site, data=oystercatcher2, calc_coverage, na.action=na.pass) 75 | coverage <- coverage[order(coverage$count, decreasing=TRUE), ] 76 | plot(coverage$count, ylab="coverage (%)", pch=16, col=gray(0,0.5), las=1) 77 | abline(a=50, b=0, col="red") 78 | ``` 79 | Based on the above figure, we decide to use a threshold of 50% coverage, which amounts to about 20 sites. 80 | ```{r} 81 | ok <- subset(coverage, count > 45) 82 | oystercatcher3 <- subset(oystercatcher2, site %in% ok$site) 83 | ``` 84 | 85 | ```{r} 86 | z <- trim(count ~ site + (year+month), data=oystercatcher3, model=3, overdisp=TRUE) 87 | summary(z) 88 | 89 | ``` 90 | 91 | So overdispersion is indeed huge!
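A quick look at the Pearson residuals themselves already shows where this huge value comes from. The chunk below is a minimal sketch that only reuses the `results()` extractor also employed further on; it assumes the fitted model `z` from the chunk above.
```{r}
# Observed and fitted counts for the observed site/time combinations
out <- results(z)
ok  <- is.finite(out$observed)
res <- (out$observed[ok] - out$fitted[ok]) / sqrt(out$fitted[ok])  # Pearson residuals
# A handful of extreme residuals dominates the sum of squares behind sigma^2
summary(res^2)
```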
92 | 93 | You may recall from [Models and statistical methods in rtrim](TRIM_methods_v2.pdf) that the formula for overdispersion is given by 94 | $$ \sigma^2 = \frac{1}{n-p} \sum_{ijm} r^2_{ijm}$$ 95 | with $n$ the number of observations, $p$ the number of model parameters and $r$ the Pearson residuals, given by 96 | $$ r = \frac{f_{ijm} - \mu_{ijm}}{\sqrt{\mu_{ijm}}}. $$ 97 | 98 | Species like the Oystercatcher are known to cluster in winter: many individuals may appear at one monitoring site at one time, and at another site at another time. This clustering behaviour is unlikely to be captured in full by the TRIM model. Therefore, residuals (i.e. $f-\mu$) may be large, and more than proportionally affect overdispersion $\sigma^2$ because of the nonlinearity involved (i.e. the squaring). So, while overdispersion may be high, it is also very likely to be overestimated. 99 | 100 | We can see the effect of large deviations on the computed overdispersion by skipping the 0, 1, 2, etc. largest values from the computation. 101 | ```{r} 102 | # Retrieve raw observed and modelled counts 103 | out <- results(z) 104 | ok <- is.finite(out$observed) 105 | f <- out$observed[ok] 106 | mu <- out$fitted[ok] 107 | 108 | # Compute Pearson residuals, and sort 109 | r <- (f - mu) / sqrt(mu) 110 | idx <- order(r^2, decreasing=TRUE) 111 | r <- r[idx] 112 | 113 | # How many observations and parameters? 114 | n <- length(f) 115 | p <- z$nsite + z$nyear-1 + z$nmonth-1 116 | 117 | # Set up 118 | skips <- 0 : (n %/% 2) # skip none to approx 50% of all residuals 119 | sig2 <- numeric(length(skips)) # Allocate a vector with the computed overdispersion 120 | for (i in seq_along(skips)) { 121 | r2 <- r[skips[i] : n]^2 122 | df <- n - p - skips[i] # correct for skipped 123 | sig2[i] <- sum(r2) / df 124 | } 125 | plot(skips, sig2, type='l', col="red", las=1) 126 | abline(h=0.0, lty="dashed", col="red") 127 | ``` 128 | 129 | Indeed, overdispersion appears to be very sensitive to the largest residuals, suggesting that the actual value is not very robust against these incidental observations. 130 | 131 | The question now is how to compute a more robust and realistic estimate of overdispersion, which may be (much) larger than 1, but is not affected by artefacts resulting from the estimation procedure. 132 | 133 | # Approach #1: average first, square later 134 | 135 | The formal approach to compute overdispersion is to first square the residuals, and then do the averaging, which is very sensitive to outliers. 136 | Let's see what happens if we reverse this order: first we do the averaging (using the absolute values of the residuals), then the squaring. Again, formally this is not correct, but at least it gives us some hint whether this phenomenon is the cause of the problem. 137 | ```{r} 138 | sig2_alt1 <- numeric(length(skips)) 139 | for (i in seq_along(skips)) { 140 | rr <- r[skips[i] : n] 141 | df <- n - p - skips[i] # correct for skipped 142 | sig2_alt1[i] <- (sum(abs(rr))/df)^2 143 | } 144 | plot(skips, sig2, type='l', col="red", las=1) 145 | lines(skips, sig2_alt1, col="blue") 146 | ``` 147 | 148 | Indeed, it appears that the effect is strongly mitigated, reducing $\sigma^2$ from about 850 to about 200, and it converges to about the same value once all outliers have been removed. 149 | But again, this approach is formally incorrect. 150 | 151 | # Approach #2: remove outliers (nonparametric) 152 | 153 | A second approach is to simply remove the outliers.
One way to do this is to use a nonparametric method, such as one based on the interquartile range (Tukey's fences). 154 | ```{r} 155 | # residuals, and their square 156 | r <- (f - mu) / sqrt(mu) 157 | r2 <- r^2 158 | 159 | # classic overdispersion 160 | n <- length(f) 161 | p <- z$nsite + z$nyear-1 + z$nmonth-1 162 | sig2_std <- sum(r^2) / (n-p) 163 | 164 | # Compute interquartile range and outlier limits 165 | Q <- quantile(r2, c(0.25, 0.50, 0.75)) # such that Q[3] is what you expect 166 | IQR <- (Q[3]-Q[1]) # Interquartile range 167 | k <- 3 # Tukey's criterion for "far out" 168 | lo <- Q[1] - k * IQR # low threshold value added for completeness only 169 | hi <- Q[3] + k * IQR 170 | cat(sprintf("Using r2 limit %f -- %f\n", lo, hi)) 171 | nlo <- sum(r2 < lo) 172 | cat(sprintf(" removing %d lower outliers (%.1f%%)\n", nlo, 100*nlo/length(f))) 173 | nhi <- sum(r2 > hi) 174 | cat(sprintf(" removing %d upper outliers (%.1f%%)\n", nhi, 100*nhi/length(f))) 175 | ok <- (r2>lo) & (r2<hi) 176 | sig2_alt2 <- sum(r2[ok]) / (sum(ok) - p) 177 | cat(sprintf("Overdispersion without outliers: %.1f (classic: %.1f)\n", sig2_alt2, sig2_std)) 178 | ``` 179 | 180 | This method works well, and is implemented in `rtrim` using the `constrain_overdisp` option, 181 | where the actual value ($>1$) represents the IQR multiplier (defaulting to 3, for 'far out', in the above example) 182 | ```{r, eval=FALSE} 183 | z <- trim(..., overdisp=TRUE, constrain_overdisp=3, ...) 184 | ``` 185 | 186 | 187 | # Approach #3: remove outliers (parametric) 188 | 189 | One of the assumptions behind TRIM is that the residuals are approximately $\chi^2$-distributed. Thus, it makes sense to fit such a distribution to find the, say, 99th percentile, using an estimate of $\sigma^2$ as a scaling parameter, to obtain a threshold value to identify outliers. Since the value of $\sigma^2$ will depend on the outliers removed, an iterative approach is required. 190 | ```{r} 191 | level <- 0.99 192 | niter <- 10 193 | sig2_alt3 <- numeric(niter) 194 | ok <- !logical(length(r2)) # all TRUE 195 | for (i in 1:niter) { 196 | # Compute current overdispersion 197 | df <- sum(ok) - p 198 | sig2_alt3[i] <- sum(r2[ok]) / df 199 | # Compute new cutoff value 200 | cutoff <- sig2_alt3[i] * qchisq(level, 1) 201 | ok <- r2 < cutoff 202 | } 203 | ntotal <- length(r2) 204 | noutlier <- ntotal - sum(ok) 205 | cat(sprintf("Removed %d outliers (%.1f%%)\n", noutlier, 100*noutlier/ntotal)) 206 | plot(sig2_alt3, type='l', col="red", ylim=range(0,range(sig2_alt3)), las=1) 207 | points(sig2_alt3, pch=16, col="red") 208 | text(25,400, sprintf("Convergence at %.1f", sig2_alt3[niter])) 209 | ``` 210 | Again, this method works well, and is implemented in `rtrim` using the `constrain_overdisp` option, for values in the range $0\ldots1$, e.g. 211 | ```{r, eval=FALSE} 212 | z <- trim(..., overdisp=TRUE, constrain_overdisp=0.99, ...) 213 | ``` 214 | 215 | Note that `constrain_overdisp` has three kinds of possible values: 216 | 217 | * $0 \ldots 1$ : Using Chi-squared outlier detection, with the specified level. 218 | * $1$ : No constraints. 219 | * $>1$ : Using Tukey's fences, with the specified IQR multiplier. 220 | 221 | # Application 222 | 223 | Here is an example where we compare unconstrained and constrained overdispersion, using the third approach. 224 | ```{r} 225 | z1 <- trim(count ~ site + (year+month), data=oystercatcher3, model=3, 226 | overdisp=TRUE, constrain_overdisp=0.99) 227 | z2 <- trim(count ~ site + (year+month), data=oystercatcher3, model=3, 228 | overdisp=TRUE, constrain_overdisp=1) 229 | idx1 <- index(z1) 230 | idx2 <- index(z2) 231 | plot(idx1, idx2, names=c("constrained","unconstrained")) 232 | ``` 233 | 234 | It is clear that the standard errors for the constrained overdispersion run are considerably smaller.
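The two fits can also be compared side by side through their summaries. This is a minimal sketch that only uses the objects `z1` and `z2` created in the chunk above; the exact overdispersion values reported will depend on the data subset used.
```{r, eval=FALSE}
# Compare the reported overdispersion of the constrained and unconstrained runs
summary(z1)   # constrain_overdisp = 0.99
summary(z2)   # constrain_overdisp = 1 (no constraint)
```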
-------------------------------------------------------------------------------- /roxygen.R: -------------------------------------------------------------------------------- 1 | library(roxygen2) 2 | library(devtools) 3 | options(error=traceback) 4 | unlink( 'pkg/man', TRUE) 5 | 6 | #setwd('pkg') 7 | document('./pkg') 8 | #roxygenize( '.' 9 | # , roxygen.dir='.' 10 | # , copy.package=FALSE 11 | # , unlink.target=TRUE 12 | # ) 13 | 14 | 15 | if (length(list.files('inst/doc')) == 0){ 16 | unlink( 'inst/doc', TRUE) 17 | } 18 | -------------------------------------------------------------------------------- /test.r: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/Rscript 2 | 3 | suppressPackageStartupMessages({ 4 | if (!require("docopt")) stop("docopt not installed") 5 | }) 6 | 7 | "Usage: test.r [nocovr] [snitch] 8 | 9 | nocovr Skip measuring test coverage. 10 | snitch Report lines not covered. 11 | " -> doc 12 | 13 | opt <- docopt(doc) 14 | 15 | if(!require(devtools)) stop('devtools not installed first') 16 | devtools::test('pkg') 17 | 18 | if (!opt$nocovr){ 19 | if(require(covr)){ 20 | cv <- covr::package_coverage('pkg') 21 | print(cv) 22 | if (opt$snitch) print(subset(tally_coverage(cv), value == 0),row.names=FALSE) 23 | } else { 24 | stop("covr not installed") 25 | } 26 | } 27 | --------------------------------------------------------------------------------