├── .gitattributes
├── engression-python
│   ├── engression
│   │   ├── data
│   │   │   ├── __init__.py
│   │   │   ├── loader.py
│   │   │   └── simulator.py
│   │   ├── __init__.py
│   │   ├── utils.py
│   │   ├── loss_func.py
│   │   ├── engression.py
│   │   └── models.py
│   ├── requirements.txt
│   ├── examples
│   │   ├── .DS_Store
│   │   └── example_air.ipynb
│   ├── setup.py
│   ├── LICENSE
│   └── README.md
├── engression-r
│   ├── LICENSE
│   ├── NAMESPACE
│   ├── man
│   │   ├── dftomat.Rd
│   │   ├── energyloss.Rd
│   │   ├── print.engression.Rd
│   │   ├── energylossbeta.Rd
│   │   ├── energylossall.Rd
│   │   ├── print.engressionBagged.Rd
│   │   ├── engressionfit.Rd
│   │   ├── predict.engression.Rd
│   │   ├── predict.engressionBagged.Rd
│   │   ├── engression.Rd
│   │   └── engressionBagged.Rd
│   ├── R
│   │   ├── dftomat.R
│   │   ├── energyloss.R
│   │   ├── energylossbeta.R
│   │   ├── energylossall.R
│   │   ├── print.engression.R
│   │   ├── print.engressionBagged.R
│   │   ├── predict.engression.R
│   │   ├── predict.engressionBagged.R
│   │   ├── engressionfit.R
│   │   ├── engression.R
│   │   └── engressionBagged.R
│   └── DESCRIPTION
├── LICENSE
├── .gitignore
└── README.md

/.gitattributes:
--------------------------------------------------------------------------------
1 | *.ipynb linguist-detectable=false
--------------------------------------------------------------------------------
/engression-python/engression/data/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/engression-python/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | numpy
3 | matplotlib
--------------------------------------------------------------------------------
/engression-r/LICENSE:
--------------------------------------------------------------------------------
1 | YEAR: 2023
2 | COPYRIGHT HOLDER: Xinwei Shen and Nicolai Meinshausen
3 |
--------------------------------------------------------------------------------
/engression-python/examples/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xwshen51/engression/HEAD/engression-python/examples/.DS_Store
--------------------------------------------------------------------------------
/engression-r/NAMESPACE:
--------------------------------------------------------------------------------
1 | import(torch)
2 | importFrom("stats", "predict", "quantile", "rnorm", "sd")
3 | S3method(predict,engression)
4 | S3method(predict,engressionBagged)
5 | S3method(print,engression)
6 | S3method(print,engressionBagged)
7 | export(engression)
8 | export(engressionBagged)
9 |
--------------------------------------------------------------------------------
/engression-python/engression/__init__.py:
--------------------------------------------------------------------------------
1 | from .engression import engression
2 |
3 | try:
4 |     # pylint: disable=wrong-import-position
5 |     import torch
6 | except ModuleNotFoundError:
7 |     raise ModuleNotFoundError(
8 |         "No module named 'torch', and engression depends on PyTorch (aka 'torch'). "
9 |         "Visit https://pytorch.org/ for installation instructions.")
10 |
--------------------------------------------------------------------------------
/engression-r/man/dftomat.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/dftomat.R
3 | \name{dftomat}
4 | \alias{dftomat}
5 | \title{Convert Data Frame to Numeric Matrix}
6 | \usage{
7 | dftomat(X)
8 | }
9 | \arguments{
10 | \item{X}{A data frame to be converted to a numeric matrix.}
11 | }
12 | \value{
13 | A numeric matrix corresponding to the input data frame.
14 | }
15 | \description{
16 | This function converts a data frame into a numeric matrix. If the data frame
17 | contains factor or character variables, they are first converted to numeric.
18 | }
19 | \keyword{internal}
20 |
--------------------------------------------------------------------------------
/engression-python/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 |
4 | with open('README.md') as f:
5 |     long_description = f.read()
6 |
7 | with open('requirements.txt') as f:
8 |     install_requires = [l.strip() for l in f]
9 |
10 |
11 | setup(
12 |     name='engression',
13 |     version='0.1.14',
14 |     description='Engression Modelling',
15 |     url='https://github.com/xwshen51/engression',
16 |     author='Xinwei Shen and Nicolai Meinshausen',
17 |     author_email='xinwei.shen@stat.math.ethz.ch',
18 |     install_requires=install_requires,
19 |     long_description=long_description,
20 |     long_description_content_type="text/markdown",
21 |     packages=find_packages(),
22 |     license="BSD 3-Clause License",
23 | )
--------------------------------------------------------------------------------
/engression-r/R/dftomat.R:
--------------------------------------------------------------------------------
1 | #' Convert Data Frame to Numeric Matrix
2 | #'
3 | #' This function converts a data frame into a numeric matrix. If the data frame
4 | #' contains factor or character variables, they are first converted to numeric.
5 | #'
6 | #' @param X A data frame to be converted to a numeric matrix.
7 | #'
8 | #' @return A numeric matrix corresponding to the input data frame.
9 | #'
10 | #'
11 | #' @keywords internal
12 | #'
13 | dftomat <- function(X){
14 |   X <- data.frame(lapply(X, function(x){
15 |     if (is.factor(x)){
16 |       as.numeric(as.character(x))
17 |     }else if(is.character(x)){
18 |       as.numeric(as.factor(x))
19 |     }else{
20 |       as.numeric(x)
21 |     }
22 |   }))
23 |   X = as.matrix(X)
24 |   return(X)
25 | }
26 |
--------------------------------------------------------------------------------
/engression-r/R/energyloss.R:
--------------------------------------------------------------------------------
1 | #' Energy Loss Calculation
2 | #'
3 | #' This function calculates the energy loss for given tensors. The loss is calculated
4 | #' as the average of the mean L2 norms between `yt` and `mxt` and between `yt` and `mxpt`,
5 | #' minus half the mean L2 norm between `mxt` and `mxpt`.
6 | #'
7 | #' @param yt A tensor representing the target values.
8 | #' @param mxt A tensor representing the model's stochastic predictions.
9 | #' @param mxpt A tensor representing another draw of the model's stochastic predictions.
10 | #'
11 | #' @return A scalar representing the calculated energy loss.
12 | #'
13 | #'
14 | #' @keywords internal
15 | #'
16 | energyloss <- function(yt,mxt,mxpt){
17 |   s1 = torch_mean(torch_norm(yt - mxt, 2, dim=2)) / 2 + torch_mean(torch_norm(yt - mxpt, 2, dim=2)) / 2
18 |   s2 = torch_mean(torch_norm(mxt - mxpt, 2, dim=2))
19 |   return (s1 - s2/2)
20 | }
21 |
--------------------------------------------------------------------------------
/engression-r/man/energyloss.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/energyloss.R
3 | \name{energyloss}
4 | \alias{energyloss}
5 | \title{Energy Loss Calculation}
6 | \usage{
7 | energyloss(yt, mxt, mxpt)
8 | }
9 | \arguments{
10 | \item{yt}{A tensor representing the target values.}
11 |
12 | \item{mxt}{A tensor representing the model's stochastic predictions.}
13 |
14 | \item{mxpt}{A tensor representing another draw of the model's stochastic predictions.}
15 | }
16 | \value{
17 | A scalar representing the calculated energy loss.
18 | }
19 | \description{
20 | This function calculates the energy loss for given tensors. The loss is calculated
21 | as the average of the mean L2 norms between \code{yt} and \code{mxt} and between \code{yt} and \code{mxpt},
22 | minus half the mean L2 norm between \code{mxt} and \code{mxpt}.
23 | }
24 | \keyword{internal}
25 |
--------------------------------------------------------------------------------
/engression-r/man/print.engression.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/print.engression.R
3 | \name{print.engression}
4 | \alias{print.engression}
5 | \title{Print an Engression Model Object}
6 | \usage{
7 | \method{print}{engression}(x, ...)
8 | }
9 | \arguments{
10 | \item{x}{A trained engression model returned from the engressionfit function.}
11 |
12 | \item{...}{additional arguments (currently ignored)}
13 | }
14 | \value{
15 | This function does not return anything. It prints a summary of the model,
16 | including information about its architecture and training process, and the loss
17 | values achieved at several epochs during training.
18 | }
19 | \description{
20 | This function is a utility that displays a summary of a fitted Engression model object.
21 | }
22 | \examples{
23 | \donttest{
24 | n = 1000
25 | p = 5
26 |
27 | X = matrix(rnorm(n*p),ncol=p)
28 | Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
29 |
30 | ## fit engression object
31 | engr = engression(X,Y)
32 | print(engr)
33 | }
34 |
35 | }
36 |
--------------------------------------------------------------------------------
/engression-r/man/energylossbeta.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/energylossbeta.R
3 | \name{energylossbeta}
4 | \alias{energylossbeta}
5 | \title{Energy Loss Calculation with Beta Scaling}
6 | \usage{
7 | energylossbeta(yt, mxt, mxpt, beta)
8 | }
9 | \arguments{
10 | \item{yt}{A tensor representing the target values.}
11 |
12 | \item{mxt}{A tensor representing the model's stochastic predictions.}
13 |
14 | \item{mxpt}{A tensor representing another draw of the model's stochastic predictions.}
15 |
16 | \item{beta}{A numeric value for scaling the energy loss.}
17 | }
18 | \value{
19 | A scalar representing the calculated energy loss.
20 | }
21 | \description{
22 | This function calculates the energy loss for given tensors. The loss is calculated
23 | as the average of the mean L2 norms between \code{yt} and \code{mxt} and between \code{yt} and \code{mxpt},
24 | each raised to the power of \code{beta}, minus half the mean L2 norm between \code{mxt} and \code{mxpt},
25 | also raised to the power of \code{beta}.
26 | }
27 | \keyword{internal}
28 |
--------------------------------------------------------------------------------
/engression-r/R/energylossbeta.R:
--------------------------------------------------------------------------------
1 | #' Energy Loss Calculation with Beta Scaling
2 | #'
3 | #' This function calculates the energy loss for given tensors. The loss is calculated
4 | #' as the average of the mean L2 norms between `yt` and `mxt` and between `yt` and `mxpt`,
5 | #' each raised to the power of `beta`, minus half the mean L2 norm between `mxt` and `mxpt`,
6 | #' also raised to the power of `beta`.
7 | #'
8 | #' @param yt A tensor representing the target values.
9 | #' @param mxt A tensor representing the model's stochastic predictions.
10 | #' @param mxpt A tensor representing another draw of the model's stochastic predictions.
11 | #' @param beta A numeric value for scaling the energy loss.
12 | #'
13 | #' @return A scalar representing the calculated energy loss.
14 | #'
15 | #' @keywords internal
16 | #'
17 | energylossbeta <- function(yt,mxt,mxpt,beta){
18 |   s1 = torch_pow(torch_mean(torch_norm(yt - mxt, 2, dim=2)),beta) / 2 + torch_pow(torch_mean(torch_norm(yt - mxpt, 2, dim=2)),beta) / 2
19 |   s2 = torch_pow(torch_mean(torch_norm(mxt - mxpt, 2, dim=2)),beta)
20 |   return (s1 - s2/2)
21 | }
22 |
--------------------------------------------------------------------------------
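For readers coming from the Python side, the computation above translates line by line into PyTorch. This is a sketch for orientation only (tensor shapes are assumed to be `(n, d)`, and R torch's 1-based `dim=2` becomes `dim=1`); it is not the estimator used by the Python package, which lives in `loss_func.py`:

```python
import torch

def energyloss_beta(yt, mxt, mxpt, beta=1.0):
    """Beta-scaled energy loss, mirroring the R implementation above."""
    # Mean L2 norm over the feature dimension, raised to the power beta.
    s1 = (torch.linalg.vector_norm(yt - mxt, 2, dim=1).mean().pow(beta) / 2
          + torch.linalg.vector_norm(yt - mxpt, 2, dim=1).mean().pow(beta) / 2)
    s2 = torch.linalg.vector_norm(mxt - mxpt, 2, dim=1).mean().pow(beta)
    return s1 - s2 / 2
```
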
/engression-r/R/energylossall.R:
--------------------------------------------------------------------------------
1 | #' Energy Loss Calculation (Extended Output)
2 | #'
3 | #' This function calculates the energy loss for given tensors, similar to `energyloss()`. The loss is calculated
4 | #' as the average of the mean L2 norms between `yt` and `mxt` and between `yt` and `mxpt`,
5 | #' minus half the mean L2 norm between `mxt` and `mxpt`. Unlike `energyloss()`, this function
6 | #' also returns the prediction loss s1 = E(|yt-mxt|) and variance loss s2 = E(|mxt-mxpt'|) as part of the output.
7 | #'
8 | #' @param yt A tensor representing the target values.
9 | #' @param mxt A tensor representing the model's stochastic predictions.
10 | #' @param mxpt A tensor representing another draw of the model's stochastic predictions.
11 | #'
12 | #' @return A vector containing the calculated energy loss, `s1`, and `s2`.
13 | #'
14 | #'
15 | #' @keywords internal
16 | energylossall <- function(yt,mxt,mxpt){
17 |   s1 = torch_mean(torch_norm(yt - mxt, 2, dim=2)) / 2 + torch_mean(torch_norm(yt - mxpt, 2, dim=2)) / 2
18 |   s2 = torch_mean(torch_norm(mxt - mxpt, 2, dim=2))
19 |   return (c((s1 - s2/2),s1,s2))
20 | }
21 |
--------------------------------------------------------------------------------
/engression-r/man/energylossall.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/energylossall.R
3 | \name{energylossall}
4 | \alias{energylossall}
5 | \title{Energy Loss Calculation (Extended Output)}
6 | \usage{
7 | energylossall(yt, mxt, mxpt)
8 | }
9 | \arguments{
10 | \item{yt}{A tensor representing the target values.}
11 |
12 | \item{mxt}{A tensor representing the model's stochastic predictions.}
13 |
14 | \item{mxpt}{A tensor representing another draw of the model's stochastic predictions.}
15 | }
16 | \value{
17 | A vector containing the calculated energy loss, \code{s1}, and \code{s2}.
18 | }
19 | \description{
20 | This function calculates the energy loss for given tensors, similar to \code{energyloss()}. The loss is calculated
21 | as the average of the mean L2 norms between \code{yt} and \code{mxt} and between \code{yt} and \code{mxpt},
22 | minus half the mean L2 norm between \code{mxt} and \code{mxpt}. Unlike \code{energyloss()}, this function
23 | also returns the prediction loss s1 = E(|yt-mxt|) and variance loss s2 = E(|mxt-mxpt'|) as part of the output.
24 | }
25 | \keyword{internal}
26 |
--------------------------------------------------------------------------------
/engression-r/DESCRIPTION:
--------------------------------------------------------------------------------
1 | Package: engression
2 | Title: Engression Modelling
3 | Version: 0.1.3
4 | Authors@R: c(person("Xinwei", "Shen", role = c("aut"), email = "xinwei.shen@stat.math.ethz.ch"), person("Nicolai", "Meinshausen", role = c("aut", "cre"), email = "meinshausen@stat.math.ethz.ch"))
5 | Description: Fits engression models for nonlinear distributional regression. Predictors and targets can be univariate or multivariate. Functionality includes estimation of conditional mean, estimation of conditional quantiles, or sampling from the fitted distribution. Training is done full-batch on CPU (the Python version offers GPU-accelerated stochastic gradient descent). Based on "Engression: Extrapolation for nonlinear regression?" by Xinwei Shen and Nicolai Meinshausen (2023) <arXiv:2307.00835>.
6 | URL: https://github.com/xwshen51/engression/
7 | BugReports: https://github.com/xwshen51/engression/issues
8 | License: MIT + file LICENSE
9 | Encoding: UTF-8
10 | Roxygen: list(markdown = TRUE)
11 | RoxygenNote: 7.2.3
12 | Imports: torch
13 | NeedsCompilation: no
14 | Packaged: 2023-09-15 12:19:37 UTC; nicolai
15 | Author: Xinwei Shen [aut],
16 |   Nicolai Meinshausen [aut, cre]
17 | Maintainer: Nicolai Meinshausen <meinshausen@stat.math.ethz.ch>
18 |
--------------------------------------------------------------------------------
/engression-r/man/print.engressionBagged.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/print.engressionBagged.R
3 | \name{print.engressionBagged}
4 | \alias{print.engressionBagged}
5 | \title{Print a Bagged Engression Model Object}
6 | \usage{
7 | \method{print}{engressionBagged}(x, ...)
8 | }
9 | \arguments{
10 | \item{x}{A trained bagged engression model object returned from
11 | the engressionBagged function.}
12 |
13 | \item{...}{additional arguments (currently ignored)}
14 | }
15 | \value{
16 | This function does not return anything. It prints a summary of the
17 | model, including the architecture of the individual models, the number
18 | of models in the bagged ensemble, and the loss values achieved at several
19 | epochs during training.
20 | }
21 | \description{
22 | This function displays a summary of a bagged Engression model object. The
23 | summary includes details about the individual models as well as the overall
24 | ensemble.
25 | }
26 | \examples{
27 | \donttest{
28 | n = 1000
29 | p = 5
30 | X = matrix(rnorm(n*p),ncol=p)
31 | Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
32 |
33 | ## fit bagged engression object
34 | engb = engressionBagged(X,Y,K=3)
35 | print(engb)
36 |
37 | }
38 |
39 | }
40 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2023, Xinwei Shen and Nicolai Meinshausen
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright notice, this
9 |    list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright notice,
12 |    this list of conditions and the following disclaimer in the documentation
13 |    and/or other materials provided with the distribution.
14 |
15 | 3. Neither the name of the copyright holder nor the names of its
16 |    contributors may be used to endorse or promote products derived from
17 |    this software without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
--------------------------------------------------------------------------------
/engression-python/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2023, Xinwei Shen and Nicolai Meinshausen
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright notice, this
9 |    list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright notice,
12 |    this list of conditions and the following disclaimer in the documentation
13 |    and/or other materials provided with the distribution.
14 |
15 | 3. Neither the name of the copyright holder nor the names of its
16 |    contributors may be used to endorse or promote products derived from
17 |    this software without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
--------------------------------------------------------------------------------
/engression-r/R/print.engression.R:
--------------------------------------------------------------------------------
1 | #' Print an Engression Model Object
2 | #'
3 | #' This function is a utility that displays a summary of a fitted Engression model object.
4 | #'
5 | #' @param x A trained engression model returned from the engressionfit function.
6 | #' @param ... additional arguments (currently ignored)
7 | #'
8 | #' @return This function does not return anything. It prints a summary of the model,
9 | #' including information about its architecture and training process, and the loss
10 | #' values achieved at several epochs during training.
11 | #'
12 | #' @examples
13 | #' \donttest{
14 | #' n = 1000
15 | #' p = 5
16 | #'
17 | #' X = matrix(rnorm(n*p),ncol=p)
18 | #' Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
19 | #'
20 | #' ## fit engression object
21 | #' engr = engression(X,Y)
22 | #' print(engr)
23 | #' }
24 | #'
25 | #' @export
26 | print.engression <- function(x, ...){
27 |   cat("\n engression object with ")
28 |   cat("\n \t noise dimensions: ",x$noise_dim)
29 |   cat("\n \t hidden dimensions: ",x$hidden_dim)
30 |   cat("\n \t number of layers: ",x$num_layer)
31 |   cat("\n \t dropout rate: ",x$dropout)
32 |   cat("\n \t batch normalization: ",x$batch_norm)
33 |   cat("\n \t number of epochs: ",x$num_epochs)
34 |   cat("\n \t learning rate: ",x$lr)
35 |   cat("\n \t standardization: ",x$standardize)
36 |
37 |   m = nrow(x$lossvec)
38 |   printat = pmax(1, floor(seq(1,m, length=11)))
39 |   pr = cbind(printat, x$lossvec[printat,])
40 |   colnames(pr) = c("epoch", colnames(x$lossvec))
41 |   cat("\n training loss: \n")
42 |   print(pr)
43 |   cat("\n prediction-loss E(|Y-Yhat|) and variance-loss E(|Yhat-Yhat'|) should ideally be equally large --\n consider training for more epochs if there is a mismatch \n\n")
44 |
45 | }
46 |
--------------------------------------------------------------------------------
/engression-r/man/engressionfit.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/engressionfit.R
3 | \name{engressionfit}
4 | \alias{engressionfit}
5 | \title{Engression Fit Function}
6 | \usage{
7 | engressionfit(
8 |   X,
9 |   Y,
10 |   noise_dim = 100,
11 |   hidden_dim = 100,
12 |   num_layer = 3,
13 |   dropout = 0.01,
14 |   batch_norm = TRUE,
15 |   num_epochs = 200,
16 |   lr = 10^(-3),
17 |   beta = 1,
18 |   silent = FALSE
19 | )
20 | }
21 | \arguments{
22 | \item{X}{A matrix or data frame representing the predictors.}
23 |
24 | \item{Y}{A matrix representing the target variable(s).}
25 |
26 | \item{noise_dim}{The dimension of the noise introduced in the model (default: 100).}
27 |
28 | \item{hidden_dim}{The size of the hidden layer in the model (default: 100).}
29 |
30 | \item{num_layer}{The number of layers in the model (default: 3).}
31 |
32 | \item{dropout}{The dropout rate to be used in the model in case no batch normalization is used (default: 0.01).}
33 |
34 | \item{batch_norm}{A boolean indicating whether to use batch-normalization (default: TRUE).}
35 |
36 | \item{num_epochs}{The number of epochs to be used in training (default: 200).}
37 |
38 | \item{lr}{The learning rate to be used in training (default: 10^-3).}
39 |
40 | \item{beta}{The beta scaling factor for energy loss (default: 1).}
41 |
42 | \item{silent}{A boolean indicating whether to suppress output during model training (default: FALSE).}
43 | }
44 | \value{
45 | A list containing the trained engression model and a vector of loss values.
46 | }
47 | \description{
48 | This function fits an Engression model to the provided data. It allows for the tuning of
49 | several parameters related to model complexity and training. The function is not meant to
50 | be exported but can be used within the package or for internal testing purposes.
51 | }
52 | \keyword{internal}
53 |
--------------------------------------------------------------------------------
/engression-r/R/print.engressionBagged.R:
--------------------------------------------------------------------------------
1 | #' Print a Bagged Engression Model Object
2 | #'
3 | #' This function displays a summary of a bagged Engression model object. The
4 | #' summary includes details about the individual models as well as the overall
5 | #' ensemble.
6 | #'
7 | #' @param x A trained bagged engression model object returned from
8 | #' the engressionBagged function.
9 | #' @param ... additional arguments (currently ignored)
10 | #'
11 | #' @return This function does not return anything. It prints a summary of the
12 | #' model, including the architecture of the individual models, the number
13 | #' of models in the bagged ensemble, and the loss values achieved at several
14 | #' epochs during training.
15 | #'
16 | #' @examples
17 | #' \donttest{
18 | #' n = 1000
19 | #' p = 5
20 | #' X = matrix(rnorm(n*p),ncol=p)
21 | #' Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
22 | #'
23 | #' ## fit bagged engression object
24 | #' engb = engressionBagged(X,Y,K=3)
25 | #' print(engb)
26 | #'
27 | #' }
28 | #'
29 | #' @export
30 | print.engressionBagged <- function(x, ...){
31 |   cat("\n bagged engression object with", length(x$models), "models")
32 |   cat("\n \t noise dimensions: ",x$noise_dim)
33 |   cat("\n \t hidden dimensions: ",x$hidden_dim)
34 |   cat("\n \t number of layers: ",x$num_layer)
35 |   cat("\n \t dropout rate: ",x$dropout)
36 |   cat("\n \t batch normalization: ",x$batch_norm)
37 |   cat("\n \t number of epochs: ",x$num_epochs)
38 |   cat("\n \t learning rate: ",x$lr)
39 |   cat("\n \t standardization: ",x$standardize)
40 |
41 |   avloss = Reduce("+",lapply(x$models, function(x) x$lossvec))/length(x$models)
42 |   m = nrow(avloss)
43 |   printat = pmax(1,floor((seq(1,m, length=11))))
44 |   pr = cbind(printat, avloss[printat,])
45 |   colnames(pr) = c("epoch", colnames(avloss))
46 |   cat("\n average training loss: \n")
47 |   print(pr)
48 |   cat("\n prediction-loss E(|Y-Yhat|) and variance-loss E(|Yhat-Yhat'|) should ideally be equally large --\n consider training for more epochs if there is a mismatch \n\n")
49 |
50 | }
51 |
--------------------------------------------------------------------------------
/engression-python/engression/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 |
4 | def vectorize(x, multichannel=False):
5 |     """Vectorize data in any shape.
6 |
7 |     Args:
8 |         x (torch.Tensor): input data
9 |         multichannel (bool, optional): whether to keep the multiple channels (in the second dimension). Defaults to False.
10 |
11 |     Returns:
12 |         torch.Tensor: data of shape (sample_size, dimension) or (sample_size, num_channel, dimension) if multichannel is True.
13 |     """
14 |     if len(x.shape) == 1:
15 |         return x.unsqueeze(1)
16 |     if len(x.shape) == 2:
17 |         return x
18 |     else:
19 |         if not multichannel: # one channel
20 |             return x.reshape(x.shape[0], -1)
21 |         else: # multi-channel
22 |             return x.reshape(x.shape[0], x.shape[1], -1)
23 |
24 | def cor(x, y):
25 |     """Compute the correlation between two signals.
26 |
27 |     Args:
28 |         x (torch.Tensor): input data
29 |         y (torch.Tensor): input data
30 |
31 |     Returns:
32 |         torch.Tensor: correlation between x and y
33 |     """
34 |     x = vectorize(x)
35 |     y = vectorize(y)
36 |     x = x - x.mean(0)
37 |     y = y - y.mean(0)
38 |     return ((x * y).mean()) / (x.std(unbiased=False) * y.std(unbiased=False))
39 |
40 | def make_folder(name):
41 |     """Make a folder.
42 |
43 |     Args:
44 |         name (str): folder name.
45 |     """
46 |     if not os.path.exists(name):
47 |         print('Creating folder: {}'.format(name))
48 |         os.makedirs(name)
49 |
50 | def check_for_gpu(device):
51 |     """Check if a CUDA device is available.
52 |
53 |     Args:
54 |         device (torch.device): current set device.
55 |     """
56 |     if device.type == "cuda":
57 |         if torch.cuda.is_available():
58 |             print("GPU is available, running on GPU.\n")
59 |         else:
60 |             print("GPU is NOT available, running instead on CPU.\n")
61 |     else:
62 |         if torch.cuda.is_available():
63 |             print("Warning: You have a CUDA device, so you may consider using GPU for potential acceleration\n by setting device to 'cuda'.\n")
64 |         else:
65 |             print("Running on CPU.\n")
66 |
--------------------------------------------------------------------------------
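A quick illustration of the shape contract that `vectorize` enforces (the tensors here are hypothetical):

```python
import torch
from engression.utils import vectorize

x = torch.randn(32, 3, 8, 8)                   # e.g. a batch of 3-channel images
print(vectorize(x).shape)                      # torch.Size([32, 192])
print(vectorize(x, multichannel=True).shape)   # torch.Size([32, 3, 64])
print(vectorize(torch.randn(32)).shape)        # torch.Size([32, 1])
```

Downstream loss functions can then treat every input uniformly as `(sample_size, dimension)`.
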
/engression-python/engression/data/loader.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.utils.data import TensorDataset, DataLoader
3 |
4 | def make_dataloader(x, y=None, batch_size=128, shuffle=True, num_workers=0):
5 |     """Make dataloader.
6 |
7 |     Args:
8 |         x (torch.Tensor): data of predictors.
9 |         y (torch.Tensor): data of responses.
10 |         batch_size (int, optional): batch size. Defaults to 128.
11 |         shuffle (bool, optional): whether to shuffle data. Defaults to True.
12 |         num_workers (int, optional): number of workers. Defaults to 0.
13 |
14 |     Returns:
15 |         DataLoader: data loader
16 |     """
17 |     if y is None:
18 |         dataset = TensorDataset(x)
19 |     else:
20 |         dataset = TensorDataset(x, y)
21 |     dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
22 |     return dataloader
23 |
24 | def partition_data(x_full, y_full, cut_quantile=0.3, split_train="smaller"):
25 |     """Partition data into training and test sets.
26 |
27 |     Args:
28 |         x_full (torch.Tensor): full data of x.
29 |         y_full (torch.Tensor): full data of y.
30 |         cut_quantile (float, optional): quantile of the cutting point of x. Defaults to 0.3.
31 |         split_train (str, optional): which subset is used for training. choices=["smaller", "larger"]. Defaults to "smaller".
32 |
33 |     Returns:
34 |         tuple of torch.Tensors: training and test data.
35 |     """
36 |     # Split data into training and test sets.
37 |     x_cut = torch.quantile(x_full, cut_quantile)
38 |     train_idx = x_full <= x_cut if split_train == "smaller" else x_full >= x_cut
39 |     x_tr = x_full[train_idx]
40 |     y_tr = y_full[train_idx]
41 |     x_te = x_full[~train_idx]
42 |     y_te = y_full[~train_idx]
43 |
44 |     # Standardize data based on training statistics.
45 |     x_tr_mean = x_tr.mean()
46 |     x_tr_std = x_tr.std()
47 |     y_tr_mean = y_tr.mean()
48 |     y_tr_std = y_tr.std()
49 |     x_tr = (x_tr - x_tr_mean)/x_tr_std
50 |     y_tr = (y_tr - y_tr_mean)/y_tr_std
51 |     x_te = (x_te - x_tr_mean)/x_tr_std
52 |     y_te = (y_te - y_tr_mean)/y_tr_std
53 |     x_full_normal = (x_full - x_tr_mean)/x_tr_std
54 |     return x_tr.unsqueeze(1), y_tr.unsqueeze(1), x_te.unsqueeze(1), y_te.unsqueeze(1), x_full_normal
--------------------------------------------------------------------------------
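A minimal usage sketch for the two helpers, assuming univariate `x` and `y` tensors (which is what the quantile-based split in `partition_data` expects); the data here is made up for illustration:

```python
import torch
from engression.data.loader import make_dataloader, partition_data

x_full = torch.rand(1000) * 4                              # hypothetical predictor
y_full = torch.nn.Softplus()(x_full + torch.randn(1000))   # hypothetical response
# Train on the lower 30% of x values; both splits are standardized with the
# training statistics, so the held-out set probes extrapolation.
x_tr, y_tr, x_te, y_te, x_all = partition_data(x_full, y_full,
                                               cut_quantile=0.3, split_train="smaller")
loader = make_dataloader(x_tr, y_tr, batch_size=128)
```
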
/engression-r/man/predict.engression.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/predict.engression.R
3 | \name{predict.engression}
4 | \alias{predict.engression}
5 | \title{Prediction Function for Engression Models}
6 | \usage{
7 | \method{predict}{engression}(
8 |   object,
9 |   Xtest,
10 |   type = c("mean", "sample", "quantile")[1],
11 |   trim = 0.05,
12 |   quantiles = 0.1 * (1:9),
13 |   nsample = 200,
14 |   drop = TRUE,
15 |   ...
16 | )
17 | }
18 | \arguments{
19 | \item{object}{A trained engression model returned from engression, engressionBagged or engressionfit functions.}
20 |
21 | \item{Xtest}{A matrix or data frame representing the predictors in the test set.}
22 |
23 | \item{type}{The type of prediction to make. "mean" for point estimates, "sample" for samples from the estimated distribution,
24 | or "quantile" for quantiles of the estimated distribution (default: "mean").}
25 |
26 | \item{trim}{The proportion of extreme values to trim when calculating the mean (default: 0.05).}
27 |
28 | \item{quantiles}{The quantiles to estimate if type is "quantile" (default: 0.1*(1:9)).}
29 |
30 | \item{nsample}{The number of samples to draw if type is "sample" (default: 200).}
31 |
32 | \item{drop}{A boolean indicating whether to drop dimensions of length 1 from the output (default: TRUE).}
33 |
34 | \item{...}{additional arguments (currently ignored)}
35 | }
36 | \value{
37 | A matrix or array of predictions.
38 | }
39 | \description{
40 | This function computes predictions from a trained engression model. It allows for the generation of point estimates, quantiles,
41 | or samples from the estimated distribution.
42 | }
43 | \examples{
44 | \donttest{
45 | n = 1000
46 | p = 5
47 |
48 | X = matrix(rnorm(n*p),ncol=p)
49 | Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
50 | Xtest = matrix(rnorm(n*p),ncol=p)
51 | Ytest = (Xtest[,1]+rnorm(n)*0.1)^2 + (Xtest[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
52 |
53 | ## fit engression object
54 | engr = engression(X,Y)
55 | print(engr)
56 |
57 | ## prediction on test data
58 | Yhat = predict(engr,Xtest,type="mean")
59 | cat("\n correlation between predicted and realized values: ", signif(cor(Yhat, Ytest),3))
60 | plot(Yhat, Ytest,xlab="prediction", ylab="observation")
61 |
62 | ## quantile prediction
63 | Yhatquant = predict(engr,Xtest,type="quantiles")
64 | ord = order(Yhat)
65 | matplot(Yhat[ord], Yhatquant[ord,], type="l", col=2,lty=1,xlab="prediction", ylab="observation")
66 | points(Yhat[ord],Ytest[ord],pch=20,cex=0.5)
67 |
68 | ## sampling from estimated model
69 | Ysample = predict(engr,Xtest,type="sample",nsample=1)
70 |
71 | }
72 |
73 |
74 | }
75 |
--------------------------------------------------------------------------------
/engression-python/engression/data/simulator.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 |
5 |
6 | def preanm_simulator(true_function="softplus", n=10000, x_lower=0, x_upper=2, noise_std=1, noise_dist="gaussian", train=True, device=torch.device("cpu")):
7 |     """Data simulator for a pre-additive noise model (pre-ANM).
8 |
9 |     Args:
10 |         true_function (str, optional): true function g^\star. Defaults to "softplus". Choices: ["softplus", "cubic", "square", "log"].
11 |         n (int, optional): sample size. Defaults to 10000.
12 |         x_lower (float, optional): lower bound of the training support. Defaults to 0.
13 |         x_upper (float, optional): upper bound of the training support. Defaults to 2.
14 |         noise_std (float, optional): standard deviation of the noise. Defaults to 1.
15 |         noise_dist (str, optional): noise distribution. Defaults to "gaussian". Choices: ["gaussian", "uniform"].
16 |         train (bool, optional): generate data for training. Defaults to True.
17 |         device (str or torch.device, optional): device. Defaults to torch.device("cpu").
18 |
19 |     Returns:
20 |         tuple of torch.Tensors: data simulated from a pre-ANM.
21 |     """
22 |     if isinstance(true_function, str):
23 |         if true_function == "softplus":
24 |             true_function = lambda x: nn.Softplus()(x)
25 |         elif true_function == "cubic":
26 |             true_function = lambda x: x.pow(3)/3
27 |         elif true_function == "square":
28 |             true_function = lambda x: (nn.functional.relu(x)).pow(2)/2
29 |         elif true_function == "log":
30 |             true_function = lambda x: (x/3 + np.log(3) - 2/3)*(x <= 2) + (torch.log(1 + x*(x > 2)))*(x > 2)
31 |
32 |     if isinstance(device, str):
33 |         device = torch.device(device)
34 |
35 |     if train:
36 |         x = torch.rand(n, 1)*(x_upper - x_lower) + x_lower
37 |         if noise_dist == "gaussian":
38 |             eps = torch.randn(n, 1)*noise_std
39 |         else:
40 |             assert noise_dist == "uniform"
41 |             eps = (torch.rand(n, 1) - 0.5)*noise_std*np.sqrt(12)
42 |         xn = x + eps
43 |         y = true_function(xn)
44 |         return x.to(device), y.to(device)
45 |
46 |     else:
47 |         x_eval = torch.linspace(x_lower, x_upper, n).unsqueeze(1)
48 |         y_eval_med = true_function(x_eval)
49 |         gen_sample_size = 10000
50 |         x_rep = torch.repeat_interleave(x_eval, (gen_sample_size * torch.ones(n)).long(), dim=0)
51 |         x_rep = x_rep + torch.randn(x_rep.size(0), 1)*noise_std
52 |         y_eval_mean = true_function(x_rep)
53 |         y_eval_mean = list(torch.split(y_eval_mean, gen_sample_size))
54 |         y_eval_mean = torch.cat([y_eval_mean[i].mean().unsqueeze(0) for i in range(n)], dim=0).unsqueeze(1)
55 |         return x_eval.to(device), y_eval_med.to(device), y_eval_mean.to(device)
56 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Initially taken from GitHub's Python gitignore file
2 |
3 | # Byte-compiled / optimized / DLL files
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 | _build
8 |
9 | # C extensions
10 | *.so
11 |
12 | # tests and logs
13 | tests/fixtures/cached_*_text.txt
14 | logs/
15 | lightning_logs/
16 | lang_code_data/
17 | log/
18 | regression_test/*/new_output_models
19 | regression_test/*/new_log
20 | output_dir/
21 |
22 | # data files
23 | data/
24 |
25 | # output models
26 | output_models/
27 |
28 | # Distribution / packaging
29 | .Python
30 | build/
31 | develop-eggs/
32 | dist/
33 | downloads/
34 | eggs/
35 | .eggs/
36 | lib/
37 | lib64/
38 | parts/
39 | sdist/
40 | var/
41 | wheels/
42 | *.egg-info/
43 | .installed.cfg
44 | *.egg
45 | MANIFEST
46 |
47 | # PyInstaller
48 | # Usually these files are written by a python script from a template
49 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
50 | *.manifest
51 | *.spec
52 |
53 | # Installer logs
54 | pip-log.txt
55 | pip-delete-this-directory.txt
56 |
57 | # Unit test / coverage reports
58 | htmlcov/
59 | .tox/
60 | .nox/
61 | .coverage
62 | .coverage.*
63 | .cache
64 | nosetests.xml
65 | coverage.xml
66 | *.cover
67 | .hypothesis/
68 | .pytest_cache/
69 |
70 | # Translations
71 | *.mo
72 | *.pot
73 |
74 | # Django stuff:
75 | *.log
76 | local_settings.py
77 | db.sqlite3
78 |
79 | # Flask stuff:
80 | instance/
81 | .webassets-cache
82 |
83 | # Scrapy stuff:
84 | .scrapy
85 |
86 | # Sphinx documentation
87 | docs/_build/
88 |
89 | # PyBuilder
90 | target/
91 |
92 | # Jupyter Notebook
93 | .ipynb_checkpoints
94 |
95 | # IPython
96 | profile_default/
97 | ipython_config.py
98 |
99 | # pyenv
100 | .python-version
101 |
102 | # celery beat schedule file
103 | celerybeat-schedule
104 |
105 | # SageMath parsed files
106 | *.sage.py
107 |
108 | # Environments
109 | .env
110 | .venv
111 | env/
112 | venv/
113 | ENV/
114 | env.bak/
115 | venv.bak/
116 |
117 | # Spyder project settings
118 | .spyderproject
119 | .spyproject
120 |
121 | # Rope project settings
122 | .ropeproject
123 |
124 | # mkdocs documentation
125 | /site
126 |
127 | # mypy
128 | .mypy_cache/
129 | .dmypy.json
130 | dmypy.json
131 |
132 | # Pyre type checker
133 | .pyre/
134 |
135 | # vscode
136 | .vs
137 | .vscode
138 |
139 | # Pycharm
140 | .idea
141 |
142 | # TF code
143 | tensorflow_code
144 |
145 | # Models
146 | proc_data
147 |
148 | # examples
149 | runs
150 | /runs_old
151 | /wandb
152 | /examples/runs
153 | /examples/**/*.args
154 | /examples/rag/sweep
155 |
156 | # data
157 | # /data
158 | serialization_dir
159 |
160 | # emacs
161 | *.*~
162 | debug.env
163 |
164 | # vim
165 | .*.swp
166 |
167 | #ctags
168 | tags
169 |
170 | # pre-commit
171 | .pre-commit*
172 |
173 | # .lock
174 | *.lock
175 |
176 | # DS_Store (MacOS)
177 | .DS_Store
178 |
179 | # ruff
180 | .ruff_cache
181 |
182 | # lm_evaluation cache
183 | lm_cache/
184 |
185 |
186 | .codegpt
--------------------------------------------------------------------------------
/engression-r/man/predict.engressionBagged.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/predict.engressionBagged.R
3 | \name{predict.engressionBagged}
4 | \alias{predict.engressionBagged}
5 | \title{Prediction Function for Bagged Engression Models}
6 | \usage{
7 | \method{predict}{engressionBagged}(
8 |   object,
9 |   Xtest = NULL,
10 |   type = c("mean", "sample", "quantile")[1],
11 |   trim = 0.05,
12 |   quantiles = 0.1 * (1:9),
13 |   nsample = 200,
14 |   drop = TRUE,
15 |   ...
16 | )
17 | }
18 | \arguments{
19 | \item{object}{A trained bagged engression model returned from the engressionBagged function.}
20 |
21 | \item{Xtest}{A matrix or data frame representing the predictors in the test set. If NULL, out-of-bag samples from the training
22 | set are used for prediction (default: NULL).}
23 |
24 | \item{type}{The type of prediction to make. "mean" for point estimates, "sample" for samples from the estimated distribution,
25 | or "quantile" for quantiles of the estimated distribution (default: "mean").}
26 |
27 | \item{trim}{The proportion of extreme values to trim when calculating the mean (default: 0.05).}
28 |
29 | \item{quantiles}{The quantiles to estimate if type is "quantile" (default: 0.1*(1:9)).}
30 |
31 | \item{nsample}{The number of samples to draw if type is "sample" (default: 200).}
32 |
33 | \item{drop}{A boolean indicating whether to drop dimensions of length 1 from the output (default: TRUE).}
34 |
35 | \item{...}{additional arguments (currently ignored)}
36 | }
37 | \value{
38 | A matrix or array of predictions.
39 |
40 | }
41 | \description{
42 | This function computes predictions from a trained bagged Engression model. It allows for the generation of point estimates,
43 | quantiles, or samples from the estimated distribution.
44 | }
45 | \examples{
46 | \donttest{
47 | n = 1000
48 | p = 5
49 | X = matrix(rnorm(n*p),ncol=p)
50 | Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
51 | Xtest = matrix(rnorm(n*p),ncol=p)
52 | Ytest = (Xtest[,1]+rnorm(n)*0.1)^2 + (Xtest[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
53 |
54 | ## fit bagged engression object
55 | engb = engressionBagged(X,Y,K=3)
56 | print(engb)
57 |
58 | ## prediction on test data
59 | Yhat = predict(engb,Xtest,type="mean")
60 | cat("\n correlation between predicted and realized values: ", signif(cor(Yhat, Ytest),3))
61 | plot(Yhat, Ytest,xlab="estimated conditional mean", ylab="observation")
62 |
63 | ## out-of-bag prediction
64 | Yhat_oob = predict(engb,type="mean")
65 | cat("\n correlation between predicted and realized values on oob data: ")
66 | print(signif(cor(Yhat_oob, Y),3))
67 | plot(Yhat_oob, Y,xlab="estimated conditional mean", ylab="observation")
68 |
69 | ## quantile prediction
70 | Yhatquant = predict(engb,Xtest,type="quantiles")
71 | ord = order(Yhat)
72 | matplot(Yhat[ord], Yhatquant[ord,], type="l", col=2,lty=1,xlab="prediction", ylab="observation")
73 | points(Yhat[ord],Ytest[ord],pch=20,cex=0.5)
74 |
75 | ## sampling from estimated model
76 | Ysample = predict(engb,Xtest,type="sample",nsample=1)
77 |
78 | }
79 |
80 | }
81 |
--------------------------------------------------------------------------------
/engression-r/man/engression.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/engression.R
3 | \name{engression}
4 | \alias{engression}
5 | \title{Engression Function}
6 | \usage{
7 | engression(
8 |   X,
9 |   Y,
10 |   noise_dim = 5,
11 |   hidden_dim = 100,
12 |   num_layer = 3,
13 |   dropout = 0.05,
14 |   batch_norm = TRUE,
15 |   num_epochs = 1000,
16 |   lr = 10^(-3),
17 |   beta = 1,
18 |   silent = FALSE,
19 |   standardize = TRUE
20 | )
21 | }
22 | \arguments{
23 | \item{X}{A matrix or data frame representing the predictors.}
24 |
25 | \item{Y}{A matrix or vector representing the target variable(s).}
26 |
27 | \item{noise_dim}{The dimension of the noise introduced in the model (default: 5).}
28 |
29 | \item{hidden_dim}{The size of the hidden layer in the model (default: 100).}
30 |
31 | \item{num_layer}{The number of layers in the model (default: 3).}
32 |
33 | \item{dropout}{The dropout rate to be used in the model in case no batch normalization is used (default: 0.05).}
34 |
35 | \item{batch_norm}{A boolean indicating whether to use batch-normalization (default: TRUE).}
36 |
37 | \item{num_epochs}{The number of epochs to be used in training (default: 1000).}
38 |
39 | \item{lr}{The learning rate to be used in training (default: 10^-3).}
40 |
41 | \item{beta}{The beta scaling factor for energy loss (default: 1).}
42 |
43 | \item{silent}{A boolean indicating whether to suppress output during model training (default: FALSE).}
44 |
45 | \item{standardize}{A boolean indicating whether to standardize the input data (default: TRUE).}
46 | }
47 | \value{
48 | An engression model object with class "engression".
49 | }
50 | \description{
51 | This function fits an engression model to the data. It allows for
52 | the tuning of several parameters related to model complexity.
53 | Variables are per default internally standardized (predictions are on original scale).
54 | }
55 | \examples{
56 | \donttest{
57 | n = 1000
58 | p = 5
59 |
60 | X = matrix(rnorm(n*p),ncol=p)
61 | Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
62 | Xtest = matrix(rnorm(n*p),ncol=p)
63 | Ytest = (Xtest[,1]+rnorm(n)*0.1)^2 + (Xtest[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
64 |
65 | ## fit engression object
66 | engr = engression(X,Y)
67 | print(engr)
68 |
69 | ## prediction on test data
70 | Yhat = predict(engr,Xtest,type="mean")
71 | cat("\n correlation between predicted and realized values: ", signif(cor(Yhat, Ytest),3))
72 | plot(Yhat, Ytest,xlab="prediction", ylab="observation")
73 |
74 | ## quantile prediction
75 | Yhatquant = predict(engr,Xtest,type="quantiles")
76 | ord = order(Yhat)
77 | matplot(Yhat[ord], Yhatquant[ord,], type="l", col=2,lty=1,xlab="prediction", ylab="observation")
78 | points(Yhat[ord],Ytest[ord],pch=20,cex=0.5)
79 |
80 | ## sampling from estimated model
81 | Ysample = predict(engr,Xtest,type="sample",nsample=1)
82 |
83 | ## plot of realized values against first variable
84 | oldpar <- par()
85 | par(mfrow=c(1,2))
86 | plot(Xtest[,1], Ytest, xlab="Variable 1", ylab="Observation")
87 | ## plot of sampled values against first variable
88 | plot(Xtest[,1], Ysample, xlab="Variable 1", ylab="Sample from engression model")
89 | par(oldpar)
90 | }
91 |
92 | }
93 |
--------------------------------------------------------------------------------
/engression-python/engression/loss_func.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .utils import vectorize
3 | from torch.linalg import vector_norm
4 |
5 |
6 | def energy_loss(x_true, x_est, beta=1, verbose=True):
7 |     """Loss function based on the energy score.
8 |
9 |     Args:
10 |         x_true (torch.Tensor): iid samples from the true distribution of shape (data_size, data_dim)
11 |         x_est (list of torch.Tensor):
12 |             - a list of length sample_size, where each element is a tensor of shape (data_size, data_dim) that contains one sample for each data point from the estimated distribution, or
13 |             - a tensor of shape (data_size*sample_size, response_dim) such that x_est[data_size*(i-1):data_size*i,:] contains one sample for each data point, for i = 1, ..., sample_size.
14 |         beta (float): power parameter in the energy score.
15 |         verbose (bool): whether to additionally return the two terms of the loss.
16 |
17 |     Returns:
18 |         loss (torch.Tensor): energy loss.
19 |     """
20 |     EPS = 0 if float(beta).is_integer() else 1e-5
21 |     x_true = vectorize(x_true).unsqueeze(1)
22 |     if not isinstance(x_est, list):
23 |         x_est = list(torch.split(x_est, x_true.shape[0], dim=0))
24 |     m = len(x_est)
25 |     x_est = [vectorize(x_est[i]).unsqueeze(1) for i in range(m)]
26 |     x_est = torch.cat(x_est, dim=1)
27 |
28 |     s1 = (vector_norm(x_est - x_true, 2, dim=2) + EPS).pow(beta).mean()
29 |     s2 = (torch.cdist(x_est, x_est, 2) + EPS).pow(beta).mean() * m / (m - 1)
30 |     if verbose:
31 |         return torch.cat([(s1 - s2 / 2).reshape(1), s1.reshape(1), s2.reshape(1)], dim=0)
32 |     else:
33 |         return (s1 - s2 / 2)
34 |
35 |
36 | def energy_loss_two_sample(x0, x, xp, x0p=None, beta=1, verbose=True, weights=None):
37 |     """Loss function based on the energy score (estimated based on two samples).
38 |
39 |     Args:
40 |         x0 (torch.Tensor): an iid sample from the true distribution.
41 |         x (torch.Tensor): an iid sample from the estimated distribution.
42 |         xp (torch.Tensor): another iid sample from the estimated distribution.
43 |         x0p (torch.Tensor): another iid sample from the true distribution.
44 |         beta (float): power parameter in the energy score.
45 |         verbose (bool): whether to additionally return the two terms of the loss.
46 |         weights (torch.Tensor, optional): per-sample weights; defaults to the uniform weight 1/data_size.
47 |     Returns:
48 |         loss (torch.Tensor): energy loss.
49 |     """
50 |     EPS = 0 if float(beta).is_integer() else 1e-5
51 |     x0 = vectorize(x0)
52 |     x = vectorize(x)
53 |     xp = vectorize(xp)
54 |     if weights is None:
55 |         weights = 1 / x0.size(0)
56 |     if x0p is None:
57 |         s1 = ((vector_norm(x - x0, 2, dim=1) + EPS).pow(beta) * weights).sum() / 2 + ((vector_norm(xp - x0, 2, dim=1) + EPS).pow(beta) * weights).sum() / 2
58 |         s2 = ((vector_norm(x - xp, 2, dim=1) + EPS).pow(beta) * weights).sum()
59 |         loss = s1 - s2/2
60 |     else:
61 |         x0p = vectorize(x0p)
62 |         s1 = ((vector_norm(x - x0, 2, dim=1) + EPS).pow(beta).sum() + (vector_norm(xp - x0, 2, dim=1) + EPS).pow(beta).sum() +
63 |               (vector_norm(x - x0p, 2, dim=1) + EPS).pow(beta).sum() + (vector_norm(xp - x0p, 2, dim=1) + EPS).pow(beta).sum()) / 4
64 |         s2 = (vector_norm(x - xp, 2, dim=1) + EPS).pow(beta).sum()
65 |         s3 = (vector_norm(x0 - x0p, 2, dim=1) + EPS).pow(beta).sum()
66 |         loss = s1 - s2/2 - s3/2
67 |     if verbose:
68 |         return torch.cat([loss.reshape(1), s1.reshape(1), s2.reshape(1)], dim=0)
69 |     else:
70 |         return loss
71 |
--------------------------------------------------------------------------------
/engression-r/R/predict.engression.R:
--------------------------------------------------------------------------------
1 | #' Prediction Function for Engression Models
2 | #'
3 | #' This function computes predictions from a trained engression model. It allows for the generation of point estimates, quantiles,
4 | #' or samples from the estimated distribution.
5 | #'
6 | #' @param object A trained engression model returned from engression, engressionBagged or engressionfit functions.
7 | #' @param Xtest A matrix or data frame representing the predictors in the test set.
8 | #' @param type The type of prediction to make. "mean" for point estimates, "sample" for samples from the estimated distribution,
9 | #' or "quantile" for quantiles of the estimated distribution (default: "mean").
10 | #' @param trim The proportion of extreme values to trim when calculating the mean (default: 0.05).
11 | #' @param quantiles The quantiles to estimate if type is "quantile" (default: 0.1*(1:9)).
12 | #' @param nsample The number of samples to draw if type is "sample" (default: 200).
13 | #' @param drop A boolean indicating whether to drop dimensions of length 1 from the output (default: TRUE).
14 | #' @param ... additional arguments (currently ignored)
15 | #'
16 | #' @return A matrix or array of predictions.
17 | #'
18 | #' @examples
19 | #' \donttest{
20 | #' n = 1000
21 | #' p = 5
22 | #'
23 | #' X = matrix(rnorm(n*p),ncol=p)
24 | #' Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
25 | #' Xtest = matrix(rnorm(n*p),ncol=p)
26 | #' Ytest = (Xtest[,1]+rnorm(n)*0.1)^2 + (Xtest[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
27 | #'
28 | #' ## fit engression object
29 | #' engr = engression(X,Y)
30 | #' print(engr)
31 | #'
32 | #' ## prediction on test data
33 | #' Yhat = predict(engr,Xtest,type="mean")
34 | #' cat("\n correlation between predicted and realized values: ", signif(cor(Yhat, Ytest),3))
35 | #' plot(Yhat, Ytest,xlab="prediction", ylab="observation")
36 | #'
37 | #' ## quantile prediction
38 | #' Yhatquant = predict(engr,Xtest,type="quantiles")
39 | #' ord = order(Yhat)
40 | #' matplot(Yhat[ord], Yhatquant[ord,], type="l", col=2,lty=1,xlab="prediction", ylab="observation")
41 | #' points(Yhat[ord],Ytest[ord],pch=20,cex=0.5)
42 | #'
43 | #' ## sampling from estimated model
44 | #' Ysample = predict(engr,Xtest,type="sample",nsample=1)
45 | #'
46 | #' }
47 | #'
48 | #'
49 | #' @export
50 | predict.engression <- function(object, Xtest, type=c("mean","sample","quantile")[1],trim=0.05, quantiles=0.1*(1:9), nsample=200, drop=TRUE, ...){
51 |
52 |   if (is.data.frame(Xtest)) Xtest = dftomat(Xtest)
53 |   if (is.vector(Xtest) && is.numeric(Xtest)) Xtest <- matrix(Xtest, ncol = 1)
54 |
55 |   if(object$standardize){
56 |     Xtest = sweep(sweep(Xtest,2,object$muX,FUN="-"),2,object$sddX,FUN="/")
57 |   }
58 |
59 |   Yhat1 = object$engressor(Xtest)
60 |   Yhat = array(dim=c(dim(Yhat1)[1], dim(Yhat1)[2], nsample))
61 |   for (sam in 1:nsample) Yhat[, ,sam] = if(!object$standardize) object$engressor(Xtest) else sweep(sweep(object$engressor(Xtest),2,object$sddY,FUN="*"),2,object$muY,FUN="+")
62 |
63 |   if(type=="sample") dimnames(Yhat)[[3]] = paste("sample_",1:nsample,sep="")
64 |   if(type=="mean") Yhat = apply(Yhat,1:(length(dim(Yhat))-1), mean,trim=trim)
65 |   if(type %in% c("quantile","quantiles")){
66 |     if(length(quantiles)==1){
67 |       Yhat = apply(Yhat,1:(length(dim(Yhat))-1), quantile, quantiles)
68 |     }else{
69 |       Yhat = aperm( apply(Yhat,1:(length(dim(Yhat))-1), quantile, quantiles), if(length(dim(Yhat))==3) c(2,3,1) else c(2,1) )
70 |     }
71 |   }
72 |
73 |   return(if(drop) drop(Yhat) else Yhat)
74 |
75 | }
76 |
--------------------------------------------------------------------------------
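The mean and quantile predictions above are Monte Carlo estimates: the engressor is evaluated `nsample` times per test point and empirical summaries are taken over the third array dimension. The same idea in a compact NumPy sketch, with a hypothetical `sampler` standing in for `object$engressor`:

```python
import numpy as np

def mc_quantiles(sampler, x_test, quantiles, nsample=200):
    # sampler(x_test) -> one stochastic prediction per row, shape (n_test,)
    draws = np.stack([sampler(x_test) for _ in range(nsample)], axis=-1)
    return np.quantile(draws, quantiles, axis=-1).T   # shape (n_test, len(quantiles))
```
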
to keep the out-of-bag samples and training data (default: TRUE).} 32 | 33 | \item{noise_dim}{The dimension of the noise introduced in the model (default: 10).} 34 | 35 | \item{hidden_dim}{The size of the hidden layer in the model (default: 100).} 36 | 37 | \item{num_layer}{The number of layers in the model (default: 3).} 38 | 39 | \item{dropout}{The dropout rate to be used in the model (default: 0.05).} 40 | 41 | \item{batch_norm}{A boolean indicating whether to use batch-normalization (default: TRUE).} 42 | 43 | \item{num_epochs}{The number of epochs to be used in training (default: 1000).} 44 | 45 | \item{lr}{The learning rate to be used in training (default: 10^-3).} 46 | 47 | \item{beta}{The beta scaling factor for energy loss (default: 1).} 48 | 49 | \item{silent}{A boolean indicating whether to suppress output during model training (default: FALSE).} 50 | 51 | \item{standardize}{A boolean indicating whether to standardize the input data (default: TRUE).} 52 | } 53 | \value{ 54 | A bagged engression model object with class "engressionBagged". 55 | } 56 | \description{ 57 | This function fits a bagged engression model to the data by fitting multiple 58 | engression models to subsamples of the data. It allows for the tuning of several parameters 59 | related to model complexity. 60 | } 61 | \examples{ 62 | \donttest{ 63 | n = 1000 64 | p = 5 65 | X = matrix(rnorm(n*p),ncol=p) 66 | Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1 67 | Xtest = matrix(rnorm(n*p),ncol=p) 68 | Ytest = (Xtest[,1]+rnorm(n)*0.1)^2 + (Xtest[,2]+rnorm(n)*0.1) + rnorm(n)*0.1 69 | 70 | ## fit bagged engression object 71 | engb = engressionBagged(X,Y,K=3) 72 | print(engb) 73 | 74 | ## prediction on test data 75 | Yhat = predict(engb,Xtest,type="mean") 76 | cat("\n correlation between predicted and realized values: ", signif(cor(Yhat, Ytest),3)) 77 | plot(Yhat, Ytest,xlab="estimated conditional mean", ylab="observation") 78 | 79 | ## out-of-bag prediction 80 | Yhat_oob = predict(engb,type="mean") 81 | cat("\n correlation between predicted and realized values on oob data: ") 82 | print(signif(cor(Yhat_oob, Y),3)) 83 | plot(Yhat_oob, Y,xlab="prediction", ylab="observation") 84 | 85 | ## quantile prediction 86 | Yhatquant = predict(engb,Xtest,type="quantiles") 87 | ord = order(Yhat) 88 | matplot(Yhat[ord], Yhatquant[ord,], type="l", col=2,lty=1,xlab="prediction", ylab="observation") 89 | points(Yhat[ord],Ytest[ord],pch=20,cex=0.5) 90 | 91 | ## sampling from estimated model 92 | Ysample = predict(engb,Xtest,type="sample",nsample=1) 93 | 94 | ## plot of realized values against first variable 95 | oldpar <- par() 96 | par(mfrow=c(1,2)) 97 | plot(Xtest[,1], Ytest, xlab="Variable 1", ylab="Observation") 98 | ## plot of sampled values against first variable 99 | plot(Xtest[,1], Ysample[,1], xlab="Variable 1", ylab="Sample from engression model") 100 | par(oldpar) 101 | } 102 | 103 | } 104 | -------------------------------------------------------------------------------- /engression-python/README.md: -------------------------------------------------------------------------------- 1 | # Engression 2 | 3 | Engression is a neural network-based distributional regression method proposed in the paper "[*Engression: Extrapolation through the Lens of Distributional Regression?*](https://arxiv.org/abs/2307.00835)" by Xinwei Shen and Nicolai Meinshausen (2023). This repository contains the software implementations of engression in both R and Python. 
4 |
5 | Consider targets $Y\in\mathbb{R}^k$ and predictors $X\in\mathbb{R}^d$; both variables can be univariate or multivariate, continuous or discrete. Engression can be used to
6 | * estimate the conditional mean $\mathbb{E}[Y|X=x]$ (as in least-squares regression),
7 | * estimate the conditional quantiles of $Y$ given $X=x$ (as in quantile regression), and
8 | * sample from the fitted conditional distribution of $Y$ given $X=x$ (as a generative model).
9 |
10 | The results in the paper show the advantages of engression over existing regression approaches in terms of extrapolation.
11 |
12 |
13 | ## Installation
14 | The latest release of the Python package can be installed through pip:
15 | ```sh
16 | pip install engression
17 | ```
18 |
19 | The development version can be installed from github:
20 |
21 | ```sh
22 | pip install -e "git+https://github.com/xwshen51/engression#egg=engression&subdirectory=engression-python"
23 | ```
24 |
25 |
26 | ## Usage Example
27 |
28 | ### Python
29 |
30 | Below is one simple demonstration. See [this tutorial](https://github.com/xwshen51/engression/blob/main/engression-python/examples/example_simu.ipynb) for more details on simulated data and [this tutorial](https://github.com/xwshen51/engression/blob/main/engression-python/examples/example_air.ipynb) for a real data example. We demonstrate in [another tutorial](https://github.com/xwshen51/engression/blob/main/engression-python/examples/example_bag.ipynb) how to fit a bagged engression model, which also helps with hyperparameter tuning.
31 | ```python
32 | from engression import engression
33 | from engression.data.simulator import preanm_simulator
34 |
35 | ## Simulate data
36 | x, y = preanm_simulator("square", n=10000, x_lower=0, x_upper=2, noise_std=1, train=True, device="cuda")
37 | x_eval, y_eval_med, y_eval_mean = preanm_simulator("square", n=1000, x_lower=0, x_upper=4, noise_std=1, train=False, device="cuda")
38 |
39 | ## Fit an engression model
40 | engressor = engression(x, y, lr=0.01, num_epochs=500, batch_size=1000, device="cuda")
41 | ## Summarize model information
42 | engressor.summary()
43 |
44 | ## Evaluation
45 | print("L2 loss:", engressor.eval_loss(x_eval, y_eval_mean, loss_type="l2"))
46 | print("correlation between predicted and true means:", engressor.eval_loss(x_eval, y_eval_mean, loss_type="cor"))
47 |
48 | ## Predictions
49 | y_pred_mean = engressor.predict(x_eval, target="mean") ## for the conditional mean
50 | y_pred_med = engressor.predict(x_eval, target="median") ## for the conditional median
51 | y_pred_quant = engressor.predict(x_eval, target=[0.025, 0.5, 0.975]) ## for the conditional 2.5%, 50%, and 97.5% quantiles
52 | ```
53 |
54 |
55 | ## Contact information
56 | If you encounter any problems with the code, please submit an issue or contact [Xinwei Shen](mailto:xinwei.shen@stat.math.ethz.ch).
57 | 58 | 59 | ## Citation 60 | If you would refer to or extend our work, please cite the following paper: 61 | ``` 62 | @article{10.1093/jrsssb/qkae108, 63 | author = {Shen, Xinwei and Meinshausen, Nicolai}, 64 | title = {Engression: extrapolation through the lens of distributional regression}, 65 | journal = {Journal of the Royal Statistical Society Series B: Statistical Methodology}, 66 | pages = {qkae108}, 67 | year = {2024}, 68 | month = {11}, 69 | issn = {1369-7412}, 70 | doi = {10.1093/jrsssb/qkae108}, 71 | url = {https://doi.org/10.1093/jrsssb/qkae108}, 72 | eprint = {https://academic.oup.com/jrsssb/advance-article-pdf/doi/10.1093/jrsssb/qkae108/60827977/qkae108.pdf}, 73 | } 74 | ``` -------------------------------------------------------------------------------- /engression-r/R/predict.engressionBagged.R: -------------------------------------------------------------------------------- 1 | #' Prediction Function for Bagged Engression Models 2 | #' 3 | #' This function computes predictions from a trained bagged Engression model. It allows for the generation of point estimates, 4 | #' quantiles, or samples from the estimated distribution. 5 | #' 6 | #' @param object A trained bagged engression model returned from the engressionBagged function. 7 | #' @param Xtest A matrix or data frame representing the predictors in the test set. If NULL, out-of-bag samples from the training 8 | #' set are used for prediction (default: NULL). 9 | #' @param type The type of prediction to make. "mean" for point estimates, "sample" for samples from the estimated distribution, 10 | #' or "quantile" for quantiles of the estimated distribution (default: "mean"). 11 | #' @param trim The proportion of extreme values to trim when calculating the mean (default: 0.05). 12 | #' @param quantiles The quantiles to estimate if type is "quantile" (default: 0.1*(1:9)). 13 | #' @param nsample The number of samples to draw if type is "sample" (default: 200). 14 | #' @param drop A boolean indicating whether to drop dimensions of length 1 from the output (default: TRUE). 15 | #' @param ... additional arguments (currently ignored) 16 | #' 17 | #' @return A matrix or array of predictions. 
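#' For \code{type="sample"}, the last dimension of the returned array indexes the generated samples;
#' for \code{type="quantiles"} with several quantiles, the last dimension indexes the requested quantiles.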
18 | #'
19 | #' @examples
20 | #' \donttest{
21 | #' n = 1000
22 | #' p = 5
23 | #' X = matrix(rnorm(n*p),ncol=p)
24 | #' Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
25 | #' Xtest = matrix(rnorm(n*p),ncol=p)
26 | #' Ytest = (Xtest[,1]+rnorm(n)*0.1)^2 + (Xtest[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
27 | #'
28 | #' ## fit bagged engression object
29 | #' engb = engressionBagged(X,Y,K=3)
30 | #' print(engb)
31 | #'
32 | #' ## prediction on test data
33 | #' Yhat = predict(engb,Xtest,type="mean")
34 | #' cat("\n correlation between predicted and realized values: ", signif(cor(Yhat, Ytest),3))
35 | #' plot(Yhat, Ytest,xlab="estimated conditional mean", ylab="observation")
36 | #'
37 | #' ## out-of-bag prediction
38 | #' Yhat_oob = predict(engb,type="mean")
39 | #' cat("\n correlation between predicted and realized values on oob data: ")
40 | #' print(signif(cor(Yhat_oob, Y),3))
41 | #' plot(Yhat_oob, Y,xlab="estimated conditional mean", ylab="observation")
42 | #'
43 | #' ## quantile prediction
44 | #' Yhatquant = predict(engb,Xtest,type="quantiles")
45 | #' ord = order(Yhat)
46 | #' matplot(Yhat[ord], Yhatquant[ord,], type="l", col=2,lty=1,xlab="prediction", ylab="observation")
47 | #' points(Yhat[ord],Ytest[ord],pch=20,cex=0.5)
48 | #'
49 | #' ## sampling from estimated model
50 | #' Ysample = predict(engb,Xtest,type="sample",nsample=1)
51 | #'
52 | #' }
53 | #'
54 | #' @export
55 | predict.engressionBagged <- function(object, Xtest=NULL, type=c("mean","sample","quantile")[1],trim=0.05, quantiles=0.1*(1:9), nsample=200, drop=TRUE, ...){
56 |   useoob=FALSE
57 |   if(is.null(Xtest)){
58 |     useoob = TRUE
59 |     if(!is.null(object$Xtrain)) Xtest = object$Xtrain else stop("if Xtest is not provided, need to set keepoutbag=TRUE when fitting bagged engression model")
60 |   }
61 |   if (is.data.frame(Xtest)) Xtest = dftomat(Xtest)
62 |   if (is.vector(Xtest) && is.numeric(Xtest)) Xtest <- matrix(Xtest, ncol = 1)
63 |
64 |   K = length(object$models)
65 |
66 |   nsam = if(useoob) 5*ceiling(nsample/K) else ceiling(nsample/K)
67 |   Yhat1 = predict.engression(object$models[[1]],Xtest, type="sample", nsample=nsam, drop=FALSE)
68 |
69 |   Yhat = array(dim=c(dim(Yhat1),K))
70 |   for (k in 1:K){
71 |     if(!useoob){
72 |       Yhat[,,,k] = predict.engression(object$models[[k]],Xtest, type="sample", nsample=nsam, drop=FALSE)
73 |     }else{
74 |       usesam = which(apply(object$inbag!=k,1,all))
75 |       Yhat[usesam,,,k] = predict.engression(object$models[[k]],Xtest[usesam,], type="sample", nsample=nsam, drop=FALSE)
76 |     }
77 |   }
78 |   Yhat = aperm( apply(Yhat,c(1,2),as.vector ),c(2,3,1))
79 |   if(useoob) Yhat = aperm(apply(Yhat,1:2,function(x) x[which(!is.na(x))]),c(2,3,1))
80 |   if(type=="sample") dimnames(Yhat)[[length(dim(Yhat))]] = paste("sample_",1:dim(Yhat)[length(dim(Yhat))],sep="")
81 |   if(type=="mean") Yhat = apply(Yhat,1:(length(dim(Yhat))-1), mean, trim=trim)
82 |   if(type %in% c("quantile","quantiles")){
83 |     if(length(quantiles)==1){
84 |       Yhat = apply(Yhat,1:(length(dim(Yhat))-1), quantile, quantiles)
85 |     }else{
86 |       Yhat = aperm( apply(Yhat,1:(length(dim(Yhat))-1), quantile, quantiles), if(length(dim(Yhat))==3) c(2,3,1) else c(2,1) )
87 |     }
88 |   }
89 |   return(if(drop) drop(Yhat) else Yhat)
90 | }
-------------------------------------------------------------------------------- /engression-r/R/engressionfit.R: --------------------------------------------------------------------------------
1 | #' Engression Fit Function
2 | #'
3 | #' This function fits an Engression model to the provided data.
It allows for the tuning of 4 | #' several parameters related to model complexity and training. The function is not meant to 5 | #' be exported but can be used within the package or for internal testing purposes. 6 | #' 7 | #' @param X A matrix or data frame representing the predictors. 8 | #' @param Y A matrix representing the target variable(s). 9 | #' @param noise_dim The dimension of the noise introduced in the model (default: 100). 10 | #' @param hidden_dim The size of the hidden layer in the model (default: 100). 11 | #' @param num_layer The number of layers in the model (default: 3). 12 | #' @param dropout The dropout rate to be used in the model in case no batch normalization is used (default: 0.01) 13 | #' @param batch_norm A boolean indicating whether to use batch-normalization (default: TRUE). 14 | #' @param num_epochs The number of epochs to be used in training (default: 200). 15 | #' @param lr The learning rate to be used in training (default: 10^-3). 16 | #' @param beta The beta scaling factor for energy loss (default: 1). 17 | #' @param silent A boolean indicating whether to suppress output during model training (default: FALSE). 18 | #' 19 | #' @return A list containing the trained engression model and a vector of loss values. 20 | #' 21 | #' @keywords internal 22 | #' 23 | engressionfit <- function(X,Y, noise_dim=100, hidden_dim=100, num_layer=3, dropout=0.01,batch_norm=TRUE, num_epochs=200,lr=10^(-3), beta=1, silent=FALSE){ 24 | in_dim = dim(X)[2] 25 | out_dim = dim(Y)[2] 26 | if(num_layer<=2){ 27 | if(!batch_norm){ 28 | model = nn_sequential( nn_linear(in_dim+noise_dim,hidden_dim),nn_dropout(dropout), nn_elu(), nn_linear(hidden_dim,out_dim)) 29 | }else{ 30 | model = nn_sequential( nn_linear(in_dim+noise_dim,hidden_dim), nn_elu(),nn_batch_norm1d(hidden_dim), nn_linear(hidden_dim,out_dim)) 31 | 32 | } 33 | }else{ 34 | if(!batch_norm){ 35 | hid = nn_sequential(nn_linear(hidden_dim, hidden_dim),nn_elu()) 36 | if(num_layer>3) for (lay in 3:num_layer) hid = nn_sequential(hid,nn_sequential(nn_linear(hidden_dim, hidden_dim),nn_elu()) ) 37 | model = nn_sequential( nn_sequential(nn_linear(in_dim+noise_dim,hidden_dim),nn_dropout(dropout), nn_elu()),hid, nn_linear(hidden_dim,out_dim)) 38 | }else{ 39 | hid = nn_sequential(nn_linear(hidden_dim, hidden_dim),nn_elu(),nn_batch_norm1d(hidden_dim)) 40 | if(num_layer>3) for (lay in 3:num_layer) hid = nn_sequential(hid,nn_sequential(nn_linear(hidden_dim, hidden_dim),nn_elu(),nn_batch_norm1d(hidden_dim)) ) 41 | model = nn_sequential( nn_sequential(nn_linear(in_dim+noise_dim,hidden_dim), nn_elu(),nn_batch_norm1d(hidden_dim)),hid, nn_linear(hidden_dim,out_dim)) 42 | } 43 | } 44 | model$train() 45 | 46 | optimizer = optim_adam(model$parameters,lr=lr) 47 | 48 | n= dim(X)[1] 49 | lossvec = matrix(nrow=num_epochs, ncol=3) 50 | colnames(lossvec) = c("energy-loss","E(|Y-Yhat|)","E(|Yhat-Yhat'|)") 51 | printat = pmax(1,floor((seq(1,num_epochs, length=11)))) 52 | 53 | for (iter in 1:num_epochs){ 54 | optimizer$zero_grad() 55 | if(noise_dim>0){ 56 | xt = torch_tensor(cbind(X, matrix(rnorm(n*noise_dim),ncol=noise_dim) ), dtype=torch_float(),requires_grad=TRUE) 57 | xpt = torch_tensor(cbind(X, matrix(rnorm(n*noise_dim),ncol=noise_dim) ), dtype=torch_float(),requires_grad=TRUE) 58 | yt = torch_tensor(Y, dtype=torch_float(),requires_grad=TRUE) 59 | }else{ 60 | xt = torch_tensor(X, dtype=torch_float(),requires_grad=TRUE) 61 | xpt = torch_tensor(X , dtype=torch_float(),requires_grad=TRUE) 62 | yt = torch_tensor(Y, dtype=torch_float(),requires_grad=TRUE) 63 | 
}
64 |     la = energylossall(yt,model(xt),model(xpt))
65 |     lossvec[iter, ] = signif(c(sapply(la, as.numeric)),3 )
66 |     if(beta==1) loss = energyloss(yt,model(xt),model(xpt)) else loss= energylossbeta(yt,model(xt),model(xpt),beta)
67 |     loss$backward()
68 |     optimizer$step()
69 |     if(!silent){
70 |       cat("\r ", round(100*iter/num_epochs), "% complete, epoch: ", iter)
71 |       if(iter %in% printat){cat("\n"); print(lossvec[iter,])}
72 |     }
73 |   }
74 |   if(batch_norm) model$train(mode=FALSE)
75 |
76 |   if(noise_dim>0){
77 |     engressor = function(x) as.matrix(model( torch_tensor(cbind(x, matrix(rnorm(nrow(x)*noise_dim),ncol=noise_dim) ), dtype=torch_float())),ncol=out_dim)
78 |   }else{
79 |     engressor = function(x) as.matrix(model( torch_tensor(x, dtype=torch_float())),ncol=out_dim)
80 |   }
81 |   return(list(engressor=engressor, lossvec=lossvec))
82 | }
83 |
-------------------------------------------------------------------------------- /engression-r/R/engression.R: --------------------------------------------------------------------------------
1 | #' Engression Function
2 | #'
3 | #' This function fits an engression model to the data. It allows for
4 | #' the tuning of several parameters related to model complexity.
5 | #' Variables are by default internally standardized (predictions are on the original scale).
6 | #'
7 | #' @param X A matrix or data frame representing the predictors.
8 | #' @param Y A matrix or vector representing the target variable(s).
9 | #' @param noise_dim The dimension of the noise introduced in the model (default: 5).
10 | #' @param hidden_dim The size of the hidden layer in the model (default: 100).
11 | #' @param num_layer The number of layers in the model (default: 3).
12 | #' @param dropout The dropout rate to be used in the model in case no batch normalization is used (default: 0.05).
13 | #' @param batch_norm A boolean indicating whether to use batch-normalization (default: TRUE).
14 | #' @param num_epochs The number of epochs to be used in training (default: 1000).
15 | #' @param lr The learning rate to be used in training (default: 10^-3).
16 | #' @param beta The beta scaling factor for energy loss (default: 1).
17 | #' @param silent A boolean indicating whether to suppress output during model training (default: FALSE).
18 | #' @param standardize A boolean indicating whether to standardize the input data (default: TRUE).
19 | #'
20 | #' @return An engression model object with class "engression".
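#' The returned list also contains the fitted sampler (\code{engressor}), the per-epoch training losses (\code{lossvec}),
#' and the standardization statistics (\code{muX}, \code{sddX}, \code{muY}, \code{sddY}) that \code{predict.engression}
#' uses to map predictions back to the original scale.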
21 | #' 22 | #' @examples 23 | #' \donttest{ 24 | #' n = 1000 25 | #' p = 5 26 | #' 27 | #' X = matrix(rnorm(n*p),ncol=p) 28 | #' Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1 29 | #' Xtest = matrix(rnorm(n*p),ncol=p) 30 | #' Ytest = (Xtest[,1]+rnorm(n)*0.1)^2 + (Xtest[,2]+rnorm(n)*0.1) + rnorm(n)*0.1 31 | #' 32 | #' ## fit engression object 33 | #' engr = engression(X,Y) 34 | #' print(engr) 35 | #' 36 | #' ## prediction on test data 37 | #' Yhat = predict(engr,Xtest,type="mean") 38 | #' cat("\n correlation between predicted and realized values: ", signif(cor(Yhat, Ytest),3)) 39 | #' plot(Yhat, Ytest,xlab="prediction", ylab="observation") 40 | #' 41 | #' ## quantile prediction 42 | #' Yhatquant = predict(engr,Xtest,type="quantiles") 43 | #' ord = order(Yhat) 44 | #' matplot(Yhat[ord], Yhatquant[ord,], type="l", col=2,lty=1,xlab="prediction", ylab="observation") 45 | #' points(Yhat[ord],Ytest[ord],pch=20,cex=0.5) 46 | #' 47 | #' ## sampling from estimated model 48 | #' Ysample = predict(engr,Xtest,type="sample",nsample=1) 49 | #' 50 | #' ## plot of realized values against first variable 51 | #' oldpar <- par() 52 | #' par(mfrow=c(1,2)) 53 | #' plot(Xtest[,1], Ytest, xlab="Variable 1", ylab="Observation") 54 | #' ## plot of sampled values against first variable 55 | #' plot(Xtest[,1], Ysample, xlab="Variable 1", ylab="Sample from engression model") 56 | #' par(oldpar) 57 | #' } 58 | #' 59 | #' @export 60 | 61 | engression <- function(X,Y, noise_dim=5, hidden_dim=100, num_layer=3, dropout=0.05, batch_norm=TRUE, num_epochs=1000,lr=10^(-3),beta=1, silent=FALSE, standardize=TRUE){ 62 | 63 | if (is.data.frame(X)) { 64 | if (any(sapply(X, is.factor))) warning("Data frame contains factor variables. Mapping to numeric values. Dummy variables would need to be created explicitly by the user.") 65 | X = dftomat(X) 66 | } 67 | 68 | if (is.vector(X) && !is.numeric(X)) X <- as.numeric(X) 69 | if (is.vector(X) && is.numeric(X)) X <- matrix(X, ncol = 1) 70 | if(is.vector(Y)) Y= matrix(Y, ncol=1) 71 | for (k in 1:ncol(Y)) Y[,k] = as.numeric(Y[,k]) 72 | 73 | if(dropout<=0 & noise_dim==0){ 74 | warning("dropout and noise_dim cannot both be equal to 0 as model needs to be stochastic. 
setting dropout to 0.5")
75 |     dropout = 0.5
76 |   }
77 |
78 |   muX = apply(X,2,mean)
79 |   sddX = apply(X,2,sd)
80 |   if(any(sddX<=0)){
81 |     warning("predictor variable(s) ", colnames(X)[which(sddX<=0)]," are constant on training data -- results might be unreliable")
82 |     sddX = pmax(sddX, 10^(-3))
83 |   }
84 |   muY = apply(Y,2,mean)
85 |   sddY = apply(Y,2,sd)
86 |   if(any(sddY<=0)){
87 |     warning("target variable(s) ", colnames(Y)[which(sddY<=0)]," are constant on training data -- results might be unreliable")
88 |   }
89 |
90 |   if(standardize){
91 |     X = sweep(sweep(X,2,muX,FUN="-"),2,sddX,FUN="/")
92 |     Y = sweep(sweep(Y,2,muY,FUN="-"),2,sddY,FUN="/")
93 |   }
94 |   eng = engressionfit(X,Y, noise_dim=noise_dim,hidden_dim=hidden_dim,num_layer=num_layer,dropout=dropout, batch_norm=batch_norm, num_epochs=num_epochs,lr=lr,beta=beta, silent=silent)
95 |   engressor = list(engressor = eng$engressor, lossvec= eng$lossvec, muX=muX, sddX=sddX,muY=muY, sddY=sddY, standardize=standardize, noise_dim=noise_dim,hidden_dim=hidden_dim,num_layer=num_layer,dropout=dropout, batch_norm=batch_norm, num_epochs=num_epochs,lr=lr)
96 |   class(engressor) = "engression"
97 |   return(engressor)
98 | }
99 |
-------------------------------------------------------------------------------- /engression-r/R/engressionBagged.R: --------------------------------------------------------------------------------
1 | #' Bagged Engression Function
2 | #'
3 | #' This function fits a bagged engression model to the data by fitting multiple
4 | #' engression models to subsamples of the data. It allows for the tuning of several parameters
5 | #' related to model complexity.
6 | #'
7 | #' @param X A matrix or data frame representing the predictors.
8 | #' @param Y A matrix or vector representing the target variable(s).
9 | #' @param K The number of bagged models to fit (default: 5).
10 | #' @param keepoutbag A boolean indicating whether to keep the out-of-bag samples and training data (default: TRUE).
11 | #' @param noise_dim The dimension of the noise introduced in the model (default: 10).
12 | #' @param hidden_dim The size of the hidden layer in the model (default: 100).
13 | #' @param num_layer The number of layers in the model (default: 3).
14 | #' @param dropout The dropout rate to be used in the model (default: 0.05).
15 | #' @param batch_norm A boolean indicating whether to use batch-normalization (default: TRUE).
16 | #' @param num_epochs The number of epochs to be used in training (default: 1000).
17 | #' @param lr The learning rate to be used in training (default: 10^-3).
18 | #' @param beta The beta scaling factor for energy loss (default: 1).
19 | #' @param silent A boolean indicating whether to suppress output during model training (default: FALSE).
20 | #' @param standardize A boolean indicating whether to standardize the input data (default: TRUE).
21 | #'
22 | #' @return A bagged engression model object with class "engressionBagged".
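#' The list stores the \code{K} fitted engression models in \code{models}; if \code{keepoutbag=TRUE}, it additionally
#' keeps the in-bag assignment matrix (\code{inbag}) and the training predictors (\code{Xtrain}) that
#' \code{predict.engressionBagged} requires for out-of-bag prediction.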
23 | #' 24 | #' @examples 25 | #' \donttest{ 26 | #' n = 1000 27 | #' p = 5 28 | #' X = matrix(rnorm(n*p),ncol=p) 29 | #' Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1 30 | #' Xtest = matrix(rnorm(n*p),ncol=p) 31 | #' Ytest = (Xtest[,1]+rnorm(n)*0.1)^2 + (Xtest[,2]+rnorm(n)*0.1) + rnorm(n)*0.1 32 | #' 33 | #' ## fit bagged engression object 34 | #' engb = engressionBagged(X,Y,K=3) 35 | #' print(engb) 36 | #' 37 | #' ## prediction on test data 38 | #' Yhat = predict(engb,Xtest,type="mean") 39 | #' cat("\n correlation between predicted and realized values: ", signif(cor(Yhat, Ytest),3)) 40 | #' plot(Yhat, Ytest,xlab="estimated conditional mean", ylab="observation") 41 | #' 42 | #' ## out-of-bag prediction 43 | #' Yhat_oob = predict(engb,type="mean") 44 | #' cat("\n correlation between predicted and realized values on oob data: ") 45 | #' print(signif(cor(Yhat_oob, Y),3)) 46 | #' plot(Yhat_oob, Y,xlab="prediction", ylab="observation") 47 | #' 48 | #' ## quantile prediction 49 | #' Yhatquant = predict(engb,Xtest,type="quantiles") 50 | #' ord = order(Yhat) 51 | #' matplot(Yhat[ord], Yhatquant[ord,], type="l", col=2,lty=1,xlab="prediction", ylab="observation") 52 | #' points(Yhat[ord],Ytest[ord],pch=20,cex=0.5) 53 | #' 54 | #' ## sampling from estimated model 55 | #' Ysample = predict(engb,Xtest,type="sample",nsample=1) 56 | #' 57 | #' ## plot of realized values against first variable 58 | #' oldpar <- par() 59 | #' par(mfrow=c(1,2)) 60 | #' plot(Xtest[,1], Ytest, xlab="Variable 1", ylab="Observation") 61 | #' ## plot of sampled values against first variable 62 | #' plot(Xtest[,1], Ysample[,1], xlab="Variable 1", ylab="Sample from engression model") 63 | #' par(oldpar) 64 | #' } 65 | #' 66 | #' @export 67 | #' 68 | engressionBagged <- function(X,Y, K=5, keepoutbag=TRUE, noise_dim=10, hidden_dim=100, num_layer=3, dropout=0.05, batch_norm=TRUE, num_epochs=1000,lr=10^(-3),beta=1, silent=FALSE, standardize=TRUE){ 69 | 70 | if (is.data.frame(X)) { 71 | if (any(sapply(X, is.factor))) warning("Data frame contains factor variables. Mapping to numeric values. Dummy variables would need to be created explicitly by the user.") 72 | X <- dftomat(X) 73 | } 74 | if (is.vector(X) && is.numeric(X)) X <- matrix(X, ncol = 1) 75 | if(is.vector(Y)) Y= matrix(Y, ncol=1) 76 | for (k in 1:ncol(Y)) Y[,k] = as.numeric(Y[,k]) 77 | 78 | 79 | if(dropout<=0 & noise_dim==0){ 80 | warning("dropout and noise_dim cannot both be equal to 0 as model needs to be stochastic. 
setting dropout to 0.5") 81 | dropout = 0.5 82 | } 83 | 84 | inbagno = min(K-1,ceiling(K*0.8)) 85 | inbag = matrix(nrow=nrow(X), ncol=inbagno) 86 | for (i in 1:nrow(X)) inbag[i,] = sort(sample(1:K,inbagno)) 87 | 88 | models = list() 89 | for (k in 1:K){ 90 | if(k==1) pr="st" 91 | if(k==2) pr="nd" 92 | if(k==3) pr="rd" 93 | if(k>=4) pr="th" 94 | if(!silent) cat(paste("\n fitting ",k,"-", pr," out of ",K," engression models \n",sep="")) 95 | useinbag = which(apply(inbag==k,1,any)) 96 | models[[k]] = engression(X[useinbag,],Y[useinbag], noise_dim=noise_dim, hidden_dim=hidden_dim, num_layer=num_layer, dropout=dropout, batch_norm=batch_norm, num_epochs=num_epochs,lr=lr,beta=beta, silent=silent, standardize=standardize) 97 | } 98 | 99 | engBagged = list(models= models, inbag=if(keepoutbag) inbag else NULL, Xtrain=if(keepoutbag) X else NULL, noise_dim=noise_dim,hidden_dim=hidden_dim,num_layer=num_layer,dropout=dropout, batch_norm=batch_norm, num_epochs=num_epochs,lr=lr, standardize=standardize) 100 | class(engBagged) = "engressionBagged" 101 | print(engBagged) 102 | return(engBagged) 103 | } 104 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Engression 2 | 3 | Engression is a neural network-based distributional regression method proposed in the paper "[*Engression: Extrapolation through the Lens of Distributional Regression?*](https://arxiv.org/abs/2307.00835)" by Xinwei Shen and Nicolai Meinshausen (2023). This repository contains the software implementations of engression in both R and Python. 4 | 5 | Consider targets $Y\in\mathbb{R}^k$ and predictors $X\in\mathbb{R}^d$; both variables can be univariate or multivariate, continuous or discrete. Engression can be used to 6 | * estimate the conditional mean $\mathbb{E}[Y|X=x]$ (as in least-squares regression), 7 | * estimate the conditional quantiles of $Y$ given $X=x$ (as in quantile regression), and 8 | * sample from the fitted conditional distribution of $Y$ given $X=x$ (as a generative model). 9 | 10 | The results in the paper show the advantages of engression over existing regression approaches in terms of extrapolation. 11 | 12 | 13 | ## Installation 14 | 15 | ### Python package 16 | The latest release of the Python package can be installed via pip: 17 | ```sh 18 | pip install engression 19 | ``` 20 | 21 | The development version can be installed from github: 22 | 23 | ```sh 24 | pip install -e "git+https://github.com/xwshen51/engression#egg=engression&subdirectory=engression-python" 25 | ``` 26 | 27 | ### R package 28 | 29 | The latest release of the R package can be installed through CRAN: 30 | 31 | ```R 32 | install.packages("engression") 33 | ``` 34 | 35 | The development version can be installed from github: 36 | 37 | ```R 38 | devtools::install_github("xwshen51/engression", subdir = "engression-r") 39 | ``` 40 | 41 | 42 | ## Usage Example 43 | 44 | ### Python 45 | Below is one simple demonstration. See [this tutorial](https://github.com/xwshen51/engression/blob/main/engression-python/examples/example_simu.ipynb) for more details on simulated data and [this tutorial](https://github.com/xwshen51/engression/blob/main/engression-python/examples/example_air.ipynb) for a real data example. We demonstrate in [another tutorial](https://github.com/xwshen51/engression/blob/main/engression-python/examples/example_bag.ipynb) how to fit a bagged engression model, which also helps with hyperparameter tuning. 
46 | ```python
47 | from engression import engression
48 | from engression.data.simulator import preanm_simulator
49 |
50 | ## Simulate data
51 | x, y = preanm_simulator("square", n=10000, x_lower=0, x_upper=2, noise_std=1, train=True, device="cuda")
52 | x_eval, y_eval_med, y_eval_mean = preanm_simulator("square", n=1000, x_lower=0, x_upper=4, noise_std=1, train=False, device="cuda")
53 |
54 | ## Fit an engression model
55 | engressor = engression(x, y, lr=0.01, num_epochs=500, batch_size=1000, device="cuda")
56 | ## Summarize model information
57 | engressor.summary()
58 |
59 | ## Evaluation
60 | print("L2 loss:", engressor.eval_loss(x_eval, y_eval_mean, loss_type="l2"))
61 | print("correlation between predicted and true means:", engressor.eval_loss(x_eval, y_eval_mean, loss_type="cor"))
62 |
63 | ## Predictions
64 | y_pred_mean = engressor.predict(x_eval, target="mean") ## for the conditional mean
65 | y_pred_med = engressor.predict(x_eval, target="median") ## for the conditional median
66 | y_pred_quant = engressor.predict(x_eval, target=[0.025, 0.5, 0.975]) ## for the conditional 2.5%, 50%, and 97.5% quantiles
67 | ```
68 |
69 | ### R
70 | ```R
71 | require(engression)
72 | n = 1000
73 | p = 5
74 |
75 | X = matrix(rnorm(n*p),ncol=p)
76 | Y = (X[,1]+rnorm(n)*0.1)^2 + (X[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
77 | Xtest = matrix(rnorm(n*p),ncol=p)
78 | Ytest = (Xtest[,1]+rnorm(n)*0.1)^2 + (Xtest[,2]+rnorm(n)*0.1) + rnorm(n)*0.1
79 |
80 | ## fit engression object
81 | engr = engression(X,Y)
82 | print(engr)
83 |
84 | ## prediction on test data
85 | Yhat = predict(engr,Xtest,type="mean")
86 | cat("\n correlation between predicted and realized values: ", signif(cor(Yhat, Ytest),3))
87 | plot(Yhat, Ytest,xlab="prediction", ylab="observation")
88 |
89 | ## quantile prediction
90 | Yhatquant = predict(engr,Xtest,type="quantiles")
91 | ord = order(Yhat)
92 | matplot(Yhat[ord], Yhatquant[ord,], type="l", col=2,lty=1,xlab="prediction", ylab="observation")
93 | points(Yhat[ord],Ytest[ord],pch=20,cex=0.5)
94 |
95 | ## sampling from estimated model
96 | Ysample = predict(engr,Xtest,type="sample",nsample=1)
97 | par(mfrow=c(1,2))
98 | ## plot of realized values against first variable
99 | plot(Xtest[,1], Ytest, xlab="Variable 1", ylab="Observation")
100 | ## plot of sampled values against first variable
101 | plot(Xtest[,1], Ysample, xlab="Variable 1", ylab="Sample from engression model")
102 | ```
103 |
104 |
105 | ## Contact information
106 | If you encounter any problems with the code, please submit an issue or contact [Xinwei Shen](mailto:xinwei.shen@stat.math.ethz.ch).
107 | 108 | 109 | ## Citation 110 | If you would refer to or extend our work, please cite the following paper: 111 | ``` 112 | @article{10.1093/jrsssb/qkae108, 113 | author = {Shen, Xinwei and Meinshausen, Nicolai}, 114 | title = {Engression: extrapolation through the lens of distributional regression}, 115 | journal = {Journal of the Royal Statistical Society Series B: Statistical Methodology}, 116 | pages = {qkae108}, 117 | year = {2024}, 118 | month = {11}, 119 | issn = {1369-7412}, 120 | doi = {10.1093/jrsssb/qkae108}, 121 | url = {https://doi.org/10.1093/jrsssb/qkae108}, 122 | eprint = {https://academic.oup.com/jrsssb/advance-article-pdf/doi/10.1093/jrsssb/qkae108/60827977/qkae108.pdf}, 123 | } 124 | ``` -------------------------------------------------------------------------------- /engression-python/engression/engression.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import matplotlib.pyplot as plt 3 | 4 | from .loss_func import * 5 | from .models import StoNet 6 | from .data.loader import make_dataloader 7 | from .utils import * 8 | 9 | 10 | def engression(x, y, classification=False, 11 | num_layer=2, hidden_dim=100, noise_dim=100, out_act=None, 12 | add_bn=True, resblock=False, beta=1, 13 | lr=0.0001, num_epochs=500, batch_size=None, 14 | print_every_nepoch=100, print_times_per_epoch=1, 15 | device="cpu", standardize=True, verbose=True): 16 | """This function fits an engression model to the data. It allows multivariate predictors and response variables. Variables are per default internally standardized (training with standardized data, while predictions and evaluations are on original scale). 17 | 18 | Args: 19 | x (torch.Tensor): training data of predictors. 20 | y (torch.Tensor): training data of responses. 21 | classification (bool, optional): classification or not. 22 | num_layer (int, optional): number of (linear) layers. Defaults to 2. 23 | hidden_dim (int, optional): number of neurons per layer. Defaults to 100. 24 | noise_dim (int, optional): noise dimension. Defaults to 100. 25 | out_act (str, optional): output activation function. Defaults to None. 26 | add_bn (bool, optional): whether to add BN layer. Defaults to True. 27 | resblock (bool, optional): whether to use residual blocks (skip connections). Defaults to False. 28 | beta (float, optional): power parameter in the energy loss. 29 | lr (float, optional): learning rate. Defaults to 0.0001. 30 | num_epochs (int, optional): number of epochs. Defaults to 500. 31 | batch_size (int, optional): batch size. Defaults to None. 32 | print_every_nepoch (int, optional): print losses every print_every_nepoch number of epochs. Defaults to 100. 33 | print_times_per_epoch (int, optional): print losses for print_times_per_epoch times per epoch. Defaults to 1. 34 | device (str, torch.device, optional): device. Defaults to "cpu". Choices = ["cpu", "gpu", "cuda"]. 35 | standardize (bool, optional): whether to standardize data during training. Defaults to True. 36 | verbose (bool, optional): whether to print losses and info. Defaults to True. 37 | 38 | Returns: 39 | Engressor object: a fitted engression model. 40 | """ 41 | if x.shape[0] != y.shape[0]: 42 | raise Exception("The sample sizes for the covariates and response do not match. 
Please check.") 43 | engressor = Engressor(in_dim=x.shape[1], out_dim=y.shape[1], classification=classification, 44 | num_layer=num_layer, hidden_dim=hidden_dim, noise_dim=noise_dim, 45 | out_act=out_act, resblock=resblock, add_bn=add_bn, beta=beta, 46 | lr=lr, num_epochs=num_epochs, batch_size=batch_size, 47 | standardize=standardize, device=device, check_device=verbose, verbose=verbose) 48 | engressor.train(x, y, num_epochs=num_epochs, batch_size=batch_size, 49 | print_every_nepoch=print_every_nepoch, print_times_per_epoch=print_times_per_epoch, 50 | standardize=standardize, verbose=verbose) 51 | return engressor 52 | 53 | 54 | class Engressor(object): 55 | """Engressor class. 56 | 57 | Args: 58 | in_dim (int): input dimension 59 | out_dim (int): output dimension 60 | classification (bool, optional): classification or not. 61 | num_layer (int, optional): number of layers. Defaults to 2. 62 | hidden_dim (int, optional): number of neurons per layer. Defaults to 100. 63 | noise_dim (int, optional): noise dimension. Defaults to 100. 64 | out_act (str, optional): output activation function. Defaults to None. 65 | resblock (bool, optional): whether to use residual blocks (skip-connections). Defaults to False. 66 | add_bn (bool, optional): whether to add BN layer. Defaults to True. 67 | beta (float, optional): power parameter in the energy loss. 68 | lr (float, optional): learning rate. Defaults to 0.0001. 69 | num_epochs (int, optional): number of epochs. Defaults to 500. 70 | batch_size (int, optional): batch size. Defaults to None, referring to the full batch. 71 | standardize (bool, optional): whether to standardize data during training. Defaults to True. 72 | device (str or torch.device, optional): device. Defaults to "cpu". Choices = ["cpu", "gpu", "cuda"]. 73 | check_device (bool, optional): whether to check the device. Defaults to True. 
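    A minimal usage sketch (values and shapes are illustrative only; `x` and `y`
    are assumed to be 2-dimensional float tensors with matching sample sizes):

        import torch
        x = torch.randn(1000, 5)
        y = (x[:, :1] + 0.1 * torch.randn(1000, 1)).pow(2)
        model = Engressor(in_dim=5, out_dim=1, device="cpu")
        model.train(x, y, num_epochs=500)  # full-batch training when batch_size is None
        y_mean = model.predict(x, target="mean")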
74 | """ 75 | def __init__(self, 76 | in_dim, out_dim, classification=False, 77 | num_layer=2, hidden_dim=100, noise_dim=100, 78 | out_act=False, resblock=False, add_bn=True, beta=1, 79 | lr=0.0001, num_epochs=500, batch_size=None, standardize=True, 80 | device="cpu", check_device=True, verbose=True): 81 | super().__init__() 82 | self.classification = classification 83 | if classification: 84 | out_act = "softmax" 85 | self.num_layer = num_layer 86 | self.hidden_dim = hidden_dim 87 | self.noise_dim = noise_dim 88 | self.out_act = out_act 89 | self.resblock = resblock 90 | self.add_bn = add_bn 91 | self.beta = beta 92 | self.lr = lr 93 | self.num_epochs = num_epochs 94 | self.batch_size = batch_size 95 | if isinstance(device, str): 96 | if device == "gpu" or device == "cuda": 97 | device = torch.device("cuda") 98 | else: 99 | device = torch.device(device) 100 | self.device = device 101 | if check_device: 102 | check_for_gpu(self.device) 103 | self.standardize = standardize 104 | self.x_mean = None 105 | self.x_std = None 106 | self.y_mean = None 107 | self.y_std = None 108 | 109 | self.model = StoNet(in_dim, out_dim, num_layer, hidden_dim, noise_dim, add_bn, out_act, resblock).to(self.device) 110 | self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr) 111 | self.verbose = verbose 112 | 113 | self.tr_loss = None 114 | 115 | def train_mode(self): 116 | self.model.train() 117 | 118 | def eval_mode(self): 119 | self.model.eval() 120 | 121 | def summary(self): 122 | """Print the model architecture and hyperparameters.""" 123 | print("Engression model with\n" + 124 | "\t number of layers: {}\n".format(self.num_layer) + 125 | "\t hidden dimensions: {}\n".format(self.hidden_dim) + 126 | "\t noise dimensions: {}\n".format(self.noise_dim) + 127 | "\t residual blocks: {}\n".format(self.resblock) + 128 | "\t number of epochs: {}\n".format(self.num_epochs) + 129 | "\t batch size: {}\n".format(self.batch_size) + 130 | "\t learning rate: {}\n".format(self.lr) + 131 | "\t standardization: {}\n".format(self.standardize) + 132 | "\t training mode: {}\n".format(self.model.training) + 133 | "\t device: {}\n".format(self.device)) 134 | print("Training loss (original scale):\n" + 135 | "\t energy-loss: {:.2f}, \n\tE(|Y-Yhat|): {:.2f}, \n\tE(|Yhat-Yhat'|): {:.2f}".format( 136 | self.tr_loss[0], self.tr_loss[1], self.tr_loss[2])) 137 | 138 | def _standardize_data_and_record_stats(self, x, y): 139 | """Standardize the data and record the mean and standard deviation of the training data. 140 | 141 | Args: 142 | x (torch.Tensor): training data of predictors. 143 | y (torch.Tensor): training data of responses. 144 | 145 | Returns: 146 | torch.Tensor: standardized data. 
147 | """ 148 | self.x_mean = torch.mean(x, dim=0) 149 | self.x_std = torch.std(x, dim=0) 150 | self.x_std[self.x_std == 0] += 1e-5 151 | if not self.classification: 152 | self.y_mean = torch.mean(y, dim=0) 153 | self.y_std = torch.std(y, dim=0) 154 | self.y_std[self.y_std == 0] += 1e-5 155 | else: 156 | self.y_mean = torch.zeros(y.shape[1:], device=y.device).unsqueeze(0) 157 | self.y_std = torch.ones(y.shape[1:], device=y.device).unsqueeze(0) 158 | x_standardized = (x - self.x_mean) / self.x_std 159 | y_standardized = (y - self.y_mean) / self.y_std 160 | self.x_mean = self.x_mean.to(self.device) 161 | self.x_std = self.x_std.to(self.device) 162 | self.y_mean = self.y_mean.to(self.device) 163 | self.y_std = self.y_std.to(self.device) 164 | return x_standardized, y_standardized 165 | 166 | def standardize_data(self, x, y=None): 167 | """Standardize the data, if self.standardize is True. 168 | 169 | Args: 170 | x (torch.Tensor): training data of predictors. 171 | y (torch.Tensor, optional): training data of responses. Defaults to None. 172 | 173 | Returns: 174 | torch.Tensor: standardized or original data. 175 | """ 176 | if y is None: 177 | if self.standardize: 178 | return (x - self.x_mean) / self.x_std 179 | else: 180 | return x 181 | else: 182 | if self.standardize: 183 | return (x - self.x_mean) / self.x_std, (y - self.y_mean) / self.y_std 184 | else: 185 | return x, y 186 | 187 | def unstandardize_data(self, y, x=None, expand_dim=False): 188 | """Transform the predictions back to the original scale, if self.standardize is True. 189 | 190 | Args: 191 | y (torch.Tensor): data in the standardized scale 192 | 193 | Returns: 194 | torch.Tensor: data in the original scale 195 | """ 196 | if x is None: 197 | if self.standardize: 198 | if expand_dim: 199 | return y * self.y_std.unsqueeze(0).unsqueeze(2) + self.y_mean.unsqueeze(0).unsqueeze(2) 200 | else: 201 | return y * self.y_std + self.y_mean 202 | else: 203 | return y 204 | else: 205 | if self.standardize: 206 | return x * self.x_std + self.x_mean, y * self.y_std + self.y_mean 207 | else: 208 | return x, y 209 | 210 | def train(self, x, y, num_epochs=None, batch_size=None, lr=None, print_every_nepoch=100, print_times_per_epoch=1, standardize=None, verbose=True): 211 | """Fit the model. 212 | 213 | Args: 214 | x (torch.Tensor): training data of predictors. 215 | y (torch.Tensor): trainging data of responses. 216 | num_epochs (int, optional): number of training epochs. Defaults to None. 217 | batch_size (int, optional): batch size for mini-batch SGD. Defaults to None. 218 | lr (float, optional): learning rate. 219 | print_every_nepoch (int, optional): print losses every print_every_nepoch number of epochs. Defaults to 100. 220 | print_times_per_epoch (int, optional): print losses for print_times_per_epoch times per epoch. Defaults to 1. 221 | standardize (bool, optional): whether to standardize the data. Defaults to True. 222 | verbose (bool, optional): whether to print losses and info. Defaults to True. 
223 | """ 224 | self.train_mode() 225 | if num_epochs is not None: 226 | self.num_epochs = num_epochs 227 | if batch_size is None: 228 | batch_size = self.batch_size if self.batch_size is not None else x.size(0) 229 | if lr is not None: 230 | if lr != self.lr: 231 | self.lr = lr 232 | self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr) 233 | if standardize is not None: 234 | self.standardize = standardize 235 | 236 | x = vectorize(x) 237 | y = vectorize(y) 238 | if self.standardize: 239 | if verbose: 240 | print("Data is standardized for training only; the printed training losses are on the standardized scale. \n" + 241 | "However during evaluation, the predictions, evaluation metrics, and plots will be on the original scale.\n") 242 | x, y = self._standardize_data_and_record_stats(x, y) 243 | x = x.to(self.device) 244 | y = y.to(self.device) 245 | 246 | if batch_size >= x.size(0)//2: 247 | if verbose: 248 | print("Batch is larger than half of the sample size. Training based on full-batch gradient descent.") 249 | self.batch_size = x.size(0) 250 | for epoch_idx in range(self.num_epochs): 251 | self.model.zero_grad() 252 | y_sample1 = self.model(x) 253 | y_sample2 = self.model(x) 254 | loss, loss1, loss2 = energy_loss_two_sample(y, y_sample1, y_sample2, beta=self.beta, verbose=True) 255 | loss.backward() 256 | self.optimizer.step() 257 | if (epoch_idx == 0 or (epoch_idx + 1) % print_every_nepoch == 0) and verbose: 258 | print("[Epoch {} ({:.0f}%)] energy-loss: {:.4f}, E(|Y-Yhat|): {:.4f}, E(|Yhat-Yhat'|): {:.4f}".format( 259 | epoch_idx + 1, 100 * epoch_idx / self.num_epochs, loss.item(), loss1.item(), loss2.item())) 260 | else: 261 | train_loader = make_dataloader(x, y, batch_size=batch_size, shuffle=True) 262 | if verbose: 263 | print("Training based on mini-batch gradient descent with a batch size of {}.".format(batch_size)) 264 | for epoch_idx in range(self.num_epochs): 265 | self.zero_loss() 266 | for batch_idx, (x_batch, y_batch) in enumerate(train_loader): 267 | self.train_one_iter(x_batch, y_batch) 268 | if (epoch_idx == 0 or (epoch_idx + 1) % print_every_nepoch == 0) and verbose: 269 | if (batch_idx + 1) % ((len(train_loader) - 1) // print_times_per_epoch) == 0: 270 | self.print_loss(epoch_idx, batch_idx) 271 | 272 | # Evaluate performance on the training data (on the original scale) 273 | self.model.eval() 274 | x, y = self.unstandardize_data(y, x) 275 | self.tr_loss = self.eval_loss(x, y, loss_type="energy", verbose=True) 276 | 277 | if verbose: 278 | print("\nTraining loss on the original (non-standardized) scale:\n" + 279 | "\tEnergy-loss: {:.4f}, E(|Y-Yhat|): {:.4f}, E(|Yhat-Yhat'|): {:.4f}".format( 280 | self.tr_loss[0], self.tr_loss[1], self.tr_loss[2])) 281 | 282 | if verbose: 283 | print("\nPrediction-loss E(|Y-Yhat|) and variance-loss E(|Yhat-Yhat'|) should ideally be equally large" + 284 | "\n-- consider training for more epochs or adjusting hyperparameters if there is a mismatch ") 285 | 286 | def zero_loss(self): 287 | self.tr_loss = 0 288 | self.tr_loss1 = 0 289 | self.tr_loss2 = 0 290 | 291 | def train_one_iter(self, x_batch, y_batch): 292 | self.model.zero_grad() 293 | y_sample1 = self.model(x_batch) 294 | y_sample2 = self.model(x_batch) 295 | loss, loss1, loss2 = energy_loss_two_sample(y_batch, y_sample1, y_sample2, beta=self.beta, verbose=True) 296 | loss.backward() 297 | self.optimizer.step() 298 | self.tr_loss += loss.item() 299 | self.tr_loss1 += loss1.item() 300 | self.tr_loss2 += loss2.item() 301 | 302 | def print_loss(self, epoch_idx, 
batch_idx, return_loss=False): 303 | loss_str = "[Epoch {} ({:.0f}%), batch {}] energy-loss: {:.4f}, E(|Y-Yhat|): {:.4f}, E(|Yhat-Yhat'|): {:.4f}".format( 304 | epoch_idx + 1, 100 * epoch_idx / self.num_epochs, batch_idx + 1, 305 | self.tr_loss / (batch_idx + 1), self.tr_loss1 / (batch_idx + 1), self.tr_loss2 / (batch_idx + 1)) 306 | if return_loss: 307 | return loss_str 308 | else: 309 | print(loss_str) 310 | 311 | @torch.no_grad() 312 | def predict(self, x, target="mean", sample_size=100): 313 | """Point prediction. 314 | 315 | Args: 316 | x (torch.Tensor): data of predictors. 317 | target (str or float or list, optional): a quantity of interest to predict. float refers to the quantiles. Defaults to "mean". 318 | sample_size (int, optional): generated sample sizes for each x. Defaults to 100. 319 | 320 | Returns: 321 | torch.Tensor or list of torch.Tensor: point predictions. 322 | """ 323 | self.eval_mode() 324 | x = vectorize(x) 325 | x = x.to(self.device) 326 | x = self.standardize_data(x) 327 | y_pred = self.model.predict(x, target, sample_size) 328 | if isinstance(y_pred, list): 329 | for i in range(len(y_pred)): 330 | y_pred[i] = self.unstandardize_data(y_pred[i]) 331 | else: 332 | y_pred = self.unstandardize_data(y_pred) 333 | return y_pred 334 | 335 | @torch.no_grad() 336 | def sample(self, x, sample_size=100, expand_dim=True): 337 | """Sample new response data. 338 | 339 | Args: 340 | x (torch.Tensor): test data of predictors. 341 | sample_size (int, optional): generated sample sizes for each x. Defaults to 100. 342 | expand_dim (bool, optional): whether to expand the sample dimension. Defaults to True. 343 | 344 | Returns: 345 | torch.Tensor of shape (data_size, response_dim, sample_size). 346 | - [:,:,i] consists of the i-th sample of all x. 347 | - [i,:,:] consists of all samples of x_i. 348 | """ 349 | self.eval_mode() 350 | x = vectorize(x) 351 | x = x.to(self.device) 352 | x = self.standardize_data(x) 353 | y_samples = self.model.sample(x, sample_size, expand_dim=expand_dim) 354 | y_samples = self.unstandardize_data(y_samples, expand_dim=expand_dim) 355 | if sample_size == 1: 356 | y_samples = y_samples.squeeze(len(y_samples.shape) - 1) 357 | return y_samples 358 | 359 | @torch.no_grad() 360 | def eval_loss(self, x, y, loss_type="l2", sample_size=None, beta=1, verbose=False): 361 | """Compute the loss for evaluation. 362 | 363 | Args: 364 | x (torch.Tensor): data of predictors. 365 | y (torch.Tensor): data of responses. 366 | loss_type (str, optional): loss type. Defaults to "l2". Choices: ["l2", "l1", "energy", "cor"]. 367 | sample_size (int, optional): generated sample sizes for each x. Defaults to 100. 368 | beta (float, optional): beta in energy score. Defaults to 1. 369 | 370 | Returns: 371 | float: evaluation loss. 
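            Note: when loss_type="energy" and verbose=True, a tuple of floats
            (energy_loss, E|Y-Yhat|, E|Yhat-Yhat'|) is returned instead of a
            single float, mirroring the branches in the code below.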
372 | """ 373 | if sample_size is None: 374 | sample_size = 2 if loss_type == "energy" else 100 375 | self.eval_mode() 376 | x = vectorize(x) 377 | y = vectorize(y) 378 | x = x.to(self.device) 379 | y = y.to(self.device) 380 | if loss_type == "l2": 381 | y_pred = self.predict(x, target="mean", sample_size=sample_size) 382 | loss = (y - y_pred).pow(2).mean() 383 | elif loss_type == "cor": 384 | y_pred = self.predict(x, target="mean", sample_size=sample_size) 385 | loss = cor(y, y_pred) 386 | elif loss_type == "l1": 387 | y_pred = self.predict(x, target=0.5, sample_size=sample_size) 388 | loss = (y - y_pred).abs().mean() 389 | else: 390 | assert loss_type == "energy" 391 | y_samples = self.sample(x, sample_size=sample_size, expand_dim=False) 392 | loss = energy_loss(y, y_samples, beta=beta, verbose=verbose) 393 | if not verbose: 394 | return loss.item() 395 | else: 396 | loss, loss1, loss2 = loss 397 | return loss.item(), loss1.item(), loss2.item() 398 | 399 | @torch.no_grad() 400 | def plot(self, x_te, y_te, x_tr=None, y_tr=None, x_idx=0, y_idx=0, 401 | target="mean", sample_size=100, save_dir=None, 402 | alpha=0.8, ymin=None, ymax=None): 403 | """Plot true data and predictions. 404 | 405 | Args: 406 | x_te (torch.Tensor): test data of predictors 407 | y_te (torch.Tensor): test data of responses 408 | x_tr (torch.Tensor): training data of predictors 409 | y_tr (torch.Tensor): training data of responses 410 | x_idx (int, optional): index of the predictor to plot (if there are multiple). Defaults to 0. 411 | y_idx (int, optional): index of the response to plot (if there are multiple). Defaults to 0. 412 | target (str or float, optional): target quantity. Defaults to "mean". Choice: ["mean", "median", "sample", float]. 413 | sample_size (int, optional): generated sample sizes for each x. Defaults to 100. 414 | save_dir (str, optional): directory to save the plot. Defaults to None. 415 | alpha (float, optional): transparency of the sampled data points. Defaults to 0.8. 416 | ymin (float, optional): minimum value of y in the plot. Defaults to None. 417 | ymax (float, optional): maximum value of y in the plot. Defaults to None. 418 | """ 419 | if x_tr is not None and y_tr is not None: 420 | # Plot training data as well. 421 | x_tr = vectorize(x_tr) 422 | y_tr = vectorize(y_tr) 423 | plt.scatter(x_tr[:,x_idx].cpu(), y_tr[:,y_idx].cpu(), s=1, label="training data", color="silver") 424 | plt.scatter(x_te[:,x_idx].cpu(), y_te[:,y_idx].cpu(), s=1, label="test data", color="gold") 425 | x = torch.cat((x_tr, x_te), dim=0) 426 | y = torch.cat((y_tr, y_te), dim=0) 427 | else: 428 | # Plot only the test data. 
429 |             x_te = vectorize(x_te)
430 |             y_te = vectorize(y_te)
431 |             plt.scatter(x_te[:,x_idx].cpu(), y_te[:,y_idx].cpu(), s=1, label="true data", color="silver")
432 |             x = x_te
433 |             y = y_te
434 |         x = x.to(self.device)
435 |         y = y.to(self.device)
436 |
437 |         if target != "sample":
438 |             y_pred = self.predict(x, target=target, sample_size=sample_size)
439 |             plt.scatter(x[:,x_idx].cpu(), y_pred[:,y_idx].cpu(), s=1, label="predictions", color="lightskyblue")
440 |         else:
441 |             y_sample = self.sample(x, sample_size=sample_size, expand_dim=False)
442 |             x_rep = x.repeat(sample_size, 1)
443 |             plt.scatter(x_rep[:,x_idx].cpu(), y_sample[:,y_idx].cpu(), s=1, label="samples", color="lightskyblue", alpha=alpha)
444 |         plt.legend(markerscale=2)
445 |         plt.ylim(ymin, ymax)
446 |         if x.shape[1] == 1:
447 |             plt.xlabel(r"$x$")
448 |         else:
449 |             plt.xlabel(r"$x_{}$".format(x_idx))
450 |         if y.shape[1] == 1:
451 |             plt.ylabel(r"$y$")
452 |         else:
453 |             plt.ylabel(r"$y_{}$".format(y_idx))
454 |         if save_dir is not None:
455 |             make_folder(save_dir)
456 |             plt.savefig(save_dir, bbox_inches="tight")
457 |             plt.close()
458 |         else:
459 |             plt.show()
460 |
-------------------------------------------------------------------------------- /engression-python/engression/models.py: --------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from .data.loader import make_dataloader
4 |
5 |
6 | class StoLayer(nn.Module):
7 |     """A stochastic layer.
8 |
9 |     Args:
10 |         in_dim (int): input dimension
11 |         out_dim (int): output dimension
12 |         noise_dim (int, optional): noise dimension. Defaults to 100.
13 |         add_bn (bool, optional): whether to add BN layer. Defaults to False.
14 |     """
15 |     def __init__(self, in_dim, out_dim, noise_dim=100, add_bn=False, out_act=None, noise_std=1, verbose=True):
16 |         super().__init__()
17 |         self.in_dim = in_dim
18 |         self.out_dim = out_dim
19 |         self.noise_dim = noise_dim
20 |         self.add_bn = add_bn
21 |         self.noise_std = noise_std
22 |         self.verbose = verbose
23 |
24 |         layer = [nn.Linear(in_dim + noise_dim, out_dim)]
25 |         if add_bn:
26 |             layer += [nn.BatchNorm1d(out_dim)]
27 |         self.layer = nn.Sequential(*layer)
28 |         if out_act == "softmax" and out_dim == 1:
29 |             out_act = "sigmoid"
30 |         self.out_act = get_act_func(out_act)
31 |
32 |     def forward(self, x):
33 |         device = next(self.layer.parameters()).device
34 |         if isinstance(x, int):
35 |             # For unconditional generation, x is the batch size.
36 |             assert self.in_dim == 0
37 |             out = torch.randn(x, self.noise_dim, device=device) * self.noise_std
38 |         else:
39 |             if x.size(1) < self.in_dim and self.verbose:
40 |                 print("Warning: covariate dimension does not align with the specified input dimension; filling in the remaining dimensions with noise.")
41 |             eps = torch.randn(x.size(0), self.noise_dim + self.in_dim - x.size(1), device=device) * self.noise_std
42 |             out = torch.cat([x, eps], dim=1)
43 |         out = self.layer(out)
44 |         if self.out_act is not None:
45 |             out = self.out_act(out)
46 |         return out
47 |
48 |
49 | def get_act_func(name):
50 |     if name == "relu":
51 |         return nn.ReLU(inplace=True)
52 |     elif name == "sigmoid":
53 |         return nn.Sigmoid()
54 |     elif name == "tanh":
55 |         return nn.Tanh()
56 |     elif name == "softmax":
57 |         return nn.Softmax(dim=1)
58 |     elif name == "elu":
59 |         return nn.ELU(inplace=True)
60 |     elif name == "softplus":
61 |         return nn.Softplus()
62 |     else:
63 |         return None
64 |
65 |
66 | class StoResBlock(nn.Module):
67 |     """A stochastic residual net block.
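    As a sketch, the forward pass for noise_dim > 0 computes

        h = relu(linear1(cat([x, eps1])))        # optionally batch-normalized
        out = linear2(cat([h, eps2])) + skip(x)  # skip is the identity if dims match

    with eps1 and eps2 fresh Gaussian noise draws.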
68 | 69 | Args: 70 | dim (int, optional): input dimension. Defaults to 100. 71 | hidden_dim (int, optional): hidden dimension (default to dim). Defaults to None. 72 | out_dim (int, optional): output dimension (default to dim). Defaults to None. 73 | noise_dim (int, optional): noise dimension. Defaults to 100. 74 | add_bn (bool, optional): whether to add batch normalization. Defaults to True. 75 | out_act (str, optional): output activation function. Defaults to None. 76 | """ 77 | def __init__(self, dim=100, hidden_dim=None, out_dim=None, noise_dim=100, add_bn=False, out_act=None, noise_std=1): 78 | super().__init__() 79 | self.noise_dim = noise_dim 80 | self.noise_std = noise_std 81 | if hidden_dim is None: 82 | hidden_dim = dim 83 | if out_dim is None: 84 | out_dim = dim 85 | self.layer1 = [nn.Linear(dim + noise_dim, hidden_dim)] 86 | self.add_bn = add_bn 87 | if add_bn: 88 | self.layer1.append(nn.BatchNorm1d(hidden_dim)) 89 | self.layer1.append(nn.ReLU()) 90 | self.layer1 = nn.Sequential(*self.layer1) 91 | self.layer2 = nn.Linear(hidden_dim + noise_dim, out_dim) 92 | if add_bn and out_act == "relu": # for intermediate blocks 93 | self.layer2 = nn.Sequential(*[self.layer2, nn.BatchNorm1d(out_dim)]) 94 | if out_dim != dim: 95 | self.layer3 = nn.Linear(dim, out_dim) 96 | self.dim = dim 97 | self.out_dim = out_dim 98 | self.noise_dim = noise_dim 99 | if out_act == "softmax" and out_dim == 1: 100 | out_act = "sigmoid" 101 | self.out_act = get_act_func(out_act) 102 | 103 | def forward(self, x): 104 | if self.noise_dim > 0: 105 | eps = torch.randn(x.size(0), self.noise_dim, device=x.device) * self.noise_std 106 | out = self.layer1(torch.cat([x, eps], dim=1)) 107 | eps = torch.randn(x.size(0), self.noise_dim, device=x.device) * self.noise_std 108 | out = self.layer2(torch.cat([out, eps], dim=1)) 109 | else: 110 | out = self.layer2(self.layer1(x)) 111 | if self.out_dim != self.dim: 112 | out2 = self.layer3(x) 113 | out = out + out2 114 | else: 115 | out += x 116 | if self.out_act is not None: 117 | out = self.out_act(out) 118 | return out 119 | 120 | 121 | class FiLMBlock(nn.Module): 122 | def __init__(self, in_dim, out_dim, condition_dim, 123 | hidden_dim=512, noise_dim=0, add_bn=False, resblock=False, 124 | out_act=None, film_pos='out', film_level=1): 125 | super().__init__() 126 | self.film_pos = film_pos 127 | self.film_level = film_level 128 | film_out_dim = out_dim if film_pos == 'out' else in_dim 129 | if film_level > 1: 130 | self.condition_layer = nn.Linear(condition_dim, film_out_dim * 2) 131 | elif film_level == 1: 132 | self.condition_layer = nn.Linear(condition_dim, film_out_dim) 133 | if resblock: 134 | self.net = StoLayer(in_dim, out_dim, noise_dim, add_bn, out_act) 135 | else: 136 | self.net = StoResBlock(in_dim, hidden_dim, out_dim, noise_dim, add_bn, out_act) 137 | 138 | def forward(self, x, condition): 139 | out = self.net(x) if self.film_pos == 'out' else x 140 | if self.film_level > 1: 141 | gamma, beta = self.condition_layer(condition).chunk(2, dim=1) 142 | out = gamma * out + beta 143 | elif self.film_level == 1: 144 | beta = self.condition_layer(condition) 145 | out = out + beta 146 | if self.film_pos == 'in': 147 | out = self.net(out) 148 | return out 149 | 150 | 151 | # class FiLMBlockIn(nn.Module): 152 | # def __init__(self, in_dim, out_dim, condition_dim, 153 | # hidden_dim=512, noise_dim=0, add_bn=False, resblock=False, 154 | # out_act=None, film_level=1): 155 | # super().__init__() 156 | # self.condition_layer = nn.Linear(condition_dim, in_dim * 2) 157 | # if 
resblock: 158 | # self.net = StoLayer(in_dim, out_dim, noise_dim, add_bn, out_act) 159 | # else: 160 | # self.net = StoResBlock(in_dim, hidden_dim, out_dim, noise_dim, add_bn, out_act) 161 | 162 | # def forward(self, x, condition): 163 | # gamma, beta = self.condition_layer(condition).chunk(2, dim=1) 164 | # out = self.net(gamma * x + beta) 165 | # return out 166 | 167 | 168 | class StoNetBase(nn.Module): 169 | def __init__(self, forward_sampling=True): 170 | super().__init__() 171 | self.sampling_func = self.forward if forward_sampling else self.sampling_func 172 | 173 | @torch.no_grad() 174 | def predict(self, x, target=["mean"], sample_size=100): 175 | """Point prediction. 176 | 177 | Args: 178 | x (torch.Tensor): input data 179 | target (str or float or list, optional): quantities to predict. float refers to the quantiles. Defaults to ["mean"]. 180 | sample_size (int, optional): sample sizes for each x. Defaults to 100. 181 | 182 | Returns: 183 | torch.Tensor or list of torch.Tensor: point predictions 184 | - [:,:,i] gives the i-th sample of all x. 185 | - [i,:,:] gives all samples of x_i. 186 | 187 | Here we do not call `sample` but directly call `forward`. 188 | """ 189 | samples = self.sample(x=x, sample_size=sample_size, expand_dim=True) 190 | if not isinstance(target, list): 191 | target = [target] 192 | results = [] 193 | extremes = [] 194 | for t in target: 195 | if t == "mean": 196 | results.append(samples.mean(dim=len(samples.shape) - 1)) 197 | else: 198 | if t == "median": 199 | t = 0.5 200 | assert isinstance(t, float) 201 | results.append(samples.quantile(t, dim=len(samples.shape) - 1)) 202 | if min(t, 1 - t) * sample_size < 10: 203 | extremes.append(t) 204 | 205 | if len(extremes) > 0: 206 | print("Warning: the estimate for quantiles at {} with a sample size of {} could be inaccurate. Please increase the `sample_size`.".format(extremes, sample_size)) 207 | 208 | if len(results) == 1: 209 | return results[0] 210 | else: 211 | return results 212 | 213 | def sample_onebatch(self, x, sample_size=100, expand_dim=True, require_grad=False): 214 | """Sampling new response data (for one batch of data). 215 | 216 | Args: 217 | x (torch.Tensor): new data of predictors of shape [data_size, covariate_dim] 218 | sample_size (int, optional): new sample size. Defaults to 100. 219 | expand_dim (bool, optional): whether to expand the sample dimension. Defaults to True. 220 | 221 | Returns: 222 | torch.Tensor of shape (data_size, response_dim, sample_size) if expand_dim else (data_size*sample_size, response_dim), where response_dim could have multiple channels. 
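            For example (a sketch): x of shape (n, d) with sample_size=s yields a
            tensor of shape (n, k, s) when expand_dim=True and (n*s, k) otherwise,
            where k denotes the response dimension.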
    def sample_onebatch(self, x, sample_size=100, expand_dim=True, require_grad=False):
        """Sampling new response data (for one batch of data).

        Args:
            x (torch.Tensor): new data of predictors of shape [data_size, covariate_dim]
            sample_size (int, optional): new sample size. Defaults to 100.
            expand_dim (bool, optional): whether to expand the sample dimension. Defaults to True.
            require_grad (bool, optional): whether to keep the computation graph of the samples. Defaults to False.

        Returns:
            torch.Tensor of shape (data_size, response_dim, sample_size) if expand_dim else (data_size*sample_size, response_dim), where response_dim could have multiple channels.
        """
        data_size = x.size(0)  ## input data size
        if not require_grad:
            with torch.no_grad():
                ## repeat the data sample_size times, giving a tensor [data, data, ..., data]
                x_rep = x.repeat(sample_size, 1)
                ## samples of shape (data_size*sample_size, response_dim) such that samples[data_size*(i-1):data_size*i,:] contains one sample for each data point, for i = 1, ..., sample_size
                samples = self.sampling_func(x_rep).detach()
        else:
            x_rep = x.repeat(sample_size, 1)
            samples = self.sampling_func(x_rep)
        if not expand_dim:  # or sample_size == 1
            return samples
        else:
            expand_dim = len(samples.shape)
            samples = samples.unsqueeze(expand_dim)  ## (data_size*sample_size, response_dim, 1)
            ## a list of length sample_size, each element a tensor of shape (data_size, response_dim, 1)
            samples = list(torch.split(samples, data_size))
            samples = torch.cat(samples, dim=expand_dim)  ## (data_size, response_dim, sample_size)
            return samples
            # without expanding dimensions:
            # samples.reshape(-1, *samples.shape[1:-1])

    def sample_batch(self, x, sample_size=100, expand_dim=True, batch_size=None):
        """Sampling with mini-batches; used when sampling everything at once would run out of memory.

        Args:
            x (torch.Tensor): new data of predictors of shape [data_size, covariate_dim]
            sample_size (int, optional): new sample size. Defaults to 100.
            expand_dim (bool, optional): whether to expand the sample dimension. Defaults to True.
            batch_size (int, optional): batch size. Defaults to None.

        Returns:
            torch.Tensor of shape (data_size, response_dim, sample_size) if expand_dim else (data_size*sample_size, response_dim), where response_dim could have multiple channels.
        """
        if batch_size is not None and batch_size < x.shape[0]:
            test_loader = make_dataloader(x, batch_size=batch_size, shuffle=False)
            samples = []
            for (x_batch,) in test_loader:
                samples.append(self.sample_onebatch(x_batch, sample_size, expand_dim))
            samples = torch.cat(samples, dim=0)
        else:
            samples = self.sample_onebatch(x, sample_size, expand_dim)
        return samples

    def sample(self, x, sample_size=100, expand_dim=True, verbose=True):
        """Sampling that adaptively adjusts the batch size according to the GPU memory."""
        batch_size = x.shape[0]
        while True:
            try:
                samples = self.sample_batch(x, sample_size, expand_dim, batch_size)
                break
            except RuntimeError as e:
                if "out of memory" not in str(e):
                    raise  # only OOM errors trigger the batch-size backoff
                batch_size = batch_size // 2
                if verbose:
                    print("Out of memory; reducing the batch size to {}".format(batch_size))
        return samples


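# The `sample` method above retries with a halved batch size on CUDA
# out-of-memory errors. The same backoff pattern in isolation (a sketch;
# `run` is a hypothetical callable that consumes a batch size):
#
#     def run_with_oom_backoff(run, batch_size):
#         while batch_size >= 1:
#             try:
#                 return run(batch_size)
#             except RuntimeError as e:
#                 if "out of memory" not in str(e):
#                     raise
#                 batch_size //= 2
#         raise RuntimeError("out of memory even at batch size 1")
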
class StoNet(StoNetBase):
    """Stochastic neural network.

    Args:
        in_dim (int): input dimension
        out_dim (int): output dimension
        num_layer (int, optional): number of layers. Defaults to 2.
        hidden_dim (int, optional): number of neurons per layer. Defaults to 100.
        noise_dim (int, optional): noise dimension. Defaults to 100.
        add_bn (bool, optional): whether to add BN layer. Defaults to False.
        out_act (str, optional): output activation function. Defaults to None.
        resblock (bool, optional): whether to use residual blocks. Defaults to False.
    """
    def __init__(self, in_dim, out_dim, num_layer=2, hidden_dim=100, 
                 noise_dim=100, add_bn=False, out_act=None, resblock=False, 
                 noise_all_layer=True, out_bias=True, verbose=True, forward_sampling=True):
        super().__init__(forward_sampling=forward_sampling)
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.hidden_dim = hidden_dim
        self.noise_dim = noise_dim
        self.add_bn = add_bn
        self.noise_all_layer = noise_all_layer
        self.out_bias = out_bias
        if out_act == "softmax" and out_dim == 1:
            out_act = "sigmoid"
        self.out_act = get_act_func(out_act)

        self.num_blocks = None
        if resblock:
            if num_layer % 2 != 0:
                num_layer += 1
                print("The number of layers must be an even number for residual blocks. Changed to {}".format(num_layer))
            num_blocks = num_layer // 2
            self.num_blocks = num_blocks
        self.resblock = resblock
        self.num_layer = num_layer

        if self.resblock:
            if self.num_blocks == 1:
                self.net = StoResBlock(dim=in_dim, hidden_dim=hidden_dim, out_dim=out_dim, 
                                       noise_dim=noise_dim, add_bn=add_bn, out_act=out_act)
            else:
                self.input_layer = StoResBlock(dim=in_dim, hidden_dim=hidden_dim, out_dim=hidden_dim, 
                                               noise_dim=noise_dim, add_bn=add_bn, out_act="relu")
                if not noise_all_layer:
                    noise_dim = 0
                # build independent intermediate blocks; repeating a single instance
                # with `*` would make all of them share the same weights
                self.inter_layer = nn.Sequential(*[StoResBlock(dim=hidden_dim, noise_dim=noise_dim, add_bn=add_bn, out_act="relu") for _ in range(self.num_blocks - 2)])
                self.out_layer = StoResBlock(dim=hidden_dim, hidden_dim=hidden_dim, out_dim=out_dim, 
                                             noise_dim=noise_dim, add_bn=add_bn, out_act=out_act)  # output layer with concatenated noise
        else:
            self.input_layer = StoLayer(in_dim=in_dim, out_dim=hidden_dim, noise_dim=noise_dim, add_bn=add_bn, out_act="relu", verbose=verbose)
            if not noise_all_layer:
                noise_dim = 0
            self.inter_layer = nn.Sequential(*[StoLayer(in_dim=hidden_dim, out_dim=hidden_dim, noise_dim=noise_dim, add_bn=add_bn, out_act="relu") for _ in range(num_layer - 2)])
            # self.out_layer = StoLayer(in_dim=hidden_dim, out_dim=out_dim, noise_dim=noise_dim, add_bn=False, out_act=out_act)  # output layer with concatenated noise
            self.out_layer = nn.Linear(hidden_dim, out_dim, bias=out_bias)
            if self.out_act is not None:
                self.out_layer = nn.Sequential(*[self.out_layer, self.out_act])

    def forward(self, x):
        if self.num_blocks == 1:
            return self.net(x)
        else:
            return self.out_layer(self.inter_layer(self.input_layer(x)))

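
# A minimal usage sketch for StoNet (shapes and values are hypothetical):
#
#     net = StoNet(in_dim=5, out_dim=1, num_layer=4, hidden_dim=100,
#                  noise_dim=100, resblock=True)
#     x = torch.randn(16, 5)
#     y_draws = net.sample(x, sample_size=50)    # (16, 1, 50)
#     y_mean = net.predict(x, target="mean")     # (16, 1)
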
class CondStoNet(StoNetBase):
    """Conditional stochastic neural network.

    Args:
        in_dim (int): input dimension
        out_dim (int): output dimension
        condition_dim (int): dimension of the conditioning variables fed to the FiLM layers
        num_layer (int, optional): number of layers. Defaults to 2.
        hidden_dim (int, optional): number of neurons per layer. Defaults to 100.
        noise_dim (int, optional): noise dimension. Defaults to 100.
        add_bn (bool, optional): whether to add BN layer. Defaults to False.
        out_act (str, optional): output activation function. Defaults to None.
        resblock (bool, optional): whether to use residual blocks. Defaults to False.
    """
    def __init__(self, in_dim, out_dim, condition_dim, num_layer=2, hidden_dim=100, 
                 noise_dim=100, add_bn=False, out_act=None, resblock=False, 
                 noise_all_layer=True, film_pos='out', film_level=1):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.condition_dim = condition_dim
        self.hidden_dim = hidden_dim
        self.noise_dim = noise_dim
        self.add_bn = add_bn
        self.noise_all_layer = noise_all_layer

        self.num_blocks = None
        if resblock:
            if num_layer % 2 != 0:
                num_layer += 1
                print("The number of layers must be an even number for residual blocks. Changed to {}".format(num_layer))
            self.num_blocks = num_layer // 2
        self.resblock = resblock
        self.num_layer = num_layer

        if resblock:
            num_layer = self.num_blocks  # each residual block counts as two layers
        if self.num_blocks == 1:
            self.net = nn.ModuleList([FiLMBlock(in_dim=in_dim, out_dim=out_dim, condition_dim=condition_dim, hidden_dim=hidden_dim, noise_dim=noise_dim, add_bn=add_bn, resblock=resblock, out_act=out_act, film_pos=film_pos, film_level=film_level)])
        else:
            layers = [FiLMBlock(in_dim=in_dim, out_dim=hidden_dim, condition_dim=condition_dim, hidden_dim=hidden_dim, noise_dim=noise_dim, add_bn=add_bn, resblock=resblock, out_act="relu", film_pos=film_pos, film_level=film_level)]
            if not noise_all_layer:
                noise_dim = 0
            for _ in range(num_layer - 2):
                layers.append(FiLMBlock(in_dim=hidden_dim, out_dim=hidden_dim, condition_dim=condition_dim, noise_dim=noise_dim, add_bn=add_bn, resblock=resblock, out_act="relu", film_pos=film_pos, film_level=film_level))
            layers.append(FiLMBlock(in_dim=hidden_dim, out_dim=out_dim, condition_dim=condition_dim, hidden_dim=hidden_dim, noise_dim=noise_dim, add_bn=add_bn, resblock=resblock, out_act=out_act, film_pos=film_pos, film_level=film_level))
            self.net = nn.ModuleList(layers)

    def forward(self, x, condition):
        out = x
        for layer in self.net:
            out = layer(out, condition)
        return out

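
# A usage sketch for CondStoNet (dimensions are hypothetical); the condition
# tensor is passed through every FiLM block alongside the features:
#
#     cnet = CondStoNet(in_dim=5, out_dim=1, condition_dim=3, num_layer=4)
#     x = torch.randn(16, 5)
#     cond = torch.randn(16, 3)
#     out = cnet(x, cond)    # (16, 1), one stochastic draw per input row
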
class Net(nn.Module):
    """Deterministic neural network.

    Args:
        in_dim (int, optional): input dimension. Defaults to 1.
        out_dim (int, optional): output dimension. Defaults to 1.
        num_layer (int, optional): number of layers. Defaults to 2.
        hidden_dim (int, optional): number of neurons per layer. Defaults to 100.
        add_bn (bool, optional): whether to add BN layer. Defaults to False.
        sigmoid (bool, optional): whether to add sigmoid or softmax at the end. Defaults to False.
    """
    def __init__(self, in_dim=1, out_dim=1, num_layer=2, hidden_dim=100, 
                 add_bn=False, sigmoid=False):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.num_layer = num_layer
        self.hidden_dim = hidden_dim
        self.add_bn = add_bn
        self.sigmoid = sigmoid

        net = [nn.Linear(in_dim, hidden_dim)]
        if add_bn:
            net += [nn.BatchNorm1d(hidden_dim)]
        net += [nn.ReLU(inplace=True)]
        for _ in range(num_layer - 2):
            net += [nn.Linear(hidden_dim, hidden_dim)]
            if add_bn:
                net += [nn.BatchNorm1d(hidden_dim)]
            net += [nn.ReLU(inplace=True)]
        net.append(nn.Linear(hidden_dim, out_dim))
        if sigmoid:
            out_act = nn.Sigmoid() if out_dim == 1 else nn.Softmax(dim=1)
            net.append(out_act)
        self.net = nn.Sequential(*net)

    def forward(self, x):
        return self.net(x)


class ResMLPBlock(nn.Module):
    """MLP residual net block.

    Args:
        dim (int): dimension of input and output.
    """
    def __init__(self, dim):
        super().__init__()
        self.layer1 = nn.Sequential(
            nn.Linear(dim, dim),
            nn.BatchNorm1d(dim),
            nn.ReLU(inplace=True)
        )
        self.layer2 = nn.Sequential(
            nn.Linear(dim, dim),
            nn.BatchNorm1d(dim),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.layer2(self.layer1(x))
        out += x
        return self.relu(out)

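
# ResMLPBlock computes out = ReLU(x + BN(Linear(ReLU(BN(Linear(x)))))), i.e. a
# standard residual update. A quick shape check (hypothetical batch):
#
#     block = ResMLPBlock(dim=8).eval()    # eval() so BatchNorm uses running stats
#     x = torch.randn(2, 8)
#     assert block(x).shape == (2, 8)
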
Added one layer.") 485 | num_blocks = num_layer // 2 486 | self.num_blocks = num_blocks 487 | if num_blocks == 1: 488 | self.net = StoResBlock(dim=in_dim, hidden_dim=hidden_dim, out_dim=out_dim, 489 | noise_dim=0, add_bn=add_bn, out_act=out_act) 490 | else: 491 | self.input_layer = StoResBlock(dim=in_dim, hidden_dim=hidden_dim, out_dim=hidden_dim, 492 | noise_dim=0, add_bn=add_bn, out_act="relu") 493 | self.inter_layer = nn.Sequential(*[StoResBlock(dim=hidden_dim, noise_dim=0, add_bn=add_bn, out_act="relu")]*(self.num_blocks - 2)) 494 | self.out_layer = StoResBlock(dim=hidden_dim, hidden_dim=hidden_dim, out_dim=out_dim, 495 | noise_dim=0, add_bn=add_bn, out_act=out_act) 496 | 497 | def forward(self, x): 498 | if self.num_blocks == 1: 499 | return self.net(x) 500 | else: 501 | return self.out_layer(self.inter_layer(self.input_layer(x))) -------------------------------------------------------------------------------- /engression-python/examples/example_air.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import sys\n", 10 | "sys.path.append(\"..\")\n", 11 | "import torch\n", 12 | "import pandas as pd\n", 13 | "import matplotlib.pyplot as plt\n", 14 | "plt.rcParams[\"figure.figsize\"] = [4, 4]\n", 15 | "torch.manual_seed(0)\n", 16 | "\n", 17 | "from engression import engression\n", 18 | "from engression.data.loader import partition_data" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 3, 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "# load data\n", 37 | "data = pd.read_csv(\"../engression/data/resources/air_quality.csv\")\n", 38 | "x_full = torch.Tensor(data[\"PT08.S2.NMHC.\"]).unsqueeze(1).to(device)\n", 39 | "y_full = torch.Tensor(data[\"PT08.S3.NOx.\"]).unsqueeze(1).to(device)\n", 40 | "# partition training/test\n", 41 | "x_tr, y_tr, x_te, y_te, x_full_normal = partition_data(x_full, y_full, 0.3, \"smaller\")" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 4, 47 | "metadata": {}, 48 | "outputs": [ 49 | { 50 | "name": "stdout", 51 | "output_type": "stream", 52 | "text": [ 53 | "Running on CPU.\n", 54 | "\n", 55 | "Data is standardized for training only; the printed training losses are on the standardized scale. \n", 56 | "However during evaluation, the predictions, evaluation metrics, and plots will be on the original scale.\n", 57 | "\n", 58 | "Batch is larger than half of the sample size. 
/engression-python/examples/example_air.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "sys.path.append(\"..\")\n",
    "import torch\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "plt.rcParams[\"figure.figsize\"] = [4, 4]\n",
    "torch.manual_seed(0)\n",
    "\n",
    "from engression import engression\n",
    "from engression.data.loader import partition_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# load data\n",
    "data = pd.read_csv(\"../engression/data/resources/air_quality.csv\")\n",
    "x_full = torch.Tensor(data[\"PT08.S2.NMHC.\"]).unsqueeze(1).to(device)\n",
    "y_full = torch.Tensor(data[\"PT08.S3.NOx.\"]).unsqueeze(1).to(device)\n",
    "# partition training/test\n",
    "x_tr, y_tr, x_te, y_te, x_full_normal = partition_data(x_full, y_full, 0.3, \"smaller\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
       "Running on CPU.\n",
       "\n",
       "Data is standardized for training only; the printed training losses are on the standardized scale. \n",
       "However during evaluation, the predictions, evaluation metrics, and plots will be on the original scale.\n",
       "\n",
       "Batch is larger than half of the sample size. Training based on full-batch gradient descent.\n",
       "[Epoch 1 (0%)] energy-loss: 0.6088, E(|Y-Yhat|): 0.9915, E(|Yhat-Yhat'|): 0.7653\n",
       "[Epoch 500 (25%)] energy-loss: 0.3914, E(|Y-Yhat|): 0.7620, E(|Yhat-Yhat'|): 0.7412\n",
       "[Epoch 1000 (50%)] energy-loss: 0.3842, E(|Y-Yhat|): 0.7734, E(|Yhat-Yhat'|): 0.7784\n",
       "[Epoch 1500 (75%)] energy-loss: 0.3809, E(|Y-Yhat|): 0.7390, E(|Yhat-Yhat'|): 0.7161\n",
       "[Epoch 2000 (100%)] energy-loss: 0.3861, E(|Y-Yhat|): 0.7765, E(|Yhat-Yhat'|): 0.7806\n",
       "\n",
       "Training loss on the original (non-standardized) scale:\n",
       "\tEnergy-loss: 0.3782, E(|Y-Yhat|): 0.7618, E(|Yhat-Yhat'|): 0.7672\n",
       "\n",
       "Prediction-loss E(|Y-Yhat|) and variance-loss E(|Yhat-Yhat'|) should ideally be equally large\n",
       "-- consider training for more epochs or adjusting hyperparameters if there is a mismatch \n"
      ]
     }
   ],
   "source": [
    "# Fit an engression model\n",
    "engressor = engression(x_tr, y_tr, resblock=True, num_layer=6, hidden_dim=100, noise_dim=100, \n",
    "                       lr=0.001, num_epochs=2000, print_every_nepoch=500, device=device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
       "Engression model with\n",
       "\t number of layers: 6\n",
       "\t hidden dimensions: 100\n",
       "\t noise dimensions: 100\n",
       "\t residual blocks: True\n",
       "\t number of epochs: 2000\n",
       "\t batch size: 2707\n",
       "\t learning rate: 0.001\n",
       "\t standardization: True\n",
       "\t training mode: False\n",
       "\t device: cpu\n",
       "\n",
       "Training loss (original scale):\n",
       "\t energy-loss: 0.38, \n",
       "\tE(|Y-Yhat|): 0.76, \n",
       "\tE(|Yhat-Yhat'|): 0.77\n"
      ]
     }
   ],
   "source": [
    "# Summarize model information\n",
    "engressor.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
       "L2 loss: 0.27850815653800964\n",
       "correlation between predicted and true means: 0.6972660422325134\n",
       "energy score: 0.27972766757011414\n"
      ]
     }
   ],
   "source": [
    "# Evaluation\n",
    "print(\"L2 loss:\", engressor.eval_loss(x_te, y_te, loss_type=\"l2\"))\n",
    "print(\"correlation between predicted and true means:\", engressor.eval_loss(x_te, y_te, loss_type=\"cor\"))\n",
    "print(\"energy score:\", engressor.eval_loss(x_te, y_te, loss_type=\"energy\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# prediction\n",
    "y_pred = engressor.predict(x_full_normal)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
       "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXwAAAFzCAYAAAA9sbIfAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABcWUlEQVR4nO29fXxT9fn//zonSdOUNr0jtASatkCVGwHLjVD4KjjcYHN4MxWGTsH50amgMnTK5iZTp6Dy2VDwZmOfiX42UTcnus9+gowBagXkpqCuCAUKhYaWUgppaJsmOe/fHyfn5Jw0SZM0aZLmej4eMc3JOSfvxvI617ne1/t1cYwxBoIgCKLPw8d7AARBEETvQIJPEASRIpDgEwRBpAgk+ARBECkCCT5BEESKQIJPEASRIpDgEwRBpAgk+ARBECmCNt4D6G0EQYDVakVWVhY4jov3cAiCIHoMYwytra0wm83g+cBxfMoJvtVqRVFRUbyHQRAEEXVOnjyJwYMHB3w/5QQ/KysLgPjFGI3GOI+GIAii59hsNhQVFcn6FoiUE3wpjWM0GknwCYLoU3SXpqZJW4IgiBSBBJ8gCCJFIMEnCIJIEVIuh08QiYbb7YbT6Yz3MIgERqPRQKvV9riUnASfIOKI3W7HqVOnQH2IiO7IyMjAwIEDkZaWFvE5SPAJIk643W6cOnUKGRkZMJlMtBCQ8AtjDJ2dnWhqakJtbS3KysqCLq4KBgk+QcQJp9MJxhhMJhMMBkO8h0MkMAaDATqdDidOnEBnZyfS09MjOg9N2hJEnKHIngiFSKN61TmiMA6CIAgiCSDBJwiCSBFI8BMAq9WKnTt3wmq1xnsoBNHrlJSUYNWqVSHvv23bNnAch/Pnz8dsTIFYt24dcnJyev1zowUJfgJQV1cHh8OBurq6eA+FILpl+vTpWLx4cdTOt3v3btxzzz0h7z9lyhScPn0a2dnZURtDLAn3ghZLSPATAIvFAr1eD4vFEu+hEERUYIzB5XKFtK/JZEJGRkbI505LS0NhYSFNdkcACX4CYDabMXnyZJjN5ngPhSCCsmDBAmzfvh0vvvgiOI4Dx3E4fvy4nGb56KOPMH78eOj1enz22Wc4evQorr/+ehQUFCAzMxMTJ07Ev/71L9U5fSNgjuPwxz/+ETfeeCMyMjJQVlaGDz/8UH7fN6UjpVk2bdqEESNGIDMzE7NmzcLp06flY1wuFx588EHk5OQgPz8fjz32GObPn48bbrgh6O+7bt06WCwWZGRk4MYbb0Rzc7Pq/e5+v+nTp+PEiRP46U9/Kn9fANDc3Ix58+Zh0KBByMjIwOjRo7F+/fpw/ldEBAk+QRAh8+KLL6KiogJ33303Tp8+jdOnT6saCi1duhQrVqzAwYMHMWbMGNjtdnzve9/Dli1bUFVVhVmzZmH27Nndpi+ffPJJzJkzB19++SW+973v4bbbbsO5c+cC7t/W1oaVK1fif//3f/HJJ5+grq4OjzzyiPz+c889h7/85S94/fXXUVlZCZvNhg0bNgQdw65du3DXXXdh0aJF2L9/P66++mr85je/Ue3T3e/397//HYMHD8ZTTz0lf18A0NHRgfHjx+Of//wnvv76a9xzzz24/fbb8cUXXwQdU49hKcaFCxcYAHbhwoV4D4VIcdrb21l1dTVrb2/v8bmamppYTU0Na2pqisLIgjNt2jT20EMPqbZt3bqVAWAbNmzo9vhRo0ax1atXy6+Li4vZ7373O/k1APbLX/5Sfm232xkA9tFHH6k+q6WlhTHG2Ouvv84AsCNHjsjHvPzyy6ygoEB+XVBQwF544QX5tcvlYhaLhV1//fUBxzlv3jz2ve99T7Vt7ty5LDs7u0e/XyCuvfZa9vDDDwd8P9jfS6i6RhE+QSQ5Z8+exX/+8x/U19fjP//5D86ePRu3sUyYMEH12m6345FHHsGIESOQk5ODzMxMHDx4sNsIf8yYMfLP/fr1g9FoxJkzZwLun5GRgaFDh8qvBw4cKO9/4cIFNDY24oorrpDf12g0GD9+fNAxHDx4EJMmTVJtq6ioiMrv53a78fTTT2P06NHIy8tDZmYmNm3aFPPCDbJWIIgkx7c88fz58+jfv39cxtKvXz/V60ceeQSbN2/GypUrMWzYMBgMBtx8883o7OwMeh6dTqd6zXEcBEEIa3/WC4Z0kf5+L7zwAl588UWsWrUKo0ePRr9+/bB48eJuj+spJPgEkeTk5OSgvr5e9TqWpKWlwe12h7RvZWUlFixYgBtvvBGAGBEfP348hqPrSnZ2NgoKCrB7925cddVVAMQIe9++fbj88ssDHjdixAjs2rVLtW3nzp2q16H8fv6+r8rKSlx//fX40Y9+BAAQBAGHDx/GyJEjI/kVQ4ZSOgSR5PTv3x+jRo3CoEGDMGrUqJhH9yUlJdi1axeOHz+Os2fPBo28y8rK8Pe//x379+/HgQMHcOuttwbdP1Y88MADWL58OT744AMcOnQIDz30EFpaWoKWdj744IPYuHEjVq5ciZqaGqxZswYbN25U7RPK71dSUoJPPvkE9fX1crqtrKwMmzdvxueff46DBw/iJz/5CRobG6P/i/tAgk8QfYD+/ftj2LBhvZLKeeSRR6DRaDBy5EiYTKageeff/va3yM3NxZQpUzB79mzMnDkT48aNi/kYfXnssccwb9483HHHHaioqEBmZiZmzpwZ1HVy8uTJWLt2LV588UWMHTsWH3/8MX75y1+q9gnl93vqqadw/PhxDB06FCaTCQDwy1/+EuPGjcPMmTMxffp0FBYWdlsiGg041huJrgTCZrMhOzsbFy5cgNFojPdwiBSmo6MDtbW1KC0tjdjulogMQRAwYsQIzJkzB08//XS8hxMSwf5eQtU1yuETBNHnOXHiBD7++GNMmzYNDocDa9asQW1tLW699dZ4D61XoZQOQRB9Hp7nsW7dOkycOBFTp07FV199hX/9618YMWJEvIfWq1CEHyWsVivq6upgsVjIIoEgEoyioiJUVlbGexhxhyL8KEGOlx5aXgOOlIjPBEEkFCT4UYIcLz00rwBcJ8RngiASCkrpRAmz2UypHADIXyqKff7SeI+EIAgfki7Cr6+vx49+9CPk5+fDYDBg9OjR2LNnT7yHRUjk3gsMOy4+EwSRUCRVhN/S0oKpU6fi6quvxkcffQSTyYSamhrk5ubGe2gEQRAJT1IJ/nPPPYeioiK8/vrr8rbS0tI4joggiHgzffp0XH755QnTRjCRSaqUzocffogJEybglltuwYABA1BeXo61a9cGPcbhcMBms6keBEFETrR72gJiJ63esBYA4tsEPd4kleAfO3YMr776KsrKyrBp0ybcd999ePDBB/HGG28EPGb58uXIzs6WH8ruPARBEClFt21YEgidTscqKipU2x544AE2efLkgMd0dHSwCxcuyI+TJ09SxysiIYhmx6veYv78+QyA6lFbW8sYY+yrr75is2bNYv369WMDBgxgP/rRj1QduP7617+yyy67jKWnp7O8vDw2Y8YMZrfb2bJly7qcc+vWrX4/3263s9tvv53169
ePFRYWspUrV3bpwPXmm2+y8ePHs8zMTFZQUMDmzZvHGhsbGWOM1dbWdvms+fPnM8YY++ijj9jUqVNZdnY2y8vLY9dee62qi1a8SbmOVwMHDuziFz1ixIigi530ej2MRqPqQRBEZATqaXv+/Hl861vfQnl5Ofbs2YONGzeisbERc+bMAQCcPn0a8+bNw49//GMcPHgQ27Ztww9+8AMwxvDII49gzpw5cuPx06dPY8qUKX4//2c/+xm2b9+ODz74AB9//DG2bduGffv2qfZxOp14+umnceDAAWzYsAHHjx/HggULAIgrbt977z0AwKFDh3D69Gm8+OKLAICLFy9iyZIl2LNnD7Zs2QKe53HjjTfGxc45ViTVpO3UqVNx6NAh1bbDhw+juLg4TiMiiNQiOzsbaWlpyMjIQGFhobx9zZo1KC8vx7PPPitv+9Of/oSioiIcPnwYdrsdLpcLP/jBD+R/r6NHj5b3NRgMcDgcqnP6Yrfb8T//8z/485//jBkzZgAA3njjDQwePFi1349//GP55yFDhuCll17CxIkTYbfbkZmZiby8PADAgAEDVM1ibrrpJtV5/vSnP8FkMqG6uhqXXXZZqF9RQpNUEf5Pf/pT7Ny5E88++yyOHDmCt956C3/4wx+wcOHCeA+NIOJL64dA40/F5zhw4MABbN26FZmZmfJj+PDhAICjR49i7NixmDFjBkaPHo1bbrkFa9euRUtLS1ifcfToUXR2dqr6zObl5eHSSy9V7bd3717Mnj0bFosFWVlZmDZtGgB0a3tSU1ODefPmYciQITAajSgpKQnpuGQiqQR/4sSJeP/997F+/XpcdtllePrpp7Fq1Srcdttt8R4aQcSP1g+B+uuBltXicxxE3263Y/bs2di/f7/qUVNTg6uuugoajQabN2/GRx99hJEjR2L16tW49NJLUVtbG9VxXLx4ETNnzoTRaMRf/vIX7N69G++//z4AdNsvdvbs2Th37hzWrl2LXbt2ye0NY91ntjdJqpQOAHz/+9/H97///XgPgyASh7atADQA3OJz2zYg67qYfZy/Hq3jxo3De++9h5KSEmi1/mWF4zhMnToVU6dOxRNPPIHi4mK8//77WLJkSUh9cocOHQqdToddu3bJnlUtLS04fPiwHMV/8803aG5uxooVK+SKPN+V+GlpaQCg+rzm5mYcOnQIa9euxZVXXgkA+Oyzz0L9SpKGpIrwCYLwQ8bVkMUebiBjekw/zl9P24ULF+LcuXOYN28edu/ejaNHj2LTpk2488474Xa7sWvXLjz77LPYs2cP6urq8Pe//x1NTU2yH31JSQm+/PJLHDp0CGfPnoXT6ezyuZmZmbjrrrvws5/9DP/+97/x9ddfY8GCBeB5r4xZLBakpaVh9erVOHbsGD788MMuHa2Ki4vBcRz+7//+D01NTbDb7cjNzUV+fj7+8Ic/4MiRI/j3v/+NJUuWxPR7jAsxqiBKWEItXyKIWBPVskzbB4w1/FR8jjGHDh1ikydPZgaDQVWWefjwYXbjjTeynJwcZjAY2PDhw9nixYuZIAisurqazZw5k5lMJqbX69kll1zCVq9eLZ/zzJkz7Nvf/jbLzMwMWpbZ2trKfvSjH7GMjAxWUFDAnn/++S5lmW+99RYrKSlher2eVVRUsA8//JABYFVVVfI+Tz31FCssLGQcx8llmZs3b2YjRoxger2ejRkzhm3bto0BYO+//350v8AIiUZZJvW0JYg4QT1tiXCIRk9bSukQBEGkCCT4BEEQKQIJPkEQRIpAgk8QBJEikOATBEGkCCT4BEEQKQIJPkEQRIpAgh9DrFYrdu7cCavVGu+h9IyW14AjJeIzQRBJCwl+DKmrq4PD4Uh+t73mFYDrhPjc29DFhiCiBgl+DLFYLNDr9bLRU9KSvxTQFovPvU08LzZE3CkpKVE1J+c4Dhs2bOjROaNxjmQl6dwykwmz2Qyz2RzvYfSc3HvFRzzIXyqKfTwuNkTCcfr0aeTm5oa0769//Wts2LAB+/fvj/gcfQ0S/BhjtVpRV1cHi8XSN8S/t4nnxYaICp2dnbIlcU8J1hGrN8+RrFBKJ8b0mTw+QXiYPn06Fi1ahEWLFiE7Oxv9+/fHr371K0g+jCUlJXj66adxxx13wGg04p577gEg+stfeeWVMBgMKCoqwoMPPoiLFy/K5z1z5gxmz54Ng8GA0tJS/OUvf+ny2b7pmFOnTmHevHnIy8tDv379MGHCBOzatQvr1q3Dk08+iQMHDoDjOHAch3Xr1vk9x1dffYVvfetbMBgMyM/Pxz333AO73S6/v2DBAtxwww1YuXIlBg4ciPz8fCxcuFBl4fzKK6+grKwM6enpKCgowM033xyNrzrqkODHmD6TxycIBW+88Qa0Wi2++OILvPjii/jtb3+LP/7xj/L7K1euxNixY1FVVYVf/epXOHr0KGbNmoWbbroJX375Jd555x189tlnWLRokXzMggULcPLkSWzduhV/+9vf8Morr+DMmTMBx2C32zFt2jTU19fjww8/xIEDB/Doo49CEATMnTsXDz/8MEaNGiU3Rp87d26Xc0gdsnJzc7F792789a9/xb/+9S/VuABg69atOHr0KLZu3Yo33ngD69atky8ge/bswYMPPoinnnoKhw4dwsaNG3HVVVf18BuOETExbk5gyA+fSBSi6Yd/+LybbT7pYofPu6MwsuBMmzaNjRgxggmCIG977LHH2IgRIxhjjBUXF7MbbrhBdcxdd93F7rnnHtW2Tz/9lPE8z9rb29mhQ4cYAPbFF1/I7x88eJABYL/73e/kbVD40//+979nWVlZrLm52e84ly1bxsaOHdtlu/Icf/jDH1hubi6z2+3y+//85z8Zz/OsoaGBMcbY/PnzWXFxMXO5XPI+t9xyC5s7dy5jjLH33nuPGY1GZrPZ/I4jWkTDD58ifIJIcmouCHjvmBt7m8TnmgtCzD9z8uTJ4DhOfl1RUYGamhq5beCECRNU+x84cADr1q1TNTmfOXMmBEFAbW0tDh48CK1Wi/Hjx8vHDB8+HDk5OQHHsH//fpSXlyMvLy/i3+PgwYMYO3Ys+vXrJ2+bOnUqBEHAoUOH5G2jRo2CRqORXw8cOFC++/j2t7+N4uJiDBkyBLfffjv+8pe/oK2tLeIxxRISfIJIck60MnAAGAAOQF1r/HsaKQUUENMvP/nJT1QNzg8cOICamhoMHTo0os8wGAzRGGpI6HQ61WuO4yAI4oU1KysL+/btw/r16zFw4EA88cQTGDt2LM6fP99r4wsVEnyCSHKKszhZ7BkASxbXzRE9Z9euXarXO3fuRFlZmSoKVjJu3DhUV1dj2LBhXR5paWkYPnw4XC4X9u7dKx9z6NChoKI5ZswY7N+/H+fOnfP7fiiN0UeMGIEDBw6oJo8rKyvB8zwuvfTSoMcq0Wq1uOaaa/D888/jyy+/xPHjx/Hvf/875ON7CxJ8gkhyyrJ53DREgwkm8bksO/b/rOvq6rBkyRIcOnQI69evx+rVq/HQQw8F3P+xxx7D559/jkWLFmH//v2oqanBBx98IE+OX
nrppZg1axZ+8pOfYNeuXdi7dy/+67/+K2gUP2/ePBQWFuKGG25AZWUljh07hvfeew87duwAIFYL1dbWYv/+/Th79iwcDkeXc9x2221IT0/H/Pnz8fXXX2Pr1q144IEHcPvtt6OgoCCk7+L//u//8NJLL2H//v04ceIE3nzzTQiCENYFo7cgwSeIPkBZNo8Zg3tH7AHgjjvuQHt7O6644gosXLgQDz30kFx+6Y8xY8Zg+/btOHz4MK688kqUl5fjiSeeUK1Nef3112E2mzFt2jT84Ac/wD333IMBAwYEPGdaWho+/vhjDBgwAN/73vcwevRorFixQr7LuOmmmzBr1ixcffXVMJlMWL9+fZdzZGRkYNOmTTh37hwmTpyIm2++GTNmzMCaNWtC/i5ycnLw97//Hd/61rcwYsQIvPbaa1i/fj1GjRoV8jl6C2piThBxIlmbmE+fPh2XX365yvKAiD3UxJwgCIIIGRJ8giCIFIG8dAiCCItt27bFewhEhFCETxAEkSKQ4BMEQaQIJPgEEWdSrFCOiJBo/J2Q4BNEnJDqxTs7O+M8EiIZkPx5fG0ewoEmbQkiTmi1WmRkZKCpqQk6nQ48T/EX0RXGGNra2nDmzBnk5OQEtK8IBRJ8gogTHMdh4MCBqK2txYkTJ+I9HCLBycnJ6XG3LhL8OEGtDwlAtAcoKyujtA4RFJ1O16PIXoIEP04oWx+S4Kc2PM8nlbUCkbxQ0jBOUOtDgiB6G4rw44TZbI44srdaraitrQUAlJaW0h0CQRAhQRF+ElJXVweXywWXy4W6urp4D4cgiCSBIvwIiNeEq/S5RqMRLS0tAEApIYIgQoYEPwLiNeEqfa7NZsPUqVN77XMJgugbJHVKZ8WKFeA4DosXL+61z7RarXC73dBqtb0eXQea6LVardi5cyesVmuvjocgiOQiaSP83bt34/e//z3GjBnTq58r5c/1en2vT5YGmuilEk+CIEIhKSN8u92O2267DWvXrkVubm6vfnYillNaLBZotVo4nU5UVlZSpE8QhF+SUvAXLlyIa6+9Ftdcc02vf7bZbMbkyZMTKpI2m83QaDQQBIEqdwiCCEjSpXTefvtt7Nu3D7t37w5pf4fDAYfDIb+22WyxGlpcsVgscm1+It19EASROCSV4J88eRIPPfQQNm/eHPJS9OXLl+PJJ5+M8cjiT08WchEEkRpwLIm6L2zYsAE33nijykTI7XaD4zjwPA+Hw9HFYMhfhF9UVIQLFy7AaDT22th7GzJnI4jUwWazITs7u1tdS6oIf8aMGfjqq69U2+68804MHz4cjz32mF83Ob1eD71e31tDTBiococgCF+SSvCzsrJw2WWXqbb169cP+fn5XbYnC7GKxC0Wi3xegiAIIMkEvy8Sq0iccvoEQfiS9IK/bdu2eA+hR4QSiVM+niCIaJD0gh8voiXCoUTilI8nCCIaJOXCq0RAKcKxJlqre8lzhyBSG4rwI8Q3FRPLtEu08vF0p0AQqQ0JfoT4irBvxB+p+MfywkGVOwSR2lBKJ0pIaRej0YiampqI0z2xShXRxC9BECT4UUIyVZM6UQHBPW0C5dNj5cbZm3MOBEEkJpTSiROB8umxqp+ndA5BEBThRwkpYlf68weLphPRV58giL4NRfhRQtlvtqysLGA0bbVaZRvj0tLSXsunU4UOQRAU4YdIdzXsyog9WJMUqUVibzcqoTsKgiAowg+R7iLkUHPv8WpUQt46BEGQ4IeIxWLBjlNtsOoHQ3/WjfL+Xa2YQ4GElyCIeEEpnRAxm81ozCjGRUGDHQ1CvIdDEAQRNiT4YVBRyMOoE58JgiCSDUrphEF5f03EqRyCIIh4Q4IfAyQbA6PRCJvNRnYGBEEkBJSbCJGqs2688rUTVWfd3e4rVfQ0NTV1a2dAlsUEQfQWJPghsqNBgM2JbidsrVYr3G71RSFYF/mE97hpeQ04UiI+EwSR1JDgh8jgTA4cGPQdzUGjcWlhlVbrzZYpDdV8ieaCqJjcLTSvAFwnxGeCIJIaEvwQOXLeDQYOTXwedpxqU72nFFpJwJWeOsEItio3XGJyt5C/FODzAKGVonyCSHJI8ENEEDxpGo7DKZ1anH1X4U6ePBk2m01+Pz09vVfGGBP7hNx7AT4LEM5RlE8QSQ4JfoiMN3ZAIzihEZwY1HlK3m61WuF0OgGoc/VK0bXb7WF/XiTpmWjeLajIXwpoi8VngiCSFirLDJGrh+UhrbJSkZ+/BIAY3QuCOJGrzNWbzWacP38eTU1NMJlMYX9eQrlb5t4rPgiCSGoowg+D0tJSnDUMws6M8Vi5rx1bj5wLmj7JycmBXq8HgLCjdXK3JAgi2lCEHyJWqxU7TrXhiM4CBg7ggN22DFw9LE+O5JUTtVarFTU1NQCApqYmAAgrWieTNYIgog1F+CFSV1eHE5oCUezBAAACJ3590gStcqJWWSljMplCjtZpIRZBELGCBD9ELBYLit2N6Me7YdR5RJ8Bb1e3dkm/SIuvtFotysrKMHLkyJAnU5W5+0Din9AXBVqoRRAJCwl+iJjNZtx0xTD8v0E6tDoBgAM4Dsc79F2qY6TFVxqNaLTmK87BBFt58QhUV5/Qq3NpoRZBJCwk+GGyo0HwJHTE/4JDF48dqTzTaDT6Fedggq28eASauE3oCV0q4SSIhIVjjLF4D6I3sdlsyM7OxoULF4J63ASi6qxb9tOxOcUrpgCgH+/GxPb9qshcr9fD7XbD5XKB53nodDpZpKUm5/7SPJLbZixcNmN5btTfCrS+C2TNAQa9Fd1zEwQRkFB1jSL8MCnobMQ4+150uETRFwCk84DZcUqO2pURuMvlEvcThC6rcQMJbixTNjFNB7W+C8DteSYIItEgwQ8TSTClxVYAkKYBKgZnyCKvFHSpDl+r1YachpEuGEajMeqTszFNB2XNAaDxPBMEkWhQSidMrFYramtrcVprQp2+GDzPY5qZj0knrErFyt6pU6d2O66YpWoIgkhoQtU1WngVJmazGbW1tTB1nBYfJhMOHkvDZ/WDMZRvRr9zR8DzPIYOHdqrwhuqFQNdGAgidaGUTg9o0BXgH+1DUKO14KKgwX86xZW2giBEJUdeWloKvV6P0tLSbvcNNVWT0CWdBEHEFIrwI6C0tBRHjx5FvX4QGO9N5XAcJz9HI0cejr1CsH2VUb1URRTzks6W18Ra/PylZLxGEAkCRfgRYDabodPpMMhRDyimQFzQ4MuM0UhLS4PZbE6YlbL+/Ppjns6hBVgEkXCQ4EeIxWJBMX8ek4wKr3uOw0VNJnYbLkfVWbff9IlkqtabaZW4LNTyXYBFlgsEEXdI8HvIpRkdqtccx+GioMGOBsGv0CpF3u12o7q6OubRfq9F9Upy7wWGHfemcyjiJ4i4Q4IfIVL0/vlJO8AEQGG4oOOAikK+i9BWV1fD4XCA53nwPA+Xy4WmpqaA0X5Cm6SFC1kuEETcIcGPECl6P6k1AxwPgJPfczKG7fWCyl8H8PriC4IAnhe/ep7nAy6y6lMVNb4RP0EQvU5SCf7y5csxceJEZGVlYcCA
Abjhhhtw6NChuIxFit4vz+6Eljkhm6kBADh0CKLRmjJKl1odmkwmueRy6NChmDx5MlpaWuBwOFBbWyufpSe59z51d0AQRFRIqrLM7du3Y+HChZg4cSJcLhd+8Ytf4Dvf+Q6qq6vRr1+/uIzp0owOGM4eRnv/S7CrNRPeSJ+holCDuiNilF5TUyN74ysJFr0r7ZaVrwOhLL9MqJ64BEEkBEkV4W/cuBELFizAqFGjMHbsWKxbtw51dXXYu3dv3MYkCavh7GFwirSOFPAHmrBVVuscPXo04CKrQJU+/qJ35b4JbaFMEERcSCrB9+XChQsAgLy8vLiNQel9Lyd1GAM4DtvrBZjNZpSVlUGr1cLtdssirRRwQRBQW1sLt9sNXwJV+vjriqXcNy6VOQRBJDRJK/iCIGDx4sWYOnUqLrvssoD7ORwO2Gw21SOaSOdraWmBXuiQxR4AHB5DTUl0XS6XnKP3jbxdLhdcLhdqampUkbs/4Q7UFYtEniCIYCRVDl/JwoUL8fXXX+Ozzz4Lut/y5cvx5JNPxmwckui63W64oJHFHgAYGCorKwFAtlMWBAE7d+6ExWIBz/MQBAEcx0Gj0cje+d3l3X1tFHrFKoEgiKQnKe2RFy1ahA8++ACffPJJt8ZiDocDDodDfm2z2VBUVBSxPbI/JMvkHenlcPE6iAl8DmAMHBhKOmoxmDVDo9HIHbCkUsympiaYTCaMHDlSNekKBO+K1euQNw5BJCx90h6ZMYYHHngA77//PrZt2xaSi6Rer5ebkEQbSaCdTicEQUCJ6xTO9CuFzenZgePAwMGqH4xhuAi73WvDYDQa0dzcDECsz7dararIfefOnYlVZaNcKUuCTxBJSVLl8BcuXIg///nPeOutt5CVlYWGhgY0NDSgvb09LuPx7X4ltT+Ezz0Tp9WrxB4QRV7ZNcu3PDPhqmxopSxBJD1JldLhFPlxJa+//joWLFgQ0jl62vFKiRThG41G2Gw2OV2zK3MSBI73lOSLY75OXyOvtPVHZmYmxo8f36PxdDfOhEkPhQKlkAgiZPpsSieR8J08lYSV8WqrBQByjr62tla2VtBoNPL8gu8dQCiEKuTBFmEl7MWAUkgEEXWSKqWT6JjNZlgsFvRzXxTLM6ULFGN4rqoTjWkFKC0tlatz3G43MjMz5eOrq6thtVpRWVmJysrKbm0RJCGvra0NaqMQLD2UsH49lEIiiKiTVCmdaBDNlI6S6upqOWWzN3McOvl0VU0+ABh1wDj7XlXVkF6vD/har9dj8uTJAaNw6Y5BKueUmp2HE7UnbIRPEETIhKprFOFHiK+9gTI/P8hRD73g8DmCIUMHuf4eEOcknE6nai+l+Ot0OgBAbW1tF2M1QLyj0Gg08CWcqF1arAWAzNYIoo9Dgh8hkqhKK2MlJ0ye5zE6R4DF3eBzBIeGNm97RECck1BW6vhit9tRXV2t2uZ7obFYLNBqtdBqtXKZaqAUTjAHzYRN7RAEETWSatI2kbBYLKipqQEgiqUy9dLS0oIT6WNU6ZxAx3ZHU1MTysrKArpg+mteHmgyWaoi8jd522vNzQmCiBthCf7JkydRVFQUq7EkFUrrYuXKWIfDAa1Wi2J3I2q4IgCcSvhXVDlRos9CkVYLQRCCRviAmMdX5tjPnz+Ppqambucf/Fkla7XagJO3/i4cBEH0LcJK6QwfPhxPPPEE2traYjWepMI3/200GqHX65Gbm4tBwlnomVMUe5958eMderhcrm7FHvBaQ0ipI8msTWkC5y9V488qubS0lMzVCCKFCUvwN2/ejE2bNqGsrAzr1q2L0ZCSD0lcbTYbLBaL3Kd2UGe9T6WOR/g54KxhEEwmU5fFZJmZmfKkLgDVz4F87v3l38kqmSAIXyIqy3zzzTfx+OOPY8CAAVi1ahWuvPLKWIwtJsSiLNNf+kSiQVeA2vQhXfL5Rh1w/2U6bN++Xd4m1ecrSzNNJhNycnKClk5SaSVBpDYxLcu84447cOjQIVx77bX47ne/i5tvvrlLyWAqoUzt+DYxMbuboGGuLsdk6EShVkb4UopHecFoamrC0aNH5fMq0zfSIi3l+wRBEIGIeOFVW1sb9u3bh7/97W9YvXo10tLS8MADD+CJJ55QrR5NNGK18ArwOlxqtd658NLSUnx+0o4jumL4TuCCMeQ7z+KSjtAqdiTXT4fDofpZCc/z4HkepaWlFO0TRIoQkwj/tddew1133YUxY8YgOzsbM2bMwKeffop7770XL774Ivbs2YORI0diz549Pf4FkhHpixYEAaWlpZg6dSrMZjOmFGWChyKXL11jOQ7Nuv5ylM/zPMrKylTnlHL4PM9Dp9PB4XCofuY4TpXnFwRBLr0MRLB6/KSj5TXgSIn4TBBEUMKK8IuKijBp0iRMnjwZkydPxvjx42EwGFT7PPvss3jrrbfw9ddfR32w0aA3InwAqhaEFosFbzaa/B7DMQH/r32PKiL/5JNPwBgDx3FIS0uTI3rfaF5i2rRpXYzZgkX4yjsRjUaTmA1XQuVIiWiypi0Ghh2P92gIIi6EqmtR99JpbGyE2WxO2JxyLAVfEl23261y9tTr9fgmdzwaLrIuk7c8c2NS6y7ZNweAaiI3MzMTdrsdmZmZaGtrk/P8HMeBMQae5zF06NCwPHMAqDx4ANGHR+rEJY2j14nEEtnfMWStTKQYcfPSGTBgAP79739H+7RJgdlsxtSpU7vYOFssFrQ54XflrQAeTekDVWWWSpsGyTbZbrcjPz9f3kf6DEEQQrJD8F2h68+DJ+4NV5SWyKGSe68Y2SuFPZLzEEQKEHXB5zgO06ZNi/ZpkwbfvLg0gT3gYm2XBVgAAI7Dcb2YfpFy6zk5OXKJppKWlhbVa57nQxZp6aovPUt1+iaTKXEWZUXLEpmslQnCL2SPHEWsVqtfjxwp/66qyfexTuYBlHQcQ0Fng5yukZC8dKTeucrt/jxz/OXhKysr4XK5ZJO1pMzXEwThF7JHjgPK1IoU2ZtMJtkSudDZ6N3ZJ70jADimL0WDrkAl9iaTSa7zV1bjaLVa1NXV+bVTkBqiVFdX+63GIWdMgkhNyC0ziiircnwjZ0lkTcI5NGnyRJcF35w+x6FeP0i+MJhMJrk1Yl1dHdLT0+WcvsvlUjlfWq1WuN1ueQ2Aw+GQPfpra2tVUb00HmUqiFbrEkTfh1I6MUDqfqWsoJEqeADgi/SxcPB69UGeloiljlr1nUAAOI5T1e8DkKtspAuPlAKSOmFJKMcilW9KKR+pxp+EnyCShz7ZxDwZsFqtcmQtCAJqampUqROXy4UilxW12sFwc1pvlB/AO98f0oSuslJHPAUnC7V0kfHnca8syfT1xhcEIWDDc4IgkhvK4UcZf3lxyeJYEtmCzkZc6fyy68Ech9r0IWjQFQQ8f2ZmZkBbZcYYzp8/L78OxSVTuhiUlpaqqnYCVv7QylaCSFoowo8yyjx+oK5WsmD7C+o9ot+qNaKsvevxUg4/EE1NTfIkrbTyljEGxpjsvAlArtbxvROQLw4trwFH/CxeUta406ImgkgqKMKPMsq
oWmmiBkD2vdFqtWIEHWj2hONwVts/wFvdp35qa2tRU1MjN1mRUj9NTU2oq6uTJ3wl/FbtBFq8RDXuBJG0kODHEGWaBPA2LRcEAefPn0e+86z/xVgKeuo8yvO8fJHIzMyE0+mU35MEXlqEZTQa5XLOY7Y5cHGDuwq7v5WtBEEkBST4MUKZJvFdISsIApqamrq1RW7QFcDpdKrq77srquJ5Hunp6arPSktLg16vR0dHhyr/75unb2lpkcs5T7Zeiz0tf+19Yac5AoKIGST4MUJKk0iplUD0c9sDWi7Upg/B57pRsGr8O236QxCELnl+5aSxlFJSrtJVdukKaeI2liSiD46/ixBdmIgkhAQ/RnQnllKqZkzbV0gTHAFTO518Our1g6I2Lp7nZZ9+5ViVfjojR470X93TGyKXiHME/i5CiXhhIohuIMGPEWazuUszEyV2u13OrQ/qrPe/k8dzZ5AjwPsR4tsApdvyTUnomx73ilysxD8R5wj8XYT8baOon0hwSPBjiLJSR6vVqnLxgDcfX68fFHTh1fH00qC1+aHC8zxyc3NRU1Mje+4ERCleUjQLeEUuESLc3hJYfxchsmUmkhAS/BgjVeqUlpZ2EXyJQY56aARnwFw+43jU6XueTx86dKhqAtnlcqG6ulpuhl5ZWYnq6mpUVlaiw/prr3hJ0azpGW9XKaEV4DLE53hFtIkmsImYjiIIBeSl00soWxAGWin7ReZEuHmd/xOE2fAcQNC2iN3tN9DwAYoz10M/cJkYxSq7SCkjfiB+7QWpsxVBACB75IRDWvDkK/Ymk0lO9xR3ngxcl+9peL43c1zI6Z1QxB4AdDodtFottFqtPJ4m501oztnrFVJlNJ2/FIDUMUsTv4g2EfP9BJHAkLVCL2GxWLr0u+U4Ds3NzbL1QUFnAxhj3iYpfujk01Gnt6BePwiDHPUhOWt2h91uD9ylTIqiDVOAi61iCgcACtaIk7iB6K3om6J8gggZivB7CamPrDKDJq28VW4TBTxwlA/GIHCaHpdrmkwmeQWw9OxbvQPAG9m3fw7wWYBwzuujo3ztS2/l1xMtjw9QtQ6RsJDgxxBJQKurq/Hpp5/KKRae5+XUiT9vnNKOAP1vAXESFxzShA6/5ZqheO0AkC2c9Xo9bDYbtm/fLlfvHD16FDt37kTbkRsB10lxcjZ/addJyWCTlL01gZmIE6WJeBEiCJDgxxRpBWtTU5Mqd6/T6TBy5EhMnToVGo1G3i5dBAqdjXJDlEC4ofG7PZw5+KamJnkVrhLJE9/g/ACAALAOMaIPJ2cebn490qg4EfP4iXgRIgiQ4McUaQWryWSSSzJ5nletwnW73fLPTU1Nsg1DGjoD1+ZzHNy8DrXpQ3A4PfDirp4iwNOVi0v3v4NvJCuJdv2t4Yt3X4qKE/EiRBCgSduYInWeUiKZqkkEisgHOeq9k7eM+Rd/T+VOg9sWlclbJQMNH0BgadBoMoB+M4HD+eIbpme8Qpa/VJy4FVpFkW99F4AbaD0lPofjmS+Ve0YaFdPkLUF0C0X4vYjValWtcvXXHUui0NkIjkmNUoLk5TkOtemlUR4pUJr5R+h4m/ii/XNxclY4p67MUU7cSmIPDZA1J/yURk+j4kjuEBJpcjWRxkL0WUjwexGllUEwB02JEsdxQBC69cwHOHyROTFs+wW9Xu93O8dx4DmPbz7r8Ag3732tFKb8pQCfB3B68blgDTDoLbV4BxKzaIpcJHnzREojJdJYiD5LUgr+yy+/jJKSEqSnp2PSpEn44osv4j2kiOhuYVShsxEV9p3g4X9lrowipx+O6Af6fKaYMGasDWi8H4DGI+zpojA1LhKFWoryWZv47C9Cl8Ss6XG1wEdT5EK5Q/C9wCTS5GoijYXosySd4L/zzjtYsmQJli1bhn379mHs2LGYOXMmzpw5E++h+UVZ256bmxvROYo7jocQ5QPguIhr87uWc3KK/zIAiogfAOD2ir4U5St9dZTiKokZoBb4cESuJ3cD/tw+gcSaXE2ksRB9lqTz0pk0aRImTpyINWvWABBLCIuKivDAAw9g6dLuhaO3vXR27twJh8Mhp098o2qe55Gfn4/m5uaAHjuA2P0q2ApcAHIpZ6mjNuxJXJPJJBur5ebm4lKMh4bvAIMk+jzA54j5eiWSj86RElFM+Txxu9ACgImvL2kWt/VkYlU6fyS+Pcqx8Vk0sUv0Ofqkl05nZyf27t2La665Rt7G8zyuueYa7NixI44jC4xUmmmxWGCxWLo0NhcEATk5OdDpApimeSh0NkLDusn7cxzA86hNH4JdWZPCSu9IJaEulwtnz56FAHE83suL4BF7jbhVP8H/IizAsx/z/nyoX8/z9D1Jefi6fSab2NOELhElkkrwz549C7fbjYICtZAVFBSgoaHB7zEOhwM2m0316E2UzUXMZjOmTp0qWxlI1NTUqJqLK1GmWiyOupBTOwKniTi9wxjDOcfEAB/lBsCAzmqveCojd9MznihfcSfC2sT3A+XsQxG0nqQ8kj1d0pO5DrpYEAqSSvAjYfny5cjOzpYfRUVF8R4SRo4ciWnTpqk6YgmCoFqcJeHrs1PacQyc4O5e+BmDE7qIG6dkp/1HXgLg//xtYu394XygcaFnIvc+cYJXNwTgDN59uQzRfE1oFS8GhinicYfz1Q1WqELFPz25u6HvllCQVILfv39/aDQaNDaq89ONjY0oLCz0e8zPf/5zXLhwQX6cPHmyN4YaEmazWVUaKQl9sFx+obMRk+27Ajc/l+A4MF6D4/qSiMZWd/HWgOu9ZFrXe9I3yvEywLFHvCAAolAN+G+xTl84J+bQlXX9jYvEC4C2GNCYgG+04oWEIlMvPblDoeofQkFSCX5aWhrGjx+PLVu2yNsEQcCWLVtQUVHh9xi9Xg+j0ah6JAJS9Y5yEldZm89xXMA6eUBsfh4KjOPxZcbosMeXrTsQ9jFepD8rzrsaF25xu2TCxud59nOLF4BhxwFHlfi69V3/kWk41g19+YIRzu+W7OksIqokleADwJIlS7B27Vq88cYbOHjwIO677z5cvHgRd955Z7yHFhaSsVogd0vGWAgNTELL51/UZGJHVkVYvjsD0rcFj+7VH+LzMl2MKrN+KAq2XMrJgDMPixcA0zNAwcvq6DNrDuSVuv4iU+ki0Ppu983U+3Iqoy//bkRMSTrBnzt3LlauXIknnngCl19+Ofbv34+NGzd2mchNZKqrq2Ux79+/f9BIPhga5jFe6y6fz3Gy706onOmYHtL8sGcAPi89q3PtH4jCxJwQq3uYmOoRzonCr/TOOVICZFwFDHeJK3X9RabSRUBp3RBI/PpyKsPf79aX72iIqJF0dfg9JV49bZVs375d/lkq2aypEXvV8jwfNIevpEFXgHr9IHQiDQjQIF1FGHX6Aw0foCxrFThO8eehtQAuX/8fHeRFWUr4PEXNPi9G802PK7ZxAJh6QZZvjb2y21b75/7r58k0TaQn6xSIpKdP1uH3FaSyTMkq2Ww2y9sCib2/ZimFzkaMt+/rZmZVgaJOf2dm8Dr90sw/gu
OYOsrvIvZAV7HXAeDVC7Sy5opibHpG+RuJ4uRbvaOMUs887EnhvB04hSHdCQCpHeH25TsaImqQ4McBqSzzyiuvlO2Tg60P4DgOubm5Aa2U851nu22Y4nNCMD54nb4G7dKuYeKEumqHAy5u8kbiMi5RnKSKHal6Rynscu4f3YtZb+a1EzF9QpOzRAiQ4CcIyqYovqtxGWNyS0J/XNJRg4rWHSjtOAaN4AxN+BkTU0EB4Dh3wPe6h4eYsvGkbYRzYo2+S9ErF0xM8Wg8i9A0pq6ePFlzxXNxBq/YK+v3AfH5cD7gbhKP7Y28drJOmibihYroVUjwEwSz2YyysjJotdou1smh9qktdDbiCvtuhFq9A44LWL3T6rwkjElbXwTPGHxP4ESXih5HlffZtzF6xlXi/sqVulL9vlSh07hIfC25dQJeUYuVMCdr+iRZL1RE1CDBjzNWqxWVlZWorKwEAFWPW0DM84c7rx60CboSRfWOr/AbtNYI0jmhII2LFztpyX+CAvANJwoSIEb8zSsgN1VR1u9L+f7GRZDr+6XoXilqsRLmZE2fJOuFiogaVKUTZ5SLr6SKnbq6Orjd7oBNUjiO6/Yi0KArQJ3eAjenDT0RzxhKO46h0NmIKwdcA57rvklLZHicN4Gu7psynpQQlw5kXi/m95XVOnLFj6cCqO0TsT5fXy6md1K9aodIKULVNeppG2csFovcCUuq2AHE7liBSjRDuUYXOhtR6GzEzsxJYOBDE32OEy2YAXCIldgDXudNwH9ZJ+fdjznESV/hHNB6UtymTElw6Z6o3vOeo0qs5ScIoguU0okzkoPm1KlTAQCffvopampq4HK5VCZqkTLZvstbxRMKHIfj+hIILLLFYOGjyOtzGUDBq8BwQVylK626lat1NN6UhE68MIE5PWkgj3N/1hyanCSIAJDgJxC1tbWqiN7tVlfK+FbvhMolHTUo7TgWsugzjsfa9v/glfYaVLnujugzw8MzLtbm7aI16C2xP+7FTQATS0TBZ3lz59Jkr3wnwsSFYRlXiefw11KRIFIcEvwEQin2JpOpS+qmJ1F/obMx9Fp9jsN5rgg2FGOz87cRfV7kuEWL5W84sZRTbqaiESd5pbJMrbSGQNE4xjBFYdQG8c6AqlISG7ob61VI8BMIpZg3NzfLPysj+1BtF/xRYd8ZenrHk/MXkIZV7adjG+nzuVCXa/qMj8sQo32lrbLLY3PNZ3qPtX+gXqwlmbgFq0oJR3DCFScSs+6hUtFehQQ/gSgtLZV/Vgp7oGqdSLiko6b7VokqOHQgH5ucq2Mn+lL/20BoTGIaxzBFMSyDeCEQzkOVEpLhA7c0VApxOIITrjiFun/9rd4+AKkGlYr2KiT4CYRy8VWg1I2/RVjB9veHxVEHjgnh2TGAxw7noyF/RtTgMrxi0P65d3vm9WIFD3zueFiHWJNf8HJgoW963CvEhikANOqLSSCCiZO/aD5UMWt9F3IfgFQjWdc0JClUh5+gWK1W1NbWhhTdh1KXH4gvMieGWKvvPT8PB76texjl2rURfWZY8Hnis3AeKnHn88Scfut6/8dc4kmJKd00pdp9LsNr5SBF4T11meyJW2X9raLYZ80RJ6sJIkzILTPJqaurCzmV05NrtmjFEAqc/BCQHqMUj58/Ryln7xvJCy1iBU/WvK7HCee9aRK53+6DnosGvH79gNep058HTyidtSR6kpoY9Ja3DwBBxBCK8BMUq9Uqd8WKNYfTy8TmKGF6KXBw4jFDv+gPSD9BXC0r2SwEQ+W7r0QDuVon4HEXxH2kqFy6GxBaPefUqN+XIA9+IsGgCL+P4NsNyzeHH2ltvhK5Tl8QwsjpAwxarGh3YEV7Bz50vNHjccg49oiCH8TNUyaQNYN2EETB9vcnLm1T+PQA3vQO0LWzlhKqLCGSFBL8BEUZ3Uui789ILVoVPIXORqShU4zyQxZ9Kc3Do1r4IVa2n4teCSdrA9DZzU5B/nxddQCfDejHdX2PS/cYt3lW8kpRumTVzGeKzxlX+Z9QpMoSIkkhwU9QLBaLbKYm/dxdDb7JZIq4Py4ADHLUi2LPeXzsAQQtl1TBwYVMdCAfW5zPocp1d4xX6vLoktf3RTgn3i3IKBZpSZUx7Z8DtRPFhV7Svq4670pdf+Te653wpRp7IomgHH4S8emnnwYUfb1eD7fbDbfb3aNJXKlP7lDt+6gXpsAGC7p42HcLQzrOoQP5MOIE7jd09duPD5Jnj8Fbs++3T68HZbWPEsmH319+nyDiAOXw+yBDhw4N2AzF4XDA5XL1SOwBb5/c69Lm435DGQq5PQg9ypfg0IE8AAx2FMbYj8fTQzcYWfMg5vM9TVkkbx7AR+yl7zZNFPJ+M7tW6SjFXpn/TyVoBXHSQoKfRJjN5i6CHg1HzWC0sQEIP8IHvCWcemxyromN6PN5QMFLYk7euzHQzoqfld+hTjxP1jzvdk4rRu2+PXYBdVOWgjXBq3T6qjDSpHXSQoKfZJhMJtVrfymezMzMqH1ehe55pKMZWtgRuHVhd3DY5FyDFe0OrGw/Fz3x5zNFozWVpYKflFfrenT13Jdwerz23/ZuYh2iSPubnJW2dSf2QN8VRpq0TlqoAUqSMXLkSABAdXV1wMbmHR0dfrcHaqgSjHLtWtWK2lfaa2BDcVjnEBHvElzIxCbnavncPSJQ7j0ilBcxT5MVKTfvG+FL9ffd1eMbpgCtp0KzbfBHotb7596bWOMhQoYi/CTFZrMFfC9QqWZGRkaPP7dC97wn2meILNoHAB7bnU/1eCwxxTBFXGnbeJ+3Ysc3YpdeSx7+vimc9s8hVwJFQl+9QyDiBgl+kmKxWKDVaqHVakNO4djt9pDPzzx/Gr5zwOXatXjEkIelBj2WGvQwQuo2FR4dyIvNoq1wkBqi+6P1XaD1HfU231RG/lLIq3GbHvc2XpEEuqepD0qdEFGGBD9JUbZGdDoD5ae9BKruCbi/Jxfe3WH3Gy7BTN0iRJLXVy7aik0lTzeDZx0+K3XTFN48bnjnAzjRallydgTESL7tE4DTi+8LrZ5jeFGgfdMx3U3g+ns/HCfJvjpBTEQVqsPvA0i+OzqdDna7PaJcvS9XDZgelrXOh443UC3MgSiWkVT1iH+GHFzQw4Yh/Mc4JUxBhe753nHl9Hw6+FyPePtcRCV3zvbPRdsH1ibu3+VCxwPaIq8fjxShd1e33xO3zWgcTyQ1VIefQpjNZkyePBnjx48PaUVuKLAw5/Ov08/HUoMBI/m3EWklD8CBQYcO5KNa+CFsKO7lXD/zRPx+TNeEc2KKx3VC0VXLswaAU86NCOI+QovXhVNZyumbnpEic8MUSv8QMYcEv48h2TBEUpqprOlnTBPR54vCr49wwZYS8S6hA7m91EhdAZ/t+cHfd8B7nzktAEH04JFFXzqGiU3XAa8Fs28pp7SIy3VCvHMIlr7pLmVDjUSIECDB72NI0b4yr8/zfEgLtJR3BhwXxFo4BBakT40wt+8LL3vvV7nuxqr207HvsSu0eH7w/Q4YxLy+9AAADnCd9vyYAfU6AOaxW
z4nir8kxsrOW6Gu2KWKHSIKkOD3UaQ8nslkgk6nCzvNwwXzkg+Rcu1aH9GPVPx5bHKuwSbnGnQgP/Y9dgMiTjJ7H4D4O3WKOX3WDtXv6KoTo3suQ3yWonNfG+aCNeLPgSL4ltf8N2ohiDAhwe+jSHX6NptNLuEMhslkUlXynOm4Ohxr/IBIop+OZvBwIPLafd+JYDHyX9Hegf9ub+kl8feJ8FkHxH9COk9072cCVzgnXgiEc6LQ198KuE5Cdu6Uqnj8RfDKOwHhnKdhC0FEDgl+H0VpryyVcAazTm5paVH59HxjewJHWpeARVRxo6ZcuxZpsENAOoyow0zdIsXirXBQjoVBirSd6IePnS/2guj7fhce4eezvF76gFjaWfCq4jCDd0K19V3PcU7vRQDwP+mqvBOQ6v0ppUP0ABL8PoqUyzebzbBardi5cyeMRqOcyw9lUtfafh0upD/rU4USGRW652HECbnM8hFDnqKiB+ip+DNoZb+e2C3kEuB3Ipd1eOwTNKLYZ1zlKcP0pNHSRnr31ZeLz1qLKPCGKcDhfDGK97VQkC4CpmfEtA9V4RA9hOrwU4CdO3fC4XDIEb6/PrmZmZldVuIONHyA4sz10PON6Pnkq3/E+v0fwlvT3vM7CoDJ9fzTdE9EsY4/UJ1+mmjkJpyDGENpfN6X+utqxAogqT5/2HFv/TwQvIY+UX11iISA6vAJGd/uWf7wJ/ZlWb+Dnm9ArMQeEMs4R/Jvg4MLWlxE5Dl+Jd56/k3Ol3rQeYuHuq+uVKcveFbkSihbMXrSNUrk/rpuT95fI6aAjpR4UkGeWv5g0TtV6RBRgCL8FERy2uR5Hvn5+X5dN6eYZkPHt8ZhdMAr7Yc9jpzRiPql471/5iP5t3Gdfn6Pxgg+T23LEKxzFiBeIFrfQVf7Zk+rxkDdtSTqbxXz/1lzgEFvBd6P7gRSEorwiYCMHDlSXpHb0tIim7D5I2g0EIXcvj8kfx4OLqjFOtIVvNKz+KgWfogV7e1Y0d4RoXkbBwjn1ZuCWjVzwMVN8OvVLzVvEc6LYl1/K/CNVnxWIjlvtr4b3C+H7gSIIJDgpyjKxuiSnbKyuUqt/b/Q4S6AwNIDnCHNk47QBXi/Z5Rr1+I7uocUog94RbuncBBTLGKVz0HhljCPl8ozQ0Rb1PUCIZ/KCTnKb1zoadbiEXYlSmfOxkWexuta8Vmq36d6faIbKKWT4lRWVqoEX5ne0Wq1KC9YiQzXhiBnSIM6hx1dqlx3Y7vzKXQiEwJ0UIt+NCZ5ff/8xdcj+XdCSPso7z6kidnu9vMDn+dZ3avYJ2te19SNqp+uD1pPUxoyUEtJ+lxK5/jx47jrrrtQWloKg8GAoUOHYtmyZejsjJ3YpAKlpaXyz0qxLysrw9SRXyHD9Q/PlkDCqvz+Q/HfCe9Prly7FosNA/GoIQtLDenyIi6x7WI0Jnk5n4cY9VcLc+XJ3irX3QEmfpWfG2xlcpDfmcsQXTiV5+LzxNJOCWkB1vn/EffjMgD9BAAa8Vkq1/St5e+JZTLZLfdJkibC37hxI9555x3MmzcPw4YNw9dff427774bt99+O1auXBnyeSjC74pkr2w0GuWVuWazWV0yCHhKElsCnqfbSBacuAhJ1YO256zrqEQDm6D4/OiUdnrPJy7y4tGBRw3R/Jvx930ptnEZwID/9lmABcgN1H0nZ30nbHtimUx2y0lFqLqWNILvjxdeeAGvvvoqjh07FvIxJPhhIAmIUmy4jBAEuzvhjz4fOt7AQeEWcHBBgDTvEK0KH+VrAYXcPrSxAT3w6g/w/Uie+63r1dv1E0QPfo0JcFSJlTrtn3cVZF+Rlv7/GaaI+4dTuUPVPklFn0vp+OPChQvIywvQoo7oOZLlrn6C+Fpr8RiEdUfvxxDX6efjMUMGvq17WGHboIzQI8H3YiFO9jawCbChGJudv42gxl8H+Os1kDVPLMsc9JZPjT9EkR92HEgrA8CA1r+JFwBfUzZlSkcp2NLFIZzKnWB2y5TuSVqSVvCPHDmC1atX4yc/+UnQ/RwOB2w2m+pBhEnpbmC4UkAT989Gsm2Q/PjF52gjXggEpMGGYmxyrgmjtNOJLguzoBPLNutvBQ71A1rfhupikzVHfG59G/LCLtYmPoRzoi3DkRJxH0mkleWZ4TZHkQS9/lb/wk6ln0lL3P/lLl26FBzHBX188803qmPq6+sxa9Ys3HLLLbj77uDR1fLly5GdnS0/ioqKYvnr9G0k4Sh4GeoVqBLRyJ1HhwXpU7HUoMeC9KmKRuuxuvMQa/tXtp+L0Kvf6e2oxdqgGqu2WFGt42/8GnH1rusEcOZhr0ArRT7c5iiSoLe+61/YI+muRXcFCUHcc/hNTU1obg6ywhDAkCFDkJYmCozVasX06dMxefJkrFu3rtvGHg6HQ+UdY7PZUFRURDn8nvIND1mAeE9azfSM18o3AZFKPB0w+mnhGM2LFYMWF3EJ/w9UC3PAw4U02EPw9fHU4yvRWoBhJzwrbX1y+9Lkrfyde44PdaI1UJ6+J7n/QNAkcEzpk5O29fX1uPrqqzF+/Hj8+c9/hkYTfhs+mrSNEoGW+tffKkaqXLqiIUhsqnOixar20+hAHqJf3QPVOTm48B3dQxGIfrFnJa/in6pUwZN7r1egwcT99BPENFx3yFVYGv/tF7ubtA1nYpcmgWNKn5u0ra+vx/Tp02GxWLBy5Uo0NTWhoaEBDQ0N8R5aajLoLWC4q+vioPbPIfd5zfohRMvgH4ri1IU0JEIaaLFhoE8P3mhM9PquChYtnLc4n1NN9Hap8edzPHdMimNdUkpKgeS/r8zdu+rFnx1VosAezhcfgdIoytW7vmmbUPL0TY+L+zQ9HngfCeq5mxAkjeBv3rwZR44cwZYtWzB48GAMHDhQfhAJhDK/q7wo+BWOTsSjoscfUg9eI05gpm6Rj1d/NBAF3IV+8kTv8+2t2Oz8b9hQjC3O5yF3yBLOBfnsNO/36yvKstf+IHFFrnSu5hX+c+i59wb22Y/mIi4iYUiqlE40oJROnFDZAvjUoXMZAHMg+GrV3qfKdTc2O38LQTVBHe07Em8KSOz/C+xwPooK3Que1I/PP08uA7j0ovh9SpG16RlRvA/ney4WQRZvKS8WwRZt+eIvB09pmoShT+bwowEJfhyRBEJoVQgTB2TNFa0Emh4XK0665Pp1AFyI991AletubHKuRvg3xqEuAFPm/QWkowXTdL9BufZl7y6SjbJv/h0AGu9H1+9II86ztL4HoNO7iKvLsZ6LcaBJVRL3hKbP5fCJPoCUxzU9IwoLnwtAEPP+ufeKvWH9Tuw6EXBlai+TjhZoYYcOFxG6Y2aoYq/M+/PoQD62OJ9W5/j7zRTF3jAF4j9ft+iy2fgg/E0Ui86b70D2PHJUdc3dN6+A3JErUAon1Bw8pX4SGhJ8ovfxFX7DFK+IaYvR7Z8lnyvu129mLwzWyw7no+hAPjLQjIcNuZipewDpaIb6YsQQ2aSv70VBPN6b838JVe7F3lWzre+iynWP52JwF9SLuRhkYzVooDK1y5rjzd3z
eeLdlvS9Kyt1Il1cFcpxdFGIGyT4RPyQhP/iJlEkLm4SX/M5wY8TWryWAQHhot6gRdmIHfA6eSodPGfqFmGpQR+FSV/O51mDTZ3PYZX9K4/I/xjbnctgQzG2O5/yOdaTqumshhi5e6J3yXK5diLQeJ83tdb+uTd6l8RYugiEuzrXMMV7IQkk6LRSN25QDp+IP9Jko5SfbnnNm4/mMjw1/T65/UBGY8r3fT3mexmxQfsciHGVPw//SMzd/NX5C9CiDVo4xJy/7nXxO+uyAM5zIWi8T71NuZYilAVS/vL5yjkF30btoRxP9AiatA0ACX4C4k8AfCd4+TxP1ygpb8557JoDrer1qQTSTwAcexGPC4Ao/D9UjCl2aw/S0YI02HycPBXNWbTFHtdNhceQNBfCOsRnLt1b+eOPQBU70sQvnyfOx5Cg9xo0aUskD/4mBJV5fjlaF+Cd2OxOuH3edx4TV/uqUEbeseM6/Xw57cOpqo26W+gVzsVJPEcHcjxOnv+N59rbsK5jB17pOOyd9DVM8azCVeT1WYd44ZQM2fis4ELtz0tHWdPfy3MrROiQ4BOJjVS9I/v25AIFr4jCYnrGYyXsI9pcRtcKHr93AoJ4Xsn+WSb6fXrLtWuRBjsYdEjHORhxAukQF1h5fwbUIh/OxUjZtQsQoAeDFg1sHGysCJuca0RjtwvF4u5ZcyDPczDFhC+fJ14UDueLzp3+VuoGqtiRtkdix0z0CpTSIRIff4uMlO+p+rzqgIKX/Ju4BWzewgP6cZ40h85zrjCalIdIletuz6Kqro1TpPfa0R9O9EOsUj8cnNDzbrhYOrTsPKbpHgfAYYfzZ6jQ/Rbl6f+fYp2E9yjwuWLk7mum5s9o7fz/iN9lqJ4+RI+hHH4ASPD7GIEMwORVpwoCCr4OXT3qAxHbbl5VrrvxsfNFj5unv45bkUzySnSdQzDiBDqRhQ7kIR3NWGwYqM7pywZ4gGzsJk2uA+rvX1q45ToFuTJouEs9HJqwjQmUwydSA9mj38ftUarxz5rnef9VxUGcuJ3PEx9cOCmc2MZH5dq1+I7uIRhxAiP5t+UFXjpcxEj+HXjvZELN+atTPV7RF9NZGdwZxXFab6rskmbRwqHgFe/3JOX9pcndltfEuwE+T0wRSXn9rDmQq3988S3JpJr8XoUifCJ1UJZ/8lmi8ESzfJPPBYSLkFe1xgAp9TOY/xzfCDdCgB7hV//4loaK8OhU+AYxjMyoxXWXDvceJt818WITHKU/j7KHbneWysr0nO85iIigCJ8gfJGiftMz3jsDAAHFPuSFWx6RFGwIKPbyRHLPJoTLtWtxv6EM1+nn41GDESP5t8HB5enq5ZmE7vbixfn8LD4E2a6aA8Cjuq0Uz1U58eFxF9DyGqqcd3hX9jY93rVaJ5ToXZqEl1w8I+meRUQMRfhEaiNFnKxDrD9XTkzKE7++0bBG/JlLF+cEQnH7LHg1zG5gfhqhhIg4D7DKMw/Qk9p/73E6tMEJg+e1GzPTfo7yUb9T7+4b4QdaxJVIefxEGksPoEnbAJDgEyGjFANA3ehDSm1Ioq/Cd2JXB2jNnslNxTa5GiiQuEcu+gCwsv0cXMgEjw6k4SJc0IMD5xFuqUVlpJVADCNzeVxXomgV6SueySCmfaT1IqV0CKKnKOvNlakIAGKUL3giewXDmTjR6etYKbtbSjhFzyBtcQDvIF60jQ7HD0hrUb2coXsMRq4O3856G4sNZjxiyJNN38QFYNKFyTfmC5YW8lo6VLcwVJ0V72qqzrqx6vgPsKp1J6pOey5s4Xa5iscEboqllCjCJ4hQUdacX9wkbus3E2j9GwCnWHeec5d6H2lCmM8T5w7kNQMasUOVo0p8dh7zX0YKKNJFaQDfz3NOH4JaRyjvFHgAGlS5FmCTcw0kLx6AgYcTAnSQU1YBHDw9g5P/W5ABNLSpJ4JH5vI4ZWeoKORR3j/E3tN9JNqOBxThE0Q0UaYnLm7yivOgt4DhnWJkX7rbO3HZ/rlY2sjnes+htB8oWCOKPdziIiV/dgSS1YE8N9DpX+wBz6KxQLGboP654CWUa/8oT/iO5N/BUoMBjxqMmKl7MED0789rX9yqFntxn+oWATYnsMN6IcCYRKrOuvHK107xTiHFom2/xPguhyJ8gggFZfSpNHSTFiBJhJPHrr9V4fapMDgLBJehXgiVNQ9ofRtdxJgzdHUW5TMBV51PxytAFf1rLYCrTrUi+KT7/+GgcAtG8O/jlDAJNhQB4GDUATa/a9WUY2Hg4UAaLmJa1j9Qnu9G1ekT2OH6GSrM2Sjvr8ErXzthcwJGrh73F29M3Fx/bxHhXQ5F+AQRTZTRp7K80xffvHWwPPagtzxeQJ70jrRQTFroJP0sCbnGpLhj4MW2kMqFUVk/FEU783rI/7Sli1L+z8Xz59wl/g7SMdL8gbYYGHYCKHgV5Wnv4f6MsSjXrsV1+vl4zJCB6/S3eYbBgYOAikIeWk5SfOWdgNLcjoOAdHQgH9tbZwNnHsYOx12wubPw8UkBVWfdGJzJgYMLg7lPaDEWEPO7HIrwCSKeSBFdIEth34VKgP/+s0pPIamLWOu7Xq/7YCWSvj5Fsl2CmirXQuxwPYKKzM0oF5aiqvNmRfmnRCA7CAYeneDhhgvpAHjVzIIqwqdcfthQhE8QyYByAZg/h8nmFZ70UZa3WkiaB5BcLQ/ne0Rb0Ze2/XPxdeu7oqgHihx9F0JJY5LtkzWyPUV52nrcn16Kctd9gHAO5do/4Du6xR6nT1G6eXR6fhYf4oIwKdrXw4UMSLIjKOYHOvlBqHKLFs5VeFxc4AXPhchfxJ/KdwE9gASfIOKJb39fX0EO5D0v2RAL5zxe9h1qTyHfRuUSbZ90FUp/n8FnQ1pkJbdAlP2J5sgpofLcVizuNxIzdQ/AiBNIgx0AD86zvkDK+XsRMDJXnAMo5L8EBxe0aEOHG9jRIF40dti/DRuKscP+bXFS98QsVHV8B1WnT2DVASdWfekUSz/JgjlsKKVDEMlKy2tA40IAgjihqzH5ty7OX+qtHlK6Wgbyv+kuzaRElf7RoMr9AHZ0LsRg7QGc4q5Du7MDTqR73hcwU/9rlI9cLr6sv1Vsxq59FTs6FmBwJodTdoYMNKHRmYsRGSdwyjEANncWjNxJgM+BzZ0FADByp3B/epm6PaO/7yfRF35FCUrpEERfJ/de0cRMWyyu+PWNeJUTxlIUr3S1BLwXgsZF3qhf2tf0TNfm5r4pFOUEcMEalJsvxf1Z1+C6wU24/zIdvqV/FlL1UTpaUD6w2HusJ+1Ujmdw/2U6HLvQDpsTaHD2B4MWp5xDUaF9QWwcn7YWFdlfys3iB3Of4pX2b1DVkiWmtOpv7To+6XdrepzSPx4owieIvkCk0azvZG+gSdJIJ1JbXkOV9RB2OB9ARW4dyounBxzzqqoz6EAutLiIDF0/cdGWZq23lFP7Asp58YL2SnsNbChGOpqRBjsG8ztwSqhARdrLXo8f377IfXgSmLx0AkCCTxA+hGprHGifKKVOqk5sw46WElTkHlddGORafU0
r7k8bAMCNKtfd2O58Gh3IhncNg/g8M28nyo3fqH2QPD9X2YarP6OPpH1I8ANAgk8QUSbGZZRVZ93Y0SDW/p9srsHBtmKM0HyIU/g2bO4scAA0nBMuJlpPG7l6VKT9Hjscd6FC/z/eOQMAr+w/BhsrgpE7ifsvH6LukXBJc9JeACiHTxBE7xDjxULl/TW4/zIdyvtrcLBtKBi0OOj+ASrM2TDqgO8U8ZgxWI90vgPpaEFF7nHscP1MrPRx/Ux1rgrdanFOQLfa/4f5evr3MbTd70IQBBEEaX1ALzAiV3Tp5ABstwqYZvaas5X3zwKQBWAAcNaN7fUCOpGFD4+7ZCM3ZF4PtGg9q5Hh7bolXaykiqY+6udDKR2CIJIKKacPAEYdcP9l/ruISftJRg9Gz242Z/DjkhFK6RAE0SepKOSRzgPpGvHnYPsZdeJdgVEnvpa2BTuuL0MRPkEQKYtyQjhk3/4EhCJ8giCIbtheL/r2b6+PvJVkMkGCTxBEyuJm6mdVQ5Y+CAk+QRApi4ZTP+9o8HTqauibET8JPkEQKcu0QeIk7rRBohT29UldqsMnCCJlKe+vUU3W+r7ua/TNyxhBEATRBRJ8giCIFIEEnyAIIkVISsF3OBy4/PLLwXEc9u/fH+/hEARBJAVJKfiPPvoozGZzvIdBEASRVCSd4H/00Uf4+OOPsXLlyngPhSAIIqlIqrLMxsZG3H333diwYQMyMjLiPRyCIIioEmtvn6SJ8BljWLBgAe69915MmDAh5OMcDgdsNpvqQRAEkYjEeqVv3AV/6dKl4Dgu6OObb77B6tWr0draip///OdhnX/58uXIzs6WH0VFRTH6TQiCIHpGrFf6xt0euampCc3NzUH3GTJkCObMmYN//OMf4DhO3u52u6HRaHDbbbfhjTfe8Husw+GAw+GQX9tsNhQVFZE9MkEQfYY+18S8rq5OlY6xWq2YOXMm/va3v2HSpEkYPHhwSOchP3yCIPoaoepa0kzaWiwW1evMzEwAwNChQ0MWe4IgiFQm7jl8giAIondImgjfl5KSEiRJNoogCCIhoAifIAgiRSDBJwiCSBFI8AmCIFIEEnyCIIgUgQSfIAgiRSDBJwiCSBGStiwzUqRSTjJRIwiiryDpWXel6ikn+K2trQBAJmoEQfQ5WltbkZ2dHfD9pPHSiRaCIMBqtSIrK0tlxNYXkIzhTp48mbI+QfQd0HcApN53wBhDa2srzGYzeD5wpj7lInye5/u8947RaEyJP/Jg0HdA3wGQWt9BsMhegiZtCYIgUgQSfIIgiBSBBL8PodfrsWzZMuj1+ngPJW7Qd0DfAUDfQSBSbtKWIAgiVaEInyAIIkUgwScIgkgRSPAJgiBSBBJ8giCIFIEEv4/w8ssvo6SkBOnp6Zg0aRK++OKLeA+p1/j1r38NjuNUj+HDh8d7WDHlk08+wezZs2E2m8FxHDZs2KB6nzGGJ554AgMHDoTBYMA111yDmpqa+Aw2hnT3PSxYsKDL38asWbPiM9gEgAS/D/DOO+9gyZIlWLZsGfbt24exY8di5syZOHPmTLyH1muMGjUKp0+flh+fffZZvIcUUy5evIixY8fi5Zdf9vv+888/j5deegmvvfYadu3ahX79+mHmzJno6Ojo5ZHGlu6+BwCYNWuW6m9j/fr1vTjCBIMRSc8VV1zBFi5cKL92u93MbDaz5cuXx3FUvceyZcvY2LFj4z2MuAGAvf/++/JrQRBYYWEhe+GFF+Rt58+fZ3q9nq1fvz4OI+wdfL8HxhibP38+u/766+MynkSEIvwkp7OzE3v37sU111wjb+N5Htdccw127NgRx5H1LjU1NTCbzRgyZAhuu+021NXVxXtIcaO2thYNDQ2qv4ns7GxMmjQppf4mJLZt24YBAwbg0ksvxX333Yfm5uZ4DylukOAnOWfPnoXb7UZBQYFqe0FBARoaGuI0qt5l0qRJWLduHTZu3IhXX30VtbW1uPLKK2Ur7FRD+v+eyn8TErNmzcKbb76JLVu24LnnnsP27dvx3e9+F263O95Diwsp55ZJ9D2++93vyj+PGTMGkyZNQnFxMd59913cddddcRwZEW9++MMfyj+PHj0aY8aMwdChQ7Ft2zbMmDEjjiOLDxThJzn9+/eHRqNBY2OjantjYyMKCwvjNKr4kpOTg0suuQRHjhyJ91DigvT/nf4mujJkyBD0798/Zf82SPCTnLS0NIwfPx5btmyRtwmCgC1btqCioiKOI4sfdrsdR48excCBA+M9lLhQWlqKwsJC1d+EzWbDrl27UvZvQuLUqVNobm5O2b8NSun0AZYsWYL58+djwoQJuOKKK7Bq1SpcvHgRd955Z7yH1is88sgjmD17NoqLi2G1WrFs2TJoNBrMmzcv3kOLGXa7XRWl1tbWYv/+/cjLy4PFYsHixYvxm9/8BmVlZSgtLcWvfvUrmM1m3HDDDfEbdAwI9j3k5eXhySefxE033YTCwkIcPXoUjz76KIYNG4aZM2fGcdRxJN5lQkR0WL16NbNYLCwtLY1dccUVbOfOnfEeUq8xd+5cNnDgQJaWlsYGDRrE5s6dy44cORLvYcWUrVu3MgBdHvPnz2eMiaWZv/rVr1hBQQHT6/VsxowZ7NChQ/EddAwI9j20tbWx73znO8xkMjGdTseKi4vZ3XffzRoaGuI97LhB9sgEQRApAuXwCYIgUgQSfIIgiBSBBJ8gCCJFIMEnCIJIEUjwCYIgUgQSfIIgiBSBBJ8gCCJFIMEnCIJIEUjwCYIgUgQSfIIgiBSBBJ8gosj69ethMBhw+vRpedudd96JMWPG4MKFC3EcGUEA5KVDEFGEMYbLL78cV111FVavXo1ly5bhT3/6E3bu3IlBgwbFe3hEikP2yAQRRTiOwzPPPIObb74ZhYWFWL16NT799FMSeyIhoAifIGLAuHHj8J///Acff/wxpk2bFu/hEAQAyuETRNTZuHEjvvnmG7/N5QkinlCETxBRZN++fZg+fTp+//vfY926dTAajfjrX/8a72ERBADK4RNE1Dh+/DiuvfZa/OIXv8C8efMwZMgQVFRUYN++fRg3bly8h0cQFOETRDQ4d+4cpkyZgunTp+O1116Tt1977bVwu93YuHFjHEdHECIk+ARBECkCTdoSBEGkCCT4BEEQKQIJPkEQRIpAgk8QBJEikOATBEGkCCT4BEEQKQIJPkEQRIpAgk8QBJEikOATBEGkCCT4BEEQKQIJPkEQRIpAgk8QBJEi/P+6k8kC0lfzAwAAAABJRU5ErkJggg==", 152 | "text/plain": [ 153 | "
" 154 | ] 155 | }, 156 | "metadata": {}, 157 | "output_type": "display_data" 158 | } 159 | ], 160 | "source": [ 161 | "engressor.plot(x_te, y_te, x_tr, y_tr)" 162 | ] 163 | }, 164 | { 165 | "attachments": {}, 166 | "cell_type": "markdown", 167 | "metadata": {}, 168 | "source": [ 169 | "## Baseline: $L_2$ regression" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": 9, 175 | "metadata": {}, 176 | "outputs": [], 177 | "source": [ 178 | "from engression.models import ResMLP" 179 | ] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "execution_count": 10, 184 | "metadata": {}, 185 | "outputs": [ 186 | { 187 | "data": { 188 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAWgAAAFfCAYAAABjmlbAAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABNvklEQVR4nO2deXhU5dn/v2eWTBKyhwnJYDYghYAQYliNCAotuKBYt5dSBUvFUqhQSyv+qiLVigvvWxGtWvtW1FLetqJI9RJF2SSyQ1AIhIDBIEkghCTDZJnMzHl+f5yck3Nmy0wyy8nk/lzXXGHOnJnzZEi+c+f73AvHGGMgCIIgVIcm3AsgCIIg3EMCTRAEoVJIoAmCIFQKCTRBEIRKIYEmCIJQKSTQBEEQKoUEmiAIQqXowr0Ab/A8j+rqasTHx4PjuHAvhyAIoscwxnDlyhWYTCZoNN5jZFULdHV1NTIzM8O9DIIgiIBz7tw5XHXVVV7PUbVAx8fHAxC+kYSEhDCvhiAIoueYzWZkZmZK+uYNVQu0aGskJCSQQBMEEVH4YtvSJiFBEIRKIYEmCIJQKSTQBEEQKkXVHjRBqBGHwwGbzRbuZRAqRa/XQ6vVBuS1SKAJwkcYY6itrUVjY2O4l0KonKSkJKSnp/e4foMEmiB8RBTntLQ0xMbGUvEU4QJjDC0tLbh48SIAICMjo0evRwJNED7gcDgkcU5NTQ33cggVExMTAwC4ePEi0tLSemR30CYhQfiA6DnHxsaGeSVEb0D8OenpXgUJNEH4AdkahC8E6ueEBJogCEKlkED7SXV1Nfbu3Yvq6upwL4UgwkJOTg5eeukln8/fsWMHOI4LS/bLunXrkJSUFPLrBgoSaD+pqqqC1WpFVVVVuJdCED4xZcoULF26NGCvd+DAASxYsMDn86+99lrU1NQgMTExYGsIJv5+AAUTEmg/ycrKgsFgQFZWVriXQhABgzEGu93u07lGo9GvzdKoqKiA5AT3RUig/cRkMmHChAkwmUzhXgpBdMm8efOwc+dOrFmzBhzHgeM4nD17VrIdPvnkExQVFcFgMGD37t04c+YMbr/9dgwYMABxcXEYO3YsPv/8c8VrOkeYHMfhr3/9K+644w7ExsYiLy8Pmzdvlh53tjhE2+HTTz9Ffn4+4uLiMGPGDNTU1EjPsdvtePjhh5GUlITU1FQ8+uijmDt3LmbNmuX1+123bh2ysrIQGxuLO+64A/X19YrHu/r+pkyZgu+++w6//vWvpfcLAOrr6zF79mwMHDgQsbGxGDlyJDZs2ODPf0W3IIEmiAhmzZo1mDhxIh588EHU1NSgpqZGMQRj+fLleO6553DixAmMGjUKFosFN998M7744gscOXIEM2bMwMyZM7u09FauXIl77rkHX3/9NW6++WbMmTMHly9f9nh+S0sLVq9ejXfffRe7du1CVVUVli1bJj3+/PPPY/369XjrrbdQUlICs9mMTZs2eV3Dvn37MH/+fCxevBilpaW44YYb8MwzzyjO6er7e//993HVVVfhD3/4g/R+AUBbWxuKiorw8ccf49ixY1iwYAHuu+8+7N+/3+uaegxTMU1NTQwAa2pqCvdSiD5Oa2srKysrY62treFeit9MnjyZLVmyRHFs+/btDADbtGlTl88fMWIEW7t2rXQ/Ozub/elPf5LuA2CPP/64dN9isTAA7JNPPlFcq6GhgTHG2FtvvcUAsNOnT0vPefXVV9mAAQOk+wMGDGAvvviidN9ut7OsrCx2++23e1zn7Nmz2c0336w4du+997LExMQefX+euOWWW9hvfvMbt495+3nxR9cogiaIMKCWbKAxY8Yo7lssFixbtgz5+flISkpCXFwcTpw40WUEPWrUKOnf/fr1Q0JCglTu7I7Y2FgMHjxYup+RkSGd39TUhAsXLmDcuHHS41qtFkVFRV7XcOLECYwfP15xbOLEiQH5/hwOB55++mmMHDkSKSkpiIuLw6effhr0ZAEq9SaIMCDPBgrnfka/fv0U95ctW4atW7di9erVGDJkCGJiYnDXXXehvb3d6+vo9XrFfY7jwPO8X+czxvxcvf909/t78cUXsWbNGrz00ksYOXIk+vXrh6VLl3b5vJ5CAk0QYSArKwtVVVUhyQaKioqCw+Hw6dySkhLMmzcPd9xxBwAh4jx79mwQV+dKYmIiBgwYgAMHDuD6668HIESwhw8fxujRoz0+Lz8/H/v27VMc27t3r+K+L9+fu/erpKQEt99+O376058CAHiex6lTpzB8+PDufIs+QxYHQYSBUGYD5eTkYN++fTh79iwuXbrkNbLNy8vD+++/j9LSUhw9ehQ/+clPvJ4fLH71q19h1apV+PDDD1FeXo4lS5agoaHBa6reww8/jC1btmD16tWoqKjAK6+8gi1btijO8eX7y8nJwa5du3D+/HlcunRJet7WrVvx1Vdf4cSJE3jooYdw4cKFwH/jTvRZgVaLB0gQwWbZsmXQarUYPnw4jEajV9/0f/7nf5CcnIxrr70WM2fOxPTp03HNNdeEcLUCjz76KGbPno37778fEydORFxcHKZPn47o6GiPz5kwYQLefPNNrFmzBgUFBfjss8/w+OOPK87x5fv7wx/+gLNnz2Lw4MEwGo0AgMcffxzXXHMNpk+fjilTpiA9Pb3LlL9AwLFQGD/dxGw2IzExEU1NTQGf6r13715YrVYYDAZMmDAhoK/dm2isWI+G468hecRCJOXNCfdyVEtbWxsqKyuRm5vrVSSI4MDzPPLz83HPPffg6aefDvdyusTbz4s/uhb0CPr8+fP46U9/itTUVMTExGDkyJE4ePBgsC/bJVQRKNBw/DXYW86j4fhr4V4KQUh89913ePPNN3Hq1Cl88803WLhwISorK
/GTn/wk3EsLKUHdJGxoaEBxcTFuuOEGfPLJJzAajaioqEBycnIwL+sTJpOJqgEBJI9YKEXQBKEWNBoN1q1bh2XLloExhquvvhqff/458vPzw720kBJUgX7++eeRmZmJt956SzqWm5sbzEsSfpKUN4esDUJ1ZGZmoqSkJNzLCDtBtTg2b96MMWPG4O6770ZaWhoKCwvx5ptvejzfarXCbDYrbgRBEH2VoAr0t99+i9deew15eXn49NNPsXDhQjz88MN4++233Z6/atUqJCYmSjd5zwCCIIi+RlCzOKKiojBmzBh89dVX0rGHH34YBw4cwJ49e1zOt1qtsFqt0n2z2YzMzMygZHEQhD9QFgfhD70iiyMjI8Ol0iY/P99jHqbBYEBCQoLiRhAE0VcJqkAXFxejvLxccezUqVPIzs4O5mUJgiAigqAK9K9//Wvs3bsXzz77LE6fPo1//OMf+Mtf/oJFixYF87IEQRARQVAFeuzYsfjggw+wYcMGXH311Xj66afx0ksvYc4cSusiiL5KoGckRjJBryS89dZb8c0336CtrQ0nTpzAgw8+GOxLEgQhIxiCOG/evJD0ogDCOxU83PTZZkkEQRBqhwSaICIYT0NjAeDYsWO46aabEBcXhwEDBuC+++6T2msCwHvvvYeRI0ciJiYGqampmDZtGpqbm/HUU0/h7bffxocffii95o4dO9xev7m5Gffffz/i4uKQkZGB//7v/3Y5591338WYMWMQHx+P9PR0/OQnP5Gmq5w9exY33HADACA5ORkcx2HevHkAgC1btuC6666TBsveeuutOHPmTODePBVAAk0QEYynobGNjY248cYbUVhYiIMHD2LLli24cOEC7rnnHgBATU0NZs+ejZ/97Gc4ceIEduzYgR//+MdgjGHZsmW45557pEncNTU1uPbaa91e/7e//S127tyJDz/8EJ999hl27NiBw4cPK86x2Wx4+umncfToUWzatAlnz56VRDgzMxMbN24EAJSXl6OmpgZr1qwBIIj/I488goMHD+KLL76ARqPBHXfcEZb+1cGCJqoQRASTmJiIqKgoxMbGIj09XTr+yiuvoLCwEM8++6x07G9/+xsyMzNx6tQpWCwW2O12/PjHP5bSYkeOHCmdGxMTA6vVqnhNZywWC/73f/8Xf//73zF16lQAwNtvv42rrrpKcd7PfvYz6d+DBg3Cyy+/jLFjx8JisSAuLg4pKSkAgLS0NCQlJUnn3nnnnYrX+dvf/gaj0YiysjJcffXVvr5FqoYiaIIIA40V61G56To0VqwPy/WPHj2K7du3Iy4uTroNGzYMAHDmzBkUFBRg6tSpGDlyJO6++268+eabaGho8OsaZ86cQXt7u2KQa0pKCoYOHao479ChQ5g5cyaysrIQHx+PyZMnA0CXA1krKiowe/ZsDBo0CAkJCcjJyfHpeb0JEmgZkTJlJdy//ETXhLsPt8ViwcyZM1FaWqq4VVRU4Prrr4dWq8XWrVvxySefYPjw4Vi7di2GDh2KysrKgK6jubkZ06dPR0JCAtavX48DBw7ggw8+AIAuB7LOnDkTly9fxptvvol9+/ZJ8wiDPcg1lJBAy5BPWu7NhPOXnz4cfCN5xELoYgeGpA+3uyGo11xzDY4fP46cnBwMGTJEcRMnfXMch+LiYqxcuRJHjhxBVFSUJJ6+DKIdPHgw9Hq9YpBrQ0MDTp06Jd0/efIk6uvr8dxzz2HSpEkYNmyYtEEoXz8AxfXq6+tRXl6Oxx9/HFOnTkV+fr7fEX5vgARaRlZWFnQ6HRwOR6+OokP5y+9MuCPD3kJS3hzkztodkl7c7obGLlq0CJcvX8bs2bNx4MABnDlzBp9++ikeeOABOBwO7Nu3D88++ywOHjyIqqoqvP/++6irq5Ma5ufk5ODrr79GeXk5Ll26BJvN5nLduLg4zJ8/H7/97W+xbds2HDt2DPPmzYNG0yk7WVlZiIqKwtq1a/Htt99i8+bNLiOtsrOzwXEcPvroI9TV1cFisSA5ORmpqan4y1/+gtOnT2Pbtm145JFHgvtGhgOmYpqamhgA1tTUFLJr7tmzh+3YsYPt2bMnZNeMJBpO/Z19+0Exazj193AvJaC0traysrIy1traGu6l+E15eTmbMGECi4mJYQBYZWUlY4yxU6dOsTvuuIMlJSWxmJgYNmzYMLZ06VLG8zwrKytj06dPZ0ajkRkMBvaDH/yArV27VnrNixcvsh/+8IcsLi6OAWDbt293e+0rV66wn/70pyw2NpYNGDCAvfDCC2zy5MlsyZIl0jn/+Mc/WE5ODjMYDGzixIls8+bNDAA7cuSIdM4f/vAHlp6ezjiOY3PnzmWMMbZ161aWn5/PDAYDGzVqFNuxYwcDwD744IPAvoHdwNvPiz+61meHxnqiuroaVVVVyMrKopFYhAS1GyX8IVDtRinNzgmaVUgQhFogD5ogCEKlkEATBEGoFBJogiAIlUICTRAEoVJIoAmCIFQKCTRBEIRKIYEmCIJQKSTQBEEQKoUEmiCIgJGTk4OXXnpJus9xHDZt2tSj1wzEa/RWqJKQIIigUVNTg+TkZJ/Ofeqpp7Bp0yaUlpZ2+zUiDRJoH6D+HERfor29XWrx2VO8TVwJ5Wv0Vsji8IFI6RNN9E2mTJmCxYsXY/HixUhMTET//v3xxBNPQOyTlpOTg6effhr3338/EhISsGDBAgDA7t27MWnSJMTExCAzMxMPP/wwmpubpde9ePEiZs6ciZiYGOTm5mL9etce4M72xPfff4/Zs2cjJSUF/fr1w5gxY7Bv3z6sW7cOK1euxNGjR6VBtOvWrXP7Gt988w1uvPFGaZjtggULYLFYpMfnzZuHWbNmYfXq1cjIyEBqaioWLVqkaIn65z//GXl5eYiOjsaAAQNw1113BeKtDjgUQftAVlaWFEH7S3V1tTSFIjc3lyJwIiy8/fbbmD9/Pvbv34+DBw9iwYIFyMrKwoMPPggAWL16NZ588kmsWLECgDCuasaMGXjmmWfwt7/9DXV1dZLIv/XWWwAEIayursb27duh1+vx8MMPuzTbl2OxWDB58mQMHDgQmzdvRnp6Og4fPgye53Hvvffi2LFj2LJlCz7//HMAwjxFZ8QJLBMnTsSBAwdw8eJF/PznP8fixYslQQeA7du3IyMjA9u3b8fp06dx7733YvTo0XjwwQdx8OBBPPzww3j33Xdx7bXX4vLly/jyyy8D9VYHloA3Qg0ggegHff78ebZnzx52/vz5AK7M92vv2LFDulGP6d5Lb+4HPXnyZJafn894npeOPfrooyw/P58xxlh2djabNWuW4jnz589nCxYsUBz78ssvmUajYa2tray8vJwBYPv375ceP3HiBAPA/vSnP0nHIOvP/MYbb7D4+HhWX1/vdp0rVqxgBQUFLsflr/GXv/yFJScnM4vFIj3+8ccfM41Gw2praxljjM2dO5dlZ2czu90unXP33Xeze++9lzHG2MaNG1lCQgIzm81u1xEIAtUPOmQWx3PPPQeO47B06dJQXRLV1dWoqKgImz0hv6ZOp+tWBE5EJkcu
[... base64-encoded PNG data omitted: the figure produced by the cell below, a scatter plot overlaying the training data, the test data, and the fitted network's predictions ...]",
 189 |      "text/plain": [
 190 |       "<Figure size 640x480 with 1 Axes>"
" 191 | ] 192 | }, 193 | "metadata": {}, 194 | "output_type": "display_data" 195 | } 196 | ], 197 | "source": [ 198 | "# Build a model with the same architecture\n", 199 | "model = ResMLP(num_layer=6, hidden_dim=100) # NN with the same architecture\n", 200 | "opt = torch.optim.Adam(model.parameters(), lr=0.001) # same optimizer\n", 201 | "\n", 202 | "# L2 regression training\n", 203 | "model.train()\n", 204 | "for i in range(2000):\n", 205 | " model.zero_grad()\n", 206 | " y_pred = model(x_tr)\n", 207 | " loss = (y_pred - y_tr).pow(2).mean()\n", 208 | " loss.backward()\n", 209 | " opt.step()\n", 210 | "\n", 211 | "# Evaluation\n", 212 | "model.eval()\n", 213 | "y_pred = model(x_full_normal).detach()\n", 214 | "plt.scatter(x_tr.cpu(), y_tr.cpu(), label=\"training data\", s=1, color=\"silver\")\n", 215 | "plt.scatter(x_te.cpu(), y_te.cpu(), label=\"test data\", s=1, color=\"goldenrod\")\n", 216 | "plt.scatter(x_full_normal.cpu(), y_pred.cpu(), label=\"predictions\", s=1, color=\"lightskyblue\")\n", 217 | "plt.legend(); plt.show()" 218 | ] 219 | } 220 | ], 221 | "metadata": { 222 | "kernelspec": { 223 | "display_name": "Python 3", 224 | "language": "python", 225 | "name": "python3" 226 | }, 227 | "language_info": { 228 | "codemirror_mode": { 229 | "name": "ipython", 230 | "version": 3 231 | }, 232 | "file_extension": ".py", 233 | "mimetype": "text/x-python", 234 | "name": "python", 235 | "nbconvert_exporter": "python", 236 | "pygments_lexer": "ipython3", 237 | "version": "3.11.2" 238 | }, 239 | "orig_nbformat": 4 240 | }, 241 | "nbformat": 4, 242 | "nbformat_minor": 2 243 | } 244 | --------------------------------------------------------------------------------