├── .DS_Store ├── .Rbuildignore ├── .github ├── .gitignore └── workflows │ └── pkgdown.yaml ├── .gitignore ├── .travis.yml ├── CRAN-RELEASE ├── CRAN-SUBMISSION ├── DESCRIPTION ├── NAMESPACE ├── NEWS.md ├── R ├── .DS_Store ├── AUPEC.R ├── AUPECcv.R ├── GATE.R ├── GATEcv.R ├── PAPD.R ├── PAPDcv.R ├── PAPE.R ├── PAPEcv.R ├── PAV.R ├── PAVcv.R ├── consist.test.R ├── consistcv.test.R ├── data.R ├── het.test.R ├── hetcv.test.R ├── itr_helpers.R ├── itr_plot.R ├── itr_qoi.R ├── itr_run_bagging.R ├── itr_run_bart.R ├── itr_run_bartmachine.R ├── itr_run_boost.R ├── itr_run_caret.r ├── itr_run_cart.R ├── itr_run_causal-forest.R ├── itr_run_lasso.R ├── itr_run_random-forest.R ├── itr_run_superlearner.R ├── itr_run_svm.R ├── itr_run_user.R ├── itr_summary.R └── main.r ├── README.Rmd ├── README.md ├── _pkgdown.yml ├── cran-comments.md ├── data └── star.rda ├── docs ├── 404.html ├── articles │ ├── cv_multiple_alg.html │ ├── cv_single_alg.html │ ├── index.html │ ├── install.html │ ├── paper_alg1.html │ ├── sample_split.html │ ├── sample_split_caret.html │ ├── user_itr.html │ └── user_itr_algs.html ├── authors.html ├── deps │ ├── bootstrap-5.1.3 │ │ ├── bootstrap.bundle.min.js │ │ ├── bootstrap.bundle.min.js.map │ │ ├── bootstrap.min.css │ │ ├── font.css │ │ └── fonts │ │ │ ├── 1Ptxg8zYS_SKggPN4iEgvnHyvveLxVs9pbCIPrc.woff │ │ │ ├── 1Ptxg8zYS_SKggPN4iEgvnHyvveLxVvaorCIPrc.woff │ │ │ ├── 4iCs6KVjbNBYlgo6ew.woff │ │ │ ├── 4iCs6KVjbNBYlgoKfw7w.woff │ │ │ ├── 4iCv6KVjbNBYlgoCxCvTtA.woff │ │ │ ├── 4iCv6KVjbNBYlgoCxCvjsGyL.woff │ │ │ ├── 6xK1dSBYKcSV-LCoeQqfX1RYOo3qPZ7nsDQ.woff │ │ │ ├── 6xK1dSBYKcSV-LCoeQqfX1RYOo3qPa7j.woff │ │ │ ├── 6xK3dSBYKcSV-LCoeQqfX1RYOo3aPA.woff │ │ │ ├── 6xK3dSBYKcSV-LCoeQqfX1RYOo3qOK7j.woff │ │ │ ├── 6xKydSBYKcSV-LCoeQqfX1RYOo3i54rAkw.woff │ │ │ ├── 6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vAkw.woff │ │ │ ├── 6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vwlxdo.woff │ │ │ ├── 6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zAkw.woff │ │ │ ├── 6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zwlxdo.woff │ │ │ ├── 
CSR54z1Qlv-GDxkbKVQ_dFsvWNRevw.woff │ │ │ ├── CSR54z1Qlv-GDxkbKVQ_dFsvaNA.woff │ │ │ ├── CSR64z1Qlv-GDxkbKVQ_TOQ.woff │ │ │ ├── CSR64z1Qlv-GDxkbKVQ_fOAKSw.woff │ │ │ ├── JTURjIg1_i6t8kCHKm45_ZpC7g0.woff │ │ │ ├── JTURjIg1_i6t8kCHKm45_dJE7g0.woff │ │ │ ├── JTUSjIg1_i6t8kCHKm45xW0.woff │ │ │ ├── KFOlCnqEu92Fr1MmEU9fBBc-.woff │ │ │ ├── KFOlCnqEu92Fr1MmEU9vAA.woff │ │ │ ├── KFOlCnqEu92Fr1MmSU5fBBc-.woff │ │ │ ├── KFOlCnqEu92Fr1MmSU5vAA.woff │ │ │ ├── KFOlCnqEu92Fr1MmWUlfBBc-.woff │ │ │ ├── KFOlCnqEu92Fr1MmWUlvAA.woff │ │ │ ├── KFOmCnqEu92Fr1Me5g.woff │ │ │ ├── KFOmCnqEu92Fr1Mu4mxM.woff │ │ │ ├── QGYpz_kZZAGCONcK2A4bGOj8mNhL.woff │ │ │ ├── S6u8w4BMUTPHjxsAXC-s.woff │ │ │ ├── S6u8w4BMUTPHjxswWA.woff │ │ │ ├── S6u9w4BMUTPHh6UVSwiPHw.woff │ │ │ ├── S6u9w4BMUTPHh6UVeww.woff │ │ │ ├── S6u9w4BMUTPHh7USSwiPHw.woff │ │ │ ├── S6u9w4BMUTPHh7USeww.woff │ │ │ ├── S6uyw4BMUTPHjx4wWA.woff │ │ │ ├── S6uyw4BMUTPHvxo.woff │ │ │ ├── UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuFuYMZs.woff │ │ │ ├── UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuI6fMZs.woff │ │ │ ├── UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuLyfMZs.woff │ │ │ ├── XRXV3I6Li01BKof4MQ.woff │ │ │ ├── XRXW3I6Li01BKofA6sKkZQ.woff │ │ │ ├── XRXW3I6Li01BKofAjsOkZQ.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkWVAexg.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkaVQ.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkWVAexg.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkaVQ.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjWVAexg.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjaVQ.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1x4gaVQ.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1y4k.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0B4gaVQ.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0C4k.woff │ │ │ ├── 
memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0B4gaVQ.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0C4k.woff │ │ │ ├── pe03MImSLYBIv1o4X1M8cc9iB_5p.woff │ │ │ ├── pe0qMImSLYBIv1o4X1M8cfe5.woff │ │ │ └── q5uGsou0JOdh94bfvQlr.woff │ ├── bootstrap-5.2.2 │ │ ├── bootstrap.bundle.min.js │ │ ├── bootstrap.bundle.min.js.map │ │ ├── bootstrap.min.css │ │ ├── font.css │ │ └── fonts │ │ │ ├── 1Ptxg8zYS_SKggPN4iEgvnHyvveLxVs9pbCIPrc.woff │ │ │ ├── 1Ptxg8zYS_SKggPN4iEgvnHyvveLxVvaorCIPrc.woff │ │ │ ├── 1adeadb2fe618c5ed46221f15e12b9c8.woff │ │ │ ├── 4iCs6KVjbNBYlgo6ew.woff │ │ │ ├── 4iCs6KVjbNBYlgoKfw7w.woff │ │ │ ├── 4iCv6KVjbNBYlgoCxCvTtA.woff │ │ │ ├── 4iCv6KVjbNBYlgoCxCvjsGyL.woff │ │ │ ├── 6xK1dSBYKcSV-LCoeQqfX1RYOo3qPZ7nsDQ.woff │ │ │ ├── 6xK1dSBYKcSV-LCoeQqfX1RYOo3qPa7j.woff │ │ │ ├── 6xK3dSBYKcSV-LCoeQqfX1RYOo3aPA.woff │ │ │ ├── 6xK3dSBYKcSV-LCoeQqfX1RYOo3qOK7j.woff │ │ │ ├── 6xKydSBYKcSV-LCoeQqfX1RYOo3i54rAkw.woff │ │ │ ├── 6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vAkw.woff │ │ │ ├── 6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vwlxdo.woff │ │ │ ├── 6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zAkw.woff │ │ │ ├── 6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zwlxdo.woff │ │ │ ├── CSR54z1Qlv-GDxkbKVQ_dFsvWNRevw.woff │ │ │ ├── CSR54z1Qlv-GDxkbKVQ_dFsvaNA.woff │ │ │ ├── CSR64z1Qlv-GDxkbKVQ_TOQ.woff │ │ │ ├── CSR64z1Qlv-GDxkbKVQ_fOAKSw.woff │ │ │ ├── HI_diYsKILxRpg3hIP6sJ7fM7PqPMcMnZFqUwX28DBKXhM0.woff │ │ │ ├── HI_diYsKILxRpg3hIP6sJ7fM7PqPMcMnZFqUwX28DMyQhM0.woff │ │ │ ├── HI_jiYsKILxRpg3hIP6sJ7fM7PqlOPHYvDP_W9O7GQTTbI1rSg.woff │ │ │ ├── HI_jiYsKILxRpg3hIP6sJ7fM7PqlOPHYvDP_W9O7GQTTsoprSg.woff │ │ │ ├── JTUHjIg1_i6t8kCHKm4532VJOt5-QNFgpCtZ6Ew9.woff │ │ │ ├── JTUHjIg1_i6t8kCHKm4532VJOt5-QNFgpCtr6Ew9.woff │ │ │ ├── JTUHjIg1_i6t8kCHKm4532VJOt5-QNFgpCuM70w9.woff │ │ │ ├── KFOlCnqEu92Fr1MmEU9fBBc-.woff │ │ │ ├── KFOlCnqEu92Fr1MmEU9vAA.woff │ │ │ ├── KFOlCnqEu92Fr1MmSU5fBBc-.woff │ │ │ ├── KFOlCnqEu92Fr1MmSU5vAA.woff │ │ │ ├── KFOlCnqEu92Fr1MmWUlfBBc-.woff │ │ │ ├── KFOlCnqEu92Fr1MmWUlvAA.woff │ │ │ ├── 
KFOmCnqEu92Fr1Me5g.woff │ │ │ ├── KFOmCnqEu92Fr1Mu4mxM.woff │ │ │ ├── QGYpz_kZZAGCONcK2A4bGOj8mNhL.woff │ │ │ ├── S6u8w4BMUTPHjxsAXC-s.woff │ │ │ ├── S6u8w4BMUTPHjxswWA.woff │ │ │ ├── S6u9w4BMUTPHh6UVSwiPHw.woff │ │ │ ├── S6u9w4BMUTPHh6UVeww.woff │ │ │ ├── S6u9w4BMUTPHh7USSwiPHw.woff │ │ │ ├── S6u9w4BMUTPHh7USeww.woff │ │ │ ├── S6uyw4BMUTPHjx4wWA.woff │ │ │ ├── S6uyw4BMUTPHvxo.woff │ │ │ ├── UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuFuYMZs.woff │ │ │ ├── UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuI6fMZs.woff │ │ │ ├── UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuLyfMZs.woff │ │ │ ├── XRXI3I6Li01BKofiOc5wtlZ2di8HDFwmRTA.woff │ │ │ ├── XRXI3I6Li01BKofiOc5wtlZ2di8HDGUmRTA.woff │ │ │ ├── XRXI3I6Li01BKofiOc5wtlZ2di8HDLshRTA.woff │ │ │ ├── a98f7a7574819ba83bec6279a2cecd95.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk0ZjaVQ.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkWVAexg.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkaVQ.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkWVAexg.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkaVQ.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk_RkaVQ.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkxhjaVQ.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjWVAexg.woff │ │ │ ├── memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjaVQ.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1x4gaVQ.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1y4k.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsgH1y4k.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgshZ1y4k.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0B4gaVQ.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0C4k.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0B4gaVQ.woff │ │ │ ├── 
memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0C4k.woff │ │ │ ├── memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjr0C4k.woff │ │ │ └── q5uGsou0JOdh94bfvQlr.woff │ ├── data-deps.txt │ └── jquery-3.6.0 │ │ ├── jquery-3.6.0.js │ │ ├── jquery-3.6.0.min.js │ │ └── jquery-3.6.0.min.map ├── index.html ├── link.svg ├── news │ └── index.html ├── pkgdown.js ├── pkgdown.yml ├── reference │ ├── AUPEC.html │ ├── AUPECcv.html │ ├── GATE.html │ ├── GATEcv.html │ ├── PAPD.html │ ├── PAPDcv.html │ ├── PAPE.html │ ├── PAPEcv.html │ ├── PAV.html │ ├── PAVcv.html │ ├── Rplot001.png │ ├── compute_qoi.html │ ├── compute_qoi_user.html │ ├── consist.test.html │ ├── consistcv.test.html │ ├── create_ml_args.html │ ├── create_ml_args_bart.html │ ├── create_ml_args_bartc.html │ ├── create_ml_args_causalforest.html │ ├── create_ml_args_lasso.html │ ├── create_ml_args_superLearner.html │ ├── create_ml_args_svm.html │ ├── create_ml_args_svm_cls.html │ ├── create_ml_arguments.html │ ├── estimate_itr.html │ ├── evaluate_itr.html │ ├── figures │ │ ├── README-caret_model-1.png │ │ ├── README-caret_model-2.png │ │ ├── README-compare_itr_aupec-1.png │ │ ├── README-compare_itr_gate-1.png │ │ ├── README-compare_itr_model_summary-1.png │ │ ├── README-cv_estimate-1.png │ │ ├── README-cv_plot-1.png │ │ ├── README-est_extract-1.png │ │ ├── README-manual.png │ │ ├── README-multiple_plot-1.png │ │ ├── README-plot-1.png │ │ ├── README-sl_plot-1.png │ │ ├── README-sp_plot-1.png │ │ ├── README-sv_plot-1.png │ │ ├── README-user_itr_aupec-1.png │ │ ├── README-user_itr_gate-1.png │ │ ├── README-workflow.png │ │ ├── gate.png │ │ ├── plot_5folds.png │ │ └── rf.png │ ├── fit_itr.html │ ├── het.test.html │ ├── hetcv.test.html │ ├── index.html │ ├── itr_single_outcome.html │ ├── plot.itr.html │ ├── plot_estimate.html │ ├── print.summary.itr.html │ ├── print.summary.test_itr.html │ ├── star.html │ ├── summary.itr.html │ ├── summary.test_itr.html │ └── test_itr.html ├── search.json └── sitemap.xml ├── 
evalITR.Rproj ├── inst └── WORDLIST ├── man ├── .DS_Store ├── AUPEC.Rd ├── AUPECcv.Rd ├── GATE.Rd ├── GATEcv.Rd ├── PAPD.Rd ├── PAPDcv.Rd ├── PAPE.Rd ├── PAPEcv.Rd ├── PAV.Rd ├── PAVcv.Rd ├── compute_qoi.Rd ├── compute_qoi_user.Rd ├── consist.test.Rd ├── consistcv.test.Rd ├── create_ml_args.Rd ├── create_ml_args_bart.Rd ├── create_ml_args_bartc.Rd ├── create_ml_args_causalforest.Rd ├── create_ml_args_lasso.Rd ├── create_ml_args_superLearner.Rd ├── create_ml_args_svm.Rd ├── create_ml_args_svm_cls.Rd ├── create_ml_arguments.Rd ├── estimate_itr.Rd ├── evaluate_itr.Rd ├── figures │ ├── README-caret_model-1.png │ ├── README-caret_model-2.png │ ├── README-compare_itr_aupec-1.png │ ├── README-compare_itr_gate-1.png │ ├── README-compare_itr_model_summary-1.png │ ├── README-cv_estimate-1.png │ ├── README-cv_plot-1.png │ ├── README-est_extract-1.png │ ├── README-manual.png │ ├── README-multiple_plot-1.png │ ├── README-plot-1.png │ ├── README-sl_plot-1.png │ ├── README-sp_plot-1.png │ ├── README-sv_plot-1.png │ ├── README-user_itr_aupec-1.png │ ├── README-user_itr_gate-1.png │ ├── README-workflow.png │ ├── gate.png │ ├── plot_5folds.png │ └── rf.png ├── fit_itr.Rd ├── het.test.Rd ├── hetcv.test.Rd ├── plot.itr.Rd ├── plot_estimate.Rd ├── print.summary.itr.Rd ├── print.summary.test_itr.Rd ├── star.Rd ├── summary.itr.Rd ├── summary.test_itr.Rd └── test_itr.Rd ├── tests ├── spelling.R ├── testthat.R └── testthat │ ├── star.rda │ ├── test-high_level.R │ └── test-low_level.R └── vignettes ├── .gitignore ├── cv_multiple_alg.Rmd ├── cv_single_alg.Rmd ├── install.Rmd ├── paper_alg1.Rmd ├── sample_split.Rmd ├── sample_split_caret.Rmd ├── user_itr.Rmd └── user_itr_algs.Rmd /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/.DS_Store -------------------------------------------------------------------------------- /.Rbuildignore: 
-------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^cran-comments\.md$ 4 | ^\.travis\.yml$ 5 | ^CRAN-RELEASE$ 6 | ^README\.Rmd$ 7 | ^_pkgdown\.yml$ 8 | ^docs$ 9 | ^pkgdown$ 10 | ^\.github$ 11 | ^CRAN-SUBMISSION$ 12 | -------------------------------------------------------------------------------- /.github/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | -------------------------------------------------------------------------------- /.github/workflows/pkgdown.yaml: -------------------------------------------------------------------------------- 1 | name: Publish to GitHub Pages when a branch is merged 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - master 8 | - causal-ml 9 | 10 | jobs: 11 | publish: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Check out 15 | uses: actions/checkout@v3 16 | 17 | - name: Publish generated content to GitHub Pages 18 | uses: crazy-max/ghaction-github-pages@v3 19 | with: 20 | target_branch: gh-pages 21 | build_dir: docs 22 | env: 23 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .RData 4 | *.DS_Store 5 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Sample .travis.yml for R projects 2 | 3 | language: r 4 | cache: packages 5 | sudo: required 6 | 7 | r_build_args: --resave-data 8 | 9 | notifications: 10 | email: 11 | on_success: change 12 | on_failure: change -------------------------------------------------------------------------------- /CRAN-RELEASE: -------------------------------------------------------------------------------- 1 | This package was 
submitted to CRAN on 2021-10-15. 2 | Once it is accepted, delete this file and tag the release (commit df0a395). 3 | -------------------------------------------------------------------------------- /CRAN-SUBMISSION: -------------------------------------------------------------------------------- 1 | Version: 1.0.0 2 | Date: 2023-08-21 05:12:52 UTC 3 | SHA: f3b3fcaa37bcc7e79ca3a48a46689fe48a757a55 4 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: evalITR 2 | Title: Evaluating Individualized Treatment Rules 3 | Version: 1.0.0 4 | Date: 2023-08-20 5 | Authors@R: c( 6 | person("Michael Lingzhi", "Li", , "mili@hbs.edu", role = c("aut", "cre")), 7 | person("Kosuke", "Imai", , "imai@harvard.edu", role = "aut"), 8 | person("Jialu", "Li", , "jialu_li@g.harvard.edu", role = "ctb"), 9 | person("Xiaolong", "Yang", , "xiaolong_yang@g.harvard.edu", role = "ctb") 10 | ) 11 | Maintainer: Michael Lingzhi Li 12 | Description: Provides various statistical methods for evaluating 13 | Individualized Treatment Rules under randomized data. The provided 14 | metrics include Population Average Value (PAV), Population Average 15 | Prescription Effect (PAPE), Area Under Prescription Effect Curve 16 | (AUPEC). It also provides the tools to analyze Individualized 17 | Treatment Rules under budget constraints. Detailed reference in Imai 18 | and Li (2019) . 
19 | License: GPL (>=2) 20 | URL: https://github.com/MichaelLLi/evalITR, 21 | https://michaellli.github.io/evalITR/, 22 | https://jialul.github.io/causal-ml/ 23 | BugReports: https://github.com/MichaelLLi/evalITR/issues 24 | Depends: 25 | dplyr (>= 1.0), 26 | MASS (>= 7.0), 27 | Matrix (>= 1.0), 28 | quadprog (>= 1.0), 29 | R (>= 3.5.0), 30 | stats 31 | Imports: 32 | caret, 33 | cli, 34 | e1071, 35 | forcats, 36 | gbm, 37 | ggdist, 38 | ggplot2, 39 | ggthemes, 40 | glmnet, 41 | grf, 42 | haven, 43 | purrr, 44 | rlang, 45 | rpart, 46 | rqPen, 47 | scales, 48 | utils, 49 | bartCause, 50 | SuperLearner 51 | Suggests: 52 | doParallel, 53 | furrr, 54 | knitr, 55 | rmarkdown, 56 | testthat, 57 | bartMachine, 58 | elasticnet, 59 | randomForest, 60 | spelling 61 | VignetteBuilder: 62 | knitr 63 | Encoding: UTF-8 64 | LazyData: true 65 | RoxygenNote: 7.2.2 66 | Language: en-US 67 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(plot,itr) 4 | S3method(print,summary.itr) 5 | S3method(print,summary.test_itr) 6 | S3method(summary,itr) 7 | S3method(summary,test_itr) 8 | export(AUPEC) 9 | export(AUPECcv) 10 | export(GATE) 11 | export(GATEcv) 12 | export(PAPD) 13 | export(PAPDcv) 14 | export(PAPE) 15 | export(PAPEcv) 16 | export(PAV) 17 | export(PAVcv) 18 | export(consist.test) 19 | export(consistcv.test) 20 | export(estimate_itr) 21 | export(evaluate_itr) 22 | export(het.test) 23 | export(hetcv.test) 24 | export(plot_estimate) 25 | export(test_itr) 26 | import(dplyr) 27 | import(ggplot2) 28 | import(ggthemes) 29 | import(haven) 30 | import(rpart) 31 | importFrom(MASS,ginv) 32 | importFrom(MASS,mvrnorm) 33 | importFrom(Matrix,nearPD) 34 | importFrom(SuperLearner,SuperLearner) 35 | importFrom(cli,cat_rule) 36 | importFrom(dplyr,ntile) 37 | importFrom(dplyr,pull) 38 | importFrom(ggplot2,.data) 
39 | importFrom(purrr,map) 40 | importFrom(quadprog,solve.QP) 41 | importFrom(rlang,"!!") 42 | importFrom(rlang,.data) 43 | importFrom(rlang,sym) 44 | importFrom(stats,as.formula) 45 | importFrom(stats,binomial) 46 | importFrom(stats,cov) 47 | importFrom(stats,gaussian) 48 | importFrom(stats,model.matrix) 49 | importFrom(stats,pchisq) 50 | importFrom(stats,pnorm) 51 | importFrom(stats,predict) 52 | importFrom(stats,quantile) 53 | importFrom(stats,rbinom) 54 | importFrom(stats,runif) 55 | importFrom(stats,sd) 56 | importFrom(stats,var) 57 | -------------------------------------------------------------------------------- /NEWS.md: -------------------------------------------------------------------------------- 1 | # evalITR 1.0.0 2 | 3 | - Revamped package structure 4 | - Introduced the high-level function estimate_itr() to provide a easy process to train and evaluate individualized treatment rules. 5 | - Added support for automatic training of many machine learning algorithms including Caret, Superlearner, BART, and Causal Forests. 6 | - Provided plotting tools that automatically creates beautiful plots for evaluation metrics 7 | - Changed output structure to provide more detailed information on evaluation metrics and confidence intervals, integrates with summary() function. 8 | 9 | 10 | # evalITR 0.3.0 11 | 12 | * Added a `NEWS.md` file to track changes to the package. 
13 | -------------------------------------------------------------------------------- /R/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/R/.DS_Store -------------------------------------------------------------------------------- /R/GATE.R: -------------------------------------------------------------------------------- 1 | #' Estimation of the Grouped Average Treatment Effects (GATEs) in Randomized Experiments 2 | #' 3 | #' This function estimates the Grouped Average Treatment Effects (GATEs) where the groups are determined by a continuous score. The details of the methods for this design are given in Imai and Li (2022). 4 | #' 5 | #' 6 | #' 7 | #' @param T A vector of the unit-level binary treatment receipt variable for each sample. 8 | #' @param tau A vector of the unit-level continuous score. Conditional Average Treatment Effect is one possible measure. 9 | #' @param Y A vector of the outcome variable of interest for each sample. 10 | #' @param ngates The number of groups to separate the data into. The groups are determined by \code{tau}. Default is 5. 11 | #' @return A list that contains the following items: \item{gate}{The estimated 12 | #' vector of GATEs of length \code{ngates} arranged in order of increasing \code{tau}.} \item{sd}{The estimated vector of standard deviation 13 | #' of GATEs.} 14 | #' @examples 15 | #' T = c(1,0,1,0,1,0,1,0) 16 | #' tau = c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) 17 | #' Y = c(4,5,0,2,4,1,-4,3) 18 | #' gatelist <- GATE(T,tau,Y,ngates=5) 19 | #' gatelist$gate 20 | #' gatelist$sd 21 | #' @author Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 22 | #' \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 23 | #' @references Imai and Li (2022). 
#' \dQuote{Statistical Inference for Heterogeneous Treatment Effects Discovered by Generic Machine Learning in Randomized Experiments},
#' @keywords evaluation
#' @export GATE
GATE <- function(T, tau, Y, ngates = 5) {
  # ---- input validation ----
  if (!(identical(as.numeric(T), as.numeric(as.logical(T))))) {
    stop("T should be binary.")
  }
  if ((length(T) != length(tau)) | (length(tau) != length(Y))) {
    stop("All the data should have the same length.")
  }
  if (length(T) == 0) {
    stop("The data should have positive length.")
  }
  n <- length(Y)
  n1 <- sum(T)   # number of treated units
  n0 <- n - n1   # number of control units
  # Group labels 1..ngates, assigned in order of increasing tau
  # (ntile produces near-equal-sized bins).
  fd_label <- ntile(tau, ngates)
  vargts <- numeric(ngates)
  gates <- numeric(ngates)
  for (i in seq_len(ngates)) {
    That <- as.numeric(fd_label == i)
    # Difference-in-means estimator for group i, scaled by ngates.
    gates[i] <- ngates * (1 / n1 * sum(T * That * Y) +
                            1 / n0 * sum(Y * (1 - T) * (1 - That)) -
                            1 / n0 * sum(Y * (1 - T)))
    Sfp1 <- var((That * Y)[T == 1])
    Sfp0 <- var((That * Y)[T == 0])
    if (length(Y[T == 1 & That == 1]) > 0 && length(Y[T == 0 & That == 1]) > 0) {
      # Within-group difference in means, used in the variance correction.
      kf1 <- mean(Y[T == 1 & That == 1]) - mean(Y[T == 0 & That == 1])
      vargts[i] <- ngates * ngates * (Sfp1 / n1 + Sfp0 / n0 -
                                        (ngates - 1) / ((ngates^2) * (n - 1)) * kf1^2)
    } else {
      # Variance is not identified when a group has no treated or no
      # control units.
      vargts[i] <- NA
    }
  }
  return(list(gate = gates, sd = sqrt(pmax(vargts, 0))))
}
# ------------------------------------------------------------------------------
# /R/PAPD.R
# ------------------------------------------------------------------------------
#' Estimation of the Population Average Prescription Difference in Randomized Experiments
#'
#' This function estimates the Population Average Prescription Difference with a budget
#' constraint. The details of the methods for this design are given in Imai and Li (2019).
#'
#'
#'
#' @param T A vector of the unit-level binary treatment receipt variable for each sample.
9 | #' @param Thatfp A vector of the unit-level binary treatment that would have been assigned by the 10 | #' first individualized treatment rule. Please ensure that the percentage of treatment units of That is lower than the budget constraint. 11 | #' @param Thatgp A vector of the unit-level binary treatment that would have been assigned by the 12 | #' second individualized treatment rule. Please ensure that the percentage of treatment units of That is lower than the budget constraint. 13 | #' @param Y A vector of the outcome variable of interest for each sample. 14 | #' @param budget The maximum percentage of population that can be treated under the 15 | #' budget constraint. Should be a decimal between 0 and 1. 16 | #' @param centered If \code{TRUE}, the outcome variables would be centered before processing. This minimizes 17 | #' the variance of the estimator. Default is \code{TRUE}. 18 | #' @return A list that contains the following items: \item{papd}{The estimated 19 | #' Population Average Prescription Difference} \item{sd}{The estimated standard deviation 20 | #' of PAPD.} 21 | #' @examples 22 | #' T = c(1,0,1,0,1,0,1,0) 23 | #' That = c(0,1,1,0,0,1,1,0) 24 | #' That2 = c(1,0,0,1,1,0,0,1) 25 | #' Y = c(4,5,0,2,4,1,-4,3) 26 | #' papdlist <- PAPD(T,That,That2,Y,budget = 0.5) 27 | #' papdlist$papd 28 | #' papdlist$sd 29 | #' @author Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 30 | #' \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 31 | #' @references Imai and Li (2019). 
#' \dQuote{Experimental Evaluation of Individualized Treatment Rules},
#' @keywords evaluation
#' @export PAPD
PAPD <- function(T, Thatfp, Thatgp, Y, budget, centered = TRUE) {
  # ---- input validation ----
  if (!(identical(as.numeric(T), as.numeric(as.logical(T))))) {
    stop("T should be binary.")
  }
  if (!(identical(as.numeric(Thatfp), as.numeric(as.logical(Thatfp))))) {
    stop("Thatfp should be binary.")
  }
  if (!(identical(as.numeric(Thatgp), as.numeric(as.logical(Thatgp))))) {
    stop("Thatgp should be binary.")
  }
  if ((budget < 0) | (budget > 1)) {
    stop("Budget constraint should be between 0 and 1")
  }
  if ((length(T) != length(Thatfp)) | (length(Thatfp) != length(Thatgp)) | (length(Thatgp) != length(Y))) {
    stop("All the data should have the same length.")
  }
  if (length(T) == 0) {
    stop("The data should have positive length.")
  }
  if ((sum(Thatfp) > floor(length(T) * budget) + 1) | (sum(Thatgp) > floor(length(T) * budget) + 1)) {
    stop("The proportion of treated units in Thatfp or Thatgp should be below or equal to budget.")
  }
  if (!is.logical(centered)) {
    stop("The centered parameter should be TRUE or FALSE.")
  }
  T <- as.numeric(T)
  Thatfp <- as.numeric(Thatfp)
  Thatgp <- as.numeric(Thatgp)
  Y <- as.numeric(Y)
  if (centered) {
    # Centering the outcome reduces the estimator's variance without
    # changing the point estimate.
    Y <- Y - mean(Y)
  }
  n <- length(Y)
  n1 <- sum(T)   # number of treated units
  n0 <- n - n1   # number of control units
  # Budget-constrained PAPE of each rule; the PAPD is their difference.
  SAPEfp <- 1 / n1 * sum(T * Thatfp * Y) + 1 / n0 * sum(Y * (1 - T) * (1 - Thatfp)) -
    budget / n1 * sum(Y * T) - (1 - budget) / n0 * sum(Y * (1 - T))
  SAPEgp <- 1 / n1 * sum(T * Thatgp * Y) + 1 / n0 * sum(Y * (1 - T) * (1 - Thatgp)) -
    budget / n1 * sum(Y * T) - (1 - budget) / n0 * sum(Y * (1 - T))
  PAPD <- SAPEfp - SAPEgp
  # Variance components. (The original also computed Sfp1, Sfp0, kf0 and
  # kg0, but none of those entered varfgp; that dead code is removed.)
  Sfgp1 <- var(((Thatfp - Thatgp) * Y)[T == 1])
  Sfgp0 <- var(((Thatfp - Thatgp) * Y)[T == 0])
  kf1 <- mean(Y[T == 1 & Thatfp == 1]) - mean(Y[T == 0 & Thatfp == 1])
  kg1 <- mean(Y[T == 1 & Thatgp == 1]) - mean(Y[T == 0 & Thatgp == 1])
  varfgp <- Sfgp1 / n1 + Sfgp0 / n0 -
    floor(n * budget) * (n - floor(n * budget)) / (n^2 * (n - 1)) * (kf1^2 + kg1^2) +
    2 * floor(n * budget) * max(floor(n * budget), n - floor(n * budget)) / (n^2 * (n - 1)) * abs(kf1 * kg1)
  # Clamp at zero before taking the square root: the finite-sample
  # correction can make the variance estimate slightly negative.
  return(list(papd = PAPD, sd = sqrt(max(varfgp, 0))))
}
# ------------------------------------------------------------------------------
# /R/PAPE.R
# ------------------------------------------------------------------------------
#' Estimation of the Population Average Prescription Effect in Randomized Experiments
#'
#' This function estimates the Population Average Prescription Effect with and without a budget
#' constraint. The details of the methods for this design are given in Imai and Li (2019).
#'
#'
#'
#' @param T A vector of the unit-level binary treatment receipt variable for each sample.
#' @param That A vector of the unit-level binary treatment that would have been assigned by the
#' individualized treatment rule. If \code{budget} is specified, please ensure
#' that the percentage of treatment units of That is lower than the budget constraint.
#' @param Y A vector of the outcome variable of interest for each sample.
#' @param budget The maximum percentage of population that can be treated under the
#' budget constraint. Should be a decimal between 0 and 1. Default is NA which assumes
#' no budget constraint.
#' @param centered If \code{TRUE}, the outcome variables would be centered before processing. This minimizes
#' the variance of the estimator. Default is \code{TRUE}.
#' @return A list that contains the following items: \item{pape}{The estimated
#' Population Average Prescription Effect.} \item{sd}{The estimated standard deviation
#' of PAPE.}
#' @examples
#' T = c(1,0,1,0,1,0,1,0)
#' That = c(0,1,1,0,0,1,1,0)
#' Y = c(4,5,0,2,4,1,-4,3)
#' papelist <- PAPE(T,That,Y)
#' papelist$pape
#' papelist$sd
#' @author Michael Lingzhi Li, Technology and Operations Management, Harvard Business School
#' \email{mili@hbs.edu}, \url{https://www.michaellz.com/};
#' @references Imai and Li (2019). \dQuote{Experimental Evaluation of Individualized Treatment Rules},
#' @keywords evaluation
#' @export PAPE
PAPE <- function(T, That, Y, budget = NA, centered = TRUE) {
  # ---- input validation ----
  if (!(identical(as.numeric(T), as.numeric(as.logical(T))))) {
    stop("T should be binary.")
  }
  if (!(identical(as.numeric(That), as.numeric(as.logical(That))))) {
    stop("That should be binary.")
  }
  if ((length(T) != length(That)) | (length(That) != length(Y))) {
    stop("All the data should have the same length.")
  }
  if (length(T) == 0) {
    stop("The data should have positive length.")
  }
  if (!is.na(budget) & (sum(That) > floor(length(T) * budget) + 1)) {
    stop("The number of treated units in That should be below or equal to budget")
  }
  if (!is.logical(centered)) {
    stop("The centered parameter should be TRUE or FALSE.")
  }
  if (!is.na(budget) & ((budget < 0) | (budget > 1))) {
    stop("Budget constraint should be between 0 and 1")
  }
  T <- as.numeric(T)
  That <- as.numeric(That)
  Y <- as.numeric(Y)
  if (centered) {
    # Centering the outcome reduces the estimator's variance without
    # changing the point estimate.
    Y <- Y - mean(Y)
  }
  n <- length(Y)
  n1 <- sum(T)   # number of treated units
  n0 <- n - n1   # number of control units
  if (is.na(budget)) {
    # ---- unconstrained PAPE ----
    n1h <- sum(That)       # units the rule would treat
    n0h <- n - n1h         # units the rule would leave untreated
    probs <- n1h / n       # proportion the rule treats
    SAPE <- n / (n - 1) * (1 / n1 * sum(T * That * Y) +
                             1 / n0 * sum(Y * (1 - T) * (1 - That)) -
                             n1h / n1 / n * sum(Y * T) -
                             n0h / n0 / n * sum(Y * (1 - T)))
    Sf1 <- var(((That - probs) * Y)[T == 1])
    Sf0 <- var(((That - probs) * Y)[T == 0])
    SATE <- 1 / n1 * sum(T * Y) - 1 / n0 * (sum((1 - T) * Y))
    covarterm <- 1 / n^2 * (SAPE^2 + 2 * (n - 1) * SAPE * SATE * (2 * probs - 1) -
                              (1 - probs) * probs * n * SATE^2)
    varexp <- (n / (n - 1))^2 * (Sf1 / n1 + Sf0 / n0 + covarterm)
    return(list(pape = SAPE, sd = sqrt(max(varexp, 0))))
  } else {
    # ---- budget-constrained PAPE ----
    # (The original branch also computed n1h/n0h here, which were unused;
    # that dead code is removed.)
    SAPEfp <- 1 / n1 * sum(T * That * Y) + 1 / n0 * sum(Y * (1 - T) * (1 - That)) -
      budget / n1 * sum(Y * T) - (1 - budget) / n0 * sum(Y * (1 - T))
    Sfp1 <- var(((That - budget) * Y)[T == 1])
    Sfp0 <- var(((That - budget) * Y)[T == 0])
    kf1 <- mean(Y[T == 1 & That == 1]) - mean(Y[T == 0 & That == 1])
    kf0 <- mean(Y[T == 1 & That == 0]) - mean(Y[T == 0 & That == 0])
    varfp <- Sfp1 / n1 + Sfp0 / n0 +
      floor(n * budget) * (n - floor(n * budget)) / (n^2 * (n - 1)) *
        ((2 * budget - 1) * kf1^2 - 2 * budget * kf1 * kf0)
    # Clamp at zero: the finite-sample correction can push the variance
    # estimate slightly negative.
    return(list(pape = SAPEfp, sd = sqrt(max(varfp, 0))))
  }
}
# ------------------------------------------------------------------------------
# /R/PAV.R
# ------------------------------------------------------------------------------
#' Estimation of the Population Average Value in Randomized Experiments
#'
#' This function estimates the Population Average Value. The details of the methods for this design are given in Imai and Li (2019).
#'
#'
#'
#' @param T A vector of the unit-level binary treatment receipt variable for each sample.
#' @param That A vector of the unit-level binary treatment that would have been assigned by the
#' individualized treatment rule. If \code{budget} is specified, please ensure
#' that the percentage of treatment units of That is lower than the budget constraint.
#' @param Y A vector of the outcome variable of interest for each sample.
#' @param centered If \code{TRUE}, the outcome variables would be centered before processing. This minimizes
#' the variance of the estimator. Default is \code{TRUE}.
#' @return A list that contains the following items: \item{pav}{The estimated
#' Population Average Value.} \item{sd}{The estimated standard deviation
#' of PAV.}
#' @examples
#' T = c(1,0,1,0,1,0,1,0)
#' That = c(0,1,1,0,0,1,1,0)
#' Y = c(4,5,0,2,4,1,-4,3)
#' pavlist <- PAV(T,That,Y)
#' pavlist$pav
#' pavlist$sd
#' @author Michael Lingzhi Li, Technology and Operations Management, Harvard Business School
#' \email{mili@hbs.edu}, \url{https://www.michaellz.com/};
#' @references Imai and Li (2019). \dQuote{Experimental Evaluation of Individualized Treatment Rules},
#' @keywords evaluation
#' @export PAV
PAV <- function (T, That, Y, centered = TRUE) {
  # Validate all inputs up front, before any computation.
  if (!(identical(as.numeric(T), as.numeric(as.logical(T))))) {
    stop("T should be binary.")
  }
  if (!(identical(as.numeric(That), as.numeric(as.logical(That))))) {
    stop("That should be binary.")
  }
  if ((length(T) != length(That)) | (length(That) != length(Y))) {
    stop("All the data should have the same length.")
  }
  if (length(T) == 0) {
    stop("The data should have positive length.")
  }
  if (!is.logical(centered)) {
    stop("The centered parameter should be TRUE or FALSE.")
  }
  treat <- as.numeric(T)
  rule <- as.numeric(That)
  outcome <- as.numeric(Y)
  if (centered) {
    # Centering shifts the outcome to mean zero to lower the variance of
    # the estimator.
    outcome <- outcome - mean(outcome)
  }
  n_all <- length(outcome)
  n_treated <- sum(treat)
  n_control <- n_all - n_treated
  # Sample Average Value: average outcome among units whose realized
  # treatment agrees with the rule's assignment, one term per arm.
  sav <- sum(treat * rule * outcome) / n_treated +
    sum(outcome * (1 - treat) * (1 - rule)) / n_control
  # Variance estimate built from the two experimental arms.
  var_treated <- var((rule * outcome)[treat == 1])
  var_control <- var(((1 - rule) * outcome)[treat == 0])
  var_total <- var_treated / n_treated + var_control / n_control
  # Guard against tiny negative values before the square root.
  list(pav = sav, sd = sqrt(max(var_total, 0)))
}
# ------------------------------------------------------------------------------
# /R/PAVcv.R
# ------------------------------------------------------------------------------
#' Estimation of the Population Average Value in Randomized Experiments Under Cross Validation
#'
#' This function estimates the Population Average Value.
#' The details of the methods for this design are given in Imai and Li (2019).
#'
#'
#'
#' @param T A vector of the unit-level binary treatment receipt variable for each sample.
#' @param That A matrix where the \code{i}th column is the unit-level binary treatment that would have been assigned by the
#' individualized treatment rule generated in the \code{i}th fold. If \code{budget} is specified, please ensure
#' that the percentage of treatment units of That is lower than the budget constraint.
#' @param Y The outcome variable of interest.
#' @param ind A vector of integers (between 1 and number of folds inclusive) indicating which testing set each sample belongs to.
#' @param centered If \code{TRUE}, the outcome variables would be centered before processing. This minimizes
#' the variance of the estimator. Default is \code{TRUE}.
#' @return A list that contains the following items: \item{pav}{The estimated
#' Population Average Value.} \item{sd}{The estimated standard deviation
#' of PAV.}
#' @examples
#' T = c(1,0,1,0,1,0,1,0)
#' That = matrix(c(0,1,1,0,0,1,1,0,1,0,0,1,1,0,0,1), nrow = 8, ncol = 2)
#' Y = c(4,5,0,2,4,1,-4,3)
#' ind = c(rep(1,4),rep(2,4))
#' pavlist <- PAVcv(T, That, Y, ind)
#' pavlist$pav
#' pavlist$sd
#' @author Michael Lingzhi Li, Technology and Operations Management, Harvard Business School
#' \email{mili@hbs.edu}, \url{https://www.michaellz.com/};
#' @references Imai and Li (2019).
#' \dQuote{Experimental Evaluation of Individualized Treatment Rules},
#' @keywords evaluation
#' @export PAVcv
PAVcv <- function (T, That, Y, ind, centered = TRUE) {
  ## ---- input validation ----
  if (!(identical(as.numeric(T),as.numeric(as.logical(T))))) {
    stop("T should be binary.")
  }
  if (!is.logical(centered)) {
    stop("The centered parameter should be TRUE or FALSE.")
  }
  if (!(identical(as.numeric(That),as.numeric(as.logical(That))))) {
    stop("That should be binary.")
  }
  if ((length(T)!=dim(That)[1]) | (dim(That)[1]!=length(Y))) {
    stop("All the data should have the same length.")
  }
  if (length(T)==0) {
    stop("The data should have positive length.")
  }
  T=as.numeric(T)
  That=as.matrix(That)
  Y=as.numeric(Y)
  ## Centering the outcome reduces estimator variance without changing
  ## its expectation.
  if (centered) {
    Y = Y - mean(Y)
  }
  nfolds = max(ind)
  n = length(Y)
  n1 = sum(T)      # number of treated units overall
  n0 = n - n1      # number of control units overall
  pavfold = c()    # per-fold PAV point estimates
  sdfold = c()     # per-fold standard deviations (collected but unused below)
  Sf1 = 0          # pooled within-fold variance contribution, treated arm
  Sf0 = 0          # pooled within-fold variance contribution, control arm
  covijtauij = 0   # across-fold covariance correction term
  n1n1 = n1*(n1-1)
  n1n0 = n0*n1
  n0n0 = n0*(n0-1)
  ## Row-wise averages of That*Y across the fold-specific rules, split by arm.
  ThatYT1mean = apply(That*Y*T,1,mean)
  ThatYT0mean = apply(That*Y*(1-T),1,mean)
  for (i in 1:nfolds) {
    ## PAV within fold i, evaluated with the rule fit on the other folds.
    output = PAV(T[ind==i],That[ind==i,i],Y[ind==i])
    m = length(T[ind==i])
    m1 = sum(T[ind==i])
    m0 = m - m1
    probs=sum(That[ind==i,i])/m  # fraction treated by the fold-i rule (not used below)
    Sf1=Sf1 + var((That[,i]*Y)[T==1 & ind==i])/(m1*nfolds)
    Sf0=Sf0 + var(((1-That[,i])*Y)[T==0 & ind==i])/(m0*nfolds)
    pavfold = c(pavfold, output$pav)
    sdfold = c(sdfold, output$sd)
    ## Across-fold covariance adjustment; appears to implement the
    ## derivation in Imai and Li (2019) — not re-derived here.
    covijtauij = covijtauij + (((sum((That[,i]*Y*T))^2-sum((That[,i]*Y^2*T)))/n1n1 -
      2*sum((That[,i]*Y*T))*sum(That[,i]*Y*(1-T))/n1n0 +
      (sum((That[,i]*Y*(1-T)))^2-sum((That[,i]*Y^2*(1-T))))/n0n0) -
      ((sum(ThatYT1mean)^2-sum(ThatYT1mean^2))/n1n1 -
      (2*sum(ThatYT1mean)*sum(ThatYT0mean))/n1n0 +
      (sum(ThatYT0mean)^2-sum(ThatYT0mean^2))/n0n0)) / nfolds
  }
  mF = n / nfolds     # fold size (kept as in the original; not used below)
  SF2 = var(pavfold)  # across-fold variance of the point estimates
  varcv = Sf1+Sf0
  ## Combined cross-validated variance; the min() keeps the shrinkage term
  ## from pushing the estimate negative. (Single statement: the subtraction
  ## is part of the varexp assignment.)
  varexp = (varcv + covijtauij) - (nfolds - 1)/ nfolds * min(SF2,(varcv + covijtauij))
  return(list(pav=mean(pavfold),sd=sqrt(max(varexp,0))))
}

## ---------------------------------------------------------------------------
## R/consist.test.R
## ---------------------------------------------------------------------------

#' The Consistency Test for Grouped Average Treatment Effects (GATEs) in Randomized Experiments
#'
#' This function calculates statistics related to the test of treatment effect consistency across groups.
#'
#' The details of the methods for this design are given in Imai and Li (2022).
#'
#'
#' @param T A vector of the unit-level binary treatment receipt variable for each sample.
#' @param tau A vector of the unit-level continuous score. Conditional Average Treatment Effect is one possible measure.
#' @param Y A vector of the outcome variable of interest for each sample.
#' @param ngates The number of groups to separate the data into. The groups are determined by \code{tau}. Default is 5.
#' @param nsim Number of Monte Carlo simulations used to simulate the null distributions. Default is 10000.
#' @return A list that contains the following items: \item{stat}{The estimated
#' statistic for the test of consistency} \item{pval}{The p-value of the null
#' hypothesis (that the treatment effects are consistent)}
#' @examples
#' T = c(1,0,1,0,1,0,1,0)
#' tau = c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7)
#' Y = c(4,5,0,2,4,1,-4,3)
#' consisttestlist <- consist.test(T,tau,Y,ngates=5)
#' consisttestlist$stat
#' consisttestlist$pval
#' @author Michael Lingzhi Li, Technology and Operations Management, Harvard Business School
#' \email{mili@hbs.edu}, \url{https://www.michaellz.com/};
#' @references Imai and Li (2022).
#' \dQuote{Statistical Inference for Heterogeneous Treatment Effects Discovered by Generic Machine Learning in Randomized Experiments},
#' @keywords evaluation
#' @export consist.test
consist.test <- function(T, tau, Y, ngates = 5, nsim = 10000) {
  ## ---- input validation ----
  if (!(identical(as.numeric(T),as.numeric(as.logical(T))))) {
    stop("T should be binary.")
  }
  if ((length(T)!=length(tau)) | (length(tau)!=length(Y))) {
    stop("All the data should have the same length.")
  }
  if (length(T)==0) {
    stop("The data should have positive length.")
  }
  n = length(Y)
  n1 = sum(T)  # treated count
  n0 = n-n1    # control count
  ## Equal-frequency grouping of units by the score tau.
  ## NOTE(review): ntile(), nearPD(), ginv(), mvrnorm(), and solve.QP() come
  ## from dplyr, Matrix, MASS, and quadprog respectively — presumably
  ## imported via the package NAMESPACE; confirm.
  fd_label = ntile(tau, ngates)
  vargts = numeric(ngates)   # kept as in the original (not used below)
  papes = numeric(ngates)    # scaled GATE estimate for each group
  kf1s = numeric(ngates)
  kf0s = numeric(ngates)
  Sfp1s = as.list(numeric(ngates))  # per-group That*Y values, treated arm
  Sfp0s = as.list(numeric(ngates))  # per-group That*Y values, control arm
  mcov = matrix(0, nrow = ngates, ncol = ngates)  # covariance of the GATE estimates
  for (i in 1:ngates) {
    That = as.numeric(fd_label == i)
    plim = 1 / ngates  # fraction each one-group rule would treat
    papes[i] = ngates * (1/n1*sum(T*That*Y)+1/n0*sum(Y*(1-T)*(1-That))-plim/n1*sum(Y*T)-(1-plim)/n0*sum(Y*(1-T)))
    Sfp1s[[i]] = (That*Y)[T==1]
    Sfp0s[[i]] = (That*Y)[T==0]
    ## Difference-in-means effect inside / outside group i; NaN if a cell
    ## is empty (handled by the is.nan() cleanup below).
    kf1s[i] = mean(Y[T==1 & That==1])-mean(Y[T==0 & That==1])
    kf0s[i] = mean(Y[T==1 & That==0])-mean(Y[T==0 & That==0])
  }
  for (i in 1:ngates) {
    for (j in 1:ngates) {
      mcov[i,j] = ngates ^ 2 * (cov(Sfp1s[[i]],Sfp1s[[j]]) / n1 + cov(Sfp0s[[i]],Sfp0s[[j]]) / n0) +
        1/ (ngates * (n - 1)) *((ngates - 1)*(kf1s[i]^2-kf1s[i]*kf0s[i]+kf1s[j]^2-kf1s[j]*kf0s[j]) - ngates * (ngates - 1)* kf1s[i] * kf1s[j])
    }
  }
  mcov[is.nan(mcov)] = 0
  ## Project onto the nearest positive-definite matrix before inverting.
  mcov = as.matrix(nearPD(mcov, eig.tol = 1e-4)$mat)
  if (is.finite(determinant(mcov)$modulus)) {
    mcov_inv = as.matrix(nearPD(ginv(mcov))$mat)
    ## Simulate the null distribution of the test statistic.
    rsamples = mvrnorm(n=nsim, numeric(ngates), ginv(mcov_inv))
    ## A encodes the monotonicity constraints papes[i+1] >= papes[i].
    A = matrix(data = 0, nrow= ngates-1, ncol = ngates)
    for (i in 1:(ngates-1)) {
      A[i,i] = -1
      A[i,i+1] = 1
    }
    ## Distance-to-monotone-cone statistic via quadratic programming;
    ## appears to follow Imai and Li (2022) — not re-derived here.
    values = numeric(nsim)
    for (i in 1:nsim) {
      values[i] = (solve.QP(Dmat=mcov_inv,dvec=mcov_inv %*% rsamples[i,],Amat=t(A))$value * 2 + t(rsamples[i,]) %*% mcov_inv %*% rsamples[i,])
    }
    actual_val = (solve.QP(Dmat=mcov_inv,dvec=mcov_inv %*% papes,Amat=t(A))$value * 2 + (t(papes) %*% mcov_inv %*% papes)[1,1])
    ## p-value: share of simulated null statistics at least as large as the
    ## observed statistic.
    return(list(stat=actual_val,pval=1-mean(actual_val>values)))
  } else {
    ## Degenerate covariance matrix: the test is not defined.
    return(list(stat=NA,pval=NA))
  }
}

## ---------------------------------------------------------------------------
## R/data.R
## ---------------------------------------------------------------------------

#' Tennessee’s Student/Teacher Achievement Ratio (STAR) project
#'
#' A longitudinal study experimentally evaluating the impacts of class size in early education on various outcomes (Mosteller, 1995)
#'
#' @name star
#' @format A data frame with 1911 observations and 14 variables:
#' \describe{
#'  \item{treatment}{A binary treatment indicating whether a student is assigned to small class and regular class without an aid}
#'  \item{g3tlangss}{A continuous variable measuring student's writing scores}
#'  \item{g3treadss}{A continuous variable measuring student's reading scores}
#'  \item{g3tmathss}{A continuous variable measuring student's math scores}
#'  \item{gender}{Students' gender}
#'  \item{race}{Students' race}
#'  \item{birthmonth}{Students' birth month}
#'  \item{birthyear}{Students' birth year}
#'  \item{SCHLURBN}{Urban or rural}
#'  \item{GKENRMNT}{Enrollment size}
#'  \item{GRDRANGE}{Grade range}
#'  \item{GKFRLNCH}{Number of students on free lunch}
#'  \item{GKBUSED}{Number of students on school buses}
#'  \item{GKWHITE}{Percentage of white students}
#' }
"star"

## ---------------------------------------------------------------------------
## R/het.test.R
## ---------------------------------------------------------------------------

#'
#' The Heterogeneity Test for Grouped Average Treatment Effects (GATEs) in Randomized Experiments
#'
#' This function calculates statistics related to the test of heterogeneous treatment effects across groups.
#'
#' The details of the methods for this design are given in Imai and Li (2022).
#'
#'
#' @param T A vector of the unit-level binary treatment receipt variable for each sample.
#' @param tau A vector of the unit-level continuous score. Conditional Average Treatment Effect is one possible measure.
#' @param Y A vector of the outcome variable of interest for each sample.
#' @param ngates The number of groups to separate the data into. The groups are determined by \code{tau}. Default is 5.
#' @return A list that contains the following items: \item{stat}{The estimated
#' statistic for the test of heterogeneity.} \item{pval}{The p-value of the null
#' hypothesis (that the treatment effects are homogeneous)}
#' @examples
#' T = c(1,0,1,0,1,0,1,0)
#' tau = c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7)
#' Y = c(4,5,0,2,4,1,-4,3)
#' hettestlist <- het.test(T,tau,Y,ngates=5)
#' hettestlist$stat
#' hettestlist$pval
#' @author Michael Lingzhi Li, Technology and Operations Management, Harvard Business School
#' \email{mili@hbs.edu}, \url{https://www.michaellz.com/};
#' @references Imai and Li (2022).
#' \dQuote{Statistical Inference for Heterogeneous Treatment Effects Discovered by Generic Machine Learning in Randomized Experiments},
#' @keywords evaluation
#' @export het.test
het.test <- function(T, tau, Y, ngates = 5) {
  ## ---- input validation ----
  if (!(identical(as.numeric(T),as.numeric(as.logical(T))))) {
    stop("T should be binary.")
  }
  if ((length(T)!=length(tau)) | (length(tau)!=length(Y))) {
    stop("All the data should have the same length.")
  }
  if (length(T)==0) {
    stop("The data should have positive length.")
  }
  n = length(Y)
  n1 = sum(T)  # treated count
  n0 = n-n1    # control count
  ## Equal-frequency grouping of units by the score tau.
  ## NOTE(review): ntile() comes from dplyr — presumably imported via the
  ## package NAMESPACE; confirm.
  fd_label = ntile(tau, ngates)
  papes = numeric(ngates)  # scaled GATE estimate for each group
  kf1s = numeric(ngates)
  kf0s = numeric(ngates)
  Sfp1s = as.list(numeric(ngates))  # per-group That*Y values, treated arm
  Sfp0s = as.list(numeric(ngates))  # per-group That*Y values, control arm
  mcov = matrix(0, nrow = ngates, ncol = ngates)
  ## Fraction each one-group rule would treat; loop-invariant, hoisted out
  ## of the loop (the original recomputed it every iteration). The unused
  ## local `vargts` from the original has been removed.
  plim = 1 / ngates
  for (i in 1:ngates) {
    That = as.numeric(fd_label == i)
    papes[i] = ngates * (1/n1*sum(T*That*Y)+1/n0*sum(Y*(1-T)*(1-That))-plim/n1*sum(Y*T)-(1-plim)/n0*sum(Y*(1-T)))
    Sfp1s[[i]] = (That*Y)[T==1]
    Sfp0s[[i]] = (That*Y)[T==0]
    ## Difference-in-means effect inside / outside group i; NaN if a cell
    ## is empty, which propagates to mcov and triggers the NA branch below.
    kf1s[i] = mean(Y[T==1 & That==1])-mean(Y[T==0 & That==1])
    kf0s[i] = mean(Y[T==1 & That==0])-mean(Y[T==0 & That==0])
  }
  for (i in 1:ngates) {
    for (j in 1:ngates) {
      mcov[i,j] = ngates ^ 2 * (cov(Sfp1s[[i]],Sfp1s[[j]]) / n1 + cov(Sfp0s[[i]],Sfp0s[[j]]) / n0) +
        1/ (ngates * (n - 1)) *((ngates - 1)*(kf1s[i]^2-kf1s[i]*kf0s[i]+kf1s[j]^2-kf1s[j]*kf0s[j]) - ngates * (ngates - 1) *kf1s[i] * kf1s[j])
    }
  }
  ## Only the diagonal is kept: the statistic treats the group estimates as
  ## independent.
  mcov = diag(diag(mcov), nrow = ngates, ncol = ngates)
  if (is.finite(determinant(mcov)$modulus)) {
    ## Wald-type statistic compared against a chi-squared distribution with
    ## `ngates` degrees of freedom.
    stat = t(papes) %*% solve(mcov) %*% papes
    return(list(stat=stat,pval=pchisq(stat, ngates, lower.tail = FALSE)))
  } else {
    ## Degenerate covariance matrix: the test is not defined.
    return(list(stat=NA,pval=NA))
  }
}

## ---------------------------------------------------------------------------
## R/itr_run_bagging.R
## ---------------------------------------------------------------------------

## bagging

## Fit a bagged-tree ITR on the training split and score the test/total data.
## `params` carries cv flag and data sizes; `budget` caps the treated share.
run_bagging <- function(
  dat_train,
  dat_test,
  dat_total,
  params,
  indcv,
  iter,
  budget
) {
  # split/cross-validation
  cv <- params$cv

  ## train
  fit_train <- train_bagging(dat_train)

  ## test
  fit_test <- test_bagging(
    fit_train, dat_test, dat_total, params$n_df, params$n_tb,
    indcv, iter, budget, cv
  )

  return(list(test = fit_test, train = fit_train))
}

## Train a bagging model: a randomForest with mtry = (number of columns - 1),
## i.e. every predictor considered at each split, which is what makes this
## bagging rather than a random forest.
train_bagging <- function(dat_train) {

  ## format data
  training_data_elements_bagging = create_ml_args_bagging(dat_train)

  ## train formula
  formula_bagging = training_data_elements_bagging[["formula"]]

  ## tuning parameter: use all predictors at every split
  tune_parameter = ncol(training_data_elements_bagging[["data"]]) -1

  ## fit
  fit <- randomForest::randomForest(formula_bagging,
                                    data = training_data_elements_bagging[["data"]],
                                    mtry=tune_parameter, ntree = 500,
                                    norm.votes=TRUE)

  return(fit)

}

#'@importFrom stats predict runif
## Score a trained bagging model. Continuous outcomes use raw predictions;
## binary outcomes use class-probability predictions (column 2).
test_bagging <- function(
  fit_train, dat_test, dat_total, n_df, n_tb, indcv, iter, budget, cv
) {

  ## format data
  testing_data_elements_bagging = create_ml_args_bagging(dat_test)
  total_data_elements_bagging = create_ml_args_bagging(dat_total)

  ## outcome
  outcome = testing_data_elements_bagging[["data"]][["Y"]]

  if(cv == TRUE){

    if(length(unique(outcome)) > 2){

      ## predict (continuous outcome)
      Y0t_total = predict(
        fit_train,
        newdata = total_data_elements_bagging[["data0t"]])
      Y1t_total = predict(
        fit_train,
        newdata = total_data_elements_bagging[["data1t"]])

    }else{

      ## predict (binary outcome: probability of the second class)
      Y0t_total = predict(
        fit_train,
        newdata = total_data_elements_bagging[["data0t"]],
        type = "prob")[, 2]
      Y1t_total = predict(
        fit_train,
        newdata = total_data_elements_bagging[["data1t"]],
        type = "prob")[, 2]
    }

    ## treatment-effect estimate; the tiny uniform jitter breaks ties
    tau_total = Y1t_total - Y0t_total + runif(n_df,-1e-6,1e-6)

    ## compute quantities of interest
    tau_test <- tau_total[indcv == iter]
    That <- as.numeric(tau_total > 0)
    ## budget-constrained rule: treat units above the budget quantile of tau
    That_p <- as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1])

    ## output
    cf_output <- list(
      tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))),
      tau_cv = tau_total,
      That_cv = That,
      That_pcv = That_p
    )
  }

  if(cv == FALSE){

    if(length(unique(outcome)) > 2){

      ## predict (continuous outcome)
      Y0t_test = predict(
        fit_train,
        newdata = testing_data_elements_bagging[["data0t"]])
      Y1t_test = predict(
        fit_train,
        newdata = testing_data_elements_bagging[["data1t"]])

    }else{

      ## predict (binary outcome: probability of the second class)
      Y0t_test = predict(
        fit_train,
        newdata = testing_data_elements_bagging[["data0t"]],
        type = "prob")[, 2]
      Y1t_test = predict(
        fit_train,
        newdata = testing_data_elements_bagging[["data1t"]],
        type = "prob")[, 2]
    }

    tau_test = Y1t_test - Y0t_test

    ## compute quantities of interest
    That = as.numeric(tau_test > 0)
    That_p = numeric(length(That))
    ## treat the top floor(budget*n)+1 units by predicted effect
    That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1

    ## output
    cf_output <- list(
      tau = tau_test,
      tau_cv = tau_test,
      That_cv = That,
      That_pcv = That_p
    )

  }

  return(cf_output)
}

## ---------------------------------------------------------------------------
## R/itr_run_bart.R
## ---------------------------------------------------------------------------

#'
run_bartc <- function(
  dat_train,
  dat_test,
  dat_total,
## (continuation of run_bartc's argument list, split at a chunk boundary)
  params,
  indcv,
  iter,
  budget
) {

  # split/cross-validation
  cv <- params$cv

  ## train
  fit_train <- train_bartc(dat_train)

  ## test
  fit_test <- test_bartc(
    fit_train, dat_test, dat_total, params$n_df, params$n_tb,
    indcv, iter, budget, cv
  )

  return(list(test = fit_test, train = fit_train))
}

## Train a BART causal model (bartCause) with trees kept for later prediction.
train_bartc <- function(dat_train) {

  ## format training data
  training_data_elements_bartc = create_ml_args_bartc(dat_train)

  ## fit
  fit <- bartCause::bartc(response = training_data_elements_bartc[["Y"]],
                          treatment = training_data_elements_bartc[["T"]],
                          confounders = training_data_elements_bartc[["X"]],
                          keepTrees = TRUE)

  return(fit)
}

#'@importFrom stats predict runif
## Score a trained bartCause model; predictions are posterior draws, so the
## point estimate is the column mean over draws.
test_bartc <- function(
  fit_train, dat_test, dat_total, n_df, n_tb, indcv, iter, budget, cv
) {

  ## format data
  testing_data_elements_bartc = create_ml_args_bartc(dat_test)
  total_data_elements_bartc = create_ml_args_bartc(dat_total)

  if(cv == TRUE){
    ## predict
    Y0t_total=predict(fit_train,total_data_elements_bartc[["X0t"]])
    Y1t_total=predict(fit_train,total_data_elements_bartc[["X1t"]])

    ## posterior-mean effect; the tiny uniform jitter breaks ties
    tau_total=colMeans(Y1t_total)-colMeans(Y0t_total) + runif(n_df,-1e-6,1e-6)

    ## compute quantities of interest
    tau_test <- tau_total[indcv == iter]
    That <- as.numeric(tau_total > 0)
    ## budget-constrained rule: treat units above the budget quantile of tau
    That_p <- as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1])

    ## output
    cf_output <- list(
      tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))),
      tau_cv = tau_total,
      That_cv = That,
      That_pcv = That_p
    )
  }

  if(cv == FALSE){
    ## predict
    Y0t_test=predict(fit_train,testing_data_elements_bartc[["X0t"]])
    Y1t_test=predict(fit_train,testing_data_elements_bartc[["X1t"]])

    tau_test=colMeans(Y1t_test)-colMeans(Y0t_test)

    ## compute quantities of interest
    That = as.numeric(tau_test > 0)
    That_p = numeric(length(That))
    ## treat the top floor(budget*n)+1 units by predicted effect
    That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1

    ## output
    cf_output <- list(
      tau = tau_test,
      tau_cv = tau_test,
      That_cv = That,
      That_pcv = That_p
    )
  }
  return(cf_output)
}

# Y0t<-predict(barc1,X0t)
# Y1t<-predict(barc1,X1t)
# tau_test2<-Y1t-Y0t
# That2=as.numeric(tau_test2>0)

## ---------------------------------------------------------------------------
## R/itr_run_bartmachine.R
## ---------------------------------------------------------------------------

#'
## Fit a bartMachine ITR on the training split and score the total data.
## NOTE(review): unlike the other runners, this one has no cv flag — it
## always follows the cross-validation code path; confirm intended.
run_bartmachine <- function(
  dat_train,
  dat_test,
  dat_total,
  params,
  indcv,
  iter,
  budget
) {

  ## train
  fit_train <- train_bart(dat_train)

  ## test
  fit_test <- test_bart(
    fit_train, dat_test, dat_total, params$n_df, params$n_tb,
    indcv, iter, budget
  )

  return(list(test = fit_test, train = fit_train))
}

#' @import haven
## Train a bartMachine model on covariates plus treatment; binary outcomes
## are converted to a factor with levels c(1, 0) as bartMachine expects.
train_bart <- function(dat_train) {

  ## format training data
  training_data_elements_bart = create_ml_args_bart(dat_train)

  ## format binary outcome
  outcome_bart = training_data_elements_bart[["Y"]]

  if(length(unique(outcome_bart)) == 2){
    outcome_bart = factor(outcome_bart, levels = c(1,0))
  }

  ## fit
  fit <- bartMachine::bartMachine(
    X=training_data_elements_bart[["X_and_T"]],
    y=outcome_bart,
    num_trees = 30,
    run_in_sample = TRUE,
    serialize = TRUE)

  return(fit)
}

#'@importFrom stats predict runif
## Score a trained bartMachine model (cross-validation path only).
test_bart <- function(
  fit_train, dat_test, dat_total, n_df, n_tb, indcv, iter, budget
) {

  ## format data
  testing_data_elements_bart =
## (continuation of test_bart, split at a chunk boundary)
    create_ml_args_bart(dat_test)
  total_data_elements_bart = create_ml_args_bart(dat_total)

  ## predict
  Y0t_total=predict(fit_train,total_data_elements_bart[["X0t"]])
  Y1t_total=predict(fit_train,total_data_elements_bart[["X1t"]])

  ## treatment-effect estimate; the tiny uniform jitter breaks ties
  tau_total=Y1t_total - Y0t_total + runif(n_df,-1e-6,1e-6)

  ## compute quantities of interest
  tau_test <- tau_total[indcv == iter]
  That <- as.numeric(tau_total > 0)
  ## budget-constrained rule: treat units above the budget quantile of tau
  That_p <- as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1])

  ## output
  cf_output <- list(
    tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))),
    tau_cv = tau_total,
    That_cv = That,
    That_pcv = That_p
  )

  return(cf_output)
}

## ---------------------------------------------------------------------------
## R/itr_run_boost.R
## ---------------------------------------------------------------------------

## boosted tree

## Fit a gradient-boosted-tree ITR and score the test/total data.
run_boost <- function(
  dat_train,
  dat_test,
  dat_total,
  params,
  indcv,
  iter,
  budget
) {

  # split/cross-validation
  cv <- params$cv

  ## train
  fit_train <- train_boost(dat_train)

  ## test
  fit_test <- test_boost(
    fit_train, dat_test, dat_total, params$n_df, params$n_tb,
    indcv, iter, budget, cv
  )

  return(list(test = fit_test, train = fit_train))
}

## Train a gbm model; the loss is chosen by outcome type (gaussian for
## continuous, bernoulli for binary).
train_boost <- function(dat_train) {

  ## format training data
  training_data_elements_boosted = create_ml_args_boosted(dat_train)

  ## train formula
  formula_boosted = training_data_elements_boosted[["formula"]]

  ## outcome
  outcome = training_data_elements_boosted[["data"]][["Y"]]

  if(length(unique(outcome)) > 2){
    ## fit (continuous outcome)
    fit <- gbm::gbm(formula_boosted, data = training_data_elements_boosted[["data"]],
                    distribution = "gaussian",
                    n.trees = 5000,
                    interaction.depth = 4)
  }else {
    ## fit (binary outcome)
    fit <- gbm::gbm(formula_boosted, data = training_data_elements_boosted[["data"]],
                    distribution = "bernoulli",
                    n.trees = 5000,
                    interaction.depth = 4)
  }

  return(fit)

}

#'@importFrom stats predict runif
## Score a trained gbm model on the response scale.
test_boost <- function(
  fit_train, dat_test, dat_total, n_df, n_tb, indcv,
  iter, budget, cv
) {

  ## format data
  testing_data_elements_boosted = create_ml_args_boosted(dat_test)

  total_data_elements_boosted = create_ml_args_boosted(dat_total)

  if(cv == TRUE){
    ## predict
    Y0t_total = predict(
      fit_train,
      as.data.frame(total_data_elements_boosted[["data0t"]]),
      type = "response")
    Y1t_total = predict(
      fit_train,
      as.data.frame(total_data_elements_boosted[["data1t"]]),
      type = "response")

    ## treatment-effect estimate; the tiny uniform jitter breaks ties
    tau_total = Y1t_total - Y0t_total + runif(n_df,-1e-6,1e-6)

    ## compute quantities of interest
    tau_test <- tau_total[indcv == iter]
    That <- as.numeric(tau_total > 0)
    ## budget-constrained rule: treat units above the budget quantile of tau
    That_p <- as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1])

    ## output
    cf_output <- list(
      tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))),
      tau_cv = tau_total,
      That_cv = That,
      That_pcv = That_p
    )
  }

  if(cv == FALSE){
    ## predict
    Y0t_test = predict(
      fit_train,
      as.data.frame(testing_data_elements_boosted[["data0t"]]),
      type = "response")
    Y1t_test = predict(
      fit_train,
      as.data.frame(testing_data_elements_boosted[["data1t"]]),
      type = "response")

    tau_test = Y1t_test - Y0t_test

    ## compute quantities of interest
    That = as.numeric(tau_test > 0)
    That_p = numeric(length(That))
    ## treat the top floor(budget*n)+1 units by predicted effect
    That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1

    ## output
## (continuation of test_boost's cv == FALSE branch, split at a chunk boundary)
    cf_output <- list(
      tau = tau_test,
      tau_cv = tau_test,
      That_cv = That,
      That_pcv = That_p
    )
  }

  return(cf_output)
}

## ---------------------------------------------------------------------------
## R/itr_run_caret.r
## ---------------------------------------------------------------------------

## caret package

## Fit an ITR via caret::train with a user-chosen method; extra arguments in
## `...` are forwarded to caret::train.
run_caret <- function(
  dat_train,
  dat_test,
  dat_total,
  params,
  indcv,
  iter,
  budget,
  train_method,
  ...
) {

  # split/cross-validation
  cv <- params$cv

  # caret train parameters
  train_params <- params$train_params

  ## train
  fit_train <- train_caret(dat_train, train_params, train_method, ...)

  ## test
  fit_test <- test_caret(
    fit_train, dat_test, dat_total, params$n_df, params$n_tb,
    indcv, iter, budget, cv
  )

  return(list(test = fit_test, train = fit_train))
}

## Train via caret; the model formula interacts every covariate with the
## treatment indicator T so treatment effects can vary with covariates.
train_caret <- function(dat_train, train_params, train_method, ...) {

  ## format training data
  training_data_elements_caret = create_ml_args_caret(dat_train)

  ## train formula: all covariates interacted with the treatment indicator
  covariates = training_data_elements_caret[["data"]] %>% dplyr::select(-c(Y, T)) %>% colnames()

  formula = as.formula(paste("Y ~ (", paste0(covariates, collapse = "+"), ")*T"))

  ## add additional parameters from ...
  train_params = c(train_params, list(...))

  ## train
  fit <- do.call(caret::train, c(list(
    formula,
    data = training_data_elements_caret[["data"]],
    method = train_method),
    train_params))

  return(fit)

}

#'@importFrom stats predict runif
## Score a caret-trained model with raw predictions.
test_caret <- function(
  fit_train, dat_test, dat_total, n_df, n_tb, indcv,
  iter, budget, cv
) {

  ## format data
  testing_data_elements_caret = create_ml_args_caret(dat_test)
  total_data_elements_caret = create_ml_args_caret(dat_total)

  if(cv == TRUE){
    ## predict
    Y0t_total = predict(
      fit_train,
      as.data.frame(total_data_elements_caret[["data0t"]]),
      type = "raw")
    Y1t_total = predict(
      fit_train,
      as.data.frame(total_data_elements_caret[["data1t"]]),
      type = "raw")

    ## treatment-effect estimate; the tiny uniform jitter breaks ties
    tau_total = Y1t_total - Y0t_total + runif(n_df,-1e-6,1e-6)

    ## compute quantities of interest
    tau_test <- tau_total[indcv == iter]
    That <- as.numeric(tau_total > 0)
    ## budget-constrained rule: treat units above the budget quantile of tau
    That_p <- as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1])

    ## output
    cf_output <- list(
      tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))),
      tau_cv = tau_total,
      That_cv = That,
      That_pcv = That_p
    )
  }

  if(cv == FALSE){
    ## predict
    Y0t_test = predict(
      fit_train,
      as.data.frame(testing_data_elements_caret[["data0t"]]),
      type = "raw")
    Y1t_test = predict(
      fit_train,
      as.data.frame(testing_data_elements_caret[["data1t"]]),
      type = "raw")

    tau_test = Y1t_test - Y0t_test

    ## compute quantities of interest
    That = as.numeric(tau_test > 0)
    That_p = numeric(length(That))
    ## treat the top floor(budget*n)+1 units by predicted effect
    That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1

    ## output
    cf_output <- list(
      tau = tau_test,
## (continuation of test_caret's cv == FALSE output list, split at a chunk boundary)
      tau_cv = tau_test,
      That_cv = That,
      That_pcv = That_p
    )
  }

  return(cf_output)
}

## ---------------------------------------------------------------------------
## R/itr_run_cart.R
## ---------------------------------------------------------------------------

## cart

## Fit a CART (rpart) ITR and score the test/total data.
run_cart <- function(
  dat_train,
  dat_test,
  dat_total,
  params,
  indcv,
  iter,
  budget
) {

  # split/cross-validation
  cv <- params$cv

  ## train
  fit_train <- train_cart(dat_train)

  ## test
  fit_test <- test_cart(
    fit_train, dat_test, dat_total, params$n_df, params$n_tb,
    indcv, iter, budget, cv
  )


  return(list(test = fit_test, train = fit_train))
}


#' @import rpart
## Train an rpart tree; regression ("anova") for continuous outcomes,
## classification ("class") for binary outcomes.
train_cart <- function(dat_train) {

  ## format training data
  training_data_elements_cart = create_ml_args_cart(dat_train)

  ## train formula
  formula_cart = training_data_elements_cart[["formula"]]

  ## outcome
  outcome = training_data_elements_cart[["data"]][["Y"]]

  if(length(unique(outcome)) > 2){
    ## fit
    fit <- rpart(formula_cart, data = training_data_elements_cart[["data"]], method = "anova")
    # control = rpart.control(minsplit = 2, minbucket = 1,
    # cp = 0.0015)) #relax the contraint for cart
  }else {
    ## fit
    fit <- rpart(formula_cart, data = training_data_elements_cart[["data"]], method = "class")
  }

  return(fit)
}

#'@importFrom stats predict runif
## Score a trained rpart tree. Binary outcomes use class probabilities
## (column 2); note that here the no-cv branch also jitters tau.
test_cart <- function(
  fit_train, dat_test, dat_total, n_df, n_tb, indcv, iter, budget, cv
) {

  ## format data
  testing_data_elements_cart = create_ml_args_cart(dat_test)
  total_data_elements_cart = create_ml_args_cart(dat_total)

  ## outcome
  outcome = testing_data_elements_cart[["data"]][["Y"]]

  if(cv == TRUE){

    if(length(unique(outcome)) > 2){
      ## predict
      Y0t_total = predict(fit_train, newdata=total_data_elements_cart[["data0t"]])
      Y1t_total = predict(fit_train, newdata=total_data_elements_cart[["data1t"]])

    }else {
      ## predict
      Y0t_total = predict(
        fit_train,
        newdata = total_data_elements_cart[["data0t"]],
        type = "prob")[, 2]
      Y1t_total = predict(
        fit_train,
        newdata = total_data_elements_cart[["data1t"]],
        type = "prob")[, 2]
    }

    # predicted tau; the tiny uniform jitter breaks ties
    tau_total=Y1t_total - Y0t_total + runif(n_df,-1e-6,1e-6)

    ## compute quantities of interest
    tau_test <- tau_total[indcv == iter]
    That <- as.numeric(tau_total > 0)
    ## budget-constrained rule: treat units above the budget quantile of tau
    That_p <- as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1])


    ## output
    cf_output <- list(
      tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))),
      tau_cv = tau_total,
      That_cv = That,
      That_pcv = That_p
    )
  }

  if(cv == FALSE){

    if(length(unique(outcome)) > 2){
      ## predict
      Y0t_test = predict(fit_train, newdata=testing_data_elements_cart[["data0t"]])
      Y1t_test = predict(fit_train, newdata=testing_data_elements_cart[["data1t"]])

    }else {
      ## predict
      Y0t_test = predict(
        fit_train,
        newdata = testing_data_elements_cart[["data0t"]],
        type = "prob")[, 2]
      Y1t_test = predict(
        fit_train,
        newdata = testing_data_elements_cart[["data1t"]],
        type = "prob")[, 2]
    }

    # predicted tau (jittered to break ties, unlike the other runners' no-cv path)
    tau_test = Y1t_test - Y0t_test + runif(length(Y0t_test),-1e-6,1e-6)

    ## compute quantities of interest
    That = as.numeric(tau_test > 0)
    That_p = numeric(length(That))
    ## treat the top floor(budget*n)+1 units by predicted effect
    That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1


    ## output
    cf_output <- list(
      tau = tau_test,
      tau_cv = tau_test,
      That_cv = That,
      That_pcv = That_p
    )
  }


  return(cf_output)
}

## ---------------------------------------------------------------------------
## R/itr_run_causal-forest.R
## ---------------------------------------------------------------------------

## causal-forest

## Fit a grf causal forest ITR and score the test/total data.
run_causal_forest <- function(
  dat_train,
  dat_test,
  dat_total,
  params,
  indcv,
  iter,
  budget
) {

  # split/cross-validation
  cv <- params$cv

  ## train
  fit_train <- train_causal_forest(dat_train)

  ## test
  fit_test <- test_causal_forest(
    fit_train, dat_test, dat_total, params$n_df, params$n_tb,
    indcv, iter, budget, cv
  )

  return(list(test = fit_test, train = fit_train))
}

## Train a grf::causal_forest; it estimates treatment effects directly, so
## no separate Y0/Y1 prediction step is needed at test time.
train_causal_forest <- function(dat_train) {

  ## format training data
  training_data_elements_cf <- create_ml_args_causalforest(dat_train)

  ## fit
  fit <- grf::causal_forest(
    training_data_elements_cf[["X_expand"]],
    training_data_elements_cf[["Y"]],
    training_data_elements_cf[["T"]],
    num.trees = 2000
  )
  return(fit)
}

#'@importFrom stats predict runif
## Score a trained causal forest; predictions are tau estimates directly.
test_causal_forest <- function(
  fit_train, dat_test, dat_total, n_df, n_tb, indcv, iter, budget, cv
) {

  ## format data
  testing_data_elements_cf <- create_ml_args_causalforest(dat_test)

  total_data_elements_cf <- create_ml_args_causalforest(dat_total)

  if(cv == TRUE){
    ## predict; the tiny uniform jitter breaks ties
    tau_total <- predict(
      fit_train,
      total_data_elements_cf[["X_expand"]]
    )$predictions + runif(n_df,-1e-6,1e-6)

    ## compute quantities of interest
    tau_test <- tau_total[indcv == iter]
    That <- as.numeric(tau_total > 0)
    ## budget-constrained rule: treat units above the budget quantile of tau
    That_p <- as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1])

    ##
output 70 | cf_output <- list( 71 | tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))), 72 | tau_cv = tau_total, 73 | That_cv = That, 74 | That_pcv = That_p 75 | ) 76 | 77 | } 78 | 79 | if(cv == FALSE){ 80 | ## predict 81 | tau_test <- predict( 82 | fit_train, 83 | testing_data_elements_cf[["X_expand"]])$predictions 84 | 85 | ## compute quantities of interest 86 | That = as.numeric(tau_test > 0) 87 | That_p = numeric(length(That)) 88 | That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1 89 | 90 | ## output 91 | cf_output <- list( 92 | tau = tau_test, 93 | tau_cv = tau_test, 94 | That_cv = That, 95 | That_pcv = That_p 96 | ) 97 | } 98 | 99 | return(cf_output) 100 | } 101 | 102 | -------------------------------------------------------------------------------- /R/itr_run_lasso.R: -------------------------------------------------------------------------------- 1 | 2 | ## lasso 3 | 4 | run_lasso <- function( 5 | dat_train, 6 | dat_test, 7 | dat_total, 8 | params, 9 | indcv, 10 | iter, 11 | budget 12 | ) { 13 | 14 | # split/cross-validation 15 | cv <- params$cv 16 | 17 | ## train 18 | fit_train <- train_lasso(dat_train) 19 | 20 | ## test 21 | fit_test <- test_lasso( 22 | fit_train, dat_test, dat_total, params$n_df, params$n_tb, 23 | indcv, iter, budget, cv 24 | ) 25 | 26 | 27 | return(list(test = fit_test, train = fit_train)) 28 | } 29 | 30 | 31 | 32 | train_lasso <- function(dat_train) { 33 | 34 | ## format training data 35 | training_data_elements_lasso <- create_ml_args_lasso(dat_train) 36 | 37 | ## outcome 38 | outcome = training_data_elements_lasso[["Y"]] 39 | 40 | if(length(unique(outcome)) > 2){ 41 | ## find the best lambda 42 | # cv.lasso <- glmnet::cv.glmnet( 43 | # training_data_elements_lasso[["X_expand"]], 44 | # training_data_elements_lasso[["Y"]], 45 | # alpha = 1, 46 | # family = "gaussian") 47 | 48 | ## fit 49 | fit <- glmnet::glmnet( 50 | training_data_elements_lasso[["X_expand"]], 51 | 
training_data_elements_lasso[["Y"]], 52 | alpha = 1, 53 | family = "gaussian", 54 | # lambda = cv.lasso$lambda.min) 55 | lambda = 0.05) 56 | 57 | }else { 58 | ## find the best lambda 59 | # cv.lasso <- glmnet::cv.glmnet( 60 | # training_data_elements_lasso[["X_expand"]], 61 | # training_data_elements_lasso[["Y"]], 62 | # alpha = 1, 63 | # family = "binomial") 64 | 65 | ## fit 66 | fit <- glmnet::glmnet( 67 | training_data_elements_lasso[["X_expand"]], 68 | training_data_elements_lasso[["Y"]], 69 | alpha = 1, 70 | family = "binomial", 71 | # lambda = cv.lasso$lambda.min) 72 | lambda = 0.05) 73 | } 74 | 75 | return(fit) 76 | } 77 | 78 | #'@importFrom stats predict runif 79 | test_lasso <- function( 80 | fit_train, dat_test, dat_total, n_df, n_tb, indcv, iter, budget, cv 81 | ) { 82 | 83 | ## format data 84 | testing_data_elements_lasso <- create_ml_args_lasso(dat_test) 85 | total_data_elements_lasso <- create_ml_args_lasso(dat_total) 86 | 87 | if(cv == TRUE){ 88 | ## predict 89 | Y0t1_total = predict( 90 | fit_train, 91 | total_data_elements_lasso[["X0t_expand"]], 92 | type = "response") 93 | Y1t1_total = predict( 94 | fit_train, 95 | total_data_elements_lasso[["X1t_expand"]], 96 | type = "response") 97 | 98 | tau_total=Y1t1_total-Y0t1_total + runif(n_df,-1e-6,1e-6) 99 | 100 | ## compute quantities of interest 101 | tau_test <- tau_total[indcv == iter] 102 | That <- as.numeric(tau_total > 0) 103 | That_p <- as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1]) 104 | 105 | 106 | ## output 107 | cf_output <- list( 108 | tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))), 109 | tau_cv = tau_total, 110 | That_cv = That, 111 | That_pcv = That_p 112 | ) 113 | } 114 | 115 | if(cv == FALSE){ 116 | ## predict 117 | Y0t1_test = predict( 118 | fit_train, 119 | testing_data_elements_lasso[["X0t_expand"]], 120 | type = "response") 121 | Y1t1_test = predict( 122 | fit_train, 123 | testing_data_elements_lasso[["X1t_expand"]], 124 
| type = "response") 125 | 126 | tau_test=Y1t1_test-Y0t1_test 127 | 128 | ## compute quantities of interest 129 | That = as.numeric(tau_test > 0) 130 | That_p = numeric(length(That)) 131 | That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1 132 | 133 | ## output 134 | cf_output <- list( 135 | tau = tau_test, 136 | tau_cv = tau_test, 137 | That_cv = That, 138 | That_pcv = That_p 139 | ) 140 | } 141 | 142 | return(cf_output) 143 | } 144 | 145 | 146 | -------------------------------------------------------------------------------- /R/itr_run_random-forest.R: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## random forest 4 | 5 | run_random_forest <- function( 6 | dat_train, 7 | dat_test, 8 | dat_total, 9 | params, 10 | indcv, 11 | iter, 12 | budget 13 | ) { 14 | 15 | # split/cross-validation 16 | cv <- params$cv 17 | 18 | ## train 19 | fit_train <- train_random_forest(dat_train) 20 | 21 | ## test 22 | fit_test <- test_random_forest( 23 | fit_train, dat_test, dat_total, params$n_df, params$n_tb, 24 | indcv, iter, budget, cv 25 | ) 26 | 27 | return(list(test = fit_test, train = fit_train)) 28 | } 29 | 30 | 31 | 32 | train_random_forest <- function(dat_train) { 33 | 34 | ## format training data 35 | training_data_elements_rf = create_ml_args_rf(dat_train) 36 | 37 | ## train formula 38 | formula_rf = training_data_elements_rf[["formula"]] 39 | 40 | ## fit 41 | fit <- randomForest::randomForest(formula_rf, data = training_data_elements_rf[["data"]], ntree = 500) 42 | 43 | return(fit) 44 | } 45 | 46 | #'@importFrom stats predict runif 47 | test_random_forest <- function( 48 | fit_train, dat_test, dat_total, n_df, n_tb, indcv, iter, budget, cv 49 | ) { 50 | 51 | ## format data 52 | testing_data_elements_rf = create_ml_args_rf(dat_test) 53 | total_data_elements_rf = create_ml_args_rf(dat_total) 54 | 55 | ## outcome 56 | outcome = testing_data_elements_rf[["data"]][["Y"]] 57 | 58 | 
if(cv == TRUE){ 59 | 60 | if(length(unique(outcome)) > 2){ 61 | 62 | ## predict 63 | Y0t_total = predict( 64 | fit_train, 65 | newdata = total_data_elements_rf[["data0t"]]) 66 | Y1t_total = predict( 67 | fit_train, 68 | newdata = total_data_elements_rf[["data1t"]]) 69 | 70 | }else { 71 | 72 | ## predict 73 | Y0t_total = predict( 74 | fit_train, 75 | newdata = total_data_elements_rf[["data0t"]], 76 | type = "prob")[, 2] 77 | Y1t_total = predict( 78 | fit_train, 79 | newdata = total_data_elements_rf[["data1t"]], 80 | type = "prob")[, 2] 81 | } 82 | 83 | tau_total=Y1t_total - Y0t_total + runif(n_df,-1e-6,1e-6) 84 | 85 | ## compute quantities of interest 86 | tau_test <- tau_total[indcv == iter] 87 | That <- as.numeric(tau_total > 0) 88 | That_p <- as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1]) 89 | 90 | 91 | ## output 92 | cf_output <- list( 93 | tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))), 94 | tau_cv = tau_total, 95 | That_cv = That, 96 | That_pcv = That_p 97 | ) 98 | } 99 | 100 | if(cv == FALSE){ 101 | 102 | if(length(unique(outcome)) > 2){ 103 | 104 | ## predict 105 | Y0t_test = predict( 106 | fit_train, 107 | newdata = testing_data_elements_rf[["data0t"]]) 108 | Y1t_test = predict( 109 | fit_train, 110 | newdata = testing_data_elements_rf[["data1t"]]) 111 | 112 | }else { 113 | 114 | ## predict 115 | Y0t_test = predict( 116 | fit_train, 117 | newdata = testing_data_elements_rf[["data0t"]], 118 | type = "prob")[, 2] 119 | Y1t_test = predict( 120 | fit_train, 121 | newdata = testing_data_elements_rf[["data1t"]], 122 | type = "prob")[, 2] 123 | } 124 | 125 | tau_test=Y1t_test - Y0t_test 126 | 127 | ## compute quantities of interest 128 | That = as.numeric(tau_test > 0) 129 | That_p = numeric(length(That)) 130 | That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1 131 | 132 | ## output 133 | cf_output <- list( 134 | tau = tau_test, 135 | tau_cv = 
tau_test, 136 | That_cv = That, 137 | That_pcv = That_p 138 | ) 139 | } 140 | 141 | return(cf_output) 142 | } 143 | 144 | 145 | 146 | 147 | 148 | 149 | -------------------------------------------------------------------------------- /R/itr_run_superlearner.R: -------------------------------------------------------------------------------- 1 | ## superLearner 2 | run_superLearner <- function( 3 | dat_train, 4 | dat_test, 5 | dat_total, 6 | params, 7 | indcv, 8 | iter, 9 | budget, 10 | train_method, 11 | SL_library, 12 | ... 13 | ) { 14 | 15 | # split/cross-validation 16 | cv <- params$cv 17 | 18 | ## train 19 | fit_train <- train_superLearner( 20 | dat_train, 21 | train_method, 22 | SL_library) 23 | 24 | ## test 25 | fit_test <- test_superLearner( 26 | fit_train, dat_test, dat_total, params$n_df, params$n_tb, 27 | indcv, iter, budget, cv 28 | ) 29 | 30 | return(list(test = fit_test, train = fit_train)) 31 | } 32 | 33 | 34 | #' @importFrom stats binomial gaussian 35 | #' @importFrom SuperLearner SuperLearner 36 | train_superLearner <- function(dat_train, train_method, SL_library) { 37 | 38 | ## format training data 39 | training_data_elements <- create_ml_args_superLearner(dat_train) 40 | 41 | ## parameters 42 | Y = training_data_elements[["Y"]] 43 | X = training_data_elements[["X_expand"]] 44 | SL_library = SL_library 45 | 46 | if(length(unique(Y)) > 2){ 47 | 48 | fit <- SuperLearner( 49 | Y = Y, 50 | X = X, 51 | family = gaussian(), 52 | SL.library = SL_library) 53 | 54 | }else { 55 | 56 | fit <- SuperLearner( 57 | Y = Y, 58 | X = X, 59 | family = binomial(), 60 | SL.library = SL_library) 61 | } 62 | 63 | return(fit) 64 | } 65 | 66 | #'@importFrom stats predict runif 67 | test_superLearner <- function( 68 | fit_train, dat_test, dat_total, n_df, n_tb, indcv, iter, budget, cv 69 | ) { 70 | 71 | ## format data 72 | testing_data_elements <- create_ml_args_superLearner(dat_test) 73 | total_data_elements <- create_ml_args_superLearner(dat_total) 74 | 75 | if(cv == TRUE){ 
76 | ## predict — CV branch predicts over the TOTAL data (both potential-outcome arms), matching every other algorithm (lasso/svm/rf); previously Y0 was predicted from the test set only, mismatching the n_df-sized noise below 77 | Y0t1_total = predict( 78 | fit_train, 79 | total_data_elements[["X0t_expand"]], 80 | onlySL = TRUE) 81 | Y1t1_total = predict( 82 | fit_train, 83 | total_data_elements[["X1t_expand"]], 84 | onlySL = TRUE) 85 | 86 | tau_total=Y1t1_total$pred-Y0t1_total$pred + runif(n_df,-1e-6,1e-6) 87 | 88 | ## compute quantities of interest 89 | tau_test <- tau_total[indcv == iter] 90 | That <- as.numeric(tau_total > 0) 91 | That_p <- as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1]) 92 | 93 | ## output 94 | cf_output <- list( 95 | tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))), 96 | tau_cv = tau_total, 97 | That_cv = That, 98 | That_pcv = That_p 99 | ) 100 | } 101 | 102 | if(cv == FALSE){ 103 | ## predict 104 | Y0t1_test = predict( 105 | fit_train, 106 | testing_data_elements[["X0t_expand"]], 107 | onlySL = TRUE) 108 | Y1t1_test = predict( 109 | fit_train, 110 | testing_data_elements[["X1t_expand"]], 111 | onlySL = TRUE) 112 | 113 | tau_test = Y1t1_test$pred - Y0t1_test$pred 114 | 115 | ## compute quantities of interest 116 | That = as.numeric(tau_test > 0) 117 | That_p = numeric(length(That)) 118 | That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1 119 | 120 | ## output 121 | cf_output <- list( 122 | tau = tau_test, 123 | tau_cv = tau_test, 124 | That_cv = That, 125 | That_pcv = That_p 126 | ) 127 | } 128 | 129 | return(cf_output) 130 | } 131 | 132 | 133 | -------------------------------------------------------------------------------- /R/itr_run_svm.R: -------------------------------------------------------------------------------- 1 | 2 | ## svm 3 | 4 | run_svm <- function( 5 | dat_train, 6 | dat_test, 7 | dat_total, 8 | params, 9 | indcv, 10 | iter, 11 | budget 12 | ) { 13 | 14 | # split/cross-validation 15 | cv <- params$cv 16 | 17 | ## train 18 | fit_train <- train_svm(dat_train) 19 | 20 | ## test 21 | fit_test <- test_svm( 22 | fit_train,
dat_test, dat_total, params$n_df, params$n_tb, 23 | indcv, iter, budget, cv 24 | ) 25 | 26 | return(list(test = fit_test, train = fit_train)) 27 | } 28 | 29 | 30 | 31 | train_svm <- function(dat_train) { 32 | 33 | ## format training data 34 | training_data_elements_svm <- create_ml_args_svm(dat_train) 35 | formula_svm = training_data_elements_svm[["formula"]] 36 | 37 | ## fit -- "epsilon" was previously misspelled "epsolon", so it was silently swallowed by ... and the default epsilon was used instead 38 | fit <- e1071::svm(formula_svm, 39 | data = training_data_elements_svm[["data"]], 40 | gamma = 1, 41 | cost = 1, 42 | scale = TRUE, 43 | epsilon = 0.1, 44 | type = "eps-regression") 45 | 46 | # fit <- fit(formula_svm, 47 | # data=training_data_elements_svm[["data"]], 48 | # model="svm", 49 | # gamma = 1, 50 | # C = 1, 51 | # scaled = TRUE, 52 | # epsilon = 0.1, 53 | # kpar = list(sigma = 1), 54 | # type = "eps-svr") 55 | 56 | # fit.pred =function(fit,data) {return (predict(fit,data)) } 57 | # svm.imp <- Importance(fit, 58 | # data=training_data_elements_svm[["data"]], 59 | # PRED = fit.pred, 60 | # outindex = 1, 61 | # method = "svm") 62 | 63 | # fit.tune <- tune(svm, 64 | # formula_svm, 65 | # data = training_data_elements_svm[["data"]], 66 | # ranges = list( 67 | # cost = c(0.1,1,10,100,1000), 68 | # gamma = c(0.0001,0.001,0.01,0.1,1) 69 | # )) 70 | # fit <- fit.tune$best.model 71 | 72 | return(fit) 73 | } 74 | 75 | #'@importFrom stats predict runif 76 | test_svm <- function( 77 | fit_train, dat_test, dat_total, n_df, n_tb, indcv, iter, budget, cv 78 | ) { 79 | 80 | ## format data 81 | testing_data_elements_svm = create_ml_args_svm(dat_test) 82 | total_data_elements_svm = create_ml_args_svm(dat_total) 83 | 84 | if(cv == TRUE){ 85 | ## predict 86 | Y0t1_total=predict(fit_train,total_data_elements_svm[["data0t"]]) 87 | Y1t1_total=predict(fit_train,total_data_elements_svm[["data1t"]]) 88 | 89 | tau_total=Y1t1_total-Y0t1_total + runif(n_df,-1e-6,1e-6) 90 | 91 | ## compute quantities of interest 92 | tau_test <- tau_total[indcv == iter] 93 | That <- as.numeric(tau_total > 0) 94 | That_p <-
as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1]) 95 | 96 | 97 | ## output 98 | cf_output <- list( 99 | tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))), 100 | tau_cv = tau_total, 101 | That_cv = That, 102 | That_pcv = That_p 103 | ) 104 | } 105 | 106 | if(cv == FALSE){ 107 | ## predict 108 | Y0t1_test=predict(fit_train,testing_data_elements_svm[["data0t"]]) 109 | Y1t1_test=predict(fit_train,testing_data_elements_svm[["data1t"]]) 110 | 111 | tau_test=Y1t1_test-Y0t1_test 112 | 113 | ## compute quantities of interest 114 | That = as.numeric(tau_test > 0) 115 | That_p = numeric(length(That)) 116 | That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1 117 | 118 | ## output 119 | cf_output <- list( 120 | tau = tau_test, 121 | tau_cv = tau_test, 122 | That_cv = That, 123 | That_pcv = That_p 124 | ) 125 | } 126 | 127 | 128 | return(cf_output) 129 | } 130 | -------------------------------------------------------------------------------- /R/itr_run_user.R: -------------------------------------------------------------------------------- 1 | 2 | ## user-defined functions 3 | 4 | run_user <- function( 5 | dat_train, 6 | dat_test, 7 | dat_total, 8 | params, 9 | indcv, 10 | iter, 11 | budget, 12 | train_method, 13 | ... 
14 | ) { 15 | 16 | # split/cross-validation 17 | cv <- params$cv 18 | 19 | ## train 20 | if(cv == TRUE){ 21 | fit_user <- do.call(train_method, list(dat_train, dat_total)) 22 | } 23 | 24 | if(cv == FALSE){ 25 | fit_user <- do.call(train_method, list(dat_train, dat_test)) 26 | } 27 | 28 | ## test 29 | fit_test <- test_user( 30 | fit_user, dat_test, dat_total, params$n_df, params$n_tb, 31 | indcv, iter, budget, cv 32 | ) 33 | 34 | return(list(test = fit_test, train = fit_user$fit)) 35 | } 36 | 37 | 38 | 39 | #'@importFrom stats predict runif 40 | test_user <- function( 41 | fit_user, dat_test, dat_total, n_df, n_tb, indcv, iter, budget, cv 42 | ) { 43 | 44 | if(cv == TRUE){ 45 | # predict 46 | tau_total= fit_user$score 47 | 48 | ## compute quantities of interest 49 | tau_test <- tau_total[indcv == iter] 50 | That <- fit_user$itr 51 | That_p <- as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1]) 52 | 53 | ## output 54 | cf_output <- list( 55 | tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))), 56 | tau_cv = tau_total, 57 | That_cv = That, 58 | That_pcv = That_p 59 | ) 60 | } 61 | 62 | if(cv == FALSE){ 63 | ## predict 64 | tau_test= fit_user$score 65 | 66 | ## compute quantities of interest 67 | That = fit_user$itr 68 | That_p = numeric(length(That)) 69 | That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1 70 | 71 | ## output 72 | cf_output <- list( 73 | tau = tau_test, 74 | tau_cv = tau_test, 75 | That_cv = That, 76 | That_pcv = That_p 77 | ) 78 | } 79 | 80 | return(cf_output) 81 | } 82 | 83 | # #'@importFrom stats predict runif 84 | # test_user <- function( 85 | # fit_user, dat_test, dat_total, n_df, n_tb, indcv, iter, budget, cv 86 | # ) { 87 | 88 | # if(cv == TRUE){ 89 | # # predict 90 | # tau_total= fit_user$score 91 | 92 | # ## compute quantities of interest 93 | # tau_test <- tau_total[indcv == iter] 94 | # That <- fit_user$itr 95 | # That_p <- 
as.numeric(tau_total >= sort(tau_test, decreasing = TRUE)[floor(budget*length(tau_test))+1]) 96 | 97 | # ## output 98 | # cf_output <- list( 99 | # tau = c(tau_test, rep(NA, length(tau_total) - length(tau_test))), 100 | # tau_cv = tau_total, 101 | # That_cv = That, 102 | # That_pcv = That_p 103 | # ) 104 | # } 105 | 106 | # if(cv == FALSE){ 107 | # ## predict 108 | # tau_test= fit_user$score 109 | 110 | # ## compute quantities of interest 111 | # That = fit_user$itr 112 | # That_p = numeric(length(That)) 113 | # That_p[sort(tau_test,decreasing =TRUE,index.return=TRUE)$ix[1:(floor(budget*length(tau_test))+1)]] = 1 114 | 115 | # ## output 116 | # cf_output <- list( 117 | # tau = tau_test, 118 | # tau_cv = tau_test, 119 | # That_cv = That, 120 | # That_pcv = That_p 121 | # ) 122 | # } 123 | 124 | # return(cf_output) 125 | # } 126 | 127 | 128 | -------------------------------------------------------------------------------- /README.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | output: github_document 3 | --- 4 | 5 | 6 | 7 | ```{r, include = FALSE} 8 | knitr::opts_chunk$set( 9 | collapse = TRUE, 10 | comment = "#>") 11 | ``` 12 | 13 | ## evalITR [![CRAN_Status_Badge](http://www.r-pkg.org/badges/version/evalITR)](https://cran.r-project.org/package=evalITR) ![CRAN downloads](http://cranlogs.r-pkg.org/badges/grand-total/evalITR) 14 | 15 | 16 | 17 | 18 | 19 | ```{r pic1, echo=FALSE} 20 | knitr::include_graphics("man/figures/README-manual.png") 21 | ``` 22 | 23 | R package evalITR provides various statistical methods for estimating 24 | and evaluating Individualized Treatment Rules under randomized data. 
The 25 | provided metrics include (1) population average prescriptive effect 26 | `PAPE`; (2) population average prescriptive effect with a budget 27 | constraint `PAPEp`; (3) population average prescriptive effect 28 | difference with a budget constraint `PAPDp`; (4) area under the 29 | prescriptive effect curve `AUPEC`; and (5) Grouped Average Treatment 30 | Effects `GATEs`. The details of the methods for this design are given in [Imai and Li 31 | (2023)](https://arxiv.org/abs/1905.05389) and 32 | [Imai and Li](https://arxiv.org/abs/2203.14511). 33 | 34 | Documentation and website: https://michaellli.github.io/evalITR/ 35 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ## evalITR [![CRAN_Status_Badge](http://www.r-pkg.org/badges/version/evalITR)](https://cran.r-project.org/package=evalITR) ![CRAN downloads](http://cranlogs.r-pkg.org/badges/grand-total/evalITR) 5 | 6 | 7 | 8 | 9 | ![](man/figures/README-manual.png) 10 | 11 | R package evalITR provides various statistical methods for estimating 12 | and evaluating Individualized Treatment Rules under randomized data. The 13 | provided metrics include (1) population average prescriptive effect 14 | `PAPE`; (2) population average prescriptive effect with a budget 15 | constraint `PAPEp`; (3) population average prescriptive effect 16 | difference with a budget constraint `PAPDp`; (4) area under the 17 | prescriptive effect curve `AUPEC`; and (5) Grouped Average Treatment Effects 18 | `GATEs`. The details of the methods for this design are given in [Imai 19 | and Li (2023)](https://arxiv.org/abs/1905.05389) and [Imai and 20 | Li](https://arxiv.org/abs/2203.14511).
21 | 22 | Documentation and website: 23 | -------------------------------------------------------------------------------- /_pkgdown.yml: -------------------------------------------------------------------------------- 1 | destination: docs 2 | 3 | template: 4 | bootstrap: 5 5 | params: 6 | bootswatch: spacelab 7 | 8 | navbar: 9 | title: "evalITR" 10 | left: 11 | - text: "User's Guide" 12 | menu: 13 | - text: "Installation" 14 | href: articles/install.html 15 | - text: "Sample Splitting" 16 | href: articles/sample_split.html 17 | - text: "Sample Splitting with Caret/SuperLearner" 18 | href: articles/sample_split_caret.html 19 | - text: "Cross-Validation" 20 | href: articles/cv_single_alg.html 21 | - text: "Cross-Validation with Multiple Algorithms" 22 | href: articles/cv_multiple_alg.html 23 | - text: "User-Defined ITR" 24 | href: articles/user_itr.html 25 | - text: "Compare Estimated and User Defined ITR" 26 | href: articles/user_itr_algs.html 27 | - text: "Reference" 28 | href: reference/index.html 29 | - text: "Changelog" 30 | href: news/index.html 31 | 32 | # reference: 33 | # - title: Fitting models 34 | # desc: ~ 35 | # contents: 36 | # - '`estimate_itr`' 37 | # - '`evaluate_itr`' 38 | # - title: Checking results 39 | # desc: ~ 40 | # contents: 41 | # - '`plot.itr`' 42 | # - '`plot_estimate`' 43 | # - '`summary.itr`' 44 | # - '`summary.test_itr`' 45 | # - title: Additional analyses 46 | # desc: ~ 47 | # contents: 48 | # - '`test_itr`' 49 | # - title: Built-in data 50 | # desc: ~ 51 | # contents: 52 | # - '`star`' 53 | # # - title: Misc. 
54 | # # desc: ~ 55 | # # contents: 56 | # # - '`keyATM-package`' 57 | 58 | # toc: 59 | # depth: 2 60 | -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | ## Test environments 2 | * local Windows 10 install, R 4.0.5 3 | * local Windows 10 install, R 3.6.5 4 | * win-builder (devel and release) 5 | * Mac OS X 10.11 (on Rhub), R-release 6 | * Ubuntu 16.04 (on travis-ci), R-release 7 | 8 | ## R CMD check results 9 | There were no ERRORs or WARNINGs. 10 | 11 | 12 | ## Downstream dependencies 13 | -------------------------------------------------------------------------------- /data/star.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/data/star.rda -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/font.css: -------------------------------------------------------------------------------- 1 | @font-face { 2 | font-family: 'Open Sans'; 3 | font-style: italic; 4 | font-weight: 400; 5 | font-stretch: normal; 6 | font-display: swap; 7 | src: url(fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkaVQ.woff) format('woff'); 8 | } 9 | @font-face { 10 | font-family: 'Open Sans'; 11 | font-style: italic; 12 | font-weight: 700; 13 | font-stretch: normal; 14 | font-display: swap; 15 | src: url(fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjaVQ.woff) format('woff'); 16 | } 17 | @font-face { 18 | font-family: 'Open Sans'; 19 | font-style: normal; 20 | font-weight: 400; 21 | font-stretch: normal; 22 | font-display: swap; 23 | src: url(fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0C4k.woff) format('woff'); 24 | } 25 | @font-face { 26 | font-family: 'Open Sans'; 27 | font-style: normal; 28 | font-weight: 700; 29 | font-stretch: 
normal; 30 | font-display: swap; 31 | src: url(fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1y4k.woff) format('woff'); 32 | } 33 | -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/1Ptxg8zYS_SKggPN4iEgvnHyvveLxVs9pbCIPrc.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/1Ptxg8zYS_SKggPN4iEgvnHyvveLxVs9pbCIPrc.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/1Ptxg8zYS_SKggPN4iEgvnHyvveLxVvaorCIPrc.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/1Ptxg8zYS_SKggPN4iEgvnHyvveLxVvaorCIPrc.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/4iCs6KVjbNBYlgo6ew.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/4iCs6KVjbNBYlgo6ew.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/4iCs6KVjbNBYlgoKfw7w.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/4iCs6KVjbNBYlgoKfw7w.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/4iCv6KVjbNBYlgoCxCvTtA.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/4iCv6KVjbNBYlgoCxCvTtA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/4iCv6KVjbNBYlgoCxCvjsGyL.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/4iCv6KVjbNBYlgoCxCvjsGyL.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/6xK1dSBYKcSV-LCoeQqfX1RYOo3qPZ7nsDQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/6xK1dSBYKcSV-LCoeQqfX1RYOo3qPZ7nsDQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/6xK1dSBYKcSV-LCoeQqfX1RYOo3qPa7j.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/6xK1dSBYKcSV-LCoeQqfX1RYOo3qPa7j.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/6xK3dSBYKcSV-LCoeQqfX1RYOo3aPA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/6xK3dSBYKcSV-LCoeQqfX1RYOo3aPA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/6xK3dSBYKcSV-LCoeQqfX1RYOo3qOK7j.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/6xK3dSBYKcSV-LCoeQqfX1RYOo3qOK7j.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3i54rAkw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3i54rAkw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vAkw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vAkw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vwlxdo.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vwlxdo.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zAkw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zAkw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zwlxdo.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zwlxdo.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/CSR54z1Qlv-GDxkbKVQ_dFsvWNRevw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/CSR54z1Qlv-GDxkbKVQ_dFsvWNRevw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/CSR54z1Qlv-GDxkbKVQ_dFsvaNA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/CSR54z1Qlv-GDxkbKVQ_dFsvaNA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/CSR64z1Qlv-GDxkbKVQ_TOQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/CSR64z1Qlv-GDxkbKVQ_TOQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/CSR64z1Qlv-GDxkbKVQ_fOAKSw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/CSR64z1Qlv-GDxkbKVQ_fOAKSw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/JTURjIg1_i6t8kCHKm45_ZpC7g0.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/JTURjIg1_i6t8kCHKm45_ZpC7g0.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/JTURjIg1_i6t8kCHKm45_dJE7g0.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/JTURjIg1_i6t8kCHKm45_dJE7g0.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/JTUSjIg1_i6t8kCHKm45xW0.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/JTUSjIg1_i6t8kCHKm45xW0.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmEU9fBBc-.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmEU9fBBc-.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmEU9vAA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmEU9vAA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmSU5fBBc-.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmSU5fBBc-.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmSU5vAA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmSU5vAA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmWUlfBBc-.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmWUlfBBc-.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmWUlvAA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/KFOlCnqEu92Fr1MmWUlvAA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/KFOmCnqEu92Fr1Me5g.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/KFOmCnqEu92Fr1Me5g.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/KFOmCnqEu92Fr1Mu4mxM.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/KFOmCnqEu92Fr1Mu4mxM.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/QGYpz_kZZAGCONcK2A4bGOj8mNhL.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/QGYpz_kZZAGCONcK2A4bGOj8mNhL.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/S6u8w4BMUTPHjxsAXC-s.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/S6u8w4BMUTPHjxsAXC-s.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/S6u8w4BMUTPHjxswWA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/S6u8w4BMUTPHjxswWA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/S6u9w4BMUTPHh6UVSwiPHw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/S6u9w4BMUTPHh6UVSwiPHw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/S6u9w4BMUTPHh6UVeww.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/S6u9w4BMUTPHh6UVeww.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/S6u9w4BMUTPHh7USSwiPHw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/S6u9w4BMUTPHh7USSwiPHw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/S6u9w4BMUTPHh7USeww.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/S6u9w4BMUTPHh7USeww.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/S6uyw4BMUTPHjx4wWA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/S6uyw4BMUTPHjx4wWA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/S6uyw4BMUTPHvxo.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/S6uyw4BMUTPHvxo.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuFuYMZs.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuFuYMZs.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuI6fMZs.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuI6fMZs.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuLyfMZs.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuLyfMZs.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/XRXV3I6Li01BKof4MQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/XRXV3I6Li01BKof4MQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/XRXW3I6Li01BKofA6sKkZQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/XRXW3I6Li01BKofA6sKkZQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/XRXW3I6Li01BKofAjsOkZQ.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/XRXW3I6Li01BKofAjsOkZQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkWVAexg.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkWVAexg.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkWVAexg.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkWVAexg.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkaVQ.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjWVAexg.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjWVAexg.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1x4gaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1x4gaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1y4k.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1y4k.woff 
-------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0B4gaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0B4gaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0C4k.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0C4k.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0B4gaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0B4gaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0C4k.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0C4k.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/pe03MImSLYBIv1o4X1M8cc9iB_5p.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/pe03MImSLYBIv1o4X1M8cc9iB_5p.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/pe0qMImSLYBIv1o4X1M8cfe5.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/pe0qMImSLYBIv1o4X1M8cfe5.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.1.3/fonts/q5uGsou0JOdh94bfvQlr.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.1.3/fonts/q5uGsou0JOdh94bfvQlr.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/font.css: -------------------------------------------------------------------------------- 1 | @font-face { 2 | font-family: 'Open Sans'; 3 | font-style: italic; 4 | font-weight: 400; 5 | font-stretch: normal; 6 | font-display: swap; 7 | src: url(fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkaVQ.woff) format('woff'); 8 | } 9 | @font-face { 10 | font-family: 'Open Sans'; 11 | font-style: italic; 12 | font-weight: 700; 13 | font-stretch: normal; 14 | font-display: swap; 15 | src: url(fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjaVQ.woff) format('woff'); 16 | } 17 | @font-face { 18 | font-family: 'Open Sans'; 19 | font-style: normal; 20 | font-weight: 400; 21 | font-stretch: normal; 22 | font-display: swap; 23 | src: url(fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0C4k.woff) format('woff'); 24 | } 25 | @font-face { 26 | font-family: 'Open 
Sans'; 27 | font-style: normal; 28 | font-weight: 700; 29 | font-stretch: normal; 30 | font-display: swap; 31 | src: url(fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1y4k.woff) format('woff'); 32 | } 33 | -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/1Ptxg8zYS_SKggPN4iEgvnHyvveLxVs9pbCIPrc.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/1Ptxg8zYS_SKggPN4iEgvnHyvveLxVs9pbCIPrc.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/1Ptxg8zYS_SKggPN4iEgvnHyvveLxVvaorCIPrc.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/1Ptxg8zYS_SKggPN4iEgvnHyvveLxVvaorCIPrc.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/1adeadb2fe618c5ed46221f15e12b9c8.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/1adeadb2fe618c5ed46221f15e12b9c8.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/4iCs6KVjbNBYlgo6ew.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/4iCs6KVjbNBYlgo6ew.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/4iCs6KVjbNBYlgoKfw7w.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/4iCs6KVjbNBYlgoKfw7w.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/4iCv6KVjbNBYlgoCxCvTtA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/4iCv6KVjbNBYlgoCxCvTtA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/4iCv6KVjbNBYlgoCxCvjsGyL.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/4iCv6KVjbNBYlgoCxCvjsGyL.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/6xK1dSBYKcSV-LCoeQqfX1RYOo3qPZ7nsDQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/6xK1dSBYKcSV-LCoeQqfX1RYOo3qPZ7nsDQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/6xK1dSBYKcSV-LCoeQqfX1RYOo3qPa7j.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/6xK1dSBYKcSV-LCoeQqfX1RYOo3qPa7j.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/6xK3dSBYKcSV-LCoeQqfX1RYOo3aPA.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/6xK3dSBYKcSV-LCoeQqfX1RYOo3aPA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/6xK3dSBYKcSV-LCoeQqfX1RYOo3qOK7j.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/6xK3dSBYKcSV-LCoeQqfX1RYOo3qOK7j.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3i54rAkw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3i54rAkw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vAkw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vAkw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vwlxdo.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ig4vwlxdo.woff -------------------------------------------------------------------------------- 
/docs/deps/bootstrap-5.2.2/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zAkw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zAkw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zwlxdo.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/6xKydSBYKcSV-LCoeQqfX1RYOo3ik4zwlxdo.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/CSR54z1Qlv-GDxkbKVQ_dFsvWNRevw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/CSR54z1Qlv-GDxkbKVQ_dFsvWNRevw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/CSR54z1Qlv-GDxkbKVQ_dFsvaNA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/CSR54z1Qlv-GDxkbKVQ_dFsvaNA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/CSR64z1Qlv-GDxkbKVQ_TOQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/CSR64z1Qlv-GDxkbKVQ_TOQ.woff -------------------------------------------------------------------------------- 
/docs/deps/bootstrap-5.2.2/fonts/CSR64z1Qlv-GDxkbKVQ_fOAKSw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/CSR64z1Qlv-GDxkbKVQ_fOAKSw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/HI_diYsKILxRpg3hIP6sJ7fM7PqPMcMnZFqUwX28DBKXhM0.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/HI_diYsKILxRpg3hIP6sJ7fM7PqPMcMnZFqUwX28DBKXhM0.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/HI_diYsKILxRpg3hIP6sJ7fM7PqPMcMnZFqUwX28DMyQhM0.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/HI_diYsKILxRpg3hIP6sJ7fM7PqPMcMnZFqUwX28DMyQhM0.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/HI_jiYsKILxRpg3hIP6sJ7fM7PqlOPHYvDP_W9O7GQTTbI1rSg.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/HI_jiYsKILxRpg3hIP6sJ7fM7PqlOPHYvDP_W9O7GQTTbI1rSg.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/HI_jiYsKILxRpg3hIP6sJ7fM7PqlOPHYvDP_W9O7GQTTsoprSg.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/HI_jiYsKILxRpg3hIP6sJ7fM7PqlOPHYvDP_W9O7GQTTsoprSg.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/JTUHjIg1_i6t8kCHKm4532VJOt5-QNFgpCtZ6Ew9.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/JTUHjIg1_i6t8kCHKm4532VJOt5-QNFgpCtZ6Ew9.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/JTUHjIg1_i6t8kCHKm4532VJOt5-QNFgpCtr6Ew9.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/JTUHjIg1_i6t8kCHKm4532VJOt5-QNFgpCtr6Ew9.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/JTUHjIg1_i6t8kCHKm4532VJOt5-QNFgpCuM70w9.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/JTUHjIg1_i6t8kCHKm4532VJOt5-QNFgpCuM70w9.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmEU9fBBc-.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmEU9fBBc-.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmEU9vAA.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmEU9vAA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmSU5fBBc-.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmSU5fBBc-.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmSU5vAA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmSU5vAA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmWUlfBBc-.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmWUlfBBc-.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmWUlvAA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/KFOlCnqEu92Fr1MmWUlvAA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/KFOmCnqEu92Fr1Me5g.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/KFOmCnqEu92Fr1Me5g.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/KFOmCnqEu92Fr1Mu4mxM.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/KFOmCnqEu92Fr1Mu4mxM.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/QGYpz_kZZAGCONcK2A4bGOj8mNhL.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/QGYpz_kZZAGCONcK2A4bGOj8mNhL.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/S6u8w4BMUTPHjxsAXC-s.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/S6u8w4BMUTPHjxsAXC-s.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/S6u8w4BMUTPHjxswWA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/S6u8w4BMUTPHjxswWA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/S6u9w4BMUTPHh6UVSwiPHw.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/S6u9w4BMUTPHh6UVSwiPHw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/S6u9w4BMUTPHh6UVeww.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/S6u9w4BMUTPHh6UVeww.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/S6u9w4BMUTPHh7USSwiPHw.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/S6u9w4BMUTPHh7USSwiPHw.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/S6u9w4BMUTPHh7USeww.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/S6u9w4BMUTPHh7USeww.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/S6uyw4BMUTPHjx4wWA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/S6uyw4BMUTPHjx4wWA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/S6uyw4BMUTPHvxo.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/S6uyw4BMUTPHvxo.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuFuYMZs.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuFuYMZs.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuI6fMZs.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuI6fMZs.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuLyfMZs.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuLyfMZs.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/XRXI3I6Li01BKofiOc5wtlZ2di8HDFwmRTA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/XRXI3I6Li01BKofiOc5wtlZ2di8HDFwmRTA.woff -------------------------------------------------------------------------------- 
/docs/deps/bootstrap-5.2.2/fonts/XRXI3I6Li01BKofiOc5wtlZ2di8HDGUmRTA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/XRXI3I6Li01BKofiOc5wtlZ2di8HDGUmRTA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/XRXI3I6Li01BKofiOc5wtlZ2di8HDLshRTA.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/XRXI3I6Li01BKofiOc5wtlZ2di8HDLshRTA.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/a98f7a7574819ba83bec6279a2cecd95.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/a98f7a7574819ba83bec6279a2cecd95.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk0ZjaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk0ZjaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkWVAexg.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkWVAexg.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk5hkaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkWVAexg.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkWVAexg.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk8ZkaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk_RkaVQ.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0Rk_RkaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkxhjaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkxhjaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjWVAexg.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjWVAexg.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memQYaGs126MiZpBA-UFUIcVXSCEkx2cmqvXlWq8tWZ0Pw86hd0RkyFjaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1x4gaVQ.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1x4gaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1y4k.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1y4k.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsgH1y4k.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsgH1y4k.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgshZ1y4k.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgshZ1y4k.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0B4gaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0B4gaVQ.woff 
-------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0C4k.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsiH0C4k.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0B4gaVQ.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0B4gaVQ.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0C4k.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjZ0C4k.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjr0C4k.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsjr0C4k.woff -------------------------------------------------------------------------------- /docs/deps/bootstrap-5.2.2/fonts/q5uGsou0JOdh94bfvQlr.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/deps/bootstrap-5.2.2/fonts/q5uGsou0JOdh94bfvQlr.woff -------------------------------------------------------------------------------- /docs/deps/data-deps.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/link.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 8 | 12 | 13 | -------------------------------------------------------------------------------- /docs/pkgdown.yml: -------------------------------------------------------------------------------- 1 | pandoc: 2.17.0.1 2 | pkgdown: 2.0.7 3 | pkgdown_sha: ~ 4 | articles: 5 | cv_multiple_alg: cv_multiple_alg.html 6 | cv_single_alg: cv_single_alg.html 7 | install: install.html 8 | paper_alg1: paper_alg1.html 9 | sample_split: sample_split.html 10 | sample_split_caret: sample_split_caret.html 11 | user_itr: user_itr.html 12 | user_itr_algs: user_itr_algs.html 13 | last_built: 2023-08-21T13:19Z 14 | 15 | -------------------------------------------------------------------------------- /docs/reference/Rplot001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/Rplot001.png -------------------------------------------------------------------------------- /docs/reference/figures/README-caret_model-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-caret_model-1.png -------------------------------------------------------------------------------- 
/docs/reference/figures/README-caret_model-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-caret_model-2.png -------------------------------------------------------------------------------- /docs/reference/figures/README-compare_itr_aupec-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-compare_itr_aupec-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-compare_itr_gate-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-compare_itr_gate-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-compare_itr_model_summary-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-compare_itr_model_summary-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-cv_estimate-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-cv_estimate-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-cv_plot-1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-cv_plot-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-est_extract-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-est_extract-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-manual.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-manual.png -------------------------------------------------------------------------------- /docs/reference/figures/README-multiple_plot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-multiple_plot-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-plot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-plot-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-sl_plot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-sl_plot-1.png -------------------------------------------------------------------------------- 
/docs/reference/figures/README-sp_plot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-sp_plot-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-sv_plot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-sv_plot-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-user_itr_aupec-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-user_itr_aupec-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-user_itr_gate-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-user_itr_gate-1.png -------------------------------------------------------------------------------- /docs/reference/figures/README-workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/README-workflow.png -------------------------------------------------------------------------------- /docs/reference/figures/gate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/gate.png 
-------------------------------------------------------------------------------- /docs/reference/figures/plot_5folds.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/plot_5folds.png -------------------------------------------------------------------------------- /docs/reference/figures/rf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/docs/reference/figures/rf.png -------------------------------------------------------------------------------- /docs/sitemap.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | /404.html 5 | 6 | 7 | /articles/cv_multiple_alg.html 8 | 9 | 10 | /articles/cv_single_alg.html 11 | 12 | 13 | /articles/index.html 14 | 15 | 16 | /articles/install.html 17 | 18 | 19 | /articles/paper_alg1.html 20 | 21 | 22 | /articles/sample_split.html 23 | 24 | 25 | /articles/sample_split_caret.html 26 | 27 | 28 | /articles/user_itr.html 29 | 30 | 31 | /articles/user_itr_algs.html 32 | 33 | 34 | /authors.html 35 | 36 | 37 | /index.html 38 | 39 | 40 | /news/index.html 41 | 42 | 43 | /reference/AUPEC.html 44 | 45 | 46 | /reference/AUPECcv.html 47 | 48 | 49 | /reference/compute_qoi.html 50 | 51 | 52 | /reference/compute_qoi_user.html 53 | 54 | 55 | /reference/consist.test.html 56 | 57 | 58 | /reference/consistcv.test.html 59 | 60 | 61 | /reference/create_ml_args.html 62 | 63 | 64 | /reference/create_ml_args_bart.html 65 | 66 | 67 | /reference/create_ml_args_bartc.html 68 | 69 | 70 | /reference/create_ml_args_causalforest.html 71 | 72 | 73 | /reference/create_ml_args_lasso.html 74 | 75 | 76 | /reference/create_ml_args_superLearner.html 77 | 78 | 79 | /reference/create_ml_args_svm.html 80 | 81 | 82 | 
/reference/create_ml_args_svm_cls.html 83 | 84 | 85 | /reference/create_ml_arguments.html 86 | 87 | 88 | /reference/estimate_itr.html 89 | 90 | 91 | /reference/evaluate_itr.html 92 | 93 | 94 | /reference/fit_itr.html 95 | 96 | 97 | /reference/GATE.html 98 | 99 | 100 | /reference/GATEcv.html 101 | 102 | 103 | /reference/het.test.html 104 | 105 | 106 | /reference/hetcv.test.html 107 | 108 | 109 | /reference/index.html 110 | 111 | 112 | /reference/itr_single_outcome.html 113 | 114 | 115 | /reference/PAPD.html 116 | 117 | 118 | /reference/PAPDcv.html 119 | 120 | 121 | /reference/PAPE.html 122 | 123 | 124 | /reference/PAPEcv.html 125 | 126 | 127 | /reference/PAV.html 128 | 129 | 130 | /reference/PAVcv.html 131 | 132 | 133 | /reference/plot.itr.html 134 | 135 | 136 | /reference/plot_estimate.html 137 | 138 | 139 | /reference/print.summary.itr.html 140 | 141 | 142 | /reference/print.summary.test_itr.html 143 | 144 | 145 | /reference/star.html 146 | 147 | 148 | /reference/summary.itr.html 149 | 150 | 151 | /reference/summary.test_itr.html 152 | 153 | 154 | /reference/test_itr.html 155 | 156 | 157 | -------------------------------------------------------------------------------- /evalITR.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: No 4 | SaveWorkspace: No 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source 21 | PackageRoxygenize: rd,collate,namespace 22 | -------------------------------------------------------------------------------- /inst/WORDLIST: -------------------------------------------------------------------------------- 1 | AUPEC 2 | CATE 3 | GATEcv 4 | GATEs 5 | GBM 6 | 
ITR 7 | ITRs 8 | Lingzhi 9 | Mosteller 10 | PAPD 11 | PAPDp 12 | PAPE 13 | PAPEp 14 | PAV 15 | Parallelization 16 | SuperLearner 17 | alg 18 | algoritms 19 | arXiv 20 | bartCause 21 | bartMachine 22 | cdots 23 | constaint 24 | continous 25 | frac 26 | ggplot 27 | grandient 28 | incoporate 29 | itr 30 | leftarrow 31 | leq 32 | mathbb 33 | mathbf 34 | modulized 35 | multisession 36 | recommendate 37 | seperately 38 | th 39 | widehat 40 | -------------------------------------------------------------------------------- /man/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/.DS_Store -------------------------------------------------------------------------------- /man/AUPEC.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/AUPEC.R 3 | \name{AUPEC} 4 | \alias{AUPEC} 5 | \title{Estimation of the Area Under Prescription Evaluation Curve (AUPEC) in Randomized Experiments} 6 | \usage{ 7 | AUPEC(T, tau, Y, centered = TRUE) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{tau}{A vector of the unit-level continuous score for treatment assignment. We assume those that have tau<0 should 13 | not have treatment. Conditional Average Treatment Effect is one possible measure.} 14 | 15 | \item{Y}{A vector of the outcome variable of interest for each sample.} 16 | 17 | \item{centered}{If \code{TRUE}, the outcome variables would be centered before processing. This minimizes 18 | the variance of the estimator. 
Default is \code{TRUE}.} 19 | } 20 | \value{ 21 | A list that contains the following items: \item{aupec}{The estimated 22 | Area Under Prescription Evaluation Curve} \item{sd}{The estimated standard deviation 23 | of AUPEC.}\item{vec}{A vector of points outlining the AUPEC curve across each possible budget point for the dataset. 24 | Each step increases the budget by 1/n where n is the number of data points. } 25 | } 26 | \description{ 27 | This function estimates AUPEC. The details of the methods for this design are given in Imai and Li (2019). 28 | } 29 | \examples{ 30 | T = c(1,0,1,0,1,0,1,0) 31 | tau = c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) 32 | Y = c(4,5,0,2,4,1,-4,3) 33 | aupeclist <- AUPEC(T,tau,Y) 34 | aupeclist$aupec 35 | aupeclist$sd 36 | aupeclist$vec 37 | } 38 | \references{ 39 | Imai and Li (2019). \dQuote{Experimental Evaluation of Individualized Treatment Rules}, 40 | } 41 | \author{ 42 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 43 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 44 | } 45 | \keyword{evaluation} 46 | -------------------------------------------------------------------------------- /man/AUPECcv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/AUPECcv.R 3 | \name{AUPECcv} 4 | \alias{AUPECcv} 5 | \title{Estimation of the Area Under Prescription Evaluation Curve (AUPEC) in Randomized Experiments Under Cross Validation} 6 | \usage{ 7 | AUPECcv(T, tau, Y, ind, centered = TRUE) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{tau}{A matrix where the \code{i}th column is the unit-level continuous score for treatment assignment generated in the \code{i}th fold.} 13 | 14 | \item{Y}{The outcome variable of interest.} 15 | 16 | \item{ind}{A vector of integers (between 1 and number of folds inclusive) 
indicating which testing set does each sample belong to.} 17 | 18 | \item{centered}{If \code{TRUE}, the outcome variables would be centered before processing. This minimizes 19 | the variance of the estimator. Default is \code{TRUE}.} 20 | } 21 | \value{ 22 | A list that contains the following items: \item{aupec}{The estimated 23 | AUPEC.} \item{sd}{The estimated standard deviation 24 | of AUPEC.} 25 | } 26 | \description{ 27 | This function estimates AUPEC. The details of the methods for this design are given in Imai and Li (2019). 28 | } 29 | \examples{ 30 | T = c(1,0,1,0,1,0,1,0) 31 | tau = matrix(c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,-0.5,-0.3,-0.1,0.1,0.3,0.5,0.7,0.9),nrow = 8, ncol = 2) 32 | Y = c(4,5,0,2,4,1,-4,3) 33 | ind = c(rep(1,4),rep(2,4)) 34 | aupeclist <- AUPECcv(T, tau, Y, ind) 35 | aupeclist$aupec 36 | aupeclist$sd 37 | } 38 | \references{ 39 | Imai and Li (2019). \dQuote{Experimental Evaluation of Individualized Treatment Rules}, 40 | } 41 | \author{ 42 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 43 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 44 | } 45 | \keyword{evaluation} 46 | -------------------------------------------------------------------------------- /man/GATE.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/GATE.R 3 | \name{GATE} 4 | \alias{GATE} 5 | \title{Estimation of the Grouped Average Treatment Effects (GATEs) in Randomized Experiments} 6 | \usage{ 7 | GATE(T, tau, Y, ngates = 5) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{tau}{A vector of the unit-level continuous score. 
Conditional Average Treatment Effect is one possible measure.} 13 | 14 | \item{Y}{A vector of the outcome variable of interest for each sample.} 15 | 16 | \item{ngates}{The number of groups to separate the data into. The groups are determined by \code{tau}. Default is 5.} 17 | } 18 | \value{ 19 | A list that contains the following items: \item{gate}{The estimated 20 | vector of GATEs of length \code{ngates} arranged in order of increasing \code{tau}.} \item{sd}{The estimated vector of standard deviation 21 | of GATEs.} 22 | } 23 | \description{ 24 | This function estimates the Grouped Average Treatment Effects (GATEs) where the groups are determined by a continuous score. The details of the methods for this design are given in Imai and Li (2022). 25 | } 26 | \examples{ 27 | T = c(1,0,1,0,1,0,1,0) 28 | tau = c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) 29 | Y = c(4,5,0,2,4,1,-4,3) 30 | gatelist <- GATE(T,tau,Y,ngates=5) 31 | gatelist$gate 32 | gatelist$sd 33 | } 34 | \references{ 35 | Imai and Li (2022). 
\dQuote{Statistical Inference for Heterogeneous Treatment Effects Discovered by Generic Machine Learning in Randomized Experiments}, 36 | } 37 | \author{ 38 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 39 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 40 | } 41 | \keyword{evaluation} 42 | -------------------------------------------------------------------------------- /man/GATEcv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/GATEcv.R 3 | \name{GATEcv} 4 | \alias{GATEcv} 5 | \title{Estimation of the Grouped Average Treatment Effects (GATEs) in Randomized Experiments Under Cross Validation} 6 | \usage{ 7 | GATEcv(T, tau, Y, ind, ngates = 5) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{tau}{A matrix where the \code{i}th column is the unit-level continuous score for treatment assignment generated in the \code{i}th fold. Conditional Average Treatment Effect is one possible measure.} 13 | 14 | \item{Y}{A vector of the outcome variable of interest for each sample.} 15 | 16 | \item{ind}{A vector of integers (between 1 and number of folds inclusive) indicating which testing set does each sample belong to.} 17 | 18 | \item{ngates}{The number of groups to separate the data into. The groups are determined by \code{tau}. Default is 5.} 19 | } 20 | \value{ 21 | A list that contains the following items: \item{gate}{The estimated 22 | vector of GATEs under cross-validation of length \code{ngates} arranged in order of increasing \code{tau}.} \item{sd}{The estimated vector of standard deviation 23 | of GATEs under cross-validation.} 24 | } 25 | \description{ 26 | This function estimates the Grouped Average Treatment Effects (GATEs) under cross-validation where the groups are determined by a continuous score. 
The details of the methods for this design are given in Imai and Li (2022). 27 | } 28 | \examples{ 29 | T = c(1,0,1,0,1,0,1,0) 30 | tau = matrix(c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,-0.5,-0.3,-0.1,0.1,0.3,0.5,0.7,0.9),nrow = 8, ncol = 2) 31 | Y = c(4,5,0,2,4,1,-4,3) 32 | ind = c(rep(1,4),rep(2,4)) 33 | gatelist <- GATEcv(T, tau, Y, ind, ngates = 2) 34 | gatelist$gate 35 | gatelist$sd 36 | } 37 | \references{ 38 | Imai and Li (2022). \dQuote{Statistical Inference for Heterogeneous Treatment Effects Discovered by Generic Machine Learning in Randomized Experiments}, 39 | } 40 | \author{ 41 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 42 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 43 | } 44 | \keyword{evaluation} 45 | -------------------------------------------------------------------------------- /man/PAPD.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/PAPD.R 3 | \name{PAPD} 4 | \alias{PAPD} 5 | \title{Estimation of the Population Average Prescription Difference in Randomized Experiments} 6 | \usage{ 7 | PAPD(T, Thatfp, Thatgp, Y, budget, centered = TRUE) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{Thatfp}{A vector of the unit-level binary treatment that would have been assigned by the 13 | first individualized treatment rule. Please ensure that the percentage of treatment units of That is lower than the budget constraint.} 14 | 15 | \item{Thatgp}{A vector of the unit-level binary treatment that would have been assigned by the 16 | second individualized treatment rule. 
Please ensure that the percentage of treatment units of That is lower than the budget constraint.} 17 | 18 | \item{Y}{A vector of the outcome variable of interest for each sample.} 19 | 20 | \item{budget}{The maximum percentage of population that can be treated under the 21 | budget constraint. Should be a decimal between 0 and 1.} 22 | 23 | \item{centered}{If \code{TRUE}, the outcome variables would be centered before processing. This minimizes 24 | the variance of the estimator. Default is \code{TRUE}.} 25 | } 26 | \value{ 27 | A list that contains the following items: \item{papd}{The estimated 28 | Population Average Prescription Difference} \item{sd}{The estimated standard deviation 29 | of PAPD.} 30 | } 31 | \description{ 32 | This function estimates the Population Average Prescription Difference with a budget 33 | constraint. The details of the methods for this design are given in Imai and Li (2019). 34 | } 35 | \examples{ 36 | T = c(1,0,1,0,1,0,1,0) 37 | That = c(0,1,1,0,0,1,1,0) 38 | That2 = c(1,0,0,1,1,0,0,1) 39 | Y = c(4,5,0,2,4,1,-4,3) 40 | papdlist <- PAPD(T,That,That2,Y,budget = 0.5) 41 | papdlist$papd 42 | papdlist$sd 43 | } 44 | \references{ 45 | Imai and Li (2019). 
\dQuote{Experimental Evaluation of Individualized Treatment Rules}, 46 | } 47 | \author{ 48 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 49 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 50 | } 51 | \keyword{evaluation} 52 | -------------------------------------------------------------------------------- /man/PAPDcv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/PAPDcv.R 3 | \name{PAPDcv} 4 | \alias{PAPDcv} 5 | \title{Estimation of the Population Average Prescription Difference in Randomized Experiments Under Cross Validation} 6 | \usage{ 7 | PAPDcv(T, Thatfp, Thatgp, Y, ind, budget, centered = TRUE) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{Thatfp}{A matrix where the \code{i}th column is the unit-level binary treatment that would have been assigned by the first 13 | individualized treatment rule generated in the \code{i}th fold. Please ensure 14 | that the percentage of treatment units of That is lower than the budget constraint.} 15 | 16 | \item{Thatgp}{A matrix where the \code{i}th column is the unit-level binary treatment that would have been assigned by the second 17 | individualized treatment rule generated in the \code{i}th fold. Please ensure 18 | that the percentage of treatment units of That is lower than the budget constraint.} 19 | 20 | \item{Y}{The outcome variable of interest.} 21 | 22 | \item{ind}{A vector of integers (between 1 and number of folds inclusive) indicating which testing set does each sample belong to.} 23 | 24 | \item{budget}{The maximum percentage of population that can be treated under the 25 | budget constraint. Should be a decimal between 0 and 1.} 26 | 27 | \item{centered}{If \code{TRUE}, the outcome variables would be centered before processing. 
This minimizes 28 | the variance of the estimator. Default is \code{TRUE}.} 29 | } 30 | \value{ 31 | A list that contains the following items: \item{papd}{The estimated 32 | Population Average Prescription Difference.} \item{sd}{The estimated standard deviation 33 | of PAPD.} 34 | } 35 | \description{ 36 | This function estimates the Population Average Prescription Difference with a budget constraint under cross validation. The details of the methods for this design are given in Imai and Li (2019). 37 | } 38 | \examples{ 39 | T = c(1,0,1,0,1,0,1,0) 40 | That = matrix(c(0,1,1,0,0,1,1,0,1,0,0,1,1,0,0,1), nrow = 8, ncol = 2) 41 | That2 = matrix(c(0,0,1,1,0,0,1,1,1,1,0,0,1,1,0,0), nrow = 8, ncol = 2) 42 | Y = c(4,5,0,2,4,1,-4,3) 43 | ind = c(rep(1,4),rep(2,4)) 44 | papdlist <- PAPDcv(T, That, That2, Y, ind, budget = 0.5) 45 | papdlist$papd 46 | papdlist$sd 47 | } 48 | \references{ 49 | Imai and Li (2019). \dQuote{Experimental Evaluation of Individualized Treatment Rules}, 50 | } 51 | \author{ 52 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 53 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 54 | } 55 | \keyword{evaluation} 56 | -------------------------------------------------------------------------------- /man/PAPE.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/PAPE.R 3 | \name{PAPE} 4 | \alias{PAPE} 5 | \title{Estimation of the Population Average Prescription Effect in Randomized Experiments} 6 | \usage{ 7 | PAPE(T, That, Y, budget = NA, centered = TRUE) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{That}{A vector of the unit-level binary treatment that would have been assigned by the 13 | individualized treatment rule. 
If \code{budget} is specified, please ensure 14 | that the percentage of treatment units of That is lower than the budget constraint.} 15 | 16 | \item{Y}{A vector of the outcome variable of interest for each sample.} 17 | 18 | \item{budget}{The maximum percentage of population that can be treated under the 19 | budget constraint. Should be a decimal between 0 and 1. Default is NA which assumes 20 | no budget constraint.} 21 | 22 | \item{centered}{If \code{TRUE}, the outcome variables would be centered before processing. This minimizes 23 | the variance of the estimator. Default is \code{TRUE}.} 24 | } 25 | \value{ 26 | A list that contains the following items: \item{pape}{The estimated 27 | Population Average Prescription Effect.} \item{sd}{The estimated standard deviation 28 | of PAPE.} 29 | } 30 | \description{ 31 | This function estimates the Population Average Prescription Effect with and without a budget 32 | constraint. The details of the methods for this design are given in Imai and Li (2019). 33 | } 34 | \examples{ 35 | T = c(1,0,1,0,1,0,1,0) 36 | That = c(0,1,1,0,0,1,1,0) 37 | Y = c(4,5,0,2,4,1,-4,3) 38 | papelist <- PAPE(T,That,Y) 39 | papelist$pape 40 | papelist$sd 41 | } 42 | \references{ 43 | Imai and Li (2019). 
\dQuote{Experimental Evaluation of Individualized Treatment Rules}, 44 | } 45 | \author{ 46 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 47 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 48 | } 49 | \keyword{evaluation} 50 | -------------------------------------------------------------------------------- /man/PAPEcv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/PAPEcv.R 3 | \name{PAPEcv} 4 | \alias{PAPEcv} 5 | \title{Estimation of the Population Average Prescription Effect in Randomized Experiments Under Cross Validation} 6 | \usage{ 7 | PAPEcv(T, That, Y, ind, budget = NA, centered = TRUE) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{That}{A matrix where the \code{i}th column is the unit-level binary treatment that would have been assigned by the 13 | individualized treatment rule generated in the \code{i}th fold. If \code{budget} is specified, please ensure 14 | that the percentage of treatment units of That is lower than the budget constraint.} 15 | 16 | \item{Y}{The outcome variable of interest.} 17 | 18 | \item{ind}{A vector of integers (between 1 and number of folds inclusive) indicating which testing set does each sample belong to.} 19 | 20 | \item{budget}{The maximum percentage of population that can be treated under the 21 | budget constraint. Should be a decimal between 0 and 1. Default is NA which assumes 22 | no budget constraint.} 23 | 24 | \item{centered}{If \code{TRUE}, the outcome variables would be centered before processing. This minimizes 25 | the variance of the estimator. 
Default is \code{TRUE}.} 26 | } 27 | \value{ 28 | A list that contains the following items: \item{pape}{The estimated 29 | Population Average Prescription Effect.} \item{sd}{The estimated standard deviation 30 | of PAPE.} 31 | } 32 | \description{ 33 | This function estimates the Population Average Prescription Effect with and without a budget 34 | constraint. The details of the methods for this design are given in Imai and Li (2019). 35 | } 36 | \examples{ 37 | T = c(1,0,1,0,1,0,1,0) 38 | That = matrix(c(0,1,1,0,0,1,1,0,1,0,0,1,1,0,0,1), nrow = 8, ncol = 2) 39 | Y = c(4,5,0,2,4,1,-4,3) 40 | ind = c(rep(1,4),rep(2,4)) 41 | papelist <- PAPEcv(T, That, Y, ind) 42 | papelist$pape 43 | papelist$sd 44 | } 45 | \references{ 46 | Imai and Li (2019). \dQuote{Experimental Evaluation of Individualized Treatment Rules}, 47 | } 48 | \author{ 49 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 50 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 51 | } 52 | \keyword{evaluation} 53 | -------------------------------------------------------------------------------- /man/PAV.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/PAV.R 3 | \name{PAV} 4 | \alias{PAV} 5 | \title{Estimation of the Population Average Value in Randomized Experiments} 6 | \usage{ 7 | PAV(T, That, Y, centered = TRUE) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{That}{A vector of the unit-level binary treatment that would have been assigned by the 13 | individualized treatment rule. 
If \code{budget} is specified, please ensure 14 | that the percentage of treatment units of That is lower than the budget constraint.} 15 | 16 | \item{Y}{A vector of the outcome variable of interest for each sample.} 17 | 18 | \item{centered}{If \code{TRUE}, the outcome variables would be centered before processing. This minimizes 19 | the variance of the estimator. Default is \code{TRUE}.} 20 | } 21 | \value{ 22 | A list that contains the following items: \item{pav}{The estimated 23 | Population Average Value.} \item{sd}{The estimated standard deviation 24 | of PAV.} 25 | } 26 | \description{ 27 | This function estimates the Population Average Value. The details of the methods for this design are given in Imai and Li (2019). 28 | } 29 | \examples{ 30 | T = c(1,0,1,0,1,0,1,0) 31 | That = c(0,1,1,0,0,1,1,0) 32 | Y = c(4,5,0,2,4,1,-4,3) 33 | pavlist <- PAV(T,That,Y) 34 | pavlist$pav 35 | pavlist$sd 36 | } 37 | \references{ 38 | Imai and Li (2019). \dQuote{Experimental Evaluation of Individualized Treatment Rules}, 39 | } 40 | \author{ 41 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 42 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 43 | } 44 | \keyword{evaluation} 45 | -------------------------------------------------------------------------------- /man/PAVcv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/PAVcv.R 3 | \name{PAVcv} 4 | \alias{PAVcv} 5 | \title{Estimation of the Population Average Value in Randomized Experiments Under Cross Validation} 6 | \usage{ 7 | PAVcv(T, That, Y, ind, centered = TRUE) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{That}{A matrix where the \code{i}th column is the unit-level binary treatment that would have been assigned by the 13 | individualized treatment rule generated in the 
\code{i}th fold. If \code{budget} is specified, please ensure 14 | that the percentage of treatment units of That is lower than the budget constraint.} 15 | 16 | \item{Y}{The outcome variable of interest.} 17 | 18 | \item{ind}{A vector of integers (between 1 and number of folds inclusive) indicating which testing set does each sample belong to.} 19 | 20 | \item{centered}{If \code{TRUE}, the outcome variables would be centered before processing. This minimizes 21 | the variance of the estimator. Default is \code{TRUE}.} 22 | } 23 | \value{ 24 | A list that contains the following items: \item{pav}{The estimated 25 | Population Average Value.} \item{sd}{The estimated standard deviation 26 | of PAV.} 27 | } 28 | \description{ 29 | This function estimates the Population Average Value. The details of the methods for this design are given in Imai and Li (2019). 30 | } 31 | \examples{ 32 | T = c(1,0,1,0,1,0,1,0) 33 | That = matrix(c(0,1,1,0,0,1,1,0,1,0,0,1,1,0,0,1), nrow = 8, ncol = 2) 34 | Y = c(4,5,0,2,4,1,-4,3) 35 | ind = c(rep(1,4),rep(2,4)) 36 | pavlist <- PAVcv(T, That, Y, ind) 37 | pavlist$pav 38 | pavlist$sd 39 | } 40 | \references{ 41 | Imai and Li (2019). 
\dQuote{Experimental Evaluation of Individualized Treatment Rules}, 42 | } 43 | \author{ 44 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 45 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 46 | } 47 | \keyword{evaluation} 48 | -------------------------------------------------------------------------------- /man/compute_qoi.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_qoi.R 3 | \name{compute_qoi} 4 | \alias{compute_qoi} 5 | \title{Compute Quantities of Interest (PAPE, PAPEp, PAPDp, AUPEC, GATE, GATEcv)} 6 | \usage{ 7 | compute_qoi(fit_obj, algorithms) 8 | } 9 | \arguments{ 10 | \item{fit_obj}{An output object from \code{fit_itr} function.} 11 | 12 | \item{algorithms}{Machine learning algorithms} 13 | } 14 | \description{ 15 | Compute Quantities of Interest (PAPE, PAPEp, PAPDp, AUPEC, GATE, GATEcv) 16 | } 17 | -------------------------------------------------------------------------------- /man/compute_qoi_user.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_qoi.R 3 | \name{compute_qoi_user} 4 | \alias{compute_qoi_user} 5 | \title{Compute Quantities of Interest (PAPE, PAPEp, PAPDp, AUPEC, GATE, GATEcv) with user defined functions} 6 | \usage{ 7 | compute_qoi_user(user_itr, Tcv, Ycv, data, ngates, budget, ...) 8 | } 9 | \arguments{ 10 | \item{user_itr}{A user-defined function to create an ITR. The function should take the data as input and return an unit-level continuous score for treatment assignment. We assume those that have score less than 0 should not have treatment. 
The default is \code{NULL}, which means the ITR will be estimated from the \code{estimate_itr}.} 11 | 12 | \item{Tcv}{A vector of the unit-level binary treatment.} 13 | 14 | \item{Ycv}{A vector of the unit-level continuous outcome.} 15 | 16 | \item{data}{A data frame containing the variables of interest.} 17 | 18 | \item{ngates}{The number of gates to be used in the GATE function.} 19 | 20 | \item{budget}{The maximum percentage of population that can be treated under the budget constraint.} 21 | 22 | \item{...}{Additional arguments to be passed to the user-defined function.} 23 | } 24 | \description{ 25 | Compute Quantities of Interest (PAPE, PAPEp, PAPDp, AUPEC, GATE, GATEcv) with user defined functions 26 | } 27 | -------------------------------------------------------------------------------- /man/consist.test.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/consist.test.R 3 | \name{consist.test} 4 | \alias{consist.test} 5 | \title{The Consistency Test for Grouped Average Treatment Effects (GATEs) in Randomized Experiments} 6 | \usage{ 7 | consist.test(T, tau, Y, ngates = 5, nsim = 10000) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{tau}{A vector of the unit-level continuous score. Conditional Average Treatment Effect is one possible measure.} 13 | 14 | \item{Y}{A vector of the outcome variable of interest for each sample.} 15 | 16 | \item{ngates}{The number of groups to separate the data into. The groups are determined by \code{tau}. Default is 5.} 17 | 18 | \item{nsim}{Number of Monte Carlo simulations used to simulate the null distributions. 
Default is 10000.} 19 | } 20 | \value{ 21 | A list that contains the following items: \item{stat}{The estimated 22 | statistic for the test of consistency} \item{pval}{The p-value of the null 23 | hypothesis (that the treatment effects are consistent)} 24 | } 25 | \description{ 26 | This function calculates statistics related to the test of treatment effect consistency across groups. 27 | } 28 | \details{ 29 | The details of the methods for this design are given in Imai and Li (2022). 30 | } 31 | \examples{ 32 | T = c(1,0,1,0,1,0,1,0) 33 | tau = c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) 34 | Y = c(4,5,0,2,4,1,-4,3) 35 | consisttestlist <- consist.test(T,tau,Y,ngates=5) 36 | consisttestlist$stat 37 | consisttestlist$pval 38 | } 39 | \references{ 40 | Imai and Li (2022). \dQuote{Statistical Inference for Heterogeneous Treatment Effects Discovered by Generic Machine Learning in Randomized Experiments}, 41 | } 42 | \author{ 43 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 44 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 45 | } 46 | \keyword{evaluation} 47 | -------------------------------------------------------------------------------- /man/consistcv.test.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/consistcv.test.R 3 | \name{consistcv.test} 4 | \alias{consistcv.test} 5 | \title{The Consistency Test for Grouped Average Treatment Effects (GATEs) under Cross Validation in Randomized Experiments} 6 | \usage{ 7 | consistcv.test(T, tau, Y, ind, ngates = 5, nsim = 10000) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{tau}{A vector of the unit-level continuous score. 
Conditional Average Treatment Effect is one possible measure.} 13 | 14 | \item{Y}{A vector of the outcome variable of interest for each sample.} 15 | 16 | \item{ind}{A vector of integers (between 1 and number of folds inclusive) indicating which testing set does each sample belong to.} 17 | 18 | \item{ngates}{The number of groups to separate the data into. The groups are determined by \code{tau}. Default is 5.} 19 | 20 | \item{nsim}{Number of Monte Carlo simulations used to simulate the null distributions. Default is 10000.} 21 | } 22 | \value{ 23 | A list that contains the following items: \item{stat}{The estimated 24 | statistic for the test of consistency under cross-validation.} \item{pval}{The p-value of the null 25 | hypothesis (that the treatment effects are consistent)} 26 | } 27 | \description{ 28 | This function calculates statistics related to the test of treatment effect consistency across groups under cross-validation. 29 | } 30 | \details{ 31 | The details of the methods for this design are given in Imai and Li (2022). 32 | } 33 | \examples{ 34 | T = c(1,0,1,0,1,0,1,0) 35 | tau = matrix(c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,-0.5,-0.3,-0.1,0.1,0.3,0.5,0.7,0.9),nrow = 8, ncol = 2) 36 | Y = c(4,5,0,2,4,1,-4,3) 37 | ind = c(rep(1,4),rep(2,4)) 38 | consisttestlist <- consistcv.test(T,tau,Y,ind,ngates=2) 39 | consisttestlist$stat 40 | consisttestlist$pval 41 | } 42 | \references{ 43 | Imai and Li (2022). 
\dQuote{Statistical Inference for Heterogeneous Treatment Effects Discovered by Generic Machine Learning in Randomized Experiments}, 44 | } 45 | \author{ 46 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 47 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 48 | } 49 | \keyword{evaluation} 50 | -------------------------------------------------------------------------------- /man/create_ml_args.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_helpers.R 3 | \name{create_ml_args} 4 | \alias{create_ml_args} 5 | \title{Create general arguments} 6 | \usage{ 7 | create_ml_args(data) 8 | } 9 | \arguments{ 10 | \item{data}{A dataset} 11 | } 12 | \description{ 13 | Create general arguments 14 | } 15 | -------------------------------------------------------------------------------- /man/create_ml_args_bart.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_helpers.R 3 | \name{create_ml_args_bart} 4 | \alias{create_ml_args_bart} 5 | \title{Create arguments for bartMachine} 6 | \usage{ 7 | create_ml_args_bart(data) 8 | } 9 | \arguments{ 10 | \item{data}{A dataset} 11 | } 12 | \description{ 13 | Create arguments for bartMachine 14 | } 15 | -------------------------------------------------------------------------------- /man/create_ml_args_bartc.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_helpers.R 3 | \name{create_ml_args_bartc} 4 | \alias{create_ml_args_bartc} 5 | \title{Create arguments for bartCause} 6 | \usage{ 7 | create_ml_args_bartc(data) 8 | } 9 | \arguments{ 10 | \item{data}{A dataset} 11 | } 12 | \description{ 13 | Create arguments for bartCause 14 | } 
15 | -------------------------------------------------------------------------------- /man/create_ml_args_causalforest.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_helpers.R 3 | \name{create_ml_args_causalforest} 4 | \alias{create_ml_args_causalforest} 5 | \title{Create arguments for causal forest} 6 | \usage{ 7 | create_ml_args_causalforest(data) 8 | } 9 | \arguments{ 10 | \item{data}{A dataset} 11 | } 12 | \description{ 13 | Create arguments for causal forest 14 | } 15 | -------------------------------------------------------------------------------- /man/create_ml_args_lasso.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_helpers.R 3 | \name{create_ml_args_lasso} 4 | \alias{create_ml_args_lasso} 5 | \title{Create arguments for LASSO} 6 | \usage{ 7 | create_ml_args_lasso(data) 8 | } 9 | \arguments{ 10 | \item{data}{A dataset} 11 | } 12 | \description{ 13 | Create arguments for LASSO 14 | } 15 | -------------------------------------------------------------------------------- /man/create_ml_args_superLearner.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_helpers.R 3 | \name{create_ml_args_superLearner} 4 | \alias{create_ml_args_superLearner} 5 | \title{Create arguments for super learner} 6 | \usage{ 7 | create_ml_args_superLearner(data) 8 | } 9 | \arguments{ 10 | \item{data}{A dataset} 11 | } 12 | \description{ 13 | Create arguments for super learner 14 | } 15 | -------------------------------------------------------------------------------- /man/create_ml_args_svm.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not 
edit by hand 2 | % Please edit documentation in R/itr_helpers.R 3 | \name{create_ml_args_svm} 4 | \alias{create_ml_args_svm} 5 | \title{Create arguments for SVM} 6 | \usage{ 7 | create_ml_args_svm(data) 8 | } 9 | \arguments{ 10 | \item{data}{A dataset} 11 | } 12 | \description{ 13 | Create arguments for SVM 14 | } 15 | -------------------------------------------------------------------------------- /man/create_ml_args_svm_cls.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_helpers.R 3 | \name{create_ml_args_svm_cls} 4 | \alias{create_ml_args_svm_cls} 5 | \title{Create arguments for SVM classification} 6 | \usage{ 7 | create_ml_args_svm_cls(data) 8 | } 9 | \arguments{ 10 | \item{data}{A dataset} 11 | } 12 | \description{ 13 | Create arguments for SVM classification 14 | } 15 | -------------------------------------------------------------------------------- /man/create_ml_arguments.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_helpers.R 3 | \name{create_ml_arguments} 4 | \alias{create_ml_arguments} 5 | \title{Create arguments for ML algorithms} 6 | \usage{ 7 | create_ml_arguments(outcome, treatment, data) 8 | } 9 | \arguments{ 10 | \item{outcome}{Outcome of interests} 11 | 12 | \item{treatment}{Treatment variable} 13 | 14 | \item{data}{A dataset} 15 | } 16 | \description{ 17 | Create arguments for ML algorithms 18 | } 19 | -------------------------------------------------------------------------------- /man/estimate_itr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/main.r 3 | \name{estimate_itr} 4 | \alias{estimate_itr} 5 | \title{Estimate individual treatment rules (ITR)} 6 | \usage{ 7 | 
estimate_itr( 8 | treatment, 9 | form, 10 | data, 11 | algorithms, 12 | budget, 13 | n_folds = 5, 14 | split_ratio = 0, 15 | ngates = 5, 16 | preProcess = NULL, 17 | weights = NULL, 18 | trControl = caret::trainControl(method = "none"), 19 | tuneGrid = NULL, 20 | tuneLength = ifelse(trControl$method == "none", 1, 3), 21 | user_model = NULL, 22 | SL_library = NULL, 23 | ... 24 | ) 25 | } 26 | \arguments{ 27 | \item{treatment}{Treatment variable} 28 | 29 | \item{form}{a formula object that takes the form \code{y ~ T + x1 + x2 + ...}.} 30 | 31 | \item{data}{A data frame that contains the outcome \code{y} and the treatment \code{T}.} 32 | 33 | \item{algorithms}{List of machine learning algorithms to be used.} 34 | 35 | \item{budget}{The maximum percentage of population that can be treated under the budget constraint.} 36 | 37 | \item{n_folds}{Number of cross-validation folds. Default is 5.} 38 | 39 | \item{split_ratio}{Split ratio between train and test set under sample splitting. Default is 0.} 40 | 41 | \item{ngates}{The number of groups to separate the data into. The groups are determined by tau. Default is 5.} 42 | 43 | \item{preProcess}{caret parameter} 44 | 45 | \item{weights}{caret parameter} 46 | 47 | \item{trControl}{caret parameter} 48 | 49 | \item{tuneGrid}{caret parameter} 50 | 51 | \item{tuneLength}{caret parameter} 52 | 53 | \item{user_model}{A user-defined function to create an ITR. 
The function should take the data as input and return a model to estimate the ITR.} 54 | 55 | \item{SL_library}{A list of machine learning algorithms to be used in the super learner.} 56 | 57 | \item{...}{Additional arguments passed to \code{caret::train}} 58 | } 59 | \value{ 60 | An object of \code{itr} class 61 | } 62 | \description{ 63 | Estimate individual treatment rules (ITR) 64 | } 65 | -------------------------------------------------------------------------------- /man/evaluate_itr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/main.r 3 | \name{evaluate_itr} 4 | \alias{evaluate_itr} 5 | \title{Evaluate ITR} 6 | \usage{ 7 | evaluate_itr( 8 | fit = NULL, 9 | user_itr = NULL, 10 | outcome = c(), 11 | treatment = c(), 12 | data = list(), 13 | budget = 1, 14 | ngates = 5, 15 | ... 16 | ) 17 | } 18 | \arguments{ 19 | \item{fit}{Fitted model. Usually an output from \code{estimate_itr}} 20 | 21 | \item{user_itr}{A user-defined function to create an ITR. The function should take the data as input and return an unit-level continuous score for treatment assignment. We assume those that have score less than 0 should not have treatment. The default is \code{NULL}, which means the ITR will be estimated from the \code{estimate_itr}.} 22 | 23 | \item{outcome}{A character string of the outcome variable name.} 24 | 25 | \item{treatment}{A character string of the treatment variable name.} 26 | 27 | \item{data}{A data frame containing the variables specified in \code{outcome}, \code{treatment}, and \code{tau}.} 28 | 29 | \item{budget}{The maximum percentage of population that can be treated under the budget constraint.} 30 | 31 | \item{ngates}{The number of gates to use for the ITR. The default is 5. 32 | A user-defined function to create an ITR. The function should take the data as input and return an ITR. 
The output is a vector of the unit-level binary treatment that would have been assigned by the individualized treatment rule. The default is \code{NULL}, which means the ITR will be estimated from the \code{estimate_itr}. 33 | See \code{?evaluate_itr} for an example.} 34 | 35 | \item{...}{Further arguments passed to the function.} 36 | } 37 | \value{ 38 | An object of \code{itr} class 39 | } 40 | \description{ 41 | Evaluate ITR 42 | } 43 | -------------------------------------------------------------------------------- /man/figures/README-caret_model-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-caret_model-1.png -------------------------------------------------------------------------------- /man/figures/README-caret_model-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-caret_model-2.png -------------------------------------------------------------------------------- /man/figures/README-compare_itr_aupec-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-compare_itr_aupec-1.png -------------------------------------------------------------------------------- /man/figures/README-compare_itr_gate-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-compare_itr_gate-1.png -------------------------------------------------------------------------------- /man/figures/README-compare_itr_model_summary-1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-compare_itr_model_summary-1.png -------------------------------------------------------------------------------- /man/figures/README-cv_estimate-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-cv_estimate-1.png -------------------------------------------------------------------------------- /man/figures/README-cv_plot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-cv_plot-1.png -------------------------------------------------------------------------------- /man/figures/README-est_extract-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-est_extract-1.png -------------------------------------------------------------------------------- /man/figures/README-manual.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-manual.png -------------------------------------------------------------------------------- /man/figures/README-multiple_plot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-multiple_plot-1.png -------------------------------------------------------------------------------- /man/figures/README-plot-1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-plot-1.png -------------------------------------------------------------------------------- /man/figures/README-sl_plot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-sl_plot-1.png -------------------------------------------------------------------------------- /man/figures/README-sp_plot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-sp_plot-1.png -------------------------------------------------------------------------------- /man/figures/README-sv_plot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-sv_plot-1.png -------------------------------------------------------------------------------- /man/figures/README-user_itr_aupec-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-user_itr_aupec-1.png -------------------------------------------------------------------------------- /man/figures/README-user_itr_gate-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-user_itr_gate-1.png -------------------------------------------------------------------------------- /man/figures/README-workflow.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/README-workflow.png -------------------------------------------------------------------------------- /man/figures/gate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/gate.png -------------------------------------------------------------------------------- /man/figures/plot_5folds.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/plot_5folds.png -------------------------------------------------------------------------------- /man/figures/rf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/man/figures/rf.png -------------------------------------------------------------------------------- /man/fit_itr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/main.r 3 | \name{fit_itr} 4 | \alias{fit_itr} 5 | \title{Estimate ITR for Single Outcome} 6 | \usage{ 7 | fit_itr(data, algorithms, params, folds, budget, user_model, ...) 
8 | } 9 | \arguments{ 10 | \item{data}{A dataset.} 11 | 12 | \item{algorithms}{Machine learning algorithms.} 13 | 14 | \item{params}{A list of parameters.} 15 | 16 | \item{folds}{Number of folds.} 17 | 18 | \item{budget}{The maximum percentage of population that can be treated under the budget constraint.} 19 | 20 | \item{user_model}{User's own function to estimated the ITR.} 21 | 22 | \item{...}{Additional arguments passed to \code{caret::train}} 23 | } 24 | \value{ 25 | A list of estimates. 26 | } 27 | \description{ 28 | Estimate ITR for Single Outcome 29 | } 30 | -------------------------------------------------------------------------------- /man/het.test.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/het.test.R 3 | \name{het.test} 4 | \alias{het.test} 5 | \title{The Heterogeneity Test for Grouped Average Treatment Effects (GATEs) in Randomized Experiments} 6 | \usage{ 7 | het.test(T, tau, Y, ngates = 5) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{tau}{A vector of the unit-level continuous score. Conditional Average Treatment Effect is one possible measure.} 13 | 14 | \item{Y}{A vector of the outcome variable of interest for each sample.} 15 | 16 | \item{ngates}{The number of groups to separate the data into. The groups are determined by \code{tau}. Default is 5.} 17 | } 18 | \value{ 19 | A list that contains the following items: \item{stat}{The estimated 20 | statistic for the test of heterogeneity.} \item{pval}{The p-value of the null 21 | hypothesis (that the treatment effects are homogeneous)} 22 | } 23 | \description{ 24 | This function calculates statistics related to the test of heterogeneous treatment effects across groups. 25 | } 26 | \details{ 27 | The details of the methods for this design are given in Imai and Li (2022). 
28 | } 29 | \examples{ 30 | T = c(1,0,1,0,1,0,1,0) 31 | tau = c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) 32 | Y = c(4,5,0,2,4,1,-4,3) 33 | hettestlist <- het.test(T,tau,Y,ngates=5) 34 | hettestlist$stat 35 | hettestlist$pval 36 | } 37 | \references{ 38 | Imai and Li (2022). \dQuote{Statistical Inference for Heterogeneous Treatment Effects Discovered by Generic Machine Learning in Randomized Experiments}, 39 | } 40 | \author{ 41 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 42 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 43 | } 44 | \keyword{evaluation} 45 | -------------------------------------------------------------------------------- /man/hetcv.test.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hetcv.test.R 3 | \name{hetcv.test} 4 | \alias{hetcv.test} 5 | \title{The Heterogeneity Test for Grouped Average Treatment Effects (GATEs) under Cross Validation in Randomized Experiments} 6 | \usage{ 7 | hetcv.test(T, tau, Y, ind, ngates = 5) 8 | } 9 | \arguments{ 10 | \item{T}{A vector of the unit-level binary treatment receipt variable for each sample.} 11 | 12 | \item{tau}{A vector of the unit-level continuous score. Conditional Average Treatment Effect is one possible measure.} 13 | 14 | \item{Y}{A vector of the outcome variable of interest for each sample.} 15 | 16 | \item{ind}{A vector of integers (between 1 and number of folds inclusive) indicating which testing set does each sample belong to.} 17 | 18 | \item{ngates}{The number of groups to separate the data into. The groups are determined by \code{tau}. 
Default is 5.} 19 | } 20 | \value{ 21 | A list that contains the following items: \item{stat}{The estimated 22 | statistic for the test of heterogeneity under cross-validation.} \item{pval}{The p-value of the null 23 | hypothesis (that the treatment effects are homogeneous)} 24 | } 25 | \description{ 26 | This function calculates statistics related to the test of heterogeneous treatment effects across groups under cross-validation. 27 | } 28 | \details{ 29 | The details of the methods for this design are given in Imai and Li (2022). 30 | } 31 | \examples{ 32 | T = c(1,0,1,0,1,0,1,0) 33 | tau = matrix(c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,-0.5,-0.3,-0.1,0.1,0.3,0.5,0.7,0.9),nrow = 8, ncol = 2) 34 | Y = c(4,5,0,2,4,1,-4,3) 35 | ind = c(rep(1,4),rep(2,4)) 36 | hettestlist <- hetcv.test(T,tau,Y,ind,ngates=2) 37 | hettestlist$stat 38 | hettestlist$pval 39 | } 40 | \references{ 41 | Imai and Li (2022). \dQuote{Statistical Inference for Heterogeneous Treatment Effects Discovered by Generic Machine Learning in Randomized Experiments}, 42 | } 43 | \author{ 44 | Michael Lingzhi Li, Technology and Operations Management, Harvard Business School 45 | \email{mili@hbs.edu}, \url{https://www.michaellz.com/}; 46 | } 47 | \keyword{evaluation} 48 | -------------------------------------------------------------------------------- /man/plot.itr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_plot.R 3 | \name{plot.itr} 4 | \alias{plot.itr} 5 | \title{Plot the AUPEC curve} 6 | \usage{ 7 | \method{plot}{itr}(x, ...) 8 | } 9 | \arguments{ 10 | \item{x}{An object of \code{evaluate_itr()} class. This is typically an output of \code{evaluate_itr()} function.} 11 | 12 | \item{...}{Further arguments passed to the function.} 13 | } 14 | \value{ 15 | A plot of ggplot2 object. 
16 | } 17 | \description{ 18 | Plot the AUPEC curve 19 | } 20 | -------------------------------------------------------------------------------- /man/plot_estimate.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_plot.R 3 | \name{plot_estimate} 4 | \alias{plot_estimate} 5 | \title{Plot the GATE estimate} 6 | \usage{ 7 | plot_estimate(x, type, ...) 8 | } 9 | \arguments{ 10 | \item{x}{An table object. This is typically an output of \code{evaluate_itr()} function.} 11 | 12 | \item{type}{The metric that you wish to plot. One of GATE, PAPE, PAPEp, or PAPDp.} 13 | 14 | \item{...}{Further arguments passed to the function.} 15 | } 16 | \value{ 17 | A plot of ggplot2 object. 18 | } 19 | \description{ 20 | Plot the GATE estimate 21 | } 22 | -------------------------------------------------------------------------------- /man/print.summary.itr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_summary.R 3 | \name{print.summary.itr} 4 | \alias{print.summary.itr} 5 | \title{Print} 6 | \usage{ 7 | \method{print}{summary.itr}(x, ...) 8 | } 9 | \arguments{ 10 | \item{x}{An object of \code{summary.itr} class. This is typically an output of \code{summary.itr()} function.} 11 | 12 | \item{...}{Other parameters. Currently not supported.} 13 | } 14 | \description{ 15 | Print 16 | } 17 | -------------------------------------------------------------------------------- /man/print.summary.test_itr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_summary.R 3 | \name{print.summary.test_itr} 4 | \alias{print.summary.test_itr} 5 | \title{Print} 6 | \usage{ 7 | \method{print}{summary.test_itr}(x, ...) 
8 | } 9 | \arguments{ 10 | \item{x}{An object of \code{summary.test_itr} class. This is typically an output of \code{summary.test_itr()} function.} 11 | 12 | \item{...}{Other parameters.} 13 | } 14 | \description{ 15 | Print 16 | } 17 | -------------------------------------------------------------------------------- /man/star.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{star} 5 | \alias{star} 6 | \title{Tennessee’s Student/Teacher Achievement Ratio (STAR) project} 7 | \format{ 8 | A data frame with 1911 observations and 14 variables: 9 | \describe{ 10 | \item{treatment}{A binary treatment indicating whether a student is assigned to small class and regular class without an aid} 11 | \item{g3tlangss}{A continous variable measuring student's writing scores} 12 | \item{g3treadss}{A continous variable measuring student's reading scores} 13 | \item{g3tmathss}{A continous variable measuring student's math scores} 14 | \item{gender}{Students' gender} 15 | \item{race}{Students' race} 16 | \item{birthmonth}{Students' birth month} 17 | \item{birthyear}{Students' birth year} 18 | \item{SCHLURBN}{Urban or rural} 19 | \item{GKENRMNT}{Enrollment size} 20 | \item{GRDRANGE}{Grade range} 21 | \item{GKFRLNCH}{Number of students on free lunch} 22 | \item{GKBUSED}{Number of students on school buses} 23 | \item{GKWHITE}{Percentage of white students} 24 | } 25 | } 26 | \usage{ 27 | star 28 | } 29 | \description{ 30 | A longitudinal study experimentally evaluating the impacts of class size in early education on various outcomes (Mosteller, 1995) 31 | } 32 | \keyword{datasets} 33 | -------------------------------------------------------------------------------- /man/summary.itr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit 
documentation in R/itr_summary.R 3 | \name{summary.itr} 4 | \alias{summary.itr} 5 | \title{Summarize estimate_itr output} 6 | \usage{ 7 | \method{summary}{itr}(object, ...) 8 | } 9 | \arguments{ 10 | \item{object}{An object of \code{estimate_itr} class (typically an output of \code{estimate_itr()} function).} 11 | 12 | \item{...}{Other parameters.} 13 | } 14 | \description{ 15 | Summarize estimate_itr output 16 | } 17 | -------------------------------------------------------------------------------- /man/summary.test_itr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/itr_summary.R 3 | \name{summary.test_itr} 4 | \alias{summary.test_itr} 5 | \title{Summarize test_itr output} 6 | \usage{ 7 | \method{summary}{test_itr}(object, ...) 8 | } 9 | \arguments{ 10 | \item{object}{An object of \code{test_itr} class (typically an output of \code{test_itr()} function).} 11 | 12 | \item{...}{Other parameters.} 13 | } 14 | \description{ 15 | Summarize test_itr output 16 | } 17 | -------------------------------------------------------------------------------- /man/test_itr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/main.r 3 | \name{test_itr} 4 | \alias{test_itr} 5 | \title{Conduct hypothesis tests} 6 | \usage{ 7 | test_itr(fit, nsim = 1000, ...) 8 | } 9 | \arguments{ 10 | \item{fit}{Fitted model. Usually an output from \code{estimate_itr}} 11 | 12 | \item{nsim}{Number of Monte Carlo simulations used to simulate the null distributions. 
Default is 1000.} 13 | 14 | \item{...}{Further arguments passed to the function.} 15 | } 16 | \value{ 17 | An object of \code{test_itr} class 18 | } 19 | \description{ 20 | Conduct hypothesis tests 21 | } 22 | -------------------------------------------------------------------------------- /tests/spelling.R: -------------------------------------------------------------------------------- 1 | if(requireNamespace('spelling', quietly = TRUE)) 2 | spelling::spell_check_test(vignettes = TRUE, error = FALSE, 3 | skip_on_cran = TRUE) 4 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | library(evalITR) 3 | 4 | test_check("evalITR") 5 | -------------------------------------------------------------------------------- /tests/testthat/star.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MichaelLLi/evalITR/50d68e9c985738aa7a1cb982545bba3255390482/tests/testthat/star.rda -------------------------------------------------------------------------------- /tests/testthat/test-high_level.R: -------------------------------------------------------------------------------- 1 | library(evalITR) 2 | library(dplyr) 3 | test_that("Sample Splitting Works", { 4 | load("star.rda") 5 | # specifying the outcome 6 | outcomes <- "g3tlangss" 7 | 8 | # specifying the treatment 9 | treatment <- "treatment" 10 | 11 | # specifying the data (remove other outcomes) 12 | star_data <- star %>% dplyr::select(-c(g3treadss,g3tmathss)) 13 | 14 | # specifying the formula 15 | user_formula <- as.formula( 16 | "g3tlangss ~ treatment + gender + race + birthmonth + 17 | birthyear + SCHLURBN + GRDRANGE + GKENRMNT + GKFRLNCH + 18 | GKBUSED + GKWHITE ") 19 | 20 | 21 | # estimate ITR 22 | fit <- estimate_itr( 23 | treatment = treatment, 24 | form = user_formula, 25 | data = star_data, 26 | 
algorithms = c("lasso"), 27 | budget = 0.2, 28 | split_ratio = 0.7) 29 | expect_no_error(estimate_itr( 30 | treatment = treatment, 31 | form = user_formula, 32 | data = star_data, 33 | algorithms = c("lasso"), 34 | budget = 0.2, 35 | split_ratio = 0.7)) 36 | 37 | 38 | # evaluate ITR 39 | est <- evaluate_itr(fit) 40 | expect_no_error(evaluate_itr(fit)) 41 | }) 42 | 43 | -------------------------------------------------------------------------------- /tests/testthat/test-low_level.R: -------------------------------------------------------------------------------- 1 | library(evalITR) 2 | 3 | test_that("Non Cross-Validated Functions Work", { 4 | T = c(1,0,1,0,1,0,1,0) 5 | That = c(0,1,1,0,0,1,1,0) 6 | That2 = c(1,0,0,1,1,0,0,1) 7 | tau = c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) 8 | Y = c(4,5,0,2,4,1,-4,3) 9 | papelist <- PAPE(T,That,Y) 10 | pavlist <- PAV(T,That,Y) 11 | papdlist <- PAPD(T,That,That2,Y,0.5) 12 | aupeclist <- AUPEC(T,tau,Y) 13 | gatelist <- GATE(T,tau,Y,ngates=2) 14 | expect_type(papelist,"list") 15 | expect_type(pavlist,"list") 16 | expect_type(papdlist,"list") 17 | expect_type(aupeclist,"list") 18 | expect_type(gatelist,"list") 19 | expect_type(papelist$pape,"double") 20 | expect_type(pavlist$pav,"double") 21 | expect_type(papdlist$papd,"double") 22 | expect_type(aupeclist$aupec,"double") 23 | expect_type(gatelist$gate,"double") 24 | expect_type(papelist$sd,"double") 25 | expect_type(pavlist$sd,"double") 26 | expect_type(papdlist$sd,"double") 27 | expect_type(aupeclist$sd,"double") 28 | expect_type(gatelist$sd,"double") 29 | }) 30 | 31 | test_that("Cross-Validated Functions Work", { 32 | T = c(1,0,1,0,1,0,1,0) 33 | That = matrix(c(0,1,1,0,0,1,1,0,1,0,0,1,1,0,0,1), nrow = 8, ncol = 2) 34 | That2 = matrix(c(0,0,1,1,0,0,1,1,1,1,0,0,1,1,0,0), nrow = 8, ncol = 2) 35 | tau = matrix(c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,-0.5,-0.3,-0.1,0.1,0.3,0.5,0.7,0.9),nrow = 8, ncol = 2) 36 | Y = c(4,5,0,2,4,1,-4,3) 37 | ind = c(rep(1,4),rep(2,4)) 38 | papelist <- 
PAPEcv(T,That,Y,ind,budget = 0.5) 39 | pavlist <- PAVcv(T,That,Y,ind) 40 | papdlist <- PAPDcv(T,That,That2,Y,ind,budget = 0.5) 41 | aupeclist <- AUPECcv(T,tau,Y,ind) 42 | gatelist <- GATEcv(T,tau,Y,ind,ngates=2) 43 | expect_type(papelist,"list") 44 | expect_type(pavlist,"list") 45 | expect_type(papdlist,"list") 46 | expect_type(aupeclist,"list") 47 | expect_type(gatelist,"list") 48 | expect_type(papelist$pape,"double") 49 | expect_type(pavlist$pav,"double") 50 | expect_type(papdlist$papd,"double") 51 | expect_type(aupeclist$aupec,"double") 52 | expect_type(gatelist$gate,"double") 53 | expect_type(papelist$sd,"double") 54 | expect_type(pavlist$sd,"double") 55 | expect_type(papdlist$sd,"double") 56 | expect_type(aupeclist$sd,"double") 57 | expect_type(gatelist$sd,"double") 58 | }) 59 | 60 | -------------------------------------------------------------------------------- /vignettes/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | *.R 3 | -------------------------------------------------------------------------------- /vignettes/cv_multiple_alg.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Cross-validation with multiple ML algorithms" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{Cross-validation with multiple ML algorithms} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | ```{r, include = FALSE} 11 | knitr::opts_chunk$set( 12 | collapse = TRUE, 13 | comment = "#>", 14 | fig.path = "../man/figures/README-" 15 | ) 16 | 17 | library(dplyr) 18 | 19 | load("../data/star.rda") 20 | 21 | # specifying the outcome 22 | outcomes <- "g3tlangss" 23 | 24 | # specifying the treatment 25 | treatment <- "treatment" 26 | 27 | # specifying the data (remove other outcomes) 28 | star_data <- star %>% dplyr::select(-c(g3treadss,g3tmathss)) 29 | 30 | # specifying the formula 31 | user_formula <- 
as.formula( 32 | "g3tlangss ~ treatment + gender + race + birthmonth + 33 | birthyear + SCHLURBN + GRDRANGE + GKENRMNT + GKFRLNCH + 34 | GKBUSED + GKWHITE ") 35 | ``` 36 | 37 | We can estimate ITR with various machine learning algorithms and then compare the performance of each model. The package includes all ML algorithms in the `caret` package and 2 additional algorithms ([causal forest](https://grf-labs.github.io/grf/reference/causal_forest.html) and [bartCause](https://CRAN.R-project.org/package=bartCause)). 38 | 39 | The package also allows estimate heterogeneous treatment effects on the individual and group-level. On the individual-level, the summary statistics and the AUPEC plot show whether assigning individualized treatment rules may outperform complete random experiment. On the group-level, we specify the number of groups through `ngates` and estimating heterogeneous treatment effects across groups. 40 | 41 | ```{r multiple, message=TRUE, warning=TRUE} 42 | library(evalITR) 43 | 44 | # specify the trainControl method 45 | fitControl <- caret::trainControl( 46 | method = "repeatedcv", 47 | number = 2, 48 | repeats = 2) 49 | # estimate ITR 50 | set.seed(2021) 51 | fit_cv <- estimate_itr( 52 | treatment = "treatment", 53 | form = user_formula, 54 | data = star_data, 55 | trControl = fitControl, 56 | algorithms = c( 57 | "causal_forest", 58 | # "bartc", 59 | # "rlasso", # from rlearner 60 | # "ulasso", # from rlearner 61 | "lasso" # from caret package 62 | # "rf" # from caret package 63 | ), # from caret package 64 | budget = 0.2, 65 | n_folds = 2) 66 | 67 | # evaluate ITR 68 | est_cv <- evaluate_itr(fit_cv) 69 | 70 | # summarize estimates 71 | summary(est_cv) 72 | ``` 73 | 74 | We plot the estimated Area Under the Prescriptive Effect Curve for the writing score across different ML algorithms. 
75 | 76 | 77 | ```{r multiple_plot, fig.width=8, fig.height=6,fig.align = "center"} 78 | # plot the AUPEC with different ML algorithms 79 | plot(est_cv) 80 | ``` 81 | -------------------------------------------------------------------------------- /vignettes/cv_single_alg.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Cross-validation with single algorithm" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{Cross-validation with single algorithm} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | ```{r, include = FALSE} 11 | knitr::opts_chunk$set( 12 | collapse = TRUE, 13 | comment = "#>", 14 | fig.path = "../man/figures/README-" 15 | ) 16 | 17 | library(dplyr) 18 | 19 | load("../data/star.rda") 20 | 21 | # specifying the outcome 22 | outcomes <- "g3tlangss" 23 | 24 | # specifying the treatment 25 | treatment <- "treatment" 26 | 27 | # specifying the data (remove other outcomes) 28 | star_data <- star %>% dplyr::select(-c(g3treadss,g3tmathss)) 29 | 30 | # specifying the formula 31 | user_formula <- as.formula( 32 | "g3tlangss ~ treatment + gender + race + birthmonth + 33 | birthyear + SCHLURBN + GRDRANGE + GKENRMNT + GKFRLNCH + 34 | GKBUSED + GKWHITE ") 35 | ``` 36 | 37 | 38 | When users choose to estimate and evaluate ITR under cross-validation, the package implements Algorithm 1 from [Imai and Li 39 | (2023)](https://arxiv.org/abs/1905.05389) to estimate and evaluate ITR. For more information about Algorithm 1, please refer to the [this page](../articles/paper_alg1.html). 40 | 41 | 42 | Instead of specifying the `split_ratio` argument, 43 | we choose the number of folds (`n_folds`). We present an example of estimating ITR with 3 folds cross-validation. 44 | In practice, we recommend using 10 folds to get a more stable model performance. 
45 | 46 | 47 | | Input | R package input | Descriptions | 48 | |:-----------------------------------------------------------------|:---------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------| 49 | | Data $\mathbf{Z}=\left\{\mathbf{X}_i, T_i, Y_i\right\}_{i=1}^n$ | `treatment = treatment, form = user_formula, data = star_data` | `treatment` is a character string specifying the treatment variable in the `data`; `form` is a formula specifying the outcome and covariates; and a dataframe `data` | 50 | | Machine learning algorithm $F$ | `algorithms = c("causal_forest")` | a character vector specifying the ML algorithms to be used | 51 | | Evaluation metric $\tau_f$ | PAPE, PAPD, AUPEC, GATE | By default | 52 | | Number of folds $K$ | `n_folds = 3` | `n_folds` is a numeric value indicating the number of folds used for cross-validation | 53 | | … | `budget = 0.2` | `budget` is a numeric value specifying the maximum percentage of population that can be treated under the budget constraint | 54 | 55 | 56 | ```{r cv_estimate, message = FALSE, out.width = '60%'} 57 | library(evalITR) 58 | # estimate ITR 59 | set.seed(2021) 60 | fit_cv <- estimate_itr( 61 | treatment = treatment, 62 | form = user_formula, 63 | data = star_data, 64 | algorithms = c("causal_forest"), 65 | budget = 0.2, 66 | n_folds = 3) 67 | 68 | ``` 69 | 70 | The output will be an object that 71 | includes estimated evaluation metric $\hat{\tau}_F$ and the estimated 72 | variance of $\hat{\tau}_F$ for different metrics (PAPE, PAPD, AUPEC). 
73 | 74 | 75 | ```{r cv_eval, message = FALSE, out.width = '50%'} 76 | # evaluate ITR 77 | est_cv <- evaluate_itr(fit_cv) 78 | 79 | # summarize estimates 80 | summary(est_cv) 81 | ``` 82 | -------------------------------------------------------------------------------- /vignettes/install.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Installation" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{Installation} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | ```{r, include = FALSE, messsage = FALSE, warning = FALSE} 11 | knitr::opts_chunk$set( 12 | collapse = TRUE, 13 | comment = "#>" 14 | ) 15 | 16 | options(rmarkdown.html_vignette.check_title = FALSE) 17 | 18 | ``` 19 | 20 | ### Installation 21 | 22 | You can install the released version of evalITR from [CRAN](https://CRAN.R-project.org) with: 23 | 24 | ```{r messsage = FALSE, warning = FALSE, eval=FALSE} 25 | # Install release version from CRAN (updating evalITR is the same command) 26 | install.packages("evalITR") 27 | ``` 28 | 29 | Or, you can install the development version of evalITR from [GitHub](https://github.com/) with: 30 | 31 | ``` {r messsage = FALSE, warning = FALSE, eval = FALSE} 32 | # install.packages("devtools") 33 | devtools::install_github("MichaelLLi/evalITR", ref = "causal-ml") 34 | ``` 35 | 36 | If you want to use the latest version of the package, you can install the development version of evalITR by specifying the branch name in `devtools::install_github`. 37 | 38 | 39 | ### Parallelization 40 | 41 | (Optional) if you have multiple cores, we recommendate using multisession futures and processing in parallel. This would increase computation efficiency and reduce the time to fit the model. 
42 | 43 | ```{r messsage = FALSE, warning = FALSE, eval=FALSE} 44 | library(furrr) 45 | library(future.apply) 46 | 47 | # check the number of cores 48 | parallel::detectCores() 49 | 50 | # set the number of cores 51 | nworkers <- 4 52 | plan(multisession, workers =nworkers) 53 | ``` 54 | 55 | -------------------------------------------------------------------------------- /vignettes/paper_alg1.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "paper_alg1" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{paper_alg1} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | ```{r, include = FALSE} 11 | knitr::opts_chunk$set( 12 | collapse = TRUE, 13 | comment = "#>" 14 | ) 15 | ``` 16 | 17 | 18 | **Algorithm 1 Estimating and Evaluating an Individualized Treatment Rule 19 | (ITR) using the Same Experimental Data via Cross-Validation** 20 | 21 | | Steps in Algorithm 1 | Function/object | Output | 22 | |:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------|:-----------------------------------| 23 | | 1\. Split data into $K$ random subsets of equal size $\left(\mathbf{Z}_1, \cdots, \mathbf{Z}_k\right)$ | `caret::createFolds()` within `estimate_itr()` | dataframe | 24 | | 2\. k $\leftarrow$ 1 | | | 25 | | 3\. while $k \leq K$ do | for loop in `fit_itr()` within `estimate_itr()` | | 26 | | 4\. $\quad \mathbf{Z}_{-k}=\left[\mathbf{Z}_1, \cdots, \mathbf{Z}_{k-1}, \mathbf{Z}_{k+1}, \cdots, \mathbf{Z}_K\right]$ | `trainset` object | training data | 27 | | 5\. 
$\hat{f}_{-k}=F\left(\mathbf{Z}_{-k}\right)$ | modularized functions for each ML algorithm (e.g., `run_causal_forest()`) within `estimate_itr()` | ITR (binary vector) | 28 | | 6\. $\hat{\tau}_k=\hat{\tau}_{\hat{f}_{-k}}\left(\mathbf{Z}_k\right)$ | `compute_qoi()` function within `evaluate_itr()` | metrics for fold $k$ | 29 | | 7\. $k \leftarrow k+1$ | | | 30 | | 8\. end while | | | 31 | | 9\. return $\hat{\tau}_F=\frac{1}{K} \sum_{k=1}^K \hat{\tau}_k$, $\widehat{\mathbb{V}\left(\hat{\tau}_F\right)}=v\left(\hat{f}_{-1}, \cdots, \hat{f}_{-k}, \mathbf{Z}_1, \cdots, \mathbf{Z}_K\right)$ | `PAPEcv()` `PAPDcv()` and `getAupecOutput()` functions inside `compute_qoi()` function within `evaluate_itr()` | averaging the results across folds | 32 | 33 | 34 | -------------------------------------------------------------------------------- /vignettes/sample_split.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Sample Splitting" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{Sample Splitting} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | ```{r, include = FALSE} 11 | knitr::opts_chunk$set( 12 | collapse = TRUE, 13 | comment = "#>", 14 | fig.path = "../man/figures/README-" 15 | ) 16 | 17 | load("../data/star.rda") 18 | 19 | ``` 20 | 21 | 22 | This is an example using the `star` dataset (for more information about the dataset, please use `?star`). 23 | 24 | We start with a simple example with one outcome variable (writing scores) and one machine learning algorithm (causal forest). Then we move to incorporate multiple outcomes and compare model performances with several machine learning algorithms. 25 | 26 | 27 | To begin, we load the dataset and specify the outcome variable and covariates to be used in the model. 
Next, we utilize a causal forest algorithm to develop an Individualized Treatment Rule (ITR) for estimating the varied impacts of small class sizes on students' writing scores. Since the treatment is often costly for most policy programs, we consider a case with 20% budget constraint (`budget` = 0.2). The model will identify the top 20% of units who benefit from the treatment most and assign them to the treatment. We train the model through sample splitting, with the ratio between the train and test sets determined by the `split_ratio` argument. Specifically, we allocate 70% of the data to train the model, while the remaining 30% is used as testing data (`split_ratio` = 0.7). 28 | 29 | 30 | ```{r sample_split, warning = FALSE, message = FALSE} 31 | library(dplyr) 32 | library(evalITR) 33 | 34 | # specifying the outcome 35 | outcomes <- "g3tlangss" 36 | 37 | # specifying the treatment 38 | treatment <- "treatment" 39 | 40 | # specifying the data (remove other outcomes) 41 | star_data <- star %>% dplyr::select(-c(g3treadss,g3tmathss)) 42 | 43 | # specifying the formula 44 | user_formula <- as.formula( 45 | "g3tlangss ~ treatment + gender + race + birthmonth + 46 | birthyear + SCHLURBN + GRDRANGE + GKENRMNT + GKFRLNCH + 47 | GKBUSED + GKWHITE ") 48 | 49 | 50 | # estimate ITR 51 | fit <- estimate_itr( 52 | treatment = treatment, 53 | form = user_formula, 54 | data = star_data, 55 | algorithms = c("causal_forest"), 56 | budget = 0.2, 57 | split_ratio = 0.7) 58 | 59 | 60 | # evaluate ITR 61 | est <- evaluate_itr(fit) 62 | ``` 63 | 64 | 65 | The `summary()` function displays the following summary statistics: 66 | 67 | 68 | | Statistics | Description | 69 | |:-------- | :------------------------| 70 | | `PAPE` | population average prescriptive effect | 71 | | `PAPEp` | population average prescriptive effect with a budget constraint | 72 | | `PAPDp` | population average prescriptive effect difference with a budget constraint (this quantity will be computed with 
more than 2 machine learning algorithms) | 73 | | `AUPEC` | area under the prescriptive effect curve | 74 | | `GATE` | grouped average treatment effects | 75 | 76 | 77 | For more information about these evaluation metrics, please refer to [Imai and Li (2021)](https://arxiv.org/abs/1905.05389) and [Imai and Li (2022)](https://arxiv.org/abs/2203.14511). 78 | 79 | 80 | ```{r sp_summary} 81 | # summarize estimates 82 | summary(est) 83 | ``` 84 | 85 | 86 | We can extract estimates from the `est` object. The following code shows how to extract the GATE estimates for the writing score with the causal forest algorithm. 87 | We can also plot the estimates using the `plot_estimate()` function and specify the type of estimates to be plotted 88 | (`GATE`, `PAPE`, `PAPEp`, `PAPDp`). 89 | 90 | 91 | ```{r est_extract, warning = FALSE, message = FALSE, fig.width = 6, fig.height = 4} 92 | # plot GATE estimates 93 | library(ggplot2) 94 | gate_est <- summary(est)$GATE 95 | 96 | plot_estimate(gate_est, type = "GATE") + 97 | scale_color_manual(values = c("#0072B2", "#D55E00")) 98 | ``` 99 | 100 | 101 | We plot the estimated Area Under the Prescriptive Effect Curve for the writing score across a range of budget constraints for causal forest. 
102 | 103 | ```{r sp_plot, fig.width = 6, fig.height = 4} 104 | # plot the AUPEC 105 | plot(est) 106 | ``` 107 | -------------------------------------------------------------------------------- /vignettes/user_itr.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "User Defined ITR" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{User Defined ITR} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | ```{r, include = FALSE} 11 | knitr::opts_chunk$set( 12 | collapse = TRUE, 13 | comment = "#>", 14 | fig.path = "../man/figures/README-" 15 | ) 16 | 17 | library(dplyr) 18 | library(evalITR) 19 | 20 | load("../data/star.rda") 21 | 22 | # specifying the outcome 23 | outcomes <- "g3tlangss" 24 | 25 | # specifying the treatment 26 | treatment <- "treatment" 27 | 28 | # specifying the data (remove other outcomes) 29 | star_data <- star %>% dplyr::select(-c(g3treadss,g3tmathss)) 30 | 31 | star_data = star_data %>% mutate( 32 | school_urban = SCHLURBN 33 | ) 34 | 35 | # specifying the formula 36 | user_formula <- as.formula( 37 | "g3tlangss ~ treatment + gender + race + birthmonth + 38 | birthyear + SCHLURBN + GRDRANGE + GKENRMNT + GKFRLNCH + 39 | GKBUSED + GKWHITE ") 40 | 41 | ``` 42 | 43 | Instead of using the ITRs estimated by `evalITR` models, we can define our own ITR and evaluate its performance using the `evaluate_itr` function. 
The function takes the following arguments: 44 | 45 | 46 | | Argument | Description | 47 | |:-------- | :------------------------| 48 | | `user_itr` | a function defined by users that returns a unit-level continuous score for treatment assignment (we assume those that have score less than 0 should not have treatment) | 49 | | `data` | a data frame | 50 | | `treatment` | a character string specifying the treatment variable in the `data` | 51 | | `outcome` | a character string specifying the outcome variable in the `data` | 52 | | `budget` | a numeric value specifying the maximum percentage of population that can be treated under the budget constraint | 53 | 54 | 55 | The function returns an object that contains the estimated GATE, ATE, and AUPEC for the user defined ITR. 56 | 57 | ```{r user_itr_summary, warning = FALSE, message = FALSE} 58 | # user's own ITR 59 | score_function <- function(data){ 60 | 61 | data %>% 62 | mutate(score = case_when( 63 | school_urban == 1 ~ 0.1, # inner-city 64 | school_urban == 2 ~ 0.2, # suburban 65 | school_urban == 3 ~ 0.4, # rural 66 | school_urban == 4 ~ 0.3, # urban 67 | )) %>% 68 | pull(score) -> score 69 | 70 | return(score) 71 | } 72 | 73 | # evaluate ITR 74 | user_itr <- evaluate_itr( 75 | user_itr = score_function, 76 | data = star_data, 77 | treatment = treatment, 78 | outcome = outcomes, 79 | budget = 0.2) 80 | 81 | # summarize estimates 82 | summary(user_itr) 83 | ``` 84 | 85 | 86 | We can extract estimates from the `user_itr` object. The following code shows how to extract the GATE estimates for the writing score with the user defined ITR. 87 | We can also plot the estimates using the `plot_estimate()` function and specify the type of estimates to be plotted 88 | (`GATE`, `PAPE`, `PAPEp`, `PAPDp`). 
89 | 90 | ```{r user_itr_gate, warning = FALSE, message = FALSE, fig.width = 6, fig.height = 4} 91 | # plot GATE estimates 92 | library(ggplot2) 93 | gate_est <- summary(user_itr)$GATE 94 | 95 | plot_estimate(gate_est, type = "GATE") + 96 | scale_color_manual(values = c("#0072B2", "#D55E00")) 97 | ``` 98 | 99 | 100 | We plot the estimated Area Under the Prescriptive Effect Curve (AUPEC) for the writing score across a range of budget constraints for user defined ITR. 101 | 102 | ```{r user_itr_aupec, fig.width = 6, fig.height = 4} 103 | # plot the AUPEC 104 | plot(user_itr) 105 | ``` 106 | --------------------------------------------------------------------------------