├── .gitignore ├── README.md ├── dashboard ├── .Rapp.history ├── .gitignore ├── global.R ├── google_analytics.js ├── metalab.Rproj ├── packrat │ ├── init.R │ ├── packrat.lock │ ├── packrat.opts │ └── src │ │ ├── DT │ │ └── DT_0.1.tar.gz │ │ ├── RColorBrewer │ │ └── RColorBrewer_1.1-2.tar.gz │ │ ├── assertthat │ │ └── assertthat_0.1.tar.gz │ │ ├── bitops │ │ └── bitops_1.0-6.tar.gz │ │ ├── caTools │ │ └── caTools_1.17.1.tar.gz │ │ ├── colorspace │ │ └── colorspace_1.2-6.tar.gz │ │ ├── dichromat │ │ └── dichromat_2.0-0.tar.gz │ │ ├── ggplot2 │ │ └── ggplot2_2.1.0.tar.gz │ │ ├── gtable │ │ └── gtable_0.2.0.tar.gz │ │ ├── htmltools │ │ └── htmltools_0.3.5.tar.gz │ │ ├── htmlwidgets │ │ └── htmlwidgets_0.6.tar.gz │ │ ├── httpuv │ │ └── httpuv_1.3.3.tar.gz │ │ ├── labeling │ │ └── labeling_0.3.tar.gz │ │ ├── magrittr │ │ └── magrittr_1.5.tar.gz │ │ ├── markdown │ │ └── markdown_0.7.7.tar.gz │ │ ├── metafor │ │ └── metafor_1.9-8.tar.gz │ │ ├── munsell │ │ └── munsell_0.4.3.tar.gz │ │ ├── pwr │ │ └── pwr_1.1-3.tar.gz │ │ ├── quadprog │ │ └── quadprog_1.5-5.tar.gz │ │ ├── reshape2 │ │ └── reshape2_1.4.1.tar.gz │ │ ├── scales │ │ └── scales_0.4.0.tar.gz │ │ ├── xtable │ │ └── xtable_1.8-2.tar.gz │ │ └── yaml │ │ └── yaml_2.1.13.tar.gz ├── rmarkdown │ ├── _output.yaml │ ├── background.Rmd │ ├── building.Rmd │ ├── contribute.Rmd │ ├── datasets.Rmd │ ├── inphondb.Rmd │ ├── inworddb.Rmd │ ├── media │ │ ├── image03.jpg │ │ ├── image04.jpg │ │ └── image05.jpg │ ├── overview.Rmd │ ├── resources.Rmd │ └── stats.Rmd ├── server.R ├── ui.R └── www │ ├── custom.css │ └── images │ ├── datasets │ ├── bouba-kiki.png │ ├── catBias.png │ ├── dinosaur.png │ ├── dinosaur.xcf │ ├── dinosaur_original.png │ ├── discrim.png │ ├── distrib.png │ ├── gaze.png │ ├── ids.png │ ├── phonotactics.png │ ├── pointing.png │ ├── segmentation.png │ ├── statistical.png │ ├── word_rec.png │ └── zot.png │ └── people │ ├── alex.jpg │ ├── christina.jpg │ ├── mika.jpg │ ├── mike.jpg │ ├── molly.jpg │ ├── page.jpg │ 
└── sho.jpg ├── data ├── AGL_phonotactics ├── AGL_soundsDL_paired ├── catBias ├── gaze_following ├── infant_directed_speech_preference ├── label_advantage ├── mutual_exclusivity ├── phonemic_discrimination_native ├── phonemic_discrimination_nonnative ├── pointing_concurrent ├── pointing_longitudinal ├── statSeg ├── symbolism ├── word_recognition └── word_segmentation ├── documentation └── ma_instructions.md ├── metadata ├── datasets.json ├── people.yaml ├── reports.yaml ├── spec.yaml └── spec_derived.yaml ├── reports ├── OLD │ ├── dois copy.csv │ ├── dois.csv │ ├── dois_as_csv.csv │ ├── dois_mini.csv │ └── dois_mini.txt ├── _output.yaml ├── design_choice.Rmd ├── design_choices │ └── design_choice.html ├── developmental_curves.Rmd ├── developmental_curves_ML.Rmd ├── doi_publication_metrics.Rmd ├── hierarchicalrandomeffect.Rmd ├── impact_factors.csv ├── manybabies_infancy.Rmd ├── metameta.Rmd ├── networks │ ├── .gitignore │ ├── OLD │ │ ├── networks.Rmd │ │ ├── networks.html │ │ └── networks1.html │ ├── data │ │ ├── ES_data_for_networks.csv │ │ ├── ES_data_for_networks2.csv │ │ ├── all_papers.csv │ │ ├── all_papers_all.csv │ │ ├── dois2.csv │ │ ├── savedrecs (3).txt │ │ ├── savedrecs_stat_seg.txt │ │ ├── statseg_papers.csv │ │ ├── wos_bib1_6.csv │ │ └── wos_data.txt │ ├── doi_publication_metrics.Rmd │ ├── dois2.csv │ ├── get_primary_to_get_dois.R │ ├── networks_clean.Rmd │ ├── networks_clean.html │ ├── networks_ind_paper_level .Rmd │ ├── networks_paper_level.Rmd │ └── networks_paper_level.html ├── p-curves.Rmd ├── pcurve.R ├── reproducibility.Rmd ├── template.Rmd └── within_subject_ES.Rmd ├── scripts ├── cache_datasets.R ├── cache_datasets.sh ├── cache_datasets_crontab.txt ├── compute_es.R ├── deploy.sh ├── render_reports.R └── render_reports.sh └── write-ups ├── BUCLD_2015 └── metalab-BUCLD.pdf ├── ICIS_2016 └── MetaLab_ICIS2016.pdf ├── Videos └── MetaLabVideoDescriptions.pdf ├── childdev_2017 ├── ReplicableDevelopmentalResearch.Rmd ├── 
ReplicableDevelopmentalResearch.docx ├── SupplementaryMaterials.Rmd ├── analyses │ ├── bias.R │ ├── funnel.R │ ├── initial_data.R │ ├── method.R │ ├── p_values.R │ ├── power.R │ └── sample_size.R ├── educationpaper_environment.RData ├── metalab_education.bib └── save_analyses.R └── synthesis_paper ├── NHB ├── SynthesisCoverLetter.pdf ├── figs │ ├── fig3.pdf │ ├── fig3_lab.pdf │ ├── fig3legend.pdf │ ├── fig4.pdf │ ├── fig4_lab.pdf │ ├── fig4legend.pdf │ ├── figs3and4.Rmd │ └── figs3and4.html ├── metalab_synthesis.Rmd ├── metalab_synthesis.bib ├── metalab_synthesis.pdf └── metalab_synthesis.tex ├── PNAS ├── README-steps for converting rmd to latex ├── figs │ ├── fig3.pdf │ ├── fig3_lab.pdf │ ├── fig3legend.pdf │ ├── fig4.pdf │ ├── fig4_lab.pdf │ ├── fig4legend.pdf │ ├── figs3and4.Rmd │ ├── figs3and4.html │ ├── p_curve_plots-1.pdf │ └── unnamed-chunk-2-1.pdf ├── getcitationsfromrmd.R ├── metalab_synthesis.bib ├── pnas-metalab-synthesis.aux ├── pnas-metalab-synthesis.bbl ├── pnas-metalab-synthesis.blg ├── pnas-metalab-synthesis.log ├── pnas-metalab-synthesis.out ├── pnas-metalab-synthesis.synctex.gz ├── pnas-metalab-synthesis.tex ├── pnas-metalab-synthesis.xwm ├── pnas-metalab-synthesis_including_atypical.pdf ├── pnas-new.bst ├── pnas-new.cls ├── pnasresearcharticle.sty ├── rawmetalabtext.txt ├── submitted │ └── pnas-metalab-synthesis.pdf └── widetext.sty ├── SI ├── synthesis_SI.Rmd ├── synthesis_SI.html ├── synthesis_SI.pdf ├── synthesis_SI_html.Rmd └── synthesis_SI_html.html ├── misc ├── misc │ ├── checking_ML_MA_outliers.Rmd │ ├── misc_pcurve │ │ ├── effect_size_estimate.R │ │ ├── p2f2.R │ │ ├── p2power.R │ │ ├── pcurve copy.R │ │ ├── pcurve.R │ │ ├── pcurve_app copy.R │ │ └── pcurve_app.R │ ├── model_fits_synthesis_paper.Rmd │ ├── model_fits_synthesis_paper.html │ ├── out │ │ └── libs │ │ │ ├── bootstrap-3.3.5 │ │ │ ├── css │ │ │ │ ├── bootstrap-theme.css │ │ │ │ ├── bootstrap-theme.css.map │ │ │ │ ├── bootstrap-theme.min.css │ │ │ │ ├── bootstrap.css │ │ │ │ ├── 
bootstrap.css.map │ │ │ │ ├── bootstrap.min.css │ │ │ │ ├── cerulean.min.css │ │ │ │ ├── cosmo.min.css │ │ │ │ ├── flatly.min.css │ │ │ │ ├── fonts │ │ │ │ │ ├── Lato.ttf │ │ │ │ │ ├── LatoBold.ttf │ │ │ │ │ ├── LatoItalic.ttf │ │ │ │ │ ├── NewsCycle.ttf │ │ │ │ │ ├── NewsCycleBold.ttf │ │ │ │ │ ├── OpenSans.ttf │ │ │ │ │ ├── OpenSansBold.ttf │ │ │ │ │ ├── OpenSansBoldItalic.ttf │ │ │ │ │ ├── OpenSansItalic.ttf │ │ │ │ │ ├── OpenSansLight.ttf │ │ │ │ │ ├── OpenSansLightItalic.ttf │ │ │ │ │ ├── Raleway.ttf │ │ │ │ │ ├── RalewayBold.ttf │ │ │ │ │ ├── Roboto.ttf │ │ │ │ │ ├── RobotoBold.ttf │ │ │ │ │ ├── RobotoLight.ttf │ │ │ │ │ ├── RobotoMedium.ttf │ │ │ │ │ ├── SourceSansPro.ttf │ │ │ │ │ ├── SourceSansProBold.ttf │ │ │ │ │ ├── SourceSansProItalic.ttf │ │ │ │ │ ├── SourceSansProLight.ttf │ │ │ │ │ └── Ubuntu.ttf │ │ │ │ ├── journal.min.css │ │ │ │ ├── lumen.min.css │ │ │ │ ├── paper.min.css │ │ │ │ ├── readable.min.css │ │ │ │ ├── sandstone.min.css │ │ │ │ ├── simplex.min.css │ │ │ │ ├── spacelab.min.css │ │ │ │ ├── united.min.css │ │ │ │ └── yeti.min.css │ │ │ ├── fonts │ │ │ │ ├── glyphicons-halflings-regular.eot │ │ │ │ ├── glyphicons-halflings-regular.svg │ │ │ │ ├── glyphicons-halflings-regular.ttf │ │ │ │ ├── glyphicons-halflings-regular.woff │ │ │ │ └── glyphicons-halflings-regular.woff2 │ │ │ ├── js │ │ │ │ ├── bootstrap.js │ │ │ │ ├── bootstrap.min.js │ │ │ │ └── npm.js │ │ │ └── shim │ │ │ │ ├── html5shiv.min.js │ │ │ │ └── respond.min.js │ │ │ ├── jquery-1.11.3 │ │ │ └── jquery.min.js │ │ │ ├── jqueryui-1.11.4 │ │ │ ├── README │ │ │ ├── images │ │ │ │ ├── ui-icons_444444_256x240.png │ │ │ │ ├── ui-icons_555555_256x240.png │ │ │ │ ├── ui-icons_777620_256x240.png │ │ │ │ ├── ui-icons_777777_256x240.png │ │ │ │ ├── ui-icons_cc0000_256x240.png │ │ │ │ └── ui-icons_ffffff_256x240.png │ │ │ ├── index.html │ │ │ ├── jquery-ui.css │ │ │ ├── jquery-ui.js │ │ │ ├── jquery-ui.min.css │ │ │ ├── jquery-ui.min.js │ │ │ ├── jquery-ui.structure.css │ │ │ ├── 
jquery-ui.structure.min.css │ │ │ ├── jquery-ui.theme.css │ │ │ └── jquery-ui.theme.min.css │ │ │ ├── navigation-1.0 │ │ │ ├── codefolding.js │ │ │ └── tabsets.js │ │ │ ├── navigation-1.1 │ │ │ ├── FileSaver.min.js │ │ │ ├── codefolding.js │ │ │ ├── sourceembed.js │ │ │ └── tabsets.js │ │ │ └── tocify-1.9.1 │ │ │ ├── jquery.tocify.css │ │ │ └── jquery.tocify.js │ ├── replicability_analyses.Rmd │ ├── replicability_analyses.html │ └── resampling │ │ ├── resampling.Rmd │ │ ├── resampling.html │ │ └── resampling_paper_age_sampling.html └── subselect_age │ ├── Subselect_age.pdf │ ├── all_data_age_subset.csv │ ├── fig3.pdf │ ├── fig4.pdf │ ├── metalab_synthesis_age_subset.Rmd │ ├── metalab_synthesis_age_subset.pdf │ ├── metalab_synthesis_age_subset.tex │ └── subselect_age.Rmd ├── open_mind ├── Synthesis Cover Letter.pdf ├── figs │ ├── fig3.pdf │ ├── fig3_lab.ai │ ├── fig3_lab.pdf │ ├── fig3legend.pdf │ ├── fig4.pdf │ ├── fig4_lab.ai │ ├── fig4_lab.pdf │ ├── fig4legend.pdf │ ├── figs3and4.Rmd │ └── figs3and4.html ├── metalab_synthesis.Rmd ├── metalab_synthesis.bib ├── metalab_synthesis.pdf ├── metalab_synthesis.tex ├── metalab_synthesis_latex_formatted.log ├── metalab_synthesis_latex_formatted.pdf ├── metalab_synthesis_latex_formatted.synctex.gz ├── metalab_synthesis_latex_formatted.tex ├── open_mind.Rproj └── tmp-pdfcrop-95274.tex ├── paper_data ├── synthesis_paper_all_data.feather └── synthesis_paper_datasets.feather ├── paper_scripts ├── cache_paper_data.R ├── pcurve.R └── synthesis_helpers.R └── psychscience ├── SynthesisCoverLetter.pdf ├── figs ├── fig3.pdf ├── fig3_lab.pdf ├── fig3legend.pdf ├── fig4.pdf ├── fig4_lab.pdf ├── fig4legend.pdf ├── figs3and4.Rmd └── figs3and4.html ├── metalab_synthesis.Rmd ├── metalab_synthesis.bib ├── metalab_synthesis.docx ├── metalab_synthesis.pdf └── metalab_synthesis.tex /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .Rhistory 3 | shinyapps/ 4 | *.pem 5 | log/ 6 | 
.Rproj.user 7 | rsconnect/ 8 | *_files/ 9 | *_cache/ 10 | .RData 11 | dashboard/figure/ 12 | dashboard/cache/ 13 | dashboard/rmarkdown/*.html 14 | reports/out/ 15 | reports/*.html 16 | reports/libs 17 | reports/*_cache/ 18 | reports/*_files 19 | write-ups/metalab_update/metalab_update_files/ 20 | write-ups/metalab_update/*.tex 21 | *.Rout 22 | metalab.Rproj 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # THIS REPO IS OUT OF DATE! 2 | 3 | # GO TO: [metalab2](https://github.com/langcog/metalab2) 4 | -------------------------------------------------------------------------------- /dashboard/.Rapp.history: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/.Rapp.history -------------------------------------------------------------------------------- /dashboard/.gitignore: -------------------------------------------------------------------------------- 1 | packrat/lib*/ 2 | -------------------------------------------------------------------------------- /dashboard/global.R: -------------------------------------------------------------------------------- 1 | library(shiny) 2 | library(shinydashboard) 3 | library(dplyr) 4 | library(tidyr) 5 | library(ggplot2) 6 | library(purrr) 7 | library(langcog) 8 | library(feather) 9 | 10 | 11 | #font <- "Ubuntu" 12 | #theme_set(#theme_mikabr(base_family = font) + 13 | # theme(legend.position = "top", 14 | # legend.key = element_blank(), 15 | # legend.background = element_rect(fill = "transparent"))) 16 | 17 | 18 | fields <- yaml::yaml.load_file("../metadata/spec.yaml") 19 | fields_derived <- yaml::yaml.load_file("../metadata/spec_derived.yaml") %>% 20 | purrr::transpose() %>% 21 | simplify_all() %>% 22 | dplyr::as_data_frame() 23 | 24 | reports <- 
yaml::yaml.load_file("../metadata/reports.yaml") 25 | people <- yaml::yaml.load_file("../metadata/people.yaml") 26 | 27 | includeRmd <- function(path, shiny_data = NULL) { 28 | shiny:::dependsOnFile(path) 29 | rmarkdown::render(path, quiet = TRUE) 30 | includeHTML(gsub(".Rmd", ".html", path)) 31 | } 32 | 33 | cached_data <- list.files("../data/") 34 | 35 | datasets <- jsonlite::fromJSON("../metadata/datasets.json") %>% 36 | filter(filename %in% cached_data) 37 | 38 | load_dataset <- function(filename) { 39 | feather::read_feather(file.path("..", "data", filename)) %>% 40 | mutate(filename = filename, 41 | # response_mode_exposure_phase = sprintf( 42 | # "%s \n %s", response_mode, exposure_phase), 43 | year = ifelse(grepl("submitted", study_ID), Inf, 44 | stringr::str_extract(study_ID, "([:digit:]{4})")) 45 | ) 46 | } 47 | 48 | avg_month <- 365.2425 / 12.0 49 | 50 | all_data <- cached_data %>% 51 | map_df(load_dataset) %>% 52 | mutate(all_mod = "", 53 | mean_age_months = mean_age / avg_month) 54 | 55 | all_data <- all_data %>% 56 | filter(!is.na(d_calc)) 57 | 58 | studies <- all_data %>% 59 | group_by(dataset) %>% 60 | summarise(num_experiments = n(), 61 | num_papers = length(unique(study_ID))) 62 | 63 | subjects <- all_data %>% 64 | rowwise() %>% 65 | mutate(n_total = sum(c(n_1, n_2), na.rm = TRUE)) %>% 66 | distinct(dataset, study_ID, same_infant, .keep_all = TRUE) %>% 67 | group_by(dataset) %>% 68 | summarise(num_subjects = sum(n_total)) 69 | 70 | datasets <- datasets %>% 71 | rename(dataset = name) %>% 72 | left_join(studies) %>% 73 | left_join(subjects) %>% 74 | rename(name = dataset) 75 | 76 | 77 | -------------------------------------------------------------------------------- /dashboard/google_analytics.js: -------------------------------------------------------------------------------- 1 | (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ 2 | (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), 3 | 
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) 4 | })(window,document,'script','https://www.google-analytics.com/analytics.js','ga'); 5 | 6 | ga('create', 'UA-84150050-1', 'auto'); 7 | ga('send', 'pageview'); 8 | -------------------------------------------------------------------------------- /dashboard/metalab.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | -------------------------------------------------------------------------------- /dashboard/packrat/packrat.lock: -------------------------------------------------------------------------------- 1 | PackratFormat: 1.4 2 | PackratVersion: 0.4.8.1 3 | RVersion: 3.3.1 4 | Repos: CRAN=https://cran.rstudio.com/ 5 | 6 | Package: BH 7 | Source: CRAN 8 | Version: 1.60.0-2 9 | Hash: be2967bcff307e561d9ba17faf03205c 10 | 11 | Package: DBI 12 | Source: CRAN 13 | Version: 0.5-1 14 | Hash: 375869004db963f9915f2b8a49b4a718 15 | 16 | Package: DT 17 | Source: CRAN 18 | Version: 0.1 19 | Hash: 66d3d04d40c505e567c1328eae0409ee 20 | Requires: htmltools, htmlwidgets, magrittr 21 | 22 | Package: MASS 23 | Source: CRAN 24 | Version: 7.3-45 25 | Hash: 0650d5701aa7e369e77c9e2c8e1dc913 26 | 27 | Package: Matrix 28 | Source: CRAN 29 | Version: 1.2-6 30 | Hash: 6d5b4689f994e2af7e9cc11324c5cbeb 31 | Requires: lattice 32 | 33 | Package: R6 34 | Source: CRAN 35 | Version: 2.2.0 36 | Hash: 712773a3439bd32db5c40938e1a9aac2 37 | 38 | Package: RColorBrewer 39 | Source: CRAN 40 | Version: 1.1-2 41 | Hash: c0d56cd15034f395874c870141870c25 42 | 43 | Package: Rcpp 44 | Source: CRAN 45 | Version: 0.12.7 46 | Hash: f0ba7322cfb66f1b563462faa9c2b2ab 47 | 48 
| Package: assertthat 49 | Source: CRAN 50 | Version: 0.1 51 | Hash: 0afb92b59b02593c70ff8046700ba9d3 52 | 53 | Package: base64enc 54 | Source: CRAN 55 | Version: 0.1-3 56 | Hash: c590d29e555926af053055e23ee79efb 57 | 58 | Package: bitops 59 | Source: CRAN 60 | Version: 1.0-6 61 | Hash: 67d0775189fd0041d95abca618c5c07e 62 | 63 | Package: caTools 64 | Source: CRAN 65 | Version: 1.17.1 66 | Hash: 97cb6f6293cd18d17df77a6383cc6763 67 | Requires: bitops 68 | 69 | Package: colorspace 70 | Source: CRAN 71 | Version: 1.2-6 72 | Hash: 00bb12245cd975c450cc4a960884fa15 73 | 74 | Package: curl 75 | Source: CRAN 76 | Version: 1.2 77 | Hash: 11de7924273ee61a95c7a157a572bc8a 78 | 79 | Package: dichromat 80 | Source: CRAN 81 | Version: 2.0-0 82 | Hash: 08eed0c80510af29bb15f840ccfe37ce 83 | 84 | Package: digest 85 | Source: CRAN 86 | Version: 0.6.10 87 | Hash: 87165608ca8aeb0958835c3c5b52b6b7 88 | 89 | Package: dplyr 90 | Source: CRAN 91 | Version: 0.5.0 92 | Hash: 03e5e71801c0f49217922e2cd4680f06 93 | Requires: BH, DBI, R6, Rcpp, assertthat, lazyeval, magrittr, tibble 94 | 95 | Package: evaluate 96 | Source: CRAN 97 | Version: 0.9 98 | Hash: cf338d73ba4ee3a4fefec612cdfd38db 99 | Requires: stringr 100 | 101 | Package: feather 102 | Source: CRAN 103 | Version: 0.3.0 104 | Hash: 07695228cd716a5877b41feab0655980 105 | Requires: Rcpp, hms, tibble 106 | 107 | Package: formatR 108 | Source: CRAN 109 | Version: 1.4 110 | Hash: b2a47a2b91737978cf50ba95825a9cd1 111 | 112 | Package: ggplot2 113 | Source: CRAN 114 | Version: 2.1.0 115 | Hash: 50e297b0191179c39c7dcae0eff72b51 116 | Requires: MASS, digest, gtable, plyr, reshape2, scales 117 | 118 | Package: gtable 119 | Source: CRAN 120 | Version: 0.2.0 121 | Hash: cd78381a9d3fea966ac39bd0daaf5554 122 | 123 | Package: highr 124 | Source: CRAN 125 | Version: 0.6 126 | Hash: aa3d5b7912b5fed4b546ed5cd2a1760b 127 | 128 | Package: hms 129 | Source: CRAN 130 | Version: 0.2 131 | Hash: c1b6ded8c65ae31a98d73a312b2e6574 132 | 133 | Package: htmltools 
134 | Source: CRAN 135 | Version: 0.3.5 136 | Hash: a96dc1cfe628e66337c86be732567df2 137 | Requires: Rcpp, digest 138 | 139 | Package: htmlwidgets 140 | Source: CRAN 141 | Version: 0.6 142 | Hash: 267a36d738dd98cec3dc9c471becb717 143 | Requires: htmltools, jsonlite, yaml 144 | 145 | Package: httpuv 146 | Source: CRAN 147 | Version: 1.3.3 148 | Hash: 12c4ec43e5609293576e368c740c1f84 149 | Requires: Rcpp 150 | 151 | Package: jsonlite 152 | Source: CRAN 153 | Version: 1.1 154 | Hash: 86d4b85b84421a4815f1bb4872e30ef4 155 | 156 | Package: knitr 157 | Source: CRAN 158 | Version: 1.14 159 | Hash: e7b411f5a5512295a260a45683fa0c4a 160 | Requires: digest, evaluate, formatR, highr, markdown, stringr, yaml 161 | 162 | Package: labeling 163 | Source: CRAN 164 | Version: 0.3 165 | Hash: ecf589b42cd284b03a4beb9665482d3e 166 | 167 | Package: langcog 168 | Source: github 169 | Version: 0.1.9001 170 | Hash: fae1635d843b7ff3ff167cb957b2eb22 171 | Requires: assertthat, dplyr, ggplot2, lazyeval, quadprog 172 | GithubRepo: langcog 173 | GithubUsername: langcog 174 | GithubRef: master 175 | GithubSha1: 70e5e76cc8197e9073063963af18ed7ddee3b06d 176 | 177 | Package: lattice 178 | Source: CRAN 179 | Version: 0.20-33 180 | Hash: d7158cf4e63e8c12cb227738076b37d8 181 | 182 | Package: lazyeval 183 | Source: CRAN 184 | Version: 0.2.0 185 | Hash: 3d6e7608e65bbf5cb170dab1e3c9ed8b 186 | 187 | Package: magrittr 188 | Source: CRAN 189 | Version: 1.5 190 | Hash: bdc4d48c3135e8f3b399536ddf160df4 191 | 192 | Package: markdown 193 | Source: CRAN 194 | Version: 0.7.7 195 | Hash: fea2343a1119d61b0cc5c0a950d103a3 196 | Requires: mime 197 | 198 | Package: metafor 199 | Source: CRAN 200 | Version: 1.9-8 201 | Hash: 9430098dbc94581c33d4c299997b815c 202 | Requires: Matrix 203 | 204 | Package: mime 205 | Source: CRAN 206 | Version: 0.5 207 | Hash: 463550cf44fb6f0a2359368f42eebe62 208 | 209 | Package: munsell 210 | Source: CRAN 211 | Version: 0.4.3 212 | Hash: f96d896947fcaf9b6d0074002e9f4f9d 213 | Requires: 
colorspace 214 | 215 | Package: packrat 216 | Source: CRAN 217 | Version: 0.4.8-1 218 | Hash: 6ad605ba7b4b476d84be6632393f5765 219 | 220 | Package: plyr 221 | Source: CRAN 222 | Version: 1.8.4 223 | Hash: 75ca080b10a9314a9b293964331e5df7 224 | Requires: Rcpp 225 | 226 | Package: purrr 227 | Source: CRAN 228 | Version: 0.2.2 229 | Hash: 14bc8b3a4c61f67866ebfb413f77ff9f 230 | Requires: BH, Rcpp, dplyr, lazyeval, magrittr 231 | 232 | Package: pwr 233 | Source: CRAN 234 | Version: 1.1-3 235 | Hash: 23946a57fddd95fa449e164b2238c653 236 | 237 | Package: quadprog 238 | Source: CRAN 239 | Version: 1.5-5 240 | Hash: 69000b7dc41b09977dcf781f909d235c 241 | 242 | Package: readr 243 | Source: CRAN 244 | Version: 1.0.0 245 | Hash: d7cf8e4bfe7f5fa25d908362735bc9ae 246 | Requires: BH, R6, Rcpp, curl, hms, tibble 247 | 248 | Package: reshape2 249 | Source: CRAN 250 | Version: 1.4.1 251 | Hash: c9a8d2349fe969cf519dd0ced9cc1414 252 | Requires: Rcpp, plyr, stringr 253 | 254 | Package: rmarkdown 255 | Source: github 256 | Version: 1.0.9016 257 | Hash: a5b374882d710da70a48da55612b555e 258 | Requires: base64enc, caTools, evaluate, htmltools, jsonlite, knitr, 259 | tibble, yaml 260 | GithubRepo: rmarkdown 261 | GithubUsername: rstudio 262 | GithubRef: master 263 | GithubSha1: 33aef1750e06a85e94c259927b601e2d3bf8b4cb 264 | 265 | Package: scales 266 | Source: CRAN 267 | Version: 0.4.0 268 | Hash: c4a91e7a322e03813b68b73940ce031f 269 | Requires: RColorBrewer, Rcpp, dichromat, labeling, munsell, plyr 270 | 271 | Package: shiny 272 | Source: CRAN 273 | Version: 0.14.1 274 | Hash: 65d870a0d4c8b9a2ac3dd552a367510b 275 | Requires: R6, digest, htmltools, httpuv, jsonlite, mime, sourcetools, 276 | xtable 277 | 278 | Package: shinydashboard 279 | Source: CRAN 280 | Version: 0.5.3 281 | Hash: 6ec0688120d2dd4500b6561886f413a4 282 | Requires: htmltools, shiny 283 | 284 | Package: sourcetools 285 | Source: CRAN 286 | Version: 0.1.5 287 | Hash: 04ddeb1c288302222562326a471e2945 288 | 289 | Package: 
stringi 290 | Source: CRAN 291 | Version: 1.1.1 292 | Hash: 48f2afc5dfecb65ba32a87d38b3533ff 293 | 294 | Package: stringr 295 | Source: CRAN 296 | Version: 1.1.0 297 | Hash: 4d21d0063b37a17ea18eaec6acd29bf3 298 | Requires: magrittr, stringi 299 | 300 | Package: tibble 301 | Source: CRAN 302 | Version: 1.2 303 | Hash: 3b35b556ef7acddb0d28fe6a27ad50e4 304 | Requires: Rcpp, assertthat, lazyeval 305 | 306 | Package: tidyr 307 | Source: CRAN 308 | Version: 0.6.0 309 | Hash: daf06bb30f2d9581e69609e0c0f7d38a 310 | Requires: Rcpp, dplyr, lazyeval, magrittr, stringi, tibble 311 | 312 | Package: xtable 313 | Source: CRAN 314 | Version: 1.8-2 315 | Hash: 7293235cfcc14cdff1ce7fd1a0212031 316 | 317 | Package: yaml 318 | Source: CRAN 319 | Version: 2.1.13 320 | Hash: 4854ccabebc225e8a7309fb4a74980de 321 | -------------------------------------------------------------------------------- /dashboard/packrat/packrat.opts: -------------------------------------------------------------------------------- 1 | auto.snapshot: TRUE 2 | use.cache: FALSE 3 | print.banner.on.startup: auto 4 | vcs.ignore.lib: TRUE 5 | vcs.ignore.src: FALSE 6 | external.packages: 7 | local.repos: 8 | load.external.packages.on.startup: TRUE 9 | ignored.packages: 10 | quiet.package.installation: TRUE 11 | snapshot.recommended.packages: FALSE 12 | snapshot.fields: 13 | Imports 14 | Depends 15 | LinkingTo 16 | -------------------------------------------------------------------------------- /dashboard/packrat/src/DT/DT_0.1.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/DT/DT_0.1.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/RColorBrewer/RColorBrewer_1.1-2.tar.gz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/RColorBrewer/RColorBrewer_1.1-2.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/assertthat/assertthat_0.1.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/assertthat/assertthat_0.1.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/bitops/bitops_1.0-6.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/bitops/bitops_1.0-6.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/caTools/caTools_1.17.1.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/caTools/caTools_1.17.1.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/colorspace/colorspace_1.2-6.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/colorspace/colorspace_1.2-6.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/dichromat/dichromat_2.0-0.tar.gz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/dichromat/dichromat_2.0-0.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/ggplot2/ggplot2_2.1.0.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/ggplot2/ggplot2_2.1.0.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/gtable/gtable_0.2.0.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/gtable/gtable_0.2.0.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/htmltools/htmltools_0.3.5.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/htmltools/htmltools_0.3.5.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/htmlwidgets/htmlwidgets_0.6.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/htmlwidgets/htmlwidgets_0.6.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/httpuv/httpuv_1.3.3.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/httpuv/httpuv_1.3.3.tar.gz 
-------------------------------------------------------------------------------- /dashboard/packrat/src/labeling/labeling_0.3.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/labeling/labeling_0.3.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/magrittr/magrittr_1.5.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/magrittr/magrittr_1.5.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/markdown/markdown_0.7.7.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/markdown/markdown_0.7.7.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/metafor/metafor_1.9-8.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/metafor/metafor_1.9-8.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/munsell/munsell_0.4.3.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/munsell/munsell_0.4.3.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/pwr/pwr_1.1-3.tar.gz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/pwr/pwr_1.1-3.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/quadprog/quadprog_1.5-5.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/quadprog/quadprog_1.5-5.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/reshape2/reshape2_1.4.1.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/reshape2/reshape2_1.4.1.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/scales/scales_0.4.0.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/scales/scales_0.4.0.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/xtable/xtable_1.8-2.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/xtable/xtable_1.8-2.tar.gz -------------------------------------------------------------------------------- /dashboard/packrat/src/yaml/yaml_2.1.13.tar.gz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/packrat/src/yaml/yaml_2.1.13.tar.gz -------------------------------------------------------------------------------- /dashboard/rmarkdown/_output.yaml: -------------------------------------------------------------------------------- 1 | html_fragment 2 | -------------------------------------------------------------------------------- /dashboard/rmarkdown/contribute.Rmd: -------------------------------------------------------------------------------- 1 | We welcome researchers interested in contributing to Metalab. Please contact us at metalab-project@googlegroups.com 2 | 3 | Contributions can take various forms: 4 | 5 | 1. Adding unpublished data. Ideally we can link to a report on the data, for example on [OSF](http://osf.io). Then contact the curator 6 | with a link and if possible all necessary data. 7 | 2. Suggesting and/or adding published papers not yet included. Simply email the curator, ideally with a pdf. If you are the author, try to make sure that all necessary data is either included or provided in your email. 8 | 3. Creating new meta-analyses. You can either collaborate on current in-progress projects (see below) or create your own meta-analyses. In the latter case see the section listing resources further down. 
9 | 10 | **New meta-analyses looking for contributors** 11 | 12 | Currently, the following topics are in the process of becoming a meta-analysis and we are looking for contributors: 13 | 14 | * Segmenting words from an artificial stream of speech, an extension of [*InWordDB*](http://inworddb.acristia.org); contact: [*Christina Bergmann*](http://sites.google.com/site/chbergma/home/contact) 15 | * Detecting mispronunciations of known words, without a learning phase in the lab; contact: [*Christina Bergmann*](http://sites.google.com/site/chbergma/home/contact) 16 | 17 | 18 | **Resources** 19 | 20 | Here are a few resources on creating meta-analyses compatible with Metalab: 21 | 22 | * [*Metalab MA template*](https://docs.google.com/spreadsheets/d/12Y_2BcFSu48t0F8a_xrY1Ro2fJoCIV1h8O627WNcrjY/edit?usp=sharing) 23 | * [*Code book*](https://docs.google.com/document/d/1szXxrv75qTVx-4adlLrwmjWSDIBJfMH09dzh9733mog/edit?usp=sharing) 24 | * [*Interrogating PubMed via a script*](https://gist.github.com/mcfrank/c1ec74df1427278cbe53) 25 | * [*Selecting studies for 26 | inclusion*](https://www.getdatajoy.com/project/561388dfb485274e40055563) 27 | * [*FAQ*](https://docs.google.com/document/d/1_7mgeaARlYK6TecyUqzk9SSr0pLAkV3pQZE4qYT7CMA/edit?usp=sharing) 28 | * [*InWordDB, a well-fleshed out MA*](https://docs.google.com/spreadsheets/d/1XN6VaYRs7CStrINct_rr2d6rh6WN28xFiIGB5T1vkzY/edit?usp=sharing) 29 | * [*Instructions for creating a 30 | community-augmented meta-analysis*](https://sites.google.com/site/infantdbs/create-your-own-cama) 31 | (including further resources) 32 | -------------------------------------------------------------------------------- /dashboard/rmarkdown/datasets.Rmd: -------------------------------------------------------------------------------- 1 | ```{r include=FALSE, echo=FALSE} 2 | library(shiny) 3 | knitr::opts_chunk$set(message = FALSE, warning = FALSE, cache = FALSE) 4 | datasets <- shiny_data$datasets 5 | ``` 6 | 7 | ## Individual meta-analyses 8 | 9 | 
For more information on phonemic discrimination and word segmentation MAs, please see the relevant tabs. 10 | 11 | ```{r results='asis', echo=FALSE} 12 | for (i in 1:nrow(datasets)) { 13 | dataset <- datasets[i,] 14 | cat(sprintf("### %s \n * [%s](%s) \n * N papers = %s, N effect sizes = %s \n * %s \n * Curator is %s \n\n", 15 | dataset$name, dataset$citation, dataset$link, dataset$num_papers, dataset$num_experiments, dataset$description, dataset$curator)) 16 | } 17 | ``` 18 | -------------------------------------------------------------------------------- /dashboard/rmarkdown/inphondb.Rmd: -------------------------------------------------------------------------------- 1 | ```{r include=FALSE, echo=FALSE} 2 | knitr::opts_chunk$set(message = FALSE, warning = FALSE, cache = FALSE) 3 | ``` 4 | 5 | Even before birth, infants are listening to us. In a world full of sounds, they must learn to attend to certain sound contrasts more than others in order to parse speech effectively. What are infants' patterns of perception and discrimination at each step of this trip? On this website, we nucleate meta-analyses bearing on this key question. We now hold the largest database of infant vowel discrimination research, in a public format. 6 | 7 | Links with more information: 8 | 9 | * [Overview website](https://sites.google.com/site/inphondb/) 10 | * [Native data](https://docs.google.com/spreadsheets/d/13ytBtSP9ZUNDeW5K2uf_VfyTFOILivTFZN1YBLZtmuU/edit) and [Non-native data](https://docs.google.com/spreadsheets/d/1K7iCsLUXTB8GXbynuQ9aJOM6O-EcrnDDlmYspdW_Sh8/edit) 11 | * [Codebook](https://drive.google.com/file/d/0B_tr1H-CJ3AIb1VpeUtqalhxNkU/view) 12 | 13 | 14 | **References** 15 | 16 | * Tsuji, S. & Cristia, A. (2014). Perceptual attunement in vowels: A meta-analysis. *Developmental Psychobiology, 56*, 179-191. 17 | * Tsuji, S., & Cristia, A. (2013). 50 years of infant vowel discrimination research: what have we learned? *Journal of the Phonetic Society of Japan, 17*(3), 1-11. 
18 | -------------------------------------------------------------------------------- /dashboard/rmarkdown/inworddb.Rmd: -------------------------------------------------------------------------------- 1 | ```{r include=FALSE, echo=FALSE} 2 | knitr::opts_chunk$set(message = FALSE, warning = FALSE, cache = FALSE) 3 | ``` 4 | 5 | One of the first things infants seem to extract and encode in their memory are recurring words. In this database, we nucleate meta-analyses bearing on how infants encode words. We have started with an analysis of studies on infants' extraction of words from native, natural speech as evidenced in paradigms built on listening times. 6 | 7 | Links with more information: 8 | 9 | * [Dedicated website](https://sites.google.com/site/inworddb/) 10 | * [Data](https://docs.google.com/spreadsheets/d/1djn_-iE4uOs6yRmvBgzYAhB5guzfo3-3wnC7LJVLCe8/edit) 11 | * [A complete list of sources](https://sites.google.com/site/inworddb/db/sources) 12 | 13 | **Curator** 14 | 15 | [*Christina Bergmann*](http://sites.google.com/site/chbergma/home/contact) 16 | 17 | 18 | **References** 19 | 20 | * Bergmann, C. & Cristia, A. (2015). Development of infants’ segmentation of words from native speech: A meta-analytic approach. Online first publication in *Developmental Science*. 
doi: [10.1111/desc.12341](http://onlinelibrary.wiley.com/doi/10.1111/desc.12341/abstract) 21 | 22 | 23 | **Acknowledgements** 24 | 25 | The following people enriched this meta-analysis by contributing additional data: 26 | 27 | * Laura Bosch 28 | * Anne Christophe 29 | * Marieke van Heugten 30 | * Elizabeth Johnson 31 | * Rochelle Newman 32 | * Jon Willits and Jenny Saffran 33 | * Rushen Shi 34 | * Leher Singh 35 | * the Plymouth babylab 36 | * Emily Mason-Apps 37 | -------------------------------------------------------------------------------- /dashboard/rmarkdown/media/image03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/rmarkdown/media/image03.jpg -------------------------------------------------------------------------------- /dashboard/rmarkdown/media/image04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/rmarkdown/media/image04.jpg -------------------------------------------------------------------------------- /dashboard/rmarkdown/media/image05.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/rmarkdown/media/image05.jpg -------------------------------------------------------------------------------- /dashboard/rmarkdown/overview.Rmd: -------------------------------------------------------------------------------- 1 | ```{r include=FALSE, echo=FALSE} 2 | library(shiny) 3 | knitr::opts_chunk$set(message = FALSE, warning = FALSE, cache = FALSE) 4 | ``` 5 | 6 | ## Overview 7 | 8 | MetaLab is a research tool for aggregating across studies in the language 9 | acquisition literature. 
Currently, MetaLab contains 10 | `r as.integer(sum(as.numeric(datasets$num_experiments), na.rm = TRUE))` effect sizes across 11 | meta-analyses in `r nrow(datasets)` domains of language acquisition, based on 12 | data from `r as.integer(sum(as.numeric(datasets$num_papers), na.rm = TRUE))` papers collecting 13 | `r as.integer(sum(as.numeric(datasets$num_subjects), na.rm = TRUE))` subjects. These studies 14 | can be used to obtain better estimates of effect sizes across different domains, 15 | methods, and ages. Using our power calculator, researchers can use these 16 | estimates to plan appropriate sample sizes for prospective studies. More 17 | generally, MetaLab can be used as a theoretical tool for exploring patterns in 18 | development across language acquisition domains. 19 | 20 | ## Documentation 21 | 22 | + For more information about individual datasets, see the `Datasets` tab. 23 | + For information about the field structure of the site, see the `Field Specification` tab. 24 | + Individual meta-analyses with other available resources also have their own documentation tabs. 25 | + For information about doing meta-analyses and contributing to MetaLab, see the `Contribute` page. 26 | 27 | ## Citation 28 | 29 | Currently, papers on the system are in preparation. Please contact [metalab-project@googlegroups.com]() for citation information. 30 | 31 | ## Caveat 32 | 33 | Please note that data and visualizations are under development at the moment (Spring 2016) and should not be taken as definitive. 
34 | -------------------------------------------------------------------------------- /dashboard/rmarkdown/resources.Rmd: -------------------------------------------------------------------------------- 1 | Here is a short list of links to resources about metalab: 2 | 3 | * [*Metalab MA template*](https://docs.google.com/spreadsheets/d/12Y_2BcFSu48t0F8a_xrY1Ro2fJoCIV1h8O627WNcrjY/edit?usp=sharing) 4 | * [*Code book*](https://docs.google.com/document/d/1szXxrv75qTVx-4adlLrwmjWSDIBJfMH09dzh9733mog/edit?usp=sharing) 5 | * [*Interrogating PubMed via a script*](https://gist.github.com/mcfrank/c1ec74df1427278cbe53) 6 | * [*Selecting studies for 7 | inclusion*](https://www.getdatajoy.com/project/561388dfb485274e40055563) 8 | * [*FAQ*](https://docs.google.com/document/d/1_7mgeaARlYK6TecyUqzk9SSr0pLAkV3pQZE4qYT7CMA/edit?usp=sharing) 9 | * [*InWordDB, a well-fleshed out MA*](https://docs.google.com/spreadsheets/d/1XN6VaYRs7CStrINct_rr2d6rh6WN28xFiIGB5T1vkzY/edit?usp=sharing) 10 | * [*Instructions for creating a 11 | community-augmented meta-analysis*](https://sites.google.com/site/infantdbs/create-your-own-cama) 12 | (including further resources) 13 | -------------------------------------------------------------------------------- /dashboard/rmarkdown/stats.Rmd: -------------------------------------------------------------------------------- 1 | ```{r include=FALSE, echo=FALSE} 2 | library(shiny) 3 | knitr::opts_chunk$set(message = FALSE, warning = FALSE, cache = FALSE) 4 | ``` 5 | 6 | ## Overview 7 | 8 | All analyses on the site are conducted with the [`metafor`](http://www.metafor-project.org/doku.php) package. 9 | 10 | ## Effect Size Computation 11 | 12 | Effect size computation is handled by a script, [`compute_es.R`](https://github.com/langcog/metalab/blob/master/scripts/compute_es.R). 13 | 14 | Several pre-existing MAs deal with special cases, and these are listed in the script. 
15 | 16 | Except where noted, formulas are from [Hedges & Olkin's textbook](http://www.amazon.com/Statistical-Methods-Meta-Analysis-Larry-Hedges/dp/0123363802). 17 | 18 | ## Statistical Models 19 | 20 | The visualizations page uses a standard random effects meta-analysis as the default, but allows several other models (all available in the `rma` function of `metafor`). 21 | 22 | For many analyses, the use of a multi-level approach (with grouping by paper) is useful, and may make a difference to results. We have not made this multi-level model the default, for two reasons. First, the approach will be unfamiliar to many users. Second, many common statistics are not implemented for this model, e.g. $\tau^2$ and the test for funnel-plot asymmetry. 23 | -------------------------------------------------------------------------------- /dashboard/www/custom.css: -------------------------------------------------------------------------------- 1 | @import url(https://fonts.googleapis.com/css?family=Ubuntu); 2 | 3 | .main-header .logo { 4 | font-family: "Ubuntu", sans-serif; 5 | } 6 | 7 | body { 8 | font-family: "Ubuntu", sans-serif; 9 | } 10 | 11 | .jumbo { 12 | font-family: "Ubuntu", sans-serif; 13 | font-size: 500%; 14 | } 15 | 16 | .sidebar-toggle { 17 | visibility: hidden; 18 | } 19 | 20 | .main-sidebar { 21 | margin-bottom: 60px; 22 | } 23 | 24 | .footer { 25 | position: absolute; 26 | bottom: 0; 27 | width: 100%; 28 | height: 60px; 29 | } 30 | 31 | .nav-tabs-custom .nav-tabs li.active { 32 | border-top-color: red; 33 | } 34 | 35 | #moderator_input .shiny-options-group span { 36 | font-size: 11px; 37 | } 38 | 39 | .dataset-txt { 40 | padding-left: 0; 41 | } 42 | 43 | .dataset-img { 44 | margin-top: 12px; 45 | } 46 | 47 | pre { 48 | font-size: 11px; 49 | font-family: Monaco, monospace; 50 | } 51 | -------------------------------------------------------------------------------- /dashboard/www/images/datasets/bouba-kiki.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/bouba-kiki.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/catBias.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/catBias.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/dinosaur.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/dinosaur.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/dinosaur.xcf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/dinosaur.xcf -------------------------------------------------------------------------------- /dashboard/www/images/datasets/dinosaur_original.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/dinosaur_original.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/discrim.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/discrim.png 
-------------------------------------------------------------------------------- /dashboard/www/images/datasets/distrib.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/distrib.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/gaze.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/gaze.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/ids.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/ids.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/phonotactics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/phonotactics.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/pointing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/pointing.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/segmentation.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/segmentation.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/statistical.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/statistical.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/word_rec.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/word_rec.png -------------------------------------------------------------------------------- /dashboard/www/images/datasets/zot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/datasets/zot.png -------------------------------------------------------------------------------- /dashboard/www/images/people/alex.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/people/alex.jpg -------------------------------------------------------------------------------- /dashboard/www/images/people/christina.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/people/christina.jpg -------------------------------------------------------------------------------- /dashboard/www/images/people/mika.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/people/mika.jpg -------------------------------------------------------------------------------- /dashboard/www/images/people/mike.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/people/mike.jpg -------------------------------------------------------------------------------- /dashboard/www/images/people/molly.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/people/molly.jpg -------------------------------------------------------------------------------- /dashboard/www/images/people/page.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/people/page.jpg -------------------------------------------------------------------------------- /dashboard/www/images/people/sho.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/dashboard/www/images/people/sho.jpg -------------------------------------------------------------------------------- /data/AGL_phonotactics: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/AGL_phonotactics -------------------------------------------------------------------------------- /data/AGL_soundsDL_paired: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/AGL_soundsDL_paired -------------------------------------------------------------------------------- /data/catBias: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/catBias -------------------------------------------------------------------------------- /data/gaze_following: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/gaze_following -------------------------------------------------------------------------------- /data/infant_directed_speech_preference: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/infant_directed_speech_preference -------------------------------------------------------------------------------- /data/label_advantage: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/label_advantage -------------------------------------------------------------------------------- /data/mutual_exclusivity: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/mutual_exclusivity -------------------------------------------------------------------------------- /data/phonemic_discrimination_native: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/phonemic_discrimination_native -------------------------------------------------------------------------------- /data/phonemic_discrimination_nonnative: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/phonemic_discrimination_nonnative -------------------------------------------------------------------------------- /data/pointing_concurrent: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/pointing_concurrent -------------------------------------------------------------------------------- /data/pointing_longitudinal: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/pointing_longitudinal -------------------------------------------------------------------------------- /data/statSeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/statSeg -------------------------------------------------------------------------------- /data/symbolism: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/symbolism -------------------------------------------------------------------------------- /data/word_recognition: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/word_recognition 
-------------------------------------------------------------------------------- /data/word_segmentation: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/data/word_segmentation -------------------------------------------------------------------------------- /metadata/people.yaml: -------------------------------------------------------------------------------- 1 | #---------------- 2 | 3 | - name: Christina Bergman 4 | email: chbergma@gmail.com 5 | website: https://sites.google.com/site/chbergma/ 6 | affiliation: École normale supérieure 7 | tags: word segmentation, reproducibility, p-curves 8 | image: images/people/christina.jpg 9 | 10 | - name: Mika Braginsky 11 | email: mika.br@gmail.com 12 | website: http://mikabr.github.io/ 13 | affiliation: Stanford University 14 | tags: infrastructure 15 | image: images/people/mika.jpg 16 | 17 | - name: Alejandrina Cristia 18 | email: alecristia@gmail.com 19 | website: https://sites.google.com/site/acrsta/ 20 | affiliation: École normale supérieure 21 | tags: IDS preference 22 | image: images/people/alex.jpg 23 | 24 | - name: Michael C. 
Frank 25 | email: mcfrank@stanford.edu 26 | website: http://web.stanford.edu/~mcfrank/ 27 | affiliation: Stanford University 28 | tags: developmental curves, p-curves 29 | image: images/people/mike.jpg 30 | 31 | - name: Molly Lewis 32 | email: mollyllewis@gmail.com 33 | website: http://home.uchicago.edu/~mollylewis/ 34 | affiliation: University of Chicago/University of Wisconsin-Madison 35 | tags: word learning, replicability, meta-science 36 | image: images/people/molly.jpg 37 | 38 | - name: Page Piccinini 39 | email: page.piccinini@gmail.com 40 | website: http://idiom.ucsd.edu/~ppiccinini/ 41 | affiliation: École normale supérieure 42 | tags: developmental curves 43 | image: images/people/page.jpg 44 | 45 | - name: Sho Tsuji 46 | email: tsujish@gmail.com 47 | website: https://sites.google.com/site/tsujish/ 48 | affiliation: École normale supérieure 49 | tags: phonemic discrimination 50 | image: images/people/sho.jpg 51 | -------------------------------------------------------------------------------- /metadata/reports.yaml: -------------------------------------------------------------------------------- 1 | - title: Developmental curves 2 | file: developmental_curves 3 | 4 | - title: Reproducibility 5 | file: reproducibility 6 | 7 | - title: P-curves 8 | file: p-curves 9 | 10 | - title: Quantitative Growth 11 | file: metameta 12 | 13 | - title: Hierarchical Random Effects 14 | file: hierarchicalrandomeffect 15 | 16 | - title: Design Choices 17 | file: design_choice 18 | 19 | -------------------------------------------------------------------------------- /metadata/spec_derived.yaml: -------------------------------------------------------------------------------- 1 | #---------------- 2 | 3 | - field: corr_imputed 4 | description: imputed correlation 5 | 6 | - field: d_calc 7 | description: calculated Cohen's d 8 | 9 | - field: d_var_calc 10 | description: calculated Cohen's d variance 11 | 12 | - field: g_calc 13 | description: calculated Hedges' g 14 | 15 | - 
field: r_calc 16 | description: calculated Pearson's r 17 | 18 | - field: mean_age 19 | description: mean of mean_age_1 and mean_age_2 weighted by n_1 and n_2 20 | 21 | - field: "n" 22 | description: mean of n_1 and n_2 23 | -------------------------------------------------------------------------------- /reports/OLD/dois.csv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/reports/OLD/dois.csv -------------------------------------------------------------------------------- /reports/OLD/dois_as_csv.csv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/reports/OLD/dois_as_csv.csv -------------------------------------------------------------------------------- /reports/OLD/dois_mini.csv: -------------------------------------------------------------------------------- 1 | Chambers, K., Onishi, K. H., & Fisher, C. (2003). Infants learn phonotactic regularities from brief auditory experience. Cognition, 87, B69â€"B77. ; https://doi.org/10.1016/s0010-0277(02)00233-0 ; 2 | Chambers, K. E., Onishi, K. H., & Fisher, C. (2011). Representations for phonotactic learning in infancy. Language Learning & Development, 7, 287â€"308. ; https://doi.org/10.1080/15475441.2011.580447; 3 | Cristia, A. (2006). Speech sound categories in language acquisition & learning. Unpublished Masters of Arts Thesis, Purdue University. ; 4 | Cristia, A. & Peperkamp, S. (2012). Generalizing without encoding specifics: Infants infer phonotactic patterns on sound classes. BUCLD Proceedings, 36, 126-138.; 5 | Seidl, A., & Buckley, E. (2005). On the learning of arbitrary phonological rules. Language Learning & Development 1:289â€"316.; 6 | Seidl, A., Cristia, A., Onishi, K. H., & Bernard, A. (2009). 
Allophonic & phonemic contrasts in infants learning of sound patterns. Language Learning & Development, 5, 191â€"202. ; https://doi.org/10.1080/15475440902754326; -------------------------------------------------------------------------------- /reports/OLD/dois_mini.txt: -------------------------------------------------------------------------------- 1 | Chambers, K., Onishi, K. H., & Fisher, C. (2003). Infants learn phonotactic regularities from brief auditory experience. Cognition, 87, B69â€"B77. ; https://doi.org/10.1016/s0010-0277(02)00233-0 ; 2 | Chambers, K. E., Onishi, K. H., & Fisher, C. (2011). Representations for phonotactic learning in infancy. Language Learning & Development, 7, 287â€"308. ; https://doi.org/10.1080/15475441.2011.580447; 3 | Cristia, A. (2006). Speech sound categories in language acquisition & learning. Unpublished Masters of Arts Thesis, Purdue University. ; 4 | Cristia, A. & Peperkamp, S. (2012). Generalizing without encoding specifics: Infants infer phonotactic patterns on sound classes. BUCLD Proceedings, 36, 126-138.; 5 | Seidl, A., & Buckley, E. (2005). On the learning of arbitrary phonological rules. Language Learning & Development 1:289â€"316.; 6 | Seidl, A., Cristia, A., Onishi, K. H., & Bernard, A. (2009). Allophonic & phonemic contrasts in infants learning of sound patterns. Language Learning & Development, 5, 191â€"202. 
; https://doi.org/10.1080/15475440902754326; -------------------------------------------------------------------------------- /reports/_output.yaml: -------------------------------------------------------------------------------- 1 | html_document: 2 | self_contained: false 3 | lib_dir: out/libs 4 | toc: true 5 | toc_float: true 6 | theme: united 7 | highlight: tango 8 | code_folding: hide 9 | fig.width: 8 10 | fig.height: 5 11 | -------------------------------------------------------------------------------- /reports/hierarchicalrandomeffect.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Hierarchical Random Effects in Meta-Analyses: Do they change stuff?" 3 | author: "Sho Tsuji and Christina Bergmann" 4 | date: "This report was rendered on `r Sys.Date()` and will be automatically re-rendered nightly, reflecting any changes in the data or code." 5 | --- 6 | 7 | # Introduction 8 | ```{r, setup, include=FALSE} 9 | knitr::opts_chunk$set(warning = FALSE, message = FALSE, cache = TRUE) 10 | ggplot2::theme_set(langcog::theme_mikabr(base_family = "Ubuntu")) 11 | source("../dashboard/global.R", chdir = TRUE) 12 | library(metafor) 13 | library(dplyr) 14 | library(tidyr) 15 | library(ggplot2) 16 | library(stringr) 17 | library(purrr) 18 | library(langcog) 19 | ``` 20 | 21 | Each effect size is nested within an experiment which is in turn nested within a paper (this includes unpublished reports, theses, and the likes). It can be assumed that effect sizes within these nested structures are not independent. Here we explore whether and how accounting for this possible correlation affects both a random effects base model and a moderator analysis. As example we chose InWordDB. 22 | 23 | 24 | # Base Model 25 | 26 | Standard random effects model, no moderators. 
First we run the model without accounting for any hierarchical structure (as reported in the publication by Bergmann & Cristia 2015; note that differences in effect size estimation are due to an updated dataset here that also includes nonnative studies, as compared to the paper). 27 | 28 | ```{r Data} 29 | inworddb <- droplevels(all_data[all_data$short_name=="inworddb", ]) 30 | 31 | StandardMod <- rma(g_calc, g_var_calc, data = inworddb) 32 | summary(StandardMod) 33 | ``` 34 | 35 | ## Two Level Model: Paper 36 | 37 | We first add a level for the paper a given effect size was reported in. These effect sizes presumably stem from a batch of studies that were conducted in the same lab in a very similar fashion and by the same set of experimenters, introducing possible correlations. 38 | 39 | ```{r RandPerPaper} 40 | 41 | PerPaperMod <- rma.mv(g_calc, g_var_calc, random = ~ 1 | short_cite, data = inworddb) 42 | summary(PerPaperMod) 43 | ``` 44 | 45 | ## Three Level Model: Paper and Experiment 46 | 47 | Nested within paper, we introduce a level for experiment number. Experiments can report several effect sizes, for example when infants are run in conditions; slight variations of the same study which are presumed to be even more similar than effect sizes within a paper. 48 | A caveat is that conventions on what counts as experiment and what counts as conditions within an experiment might differ across papers. 49 | 50 | ```{r RandPerExpAndPaper} 51 | 52 | PerExpPaperMod <- rma.mv(g_calc, g_var_calc, random = ~ factor(expt_num) | short_cite, data = inworddb) 53 | summary(PerExpPaperMod) 54 | ``` 55 | 56 | To summarize, all these models differ in their effect size estimates, but do not change the statistical outcome. The effect remains small but significantly above 0. Adding the level of experiment number did not dramatically change the result. 
57 | 58 | # Moderator Model from the Paper 59 | 60 | ```{r AgeSimple} 61 | #Centering mean age 62 | inworddb$ageC <- inworddb$mean_age-mean(inworddb$mean_age) 63 | 64 | StandardMod <- rma(g_calc, g_var_calc, mod = ageC, data = inworddb) 65 | summary(StandardMod) 66 | ``` 67 | 68 | ## Two Level Model: Paper 69 | 70 | ```{r AgePerPaper} 71 | 72 | PerPaperMod <- rma.mv(g_calc, g_var_calc, mod = ageC, random = ~ 1 | short_cite, data = inworddb) 73 | summary(PerPaperMod) 74 | ``` 75 | 76 | 77 | ## Three Level Model: Paper and Experiment 78 | 79 | ```{r AgePerExpAndPaper} 80 | 81 | PerExpPaperMod <- rma.mv(g_calc, g_var_calc, mod = ageC, random = ~ factor(expt_num) | short_cite, data = inworddb) 82 | summary(PerExpPaperMod) 83 | ``` 84 | 85 | To summarize, introducing hierarchical structure changed the outcome for the moderator test. Age (centered) now has a small, but significant, effect on effect sizes. This is not the case when ignoring the nested structure of effect sizes. 86 | This result mirrors the reported analyses in Bergmann & Cristia (2015) that there is a small, positive effect of age when only considering papers which test at least two age groups in the same set-up. 
-------------------------------------------------------------------------------- /reports/impact_factors.csv: -------------------------------------------------------------------------------- 1 | journal,IF developmental science,3.808 cognition,3.479 child development,4.061 cognitive psychology,5.064 journal of experimental child psychology,2.549 developmental psychology,4.141 plos one,3.057 language learning and development,NA infancy,2.241 first language,1.254 journal of memory and language,4.014 language and speech,0.895 language and cognitive processes,2.134 psychological science,4.94 journal of phonetics,1.598 journal of cognition and development,1.683 british journal of developmental psychology,2.841 unpublished,0 proceedings ,0.5 journal of child language,1.598 poster,0.5 the journal of the acoustical society of america,1.503 perception \& psychophysics,NA journal of experimental psychology: human perception and performance,3.358 psychonomic bulletin \& review,3.369 "journal of speech, language, and hearing research", frontiers in psychology,2.56 cortex,5.128 science,33.611 infant behavior and development,1.515 bmc neuroscience,2.665 the journal of neuroscience,6.344 developmental psychobiology,3.307 international journal of bilingualism,0.697 psicothema,1.21 language learning \& development,NA social development,1.505 -------------------------------------------------------------------------------- /reports/manybabies_infancy.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Infant Directed Speech Preference - ManyBabies" 3 | author: "Mike Frank" 4 | date: "This report was rendered on `r Sys.Date()` and will be automatically re-rendered nightly, reflecting any changes in the data or code." 
5 | --- 6 | 7 | ```{r, include=FALSE} 8 | knitr::opts_chunk$set(warning = FALSE, message = FALSE) 9 | ggplot2::theme_set(langcog::theme_mikabr(base_family = "Arial")) 10 | source("../dashboard/global.R", chdir = TRUE) 11 | ``` 12 | 13 | Get IDS pref data. 14 | 15 | ```{r} 16 | idspref <- filter(all_data, dataset=="Infant directed speech preference") 17 | ``` 18 | 19 | General plot. 20 | 21 | ```{r} 22 | ggplot(idspref, 23 | aes(x = mean_age_months, y = d_calc)) + 24 | geom_jitter(aes(size = n), alpha = 0.5) + 25 | geom_hline(yintercept = 0, linetype = "dashed", color = "grey") + 26 | scale_size_continuous(guide = FALSE) + 27 | scale_x_continuous("Mean Age (Months)", breaks = c(0,3,6,9,12)) + 28 | ylab("Effect Size (d)") + 29 | geom_smooth(aes(weight = 1/d_var_calc), col = "black", 30 | method = "loess", span = 2, se = FALSE) 31 | ``` 32 | 33 | 34 | Funnel plot. 35 | 36 | ```{r} 37 | CRIT_95 <- 1.96 38 | CRIT_99 <- 2.58 39 | 40 | model <- metafor::rma(d_calc ~ 1, vi = d_var_calc, 41 | slab = short_cite, data = idspref, 42 | method = "REML") 43 | d <- data_frame(se = sqrt(model$vi), es = model$yi) 44 | center <- mean(d$es) 45 | xlabel <- "Effect Size (d)" 46 | ylabel <- "Standard Error" 47 | 48 | 49 | lower_lim <- max(d$se) + .05 * max(d$se) 50 | funnel95 <- data.frame(x = c(center - lower_lim * CRIT_95, center, 51 | center + lower_lim * CRIT_95), 52 | y = c(-lower_lim, 0, -lower_lim)) 53 | 54 | left_lim99 <- ifelse(center - lower_lim * CRIT_99 < min(d$es), 55 | center - lower_lim * CRIT_99, 56 | min(d$es)) 57 | right_lim99 <- ifelse(center + lower_lim * CRIT_99 > max(d$es), 58 | center + lower_lim * CRIT_99, 59 | max(d$es)) 60 | funnel99 <- data.frame(x = c(center - lower_lim * CRIT_99, center, 61 | center + lower_lim * CRIT_99), 62 | y = c(-lower_lim, 0, -lower_lim)) 63 | 64 | 65 | ggplot(d, aes(x = es, y = -se)) + 66 | scale_x_continuous(limits = c(left_lim99, right_lim99)) + 67 | scale_y_continuous(labels = function(x){abs(x)}) + 68 | geom_polygon(aes(x = x, y = 
y), data = funnel95, alpha = .5, 69 | fill = "white") + 70 | geom_polygon(aes(x = x, y = y), data = funnel99, alpha = .5, 71 | fill = "white") + 72 | geom_vline(xintercept = center, linetype = "dotted", color = "black") + 73 | # geom_vline(xintercept = 0, linetype = "dashed", color = "darkgrey") + 74 | geom_point() + 75 | xlab(xlabel) + 76 | ylab(ylabel) + 77 | geom_text(x = center + lower_lim * CRIT_95, 78 | y = -lower_lim + lower_lim / 60, 79 | label = "p < .05", vjust = "bottom", hjust = "center") + 80 | geom_text(x = center + lower_lim * CRIT_99, 81 | y = -lower_lim + lower_lim / 60, 82 | label = "p < .01", vjust = "bottom", hjust = "center") + 83 | theme(panel.background = element_rect(fill = "grey"), 84 | panel.grid.major = element_line(colour = "darkgrey", size = 0.2), 85 | panel.grid.minor = element_line(colour = "darkgrey", size = 0.5)) 86 | ``` -------------------------------------------------------------------------------- /reports/networks/.gitignore: -------------------------------------------------------------------------------- 1 | .httr-oauth 2 | -------------------------------------------------------------------------------- /reports/networks/data/ES_data_for_networks.csv: -------------------------------------------------------------------------------- 1 | "","overall.d","ci_lower","ci_upper","short_name","overall.d.age","fsn_string","egg.random.z","egg.random.p","stouffer.pp.measure","stouffer.Z.pp","stouffer.p.Z.pp" 2 | "1",0.942342450412955,0.237050926370702,1.64763397445521,"catBias",1.53094396341652,8390,5.48339970831044,4.172288549346e-08,"ppr.full",-18.4822510864286,1.43477597095141e-76 3 | "2",1.27163322605183,0.928317537681766,1.61494891442189,"gaze_following",-1.15298085872439,4277,3.29593880535299,0.000980933797474684,"ppr.full",-18.6612429472652,5.11616180556797e-78 4 | 
"3",0.721100645552326,0.536226247657231,0.905975043447422,"idspref",0.343645640720697,3762,2.14097619211494,0.0322759580321836,"ppr.full",-10.6990235562462,5.14281725509985e-27 5 | "4",0.592349301996462,0.488046035728792,0.696652568264132,"inphondb-native",0.764564694580739,9620,9.17218892100586,4.63509800204844e-20,"ppr.full",-9.69335299908222,1.60893676645911e-22 6 | "5",0.659452761969099,0.415793984496766,0.903111539441432,"inphondb-nonnative",0.911648440897149,3391,3.85922927108089,0.000113745188150197,"ppr.full",-8.88531279974922,3.18705860558998e-19 7 | "6",0.2027119479114,0.156965626448049,0.248458269374751,"inworddb",0.199897067374938,6088,2.5754950202763,0.0100096714635014,"ppr.full",-9.30840325998999,6.48842302413573e-21 8 | "7",0.398085687826752,0.290173734955161,0.505997640698342,"labadv",0.0528495736281519,3928,0.57120482878395,0.567860805356178,"ppr.full",-7.29740128903196,1.46689407319063e-13 9 | "8",1.00858714300335,0.684503374745843,1.33267091126085,"mutex",-0.967793960207081,6443,8.26311779783524,1.4191606598009e-16,"ppr.full",-12.8740833764224,3.14911762698916e-38 10 | "9",0.0377992190373956,-0.0889624754879497,0.164560913562741,"phonotactics",0.22033947414456,45,-1.42956060419677,0.152843169292135,"ppr.full",-1.51801073075727,0.0645058467638832 11 | "10",0.979536739538348,0.619064862130878,1.34000861694582,"pointing_concurrent",0.249001773854867,1617,1.25041717831421,0.211147192590949,"ppr.full",-6.32780739902985,1.24334623577611e-10 12 | "11",0.53516234839714,0.305537979225891,0.76478671756839,"pointing_longitudinal",0.27535063463037,1414,2.79969566840486,0.00511508055808872,"ppr.full",-5.90028942468718,1.81432234977718e-09 13 | "12",-0.216608734017906,-0.43315217285188,-6.5295183933084e-05,"sounds",-0.198767951133173,Inf,-2.668167202188,0.00762662931788937,"ppr.full",-1.02574997409791,0.152504725919043 14 | 
"13",0.115059072570507,-0.0155738598053361,0.24569200494635,"symbolism",-0.0690056287108323,526,1.42122154397262,0.155252363424869,"ppr.full",-5.55741369457658,1.36900644796354e-08 15 | "14",1.33943128698405,0.859017061266737,1.81984551270136,"word_recognition",-1.11244395773165,2043,2.73757031037296,0.00618948877364021,"ppr.full",-14.8005180297975,7.2679397835504e-50 16 | -------------------------------------------------------------------------------- /reports/networks/data/ES_data_for_networks2.csv: -------------------------------------------------------------------------------- 1 | "","dataset.x","overall.d","ci_lower","ci_upper","short_name","dataset.y","overall.d.age","d.age","tau2","I2","H2","dataset","fsn_string","egg.random.z","egg.random.p","egg.random.z.age","egg.random.p.age" 2 | "1","catBias",0.942342450412955,0.237050926370702,1.64763397445521,"catBias","catBias",1.53094396341652,-0.000377994589890875,9.78878255195829,97.9158805290023,47.9818942203575,"Categorization Bias",8390,5.48339970831044,4.172288549346e-08,5.38124904376417,7.39707730264654e-08 3 | "2","gaze_following",1.27163322605183,0.928317537681766,1.61494891442189,"gaze_following","gaze_following",-1.15298085872439,0.00582337662359039,0.218050010975262,73.2274977395958,3.73517570480879,"Gaze following",4277,3.29593880535299,0.000980933797474684,2.32869549279916,0.0198752030686077 4 | "3","idspref",0.721100645552326,0.536226247657231,0.905975043447422,"idspref","idspref",0.343645640720697,0.00273475070754708,0.298663926366983,74.546156311904,3.92867973989984,"Infant directed speech preference",3762,2.14097619211494,0.0322759580321836,1.26448854500003,0.206054729641146 5 | "4","inphondb-native",0.592349301996462,0.488046035728792,0.696652568264132,"inphondb-native","inphondb-native",0.764564694580739,-0.000739230883816067,0.271143380767658,75.8353490938246,4.13827621132506,"Vowel discrimination (native)",9620,9.17218892100586,4.63509800204844e-20,8.86903447949602,7.37828313896223e-19 6 | 
"5","inphondb-nonnative",0.659452761969099,0.415793984496766,0.903111539441432,"inphondb-nonnative","inphondb-nonnative",0.911648440897149,-0.001035533709136,0.604108001436863,87.3318196648281,7.89379353263234,"Vowel discrimination (non-native)",3391,3.85922927108089,0.000113745188150197,4.13326536754592,3.57645385687655e-05 7 | "6","inworddb",0.2027119479114,0.156965626448049,0.248458269374751,"inworddb","inworddb",0.199897067374938,1.00714430101587e-05,0.125570585809559,81.7330206758886,5.47435885406656,"Word segmentation",6088,2.5754950202763,0.0100096714635014,2.57612770810744,0.00999137360937482 8 | "7","labadv",0.398085687826752,0.290173734955161,0.505997640698342,"labadv","labadv",0.0528495736281519,0.00104155392117671,0.158531834111097,55.2311024386164,2.23369360085063,"Label advantage in concept learning",3928,0.57120482878395,0.567860805356178,0.309566468840102,0.756890658445436 9 | "8","mutex",1.00858714300335,0.684503374745843,1.33267091126085,"mutex","mutex",-0.967793960207081,0.00234371723735545,0.753774004140389,85.799425609071,7.04196867303325,"Mutual exclusivity",6443,8.26311779783524,1.4191606598009e-16,6.24909047707303,4.12849720815245e-10 10 | "9","phonotactics",0.0377992190373956,-0.0889624754879497,0.164560913562741,"phonotactics","phonotactics",0.22033947414456,-0.000611186222384238,0.135585387918855,75.7721117531228,4.12747487445132,"Phonotactic learning",45,-1.42956060419677,0.152843169292135,-1.07646158062738,0.281720874356171 11 | "10","pointing_concurrent",0.979536739538348,0.619064862130878,1.34000861694582,"pointing_concurrent","pointing_concurrent",0.249001773854867,0.00112726584471017,0,0,1,"Pointing and vocabulary (concurrent)",1617,1.25041717831421,0.211147192590949,1.14726542476394,0.251271939133302 12 | "11","pointing_longitudinal",0.53516234839714,0.305537979225891,0.76478671756839,"pointing_longitudinal","pointing_longitudinal",0.27535063463037,0.000476714781951855,0.0379942463429161,14.3469841633922,1.16750121432689,"Pointing 
and vocabulary (longitudinal)",1414,2.79969566840486,0.00511508055808872,2.87791882490333,0.00400308132283032 13 | "12","sounds",-0.216608734017906,-0.43315217285188,-6.5295183933084e-05,"sounds","sounds",-0.198767951133173,-9.15698518763867e-05,0.168501730317697,83.0906393786632,5.91388416388828,"Statistical sound category learning",Inf,-2.668167202188,0.00762662931788937,-2.66540165239196,0.00768964198299721 14 | "13","symbolism",0.115059072570507,-0.0155738598053361,0.24569200494635,"symbolism","symbolism",-0.0690056287108323,0.000465385202345419,0.0814910792524869,47.2949305853844,1.89735069340918,"Sound symbolism",526,1.42122154397262,0.155252363424869,1.73542954091665,0.0826647514459549 15 | "14","word_recognition",1.33943128698405,0.859017061266737,1.81984551270136,"word_recognition","word_recognition",-1.11244395773165,0.0039779114475048,0.633468194792081,90.7188371504481,10.7745119465098,"Online word recognition",2043,2.73757031037296,0.00618948877364021,2.16372261428756,0.0304856443500483 16 | -------------------------------------------------------------------------------- /reports/networks/data/all_papers.csv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/reports/networks/data/all_papers.csv -------------------------------------------------------------------------------- /reports/networks/data/all_papers_all.csv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/reports/networks/data/all_papers_all.csv -------------------------------------------------------------------------------- /reports/networks/data/dois2.csv: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/reports/networks/data/dois2.csv -------------------------------------------------------------------------------- /reports/networks/data/statseg_papers.csv: -------------------------------------------------------------------------------- 1 | ID,study_ID,long_cite 2 | 1,AslinSaffranNewport1998,"Aslin, R. N., Saffran, J. R., & Newport, E. L. (1998). Computation of conditional probability statistics by 8-month-old infants. Psychological science, 9(4), 321-324." 3 | 2,Graf-EstesLewWilliams2015,"Graf Estes, K. , & Lew-Williams, C. (2015). Listening through voices: Infant statistical word segmentation across multiple speakers. Developmental psychology, 51(11), 1517." 4 | 3,GrafEstes2012,"Graf Estes, K. (2012). Infants generalize representations of statistically segmented words. Front. Psychol., 29, October 2012." 5 | 4,HaySaffran2012,"Hay, J. F., & Saffran, J. R. (2012). Rhythmic grouping biases constrain infant statistical learning. Infancy, 17(6), 610-641." 6 | 5,JohnsonJusczyk2001,"Johnson, E. K. & Jusczyk, P. W.Word segmentation by 8-month-olds: when speech cues count more than statistics. Journal of Memory and Language, 44, 2001" 7 | 6,JohnsonJusczyk2003b,"Johnson, E. K., & Jusczyk, P. W. (2003). Exploring statistical learning by 8-month-olds: The role of complexity and variation. Jusczyk Lab final report, 141-148." 8 | 7,JohnsonSeidl2009,"Johnson, E. K., Seidl, A. H. (2009) At 11 months, prosody still outranks statistics. Developmental Science 12:1 pp 131-141" 9 | 8,JohnsonTyler2010,"Johnson, E. K., & Tyler, M. D. (2010). Testing the limits of statistical learning for word segmentation. Developmental science, 13(2), 339-345." 10 | 9,LewWilliamsSaffran2012,"Lew-Williams, C., & Saffran, J. R. (2012). All words are not created equal: Expectations about word length guide infant statistical learning. Cognition, 122(2), 241-246." 
11 | 10,MersadNazzi2012,"Mersad, K., & Nazzi, T. (2012). When Mommy comes to the rescue of statistics: Infants combine top-down and bottom-up cues to segment speech. Language Learning and Development, 8(3), 303-315." 12 | 11,Saffran2001,"Saffran, J. R. (2001). Words in a sea of sounds: The output of infant statistical learning. Cognition, 81(2), 149-169." 13 | 12,SaffranAslinNewport1996,"Saffran, J. R., Aslin, R. N., & Newport, E. L. (1996). Statistical learning by 8-month-old infants. Science, 274, 5294." 14 | 13,SeidlTincoffBakerCristia2015,"Seidl, A., Tincoff, R., Baker, C., & Cristia, A. (2015). Why the body comes first: effects of experimenter touch on infants' word finding. Developmental science, 18(1), 155-164." 15 | 14,Thiessen2010,"Thiessen, E. D. (2010). Effects of visual information on adults’ and infants’ auditory statistical learning. Cognitive Science, 34(6), 1093-1106." 16 | 15,ThiessenHillSaffran2005,"Thiessen, E. D., Hill, E. A., & Saffran, J. R. (2005). Infant‐directed speech facilitates word segmentation. Infancy, 7(1), 53-71." 17 | 16,ThiessenSaffran2003,"Thiessen, E. D., & Saffran, J. R. (2003). When cues collide: Use of stress and statistical cues to word boundaries by 7- to 9-month-old infants. Developmental Psychology, 39(4), 706-716. http://dx.doi.org/10.1037/0012-1649.39.4.706" 18 | 17,ThiessenSaffran2003,"Thiessen, E. D., & Saffran, J. R. (2003). When cues collide: Use of stress and statistical cues to word boundaries by 7- to 9-month-old infants. Developmental Psychology, 39(4), 706-716. http://dx.doi.org/10.1037/0012-1649.39.4.707" 19 | 18,ThiessenSaffran2007,"Thiessen, E. D., & Saffran, J. R. (2007). Learning to learn: Infants’ acquisition of stress-based strategies for word segmentation. Language learning and development, 3(1), 73-100." 20 | 19,JohnsonJuscyk2003a,"Johnson, E. K., & Jusczyk, P. W. (2003). Exploring possible effects of language-‐specific knowledge on infants’ segmentation of an artificial language. 
Jusczyk lab final report, 141-148.JusczykExploring possible effects of language-specific knowledge on infants' segmentation of an artificial languageJusczyk lab final report." 21 | 20,EricksonThiessenGrafEstes2014,"Erickson, L. C., Thiessen, E. D., & Estes, K. G. (2014). Statistically coherent labels facilitate categorization in 8-month-olds. Journal of Memory and Language, 72, 49-58." 22 | 21,SimonWerchanGoldsteinSweeneyBootzinNadelGómez2016,"Simon, K. N., Werchan, D., Goldstein, M. R., Sweeney, L., Bootzin, R. R., Nadel, L., & Gómez, R. L. (2016). Sleep confers a benefit for retention of statistical language learning in 6.5 month old infants. Brain and language." 23 | 22,LanyShoaib2017,"Lany, J.; Shoaib, A.; Thompson, A. & Graf Estes, K. (2017). Infant statistical-learning ability is related to real-time language processing. Journal of Child Language. DOI: https://doi.org/10.1017/S0305000917000253" 24 | -------------------------------------------------------------------------------- /reports/networks/dois2.csv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/reports/networks/dois2.csv -------------------------------------------------------------------------------- /reports/networks/get_primary_to_get_dois.R: -------------------------------------------------------------------------------- 1 | ### GET PRIMARY PAPERS TO GET DOIS #### 2 | 3 | library(tidyverse) 4 | 5 | source("../../../dashboard/global.R", chdir = TRUE) 6 | 7 | meta_d <- all_data %>% 8 | filter(short_name == "statSeg") %>% 9 | select(study_ID, long_cite) %>% 10 | distinct() 11 | 12 | df <- meta_d %>% 13 | mutate(ID = 1:n()) %>% 14 | select(ID, everything()) 15 | write_csv(df, "data/statseg_papers.csv") 16 | 17 | # To get dois, copy-pasting 50 at a time into search engine (obtained from: https://apps.crossref.org/SimpleTextQuery/) 18 | 19 | dois <- read.csv("dois2.csv") 
%>% 20 | filter(dataset == "Statistical Word Segmentation") %>% 21 | select(study_ID, doi) %>% 22 | filter(doi != "" & !is.na(doi)) %>% 23 | mutate(doi = ifelse(grepl(".org/", doi), 24 | unlist(lapply(str_split(doi, ".org/"), 25 | function(x) {x[2]})), as.character(doi))) 26 | 27 | ### Scrape data from web of science at paper level using bibliometrix package 28 | # (copy paste this string into WOS search engine) 29 | # credentials: uchicago email; password: uchicago pw + 1 30 | string = "DO = (" 31 | for (i in 1:length(dois$doi)) { 32 | string = paste(string, dois$doi[i], sep = " OR ") 33 | } 34 | # add paren to 35 | 36 | ### search web of science-> save to marked list -> download marked list as .txt 37 | wos.raw <- readFiles("data/savedrecs_stat_seg.txt") 38 | 39 | # creates dataframe out of raw wos 40 | wos <- convert2df(wos.raw, dbsource = "isi", format = "plaintext") %>% # creates 41 | mutate(DI = tolower(DI), 42 | DI = str_replace_all(DI, "//", "/")) %>% 43 | #ID = str_replace_all(ID, ";;", ";") 44 | arrange(DI) %>% 45 | filter(!is.na(DI)) %>% 46 | mutate_each(funs(as.factor), -AU) 47 | 48 | wos2 <- metaTagExtraction(wos[1,], Field = "CR_AU", sep = ";") 49 | # write.csv(wos, "wos_bib1_6.csv") -------------------------------------------------------------------------------- /reports/p-curves.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "P-Curves" 3 | author: "Christina Bergmann" 4 | date: "`r Sys.Date()`" 5 | output: 6 | html_document: 7 | highlight: tango 8 | theme: united 9 | code_folding: hide 10 | --- 11 | 12 | ```{r, setup, include = FALSE} 13 | library(knitr) 14 | library(dplyr) 15 | library(tidyr) 16 | library(ggplot2) 17 | library(langcog) 18 | 19 | # load data 20 | knitr::opts_chunk$set(fig.width = 8, fig.height = 5, echo = TRUE, 21 | warning = FALSE, message = FALSE, cache = TRUE) 22 | ggplot2::theme_set(langcog::theme_mikabr(base_family = "Ubuntu")) 23 | source("../dashboard/global.R", chdir 
= TRUE) 24 | ``` 25 | 26 | # Introduction 27 | 28 | P-curves have been proposed as one (of many) ways to measure publication biases, because so-called p-hacking (conducting multiple statistical analyses, excluding data points, etc. until a p-value below the significance threshold [typically .05] is reached) can lead to an inflation of high p-values that are nonetheless significant. In the absence of prevalent p-hacking, and when a true (non-null) effect is present, the expected distribution of p-values is biased towards smaller values. The present script is based on the p-curve app version 4.0. 29 | 30 | One of the main problems with p-curve is the use of text mining to generate input, and the presence of heterogeneous tests (https://peerj.com/articles/1715/). In the case of MetaLab, however, all test statistics were entered by hand and concern a homogeneous research literature (within each dataset). Further, we only enter the *main* test statistic which would also be used to calculate effect sizes in our meta-analysis. Therefore, we believe that p-curve is an appropriate analysis for the dataset. 31 | 32 | ### Data preparation 33 | 34 | When reported or necessary to calculate effect sizes, we note the t-value or F-score for each record (a single study). We do not consider studies for which this information was unavailable in the following analyses. 35 | 36 | To calculate p-values from $t$-values and $F$-scores, we also need the degrees of freedom. When one group of participants was tested, we use $N-1$, for a two group comparison design we use $N-2$. $F$-scores require two degrees of freedom, the first one however is always one in the main analysis of interest (we do not enter or consider e.g., interactions of the main effect of interest with covariates such as age, gender, etc.). 37 | 38 | # P-curve for each individual dataset 39 | 40 | We need to source the code from the p-curve app. 
This p-curve report is based on the p-curve app 4.0 (accessed 25.02.2015) via code from the [app](http://p-curve.com/app4/App%204.0%20p-curve%202016%2001%2011.r). We have heavily adapted this code, however. 41 | 42 | ```{r} 43 | source("pcurve.R") 44 | ``` 45 | 46 | Compute p-curves for each dataset. 47 | 48 | ```{r PlotEachDataSet} 49 | pc <- all_data %>% 50 | group_by(dataset) %>% 51 | select(study_ID, t, F, n_1, n_2, participant_design, d) %>% 52 | filter(!is.na(t)|!is.na(F)) %>% 53 | mutate(df2 = ifelse(participant_design == "between", (n_1 + n_2)-2, n_1-1), 54 | value = ifelse(is.na(t), F, t**2), # turn ts into Fs by squaring them 55 | test = ifelse(is.na(t), "F", "t"), 56 | df1 = 1) %>% # In the source script, the authors always have 2 dfs, but 57 | # df1 is always 1 (both for t and for F), 58 | # we always look at the main result of ANOVAs 59 | do(p_curve(.)) 60 | 61 | ``` 62 | 63 | Plot p-curves. 64 | 65 | ```{r} 66 | ggplot(pc, aes(x = p, y = value, col = measure, lty = measure)) + 67 | geom_line() + 68 | facet_wrap(~dataset) 69 | ``` 70 | 71 | It's clear that things look pretty reasonable across datasets. 
72 | -------------------------------------------------------------------------------- /reports/pcurve.R: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # get_ncp - functions that find non-centrality parameter for f,chi distributions that gives some level of power 3 | # we don't use chi-square / z-scores, so I only add the getncp for F scores (t will be turned to F in this version of p-curve) 4 | ncp_error = function(ncp_est, power, x, df1, df2) 5 | { 6 | pf(x, df1 = df1, df2 = df2, ncp = ncp_est) - (1 - power) 7 | } 8 | 9 | get_ncp <- function(df1, df2, power) 10 | { 11 | xc = qf(p=.95, df1 = df1, df2 = df2) 12 | root <- uniroot(ncp_error, c(0, 35), x = xc, df1 = df1, df2 = df2, power = power)$root 13 | 14 | return(root) 15 | } 16 | 17 | ############################################################################### 18 | # prop33(pc) - Computes % of p-values that are smaller than pc, for the tests submitted to p-curve, if power is 33% 19 | prop33 <- function(pc, ncp33, df1, df2) 20 | { 21 | #pc: critical p-value 22 | 23 | #Overview: 24 | #Creates a vector of the same length as the number of tests submitted to p-curve, significant and not, 25 | # and computes the proportion of p-values expected to be smaller than {pc} given the d.f. 
26 | # and outputs the entire vector, with NA values where needed 27 | 28 | #F-tests (& thus t-tests) 29 | prop= (1 - pf(qf(1 - pc, df1=df1, df2=df2), df1=df1, df2=df2, ncp=ncp33)) 30 | 31 | #output it 32 | return(prop) 33 | } 34 | 35 | ############################################################################### 36 | # pbound: bound p-values and pp-values by precision of measurement to avoid errors 37 | pbound <- function(p) 38 | { 39 | pmin(pmax(p,2.2e-16),1-2.2e-16) 40 | } 41 | 42 | 43 | ############################################################################### 44 | # p_curve 45 | # main function that does the doing. 46 | p_curve <- function(df){ 47 | 48 | # Recompute Ps and bound to level of precision desired 49 | df <- df %>% 50 | mutate(p = pbound(1 - pf(value, df1 = df1, df2 = df2)), 51 | p_round = ceiling(p * 100) / 100) %>% 52 | rowwise %>% # need to do this rowwise because get_ncp is not vectorized 53 | # NCP33 (noncentrality parameter giving each test in p-curve 33% power given the d.f. of the test) 54 | mutate(ncp33 = get_ncp(df1, df2, power=1/3)) 55 | 56 | pc_data <- data.frame(p = seq(.01,.05,.01)) %>% 57 | group_by(p) %>% 58 | mutate(observed = sum(df$p_round == p) / sum(df$p < .05), 59 | baseline = 1/5) 60 | 61 | # Green line (Expected p-curve for 33% power) 62 | # Proportion of tests expected to get <01, <02... 
63 | gcdf1=prop33(.01, df$ncp33, df$df1, df$df2) #vector with proportion of p-values p<.01, with 33% power 64 | gcdf2=prop33(.02, df$ncp33, df$df1, df$df2) # "" p<.02, " 65 | gcdf3=prop33(.03, df$ncp33, df$df1, df$df2) # "" p<.03, " 66 | gcdf4=prop33(.04, df$ncp33, df$df1, df$df2) # "" p<.04, " 67 | #Note: for p<.05 we know it is 33% power 68 | 69 | #5.1.2 Now compute difference, and divide by 1/3 to get the share of significant p-values in each bin 70 | pc_data$expected <- c(mean(gcdf1)*3, #Average of the vector p<.01 71 | mean(gcdf2-gcdf1)*3, #Difference between .02 and .01 72 | mean(gcdf3-gcdf2)*3, #Difference between .03 and .02 73 | mean(gcdf4-gcdf3)*3, #Difference between .04 and .03 74 | mean(1/3-gcdf4)*3) #Difference between .05 and .04 75 | #Because we have one row per test submitted, the average is weighted average, giving each test equal weight 76 | pc_long <- pc_data %>% 77 | gather(measure, value, observed, baseline, expected) 78 | 79 | return(pc_long) 80 | } 81 | 82 | -------------------------------------------------------------------------------- /reports/template.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Report" 3 | author: "Alice Acquisitionist" 4 | date: "This report was rendered on `r Sys.Date()` and will be automatically re-rendered nightly, reflecting any changes in the data or code." 5 | --- 6 | 7 | ```{r, include=FALSE} 8 | knitr::opts_chunk$set(warning = FALSE, message = FALSE) 9 | ggplot2::theme_set(langcog::theme_mikabr(base_family = "Ubuntu")) 10 | source("../dashboard/global.R", chdir = TRUE) 11 | ``` 12 | 13 | 14 | ## R Markdown 15 | 16 | This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see . 17 | 18 | When you click the **Knit** button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. 
You can embed an R code chunk like this: 19 | 20 | ```{r cars} 21 | summary(cars) 22 | ``` 23 | 24 | ## Including Plots 25 | 26 | You can also embed plots, for example: 27 | 28 | ```{r pressure, echo=FALSE} 29 | plot(pressure) 30 | ``` 31 | 32 | Note that the `echo = FALSE` parameter was added to the code chunk to prevent printing of the R code that generated the plot. 33 | -------------------------------------------------------------------------------- /reports/within_subject_ES.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Within Subject Effect sizes" 3 | author: "Molly Lewis" 4 | date: "This report was rendered on `r Sys.Date()` and will be automatically re-rendered nightly, reflecting any changes in the data or code." 5 | --- 6 | 7 | ```{r, include=FALSE} 8 | knitr::opts_chunk$set(warning = FALSE, message = FALSE) 9 | #ggplot2::theme_set(langcog::theme_mikabr(base_family = "Ubuntu")) 10 | source("../dashboard/global.R", chdir = TRUE) 11 | 12 | library(metafor) 13 | library(tidyverse) 14 | ``` 15 | 16 | Exploring issue with differences in effect size as a function of how its calculated. Described in more detail [here](https://github.com/langcog/metalab/issues/113). 
17 | 18 | ```{r, } 19 | all_data = all_data %>% 20 | mutate(es_type = ifelse(es_method == "special_case", "special_case", 21 | ifelse(es_method == "between", "between", 22 | ifelse(es_method %in% c("t_two", "f_two", "d_two", 23 | "subj_diff_two"), "subj-level", 24 | "group-level")))) 25 | 26 | 27 | es.counts = all_data %>% 28 | filter(dataset != "Pointing and vocabulary (longitudinal)") %>% 29 | select(dataset,es_method, participant_design, es_type) %>% 30 | group_by(dataset, es_method, es_type) %>% 31 | summarise(n = n(), 32 | participant_design = participant_design[1]) %>% 33 | arrange(es_method) 34 | 35 | ggplot(es.counts, aes(x = es_method, fill = es_type, group= es_type, y = n)) + 36 | geom_bar(stat= "identity", position = "dodge") + 37 | theme_bw() + 38 | ylab("n studies") + 39 | facet_wrap(~dataset, scales = "free_y") + 40 | scale_x_discrete(drop=TRUE) + 41 | theme(axis.text.x = element_text(angle=90, vjust = .5)) 42 | ``` 43 | 44 | It looks like this issue is primarily problematic for vowel discrimination and potentially IDS. For IDS ([Dunst, et al.](http://earlyliteracylearning.org/cellreviews/cellreviews_v5_n1.pdf)), we don't really know how d was calculated. 45 | 46 | For vowel discrimination, let's look at whether there's a difference in effect size as a function of how it was calculated. 
47 | ```{r} 48 | within.only.vowel.native = all_data %>% 49 | filter(dataset == "Vowel discrimination (native)") %>% 50 | filter(es_type != "special_case" & es_type != "between") 51 | 52 | ggplot(within.only.vowel.native, aes(x = es_type, y = d_calc)) + 53 | geom_boxplot() 54 | 55 | table(within.only.vowel.native$es_type, within.only.vowel.native$method) 56 | # within subject are central fixation and HPP 57 | 58 | table(within.only.vowel.native$es_type, within.only.vowel.native$response_mode) 59 | # all within subject are central fixation and HPP (method) OR eyetracking (response_mode) 60 | 61 | rma(d_calc ~ mean_age + es_type + response_mode , vi = d_var_calc, data = within.only.vowel.native) 62 | 63 | within.only.vowel.nnative = all_data %>% 64 | filter(dataset == "Vowel discrimination (non-native)") %>% 65 | filter(es_type != "special_case" & es_type != "between") 66 | 67 | ggplot(within.only.vowel.nnative, aes(x = es_type, y = d_calc)) + 68 | geom_boxplot() 69 | 70 | table(within.only.vowel.nnative$es_type, within.only.vowel.nnative$method) 71 | # within subject are central fixation and HPP 72 | 73 | table(within.only.vowel.nnative$es_type, within.only.vowel.nnative$response_mode) 74 | # all within subject are central fixation and HPP (method) OR eyetracking (response_mode) 75 | 76 | 77 | rma(d_calc ~ mean_age + es_type , vi = d_var_calc, data = within.only.vowel.nnative) 78 | 79 | ``` 80 | In a simple analysis subj-level es calculations are overall lower than group-level. But subj-level calculations are perfectly correlated for native with es_type/method. So, hard to know what's going on here. 
81 | 82 | ```{r, levels plot} 83 | # need to recompute datasets summary data, based on filtered all_data 84 | datasets <- datasets %>% 85 | rename(dataset = name) %>% 86 | select(-num_experiments, -num_papers, -num_subjects) %>% 87 | filter(dataset != "Statistical word segementation") %>% 88 | filter(dataset != "Pointing and vocabulary (longitudinal)") 89 | 90 | # rename pointing and vocabulary 91 | datasets$name = plyr::mapvalues(datasets$name, from = c("Pointing and vocabulary (concurrent)"), 92 | to = c("Pointing and vocabulary")) 93 | 94 | all_data$dataset = plyr::mapvalues(all_data$dataset , from = c("Pointing and vocabulary (concurrent)"), 95 | to = c("Pointing and vocabulary")) 96 | 97 | # make levels df 98 | ld.df = data.frame(dataset = datasets$name, 99 | domain = c("Prosody", "Words", "Communication", "Sounds", 100 | "Sounds", "Sounds", "Sounds", "Sounds", "Words", 101 | "Words", "Communication", "Words")) 102 | 103 | ld.df$domain = factor(ld.df$domain, levels = c("Prosody","Sounds", "Words", "Communication")) 104 | 105 | all_data_groups = all_data %>% 106 | filter(es_type != "subj-level" | es_method == "d_two") # include IDS because we don't know because based on d 107 | 108 | single_method_datasets = all_data_groups %>% 109 | group_by(dataset) %>% 110 | summarise(n_methods = length(levels(as.factor(method)))) %>% 111 | filter(n_methods == 1) %>% 112 | .[["dataset"]] 113 | 114 | # get model fits 115 | all_data.resid = data.frame() 116 | for (i in 1:length(datasets$name)) { 117 | d = filter(all_data_groups, dataset == datasets$name[i]) 118 | if (datasets$name[i] %in% single_method_datasets) { 119 | full.model = rma(d_calc, vi = d_var_calc, data = d) 120 | } else { 121 | full.model = rma(d_calc ~ method, vi = d_var_calc, data = d) 122 | } 123 | 124 | d = as.data.frame(rstandard(full.model)$resid) %>% 125 | cbind(d) %>% 126 | rename(residual.d = `rstandard(full.model)$resid`) %>% 127 | mutate(residual.d = residual.d + full.model$b[1]) %>% # add in intercept 
term 128 | inner_join(all_data) 129 | 130 | all_data.resid = rbind(all_data.resid,d) 131 | } 132 | 133 | # merge in levels 134 | residualized.es = all_data.resid %>% 135 | left_join(ld.df) %>% 136 | filter(dataset != "Statistical sound category learning") %>% 137 | filter(dataset != "Phonotactic learning") %>% 138 | mutate(age.years = mean_age/365) %>% 139 | filter(age.years < 3) 140 | 141 | fs = 16 142 | ggplot(residualized.es, aes(x = age.years, y = residual.d, col = dataset)) + 143 | facet_grid(~ domain) + 144 | geom_point(aes(size = n), alpha = .1, data = filter(residualized.es, residual.d > -.5 & residual.d < 2.5 )) + 145 | geom_hline(yintercept = 0, linetype = "dashed") + 146 | geom_line(stat="smooth", method="lm", se = FALSE, size = 1, formula = y ~ log(x)) + 147 | coord_cartesian(ylim = c(-.5, 2.5), xlim = c(0, 3.1)) + # doesn't remove data from geom_smooth 148 | xlab("Age (years)") + 149 | ylab("Method-residualized\n effect size") + 150 | theme_bw() + 151 | scale_color_solarized() + 152 | theme(legend.position = "none", 153 | legend.key = element_blank(), 154 | axis.line= element_line(size = 1), 155 | axis.text = element_text(colour = "black", size = fs), 156 | strip.text.x = element_text(size = 14), 157 | axis.title = element_text(colour = "black", size = fs), 158 | strip.background = element_rect(fill="grey")) 159 | ``` 160 | Even when excluding within-subject ES for vowel discrimination, meta-meta plot looks mostly the same. 
161 | 162 | -------------------------------------------------------------------------------- /scripts/cache_datasets.R: -------------------------------------------------------------------------------- 1 | # TO RUN: Rscript scripts/cache_datasets.R 2 | suppressMessages(suppressWarnings({ 3 | library(dplyr) 4 | library(purrr) 5 | })) 6 | 7 | source("scripts/compute_es.R") 8 | datasets <- jsonlite::fromJSON(txt = "metadata/datasets.json") 9 | fields <- yaml::yaml.load_file("metadata/spec.yaml") 10 | 11 | 12 | # Validate dataset's values for a given field 13 | validate_dataset_field <- function(dataset_name, dataset_contents, field) { 14 | if (field$required) { 15 | if (field$field %in% names(dataset_contents)) { 16 | if (field$type == "options") { 17 | if (class(field$options) == "list") { 18 | options <- names(unlist(field$options, recursive = FALSE)) 19 | } else { 20 | options <- field$options 21 | } 22 | invalid_values <- unique(dataset_contents[[field$field]]) %>% 23 | setdiff(options) 24 | if (!is.null(field$nullable) && field$nullable) { 25 | invalid_values <- na.omit(invalid_values) 26 | } 27 | if (length(invalid_values)) { 28 | for (value in invalid_values) { 29 | cat(sprintf("Dataset '%s' has invalid value '%s' for field '%s'.\n", 30 | dataset_name, value, field$field)) 31 | } 32 | return(FALSE) 33 | } 34 | } else if (field$type == "numeric") { 35 | field_contents <- dataset_contents[[field$field]] 36 | if (!(is.numeric(field_contents) || all(is.na(field_contents)))) { 37 | cat(sprintf("Dataset '%s' has wrong type for numeric field '%s'.\n", 38 | dataset_name, field$field)) 39 | return(FALSE) 40 | } 41 | } 42 | } else { 43 | cat(sprintf("Dataset '%s' is missing required field: '%s'.\n", 44 | dataset_name, field$field)) 45 | return(FALSE) 46 | } 47 | } 48 | return(TRUE) 49 | } 50 | 51 | 52 | # Validate dataset's values for all fields 53 | validate_dataset <- function(dataset_meta, dataset_contents) { 54 | valid_fields <- map(fields, function(field) { 55 | 
validate_dataset_field(dataset_meta$name, dataset_contents, field) 56 | }) 57 | valid_dataset <- all(unlist(valid_fields)) 58 | return(valid_dataset) 59 | } 60 | 61 | 62 | # Get a dataset's contents from the google sheets 63 | fetch_dataset <- function(dataset_meta) { 64 | 65 | if (dataset_meta$key == "") { 66 | cat(sprintf("Can't load dataset '%s', key missing.\n", dataset_meta$name)) 67 | return() 68 | } 69 | 70 | dataset_url <- sprintf( 71 | "https://docs.google.com/spreadsheets/d/%s/export?id=%s&format=csv", 72 | dataset_meta$key, dataset_meta$key 73 | ) 74 | 75 | tryCatch({ 76 | dataset_url %>% 77 | httr::GET() %>% 78 | httr::content(col_names = TRUE, col_types = NULL, encoding = "UTF-8") 79 | }, 80 | error = function(e) { 81 | cat(sprintf("Can't load dataset '%s' with key '%s'.\n", dataset_meta$name, 82 | dataset_meta$key)) 83 | }) 84 | 85 | } 86 | 87 | 88 | # Manipulate a dataset's contents to prepare it for saving 89 | tidy_dataset <- function(dataset_meta, dataset_contents) { 90 | 91 | # Coerce each field's values to the field's type, discard any columns not in 92 | # field spec, add NA columns for missing (optional) fields 93 | dataset_data <- data_frame(row = 1:nrow(dataset_contents)) 94 | for (field in fields) { 95 | if (field$field %in% names(dataset_contents)) { 96 | if (field$type == "string") { 97 | field_fun <- as.character 98 | } else if (field$type == "numeric") { 99 | field_fun <- as.numeric 100 | } else { 101 | field_fun <- function(x) x 102 | } 103 | dataset_data[,field$field] <- field_fun(dataset_contents[[field$field]]) 104 | } else { 105 | dataset_data[,field$field] <- NA 106 | } 107 | } 108 | 109 | # Impute values for missing correlations 110 | set.seed(111) 111 | if (all(is.na(dataset_data$corr))) { 112 | dataset_data$corr_imputed <- NA 113 | } else { 114 | dataset_data$corr_imputed <- dataset_data$corr %>% 115 | Hmisc::impute(fun = "random") %>% 116 | as.numeric() 117 | } 118 | 119 | # Compute effect sizes and variances 120 | 
dataset_data_calc <- dataset_data %>% 121 | #mutate(r = NA, SD_dif = NA) %>% #TODO: deal with these fields 122 | #mutate(num = row_number()) %>% 123 | #split(.$num) %>% 124 | split(.$row) %>% 125 | map_df(~bind_cols( 126 | .x, compute_es( 127 | .x$participant_design, .x$x_1, .x$x_2, .x$x_dif, .x$SD_1, .x$SD_2, 128 | .x$SD_dif, .x$n_1, .x$n_2, .x$t, .x$F, .x$d, .x$d_var, .x$corr, 129 | .x$corr_imputed, .x$r, .x$study_ID, .x$expt_num, 130 | .x$special_cases_measures, .x$contrast_sampa 131 | ))) %>% 132 | select(-row) 133 | 134 | # Add any other derived values 135 | method_options <- keep(fields, ~.x$field == "method")[[1]]$options 136 | method_names <- unlist(map(method_options, ~.x[[names(.x)]]$fullname)) 137 | names(method_names) <- unlist(map(method_options, names)) 138 | 139 | dataset_data_calc %>% 140 | mutate(dataset = dataset_meta[["name"]], 141 | short_name = dataset_meta[["short_name"]], 142 | method = unlist(method_names[method])) %>% 143 | rowwise() %>% 144 | mutate(mean_age = weighted.mean(c(mean_age_1, mean_age_2), c(n_1, n_2), 145 | na.rm = TRUE), 146 | n = mean(c(n_1, n_2), na.rm = TRUE)) 147 | 148 | } 149 | 150 | 151 | # Save a dataset's contents to a csv file 152 | save_dataset <- function(dataset_meta, dataset_data) { 153 | #write.csv(dataset_data, dataset_meta$file, row.names = FALSE) 154 | feather::write_feather(dataset_data, file.path("data", dataset_meta$filename)) 155 | cat(sprintf("Dataset '%s' saved successfully.\n", dataset_meta$name)) 156 | } 157 | 158 | 159 | # Fetch a dataset from google sheets, run it through field validation, 160 | # perform any necessary manipulations of its contents, save it to a file 161 | load_dataset <- function(dataset_short_name) { 162 | 163 | dataset_meta <- datasets %>% filter(short_name == dataset_short_name) 164 | if (!nrow(dataset_meta)) { 165 | cat(sprintf("Dataset '%s' isn't in datasets metadata.\n", dataset_short_name)) 166 | return() 167 | } 168 | 169 | dataset_contents <- fetch_dataset(dataset_meta) 170 
| if (is.null(dataset_contents)) { 171 | return() 172 | } 173 | 174 | valid_dataset <- validate_dataset(dataset_meta, dataset_contents) 175 | if (!valid_dataset) { 176 | cat(sprintf( 177 | "Dataset '%s' had one or more validation issues, not being cached.\n", 178 | dataset_meta$name)) 179 | return() 180 | } 181 | 182 | dataset_data <- tidy_dataset(dataset_meta, dataset_contents) 183 | save_dataset(dataset_meta, dataset_data) 184 | 185 | } 186 | 187 | 188 | # Given no arguments, load all datasets 189 | # Given one argument, load the dataset with the given name 190 | args <- commandArgs(trailingOnly = TRUE) 191 | if (length(args) == 0) { 192 | for (short_name in datasets$short_name) { 193 | load_dataset(short_name) 194 | } 195 | } else if (length(args) == 1) { 196 | if (args %in% datasets$short_name) { 197 | load_dataset(args) 198 | } else { 199 | cat("Please provide a valid dataset name.\n") 200 | } 201 | } else { 202 | cat("Usage: Rscript scripts/cache_datasets.R [short_name]\n") 203 | } 204 | -------------------------------------------------------------------------------- /scripts/cache_datasets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Runs the cache_datasets R script, writes the results to a log file, and sends each line of the log file 3 | # to LangCog's Slack under #metalab-log channel 4 | 5 | cd /home/ubuntu/metalab/ 6 | git pull 7 | 8 | mkdir -p scripts/log 9 | LOGDATE=$(date +%Y-%m-%dT%H:%M:%S) 10 | LOGFILE=scripts/log/cache_datasets_$LOGDATE 11 | Rscript scripts/cache_datasets.R > $LOGFILE 12 | 13 | SLACKPOSTURL=https://hooks.slack.com/services/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 14 | 15 | while read -r line 16 | do 17 | ENTRY=$line 18 | curl -X POST --data-urlencode 'payload={"channel": "#metalab-log", "username": "cachedatasetbot", "icon_emoji": ":dog:", "text": "'"$ENTRY"'"}' $SLACKPOSTURL 19 | done < $LOGFILE 20 | 21 | git add data/ 22 | git commit -m "datasets cached by bot" 23 
| git push origin master 24 | -------------------------------------------------------------------------------- /scripts/cache_datasets_crontab.txt: -------------------------------------------------------------------------------- 1 | 0 10 * * * /home/ubuntu/metalab/scripts/cache_datasets.sh >/dev/null 2>&1 2 | -------------------------------------------------------------------------------- /scripts/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e # exit with nonzero exit code if anything fails 3 | 4 | # clear and re-create the out directory 5 | rm -rf out || exit 0; 6 | mkdir out; 7 | 8 | # run our compile script, discussed above 9 | ./compile.sh 10 | 11 | # go to the out directory and create a *new* Git repo 12 | cd out 13 | git init 14 | 15 | # inside this git repo we'll pretend to be a new user 16 | git config user.name "Report Bot" 17 | git config user.email "mika.br@gmail.com" 18 | 19 | # The first and only commit to this new Git repo contains all the 20 | # files present with the commit message "Deploy to GitHub Pages". 21 | git add . 22 | git commit -m "Deploy to GitHub Pages" 23 | 24 | # Force push from the current repo's master branch to the remote 25 | # repo's gh-pages branch. (All previous history on the gh-pages branch 26 | # will be lost, since we are overwriting it.) We redirect any output to 27 | # /dev/null to hide any sensitive credential data that might otherwise be exposed. 
28 | git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" master:gh-pages > /dev/null 2>&1 29 | -------------------------------------------------------------------------------- /scripts/render_reports.R: -------------------------------------------------------------------------------- 1 | reports <- yaml::yaml.load_file("metadata/reports.yaml") 2 | purrr::walk(reports, ~rmarkdown::render(sprintf("reports/%s.Rmd", .x$file), 3 | output_dir = "reports/out")) 4 | -------------------------------------------------------------------------------- /scripts/render_reports.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #cd /home/ubuntu/metalab/ 4 | #rm -rf reports/out || exit 0; 5 | #mkdir reports/out; 6 | 7 | #R CMD BATCH scripts/render_reports.R; 8 | 9 | #mv reports/out .. 10 | #git checkout gh-pages 11 | #cp -r ../out/* . 12 | #rm -r ../out 13 | #git add . 14 | #git commit -m "deployed to github pages" 15 | #git push --force --quiet origin gh-pages 16 | #git checkout master 17 | -------------------------------------------------------------------------------- /write-ups/BUCLD_2015/metalab-BUCLD.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/BUCLD_2015/metalab-BUCLD.pdf -------------------------------------------------------------------------------- /write-ups/ICIS_2016/MetaLab_ICIS2016.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/ICIS_2016/MetaLab_ICIS2016.pdf -------------------------------------------------------------------------------- /write-ups/Videos/MetaLabVideoDescriptions.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/Videos/MetaLabVideoDescriptions.pdf -------------------------------------------------------------------------------- /write-ups/childdev_2017/ReplicableDevelopmentalResearch.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/childdev_2017/ReplicableDevelopmentalResearch.docx -------------------------------------------------------------------------------- /write-ups/childdev_2017/analyses/bias.R: -------------------------------------------------------------------------------- 1 | #### LOAD DATA AND PACKAGES #### 2 | #source("analyses/initial_data.R") # only do this if running script alone, otherwise leave commented 3 | 4 | 5 | #### Effect size by N #### 6 | 7 | bias_grid <-ggplot(all_data, aes(x = n, y = abs(d_calc))) + 8 | facet_wrap(~dataset, scales = "free",ncol = 3) + 9 | xlab("Sample size") + 10 | ylab("Effect Size") + 11 | geom_smooth(method = 'lm', se = T, colour = "darkgrey") + 12 | geom_point(size =.5, alpha = .75) + 13 | theme_classic() + 14 | theme(text = element_text(size=16), axis.line.x = element_line(), axis.line.y = element_line(), legend.position='none') 15 | 16 | 17 | 18 | data_bias = all_data %>% 19 | nest(-dataset, .key = information) %>% 20 | mutate(model = map(information, ~cor.test(abs(.$d_calc), .$n, method = "kendall"))) %>% 21 | mutate(p = map(model, "p.value")) %>% 22 | mutate(tau = map(model, "estimate")) %>% 23 | select(dataset, tau, p) %>% 24 | unnest() %>% 25 | mutate(p = as.numeric(as.character(p))) %>% 26 | mutate(p = ifelse(p < .001, "< .001", as.character(round(p, 3)))) %>% 27 | mutate(tau = as.numeric(as.character(tau))) 28 | 29 | -------------------------------------------------------------------------------- /write-ups/childdev_2017/analyses/funnel.R: 
-------------------------------------------------------------------------------- 1 | #### LOAD DATA AND PACKAGES #### 2 | #source("analyses/initial_data.R") # only do this if running script alone, otherwise leave commented 3 | 4 | 5 | 6 | #### Adapted from the synthesis paper #### 7 | CRIT_95 = 1.96 8 | 9 | funnel.es.data = all_data %>% 10 | mutate(dataset = as.factor(dataset)) %>% 11 | group_by(dataset) %>% 12 | #mutate(outlier = ifelse(d_calc > mean(d_calc) + (3 * sd(d_calc)) | d_calc < mean(d_calc) - (3 * sd(d_calc)), 1, 0), 13 | # outlier = as.factor(outlier)) %>% 14 | #filter(outlier == 0) %>% 15 | mutate(se = sqrt(d_var_calc), 16 | es = g_calc, 17 | center = median(d_calc), 18 | lower_lim = max(se) + .05 * max(se)) 19 | 20 | 21 | # separate df for 95 CI funnel shape 22 | funnel95.data.wide <- funnel.es.data %>% 23 | select(center, lower_lim, dataset) %>% 24 | group_by(dataset) %>% 25 | summarise(x1 = (center-lower_lim * CRIT_95)[1], 26 | x2 = center[1], 27 | x3 = center[1] + lower_lim[1] * CRIT_95, 28 | y1 = -lower_lim[1], 29 | y2 = 0, 30 | y3 = -lower_lim[1]) 31 | 32 | funnel95.data.x = funnel95.data.wide %>% 33 | select(dataset, dplyr::contains("x")) %>% 34 | gather("coordx", "x", 2:4) %>% 35 | arrange(dataset, coordx) %>% 36 | select(-coordx) 37 | 38 | funnel95.data.y = funnel95.data.wide %>% 39 | select(dataset, dplyr::contains("y")) %>% 40 | gather("coordy", "y", 2:4) %>% 41 | arrange(dataset, coordy) %>% 42 | select(-coordy) 43 | 44 | funnel95.data = bind_cols(funnel95.data.x, funnel95.data.y) 45 | 46 | funnel_grid <- ggplot(funnel.es.data, aes(x = es, y = -se)) + 47 | facet_wrap(~dataset, scales = "free") + 48 | xlab("Effect Size Cohen's d") + 49 | ylab("Standard Error") + 50 | geom_polygon(aes(x = x, y = y), 51 | data = funnel95.data, 52 | fill = "lightgrey") + 53 | geom_smooth(method = 'lm', se = F, colour = "darkgrey") + 54 | geom_vline(aes(xintercept=x2), 55 | linetype = "dashed", color = "red", size = .5, data = funnel95.data.wide) + 56 | 
geom_vline(xintercept = 0, linetype = "dotted", color = "black", size = .5) + 57 | scale_y_continuous(labels = function(x){abs(x)}) + 58 | geom_point(size = .5) + 59 | theme_classic() 60 | 61 | 62 | 63 | #### Testing for funnel plot asymmetry #### 64 | 65 | data_funnel = all_data %>% 66 | nest(-dataset, .key = information) %>% 67 | mutate(model = map(information, ~ranktest(rma.mv(d_calc, d_var_calc, random = ~ study_ID, data=.)))) %>% 68 | mutate(p = map(model, "pval")) %>% 69 | mutate(tau = map(model, "tau")) %>% 70 | select(dataset, tau, p) %>% 71 | mutate(p = as.numeric(as.character(p))) %>% 72 | mutate(p = ifelse(p < .001, "< .001", as.character(round(p, 3)))) %>% 73 | mutate(tau = as.numeric(as.character(tau))) -------------------------------------------------------------------------------- /write-ups/childdev_2017/analyses/initial_data.R: -------------------------------------------------------------------------------- 1 | ## READ IN DATA #### 2 | source("../../dashboard/global.R", chdir = TRUE) 3 | 4 | 5 | 6 | ## LOAD PACKAGES #### 7 | library(metafor) 8 | library(dplyr) 9 | library(tidyr) 10 | library(ggplot2) 11 | library(stringr) 12 | library(purrr) 13 | library(knitr) 14 | library(broom) 15 | library(pwr) 16 | library(lme4) 17 | 18 | 19 | ## CLEAN DATA #### 20 | all_data = all_data %>% 21 | filter(is.na(condition_type) | condition_type == "critical") %>% 22 | filter(dataset!="Pointing and vocabulary (longitudinal)") %>% 23 | filter(dataset!="Categorization Bias") %>% 24 | filter(dataset!="Statistical Word Segmentation") %>% #added AC 2017-10-19 25 | filter(infant_type == "typical") %>% 26 | mutate(weights_d = 1/(d_var_calc)^2) 27 | 28 | 29 | 30 | 31 | 32 | all_data = all_data %>% 33 | mutate(year = as.numeric(unlist(lapply(strsplit(unlist(study_ID), 34 | "[^0-9]+"), function(x) unlist(x)[2])))) %>% 35 | mutate(year = ifelse(grepl("submitted",study_ID), 2017, year)) %>% 36 | mutate(year = ifelse(dataset == "Phonotactic learning", 37 | 
as.numeric(unlist(lapply(strsplit(unlist(short_cite),"[^0-9]+"), function(x) unlist(x)[2]))), year)) %>% 38 | mutate(dataset = as.factor(dataset), 39 | dataset = plyr::revalue(dataset, 40 | c("Infant directed speech preference"="IDS preference", 41 | "Statistical sound category learning"="Statistical sound learning", 42 | "Label advantage in concept learning"="Concept-label advantage", 43 | "Vowel discrimination (native)"="Native vowel discrim.", 44 | "Vowel discrimination (non-native)"="Non-native vowel discrim." , 45 | "Pointing and vocabulary (concurrent)"="Pointing and vocabulary" 46 | ))) 47 | 48 | #Remove outliers 49 | 50 | clean_data = all_data %>% 51 | group_by(dataset) %>% 52 | mutate(mean_es = median(d_calc)) %>% 53 | mutate(sd_es = sd(d_calc)) %>% 54 | ungroup() %>% 55 | mutate(no_outlier = ifelse(d_calc < mean_es+3*sd_es, ifelse(d_calc > mean_es-3*sd_es, TRUE, FALSE), FALSE)) %>% 56 | filter(no_outlier) 57 | 58 | #Comment out if you do not want to remove outliers 59 | all_data = clean_data 60 | remove(clean_data) 61 | -------------------------------------------------------------------------------- /write-ups/childdev_2017/analyses/method.R: -------------------------------------------------------------------------------- 1 | ## LOAD DATA AND PACKAGES #### 2 | #source("analyses/initial_data.R") # only do this if running script alone, otherwise leave commented 3 | 4 | ## prelims ## 5 | 6 | axislabels = c("CF", "CondHT", "FC", "HPP", "LwL", "SA") 7 | 8 | 9 | ## METHOD VS EXCLUDED #### 10 | 11 | # Centering mean age 12 | method_exclude_data = all_data %>% 13 | mutate(ageC = ifelse(participant_design == "between", 14 | (((mean_age_1 - mean(mean_age_1)) + (mean_age_1 - mean(mean_age_1)))/2)/30.42, 15 | (((mean_age_1 - mean(mean_age_1))))/30.42)) %>% 16 | mutate(keep = ifelse(is.na(n_2), n_1, n_1 + n_2)) %>% 17 | mutate(dropout = ifelse(is.na(n_excluded_1), NA, ifelse(is.na(n_excluded_2), n_excluded_1, n_excluded_1+n_excluded_2))) %>% 18 | mutate(total_run = 
keep + dropout) %>% 19 | filter(!is.na(dropout)) %>% 20 | mutate(percent_dropout = dropout*100 / total_run) %>% 21 | group_by(method) %>% 22 | mutate(number = n()) %>% 23 | ungroup() %>% 24 | filter(number > 10) %>% 25 | mutate(method = factor(method)) #%>% 26 | #select(percent_dropout, keep, dropout, total_run, dataset, ageC, method, mean_age_months) 27 | 28 | method_data = all_data %>% 29 | filter(method %in% unique(method_exclude_data$method)) %>% 30 | mutate(ageC = ifelse(participant_design == "between", 31 | (((mean_age_1 - mean(mean_age_1)) + (mean_age_1 - mean(mean_age_1)))/2)/30.42, 32 | (((mean_age_1 - mean(mean_age_1))))/30.42)) %>% 33 | group_by(method) %>% 34 | mutate(number = n()) %>% 35 | ungroup() %>% 36 | filter(number > 10) %>% 37 | mutate(method = factor(method)) #%>% 38 | #group_by(dataset) %>% 39 | #mutate(number = length(levels(as.factor(method)))) %>% 40 | #ungroup() %>% 41 | #filter(number > 2) 42 | 43 | 44 | # method_exclude_data = all_data %>% 45 | # mutate(ageC = ifelse(participant_design == "between", 46 | # (((mean_age_1 - mean(mean_age_1)) + (mean_age_1 - mean(mean_age_1)))/2)/30.42, 47 | # (((mean_age_1 - mean(mean_age_1))))/30.42)) %>% 48 | # mutate(keep = ifelse(is.na(n_2), n_1, n_1 + n_2)) %>% 49 | # mutate(dropout = ifelse(is.na(n_excluded_1), NA, ifelse(is.na(n_excluded_2), n_excluded_1, n_excluded_1+n_excluded_2))) %>% 50 | # mutate(total_run = keep + dropout) %>% 51 | # filter(!is.na(dropout)) %>% 52 | # mutate(percent_dropout = dropout*100 / total_run) %>% 53 | # group_by(method) %>% 54 | # mutate(number = n()) %>% 55 | # ungroup() %>% 56 | # filter(number > 10) %>% 57 | # mutate(method = factor(method)) %>% 58 | # select(percent_dropout, keep, dropout, total_run, dataset, ageC, method, mean_age_months) 59 | 60 | # Build LMER model 61 | method_exclude.m <- lmer(percent_dropout ~ method * ageC + 62 | (1|dataset), data = method_exclude_data) 63 | 64 | # Save summary of model 65 | method_exclude.m_sum = summary(method_exclude.m) 
66 | 67 | 68 | ## PLOT OF EFFECT OF METHOD ON DROPOUT RATE #### 69 | # Make plot 70 | method_exclude.plot = ggplot(method_exclude_data, aes(x = method, y = percent_dropout)) + 71 | geom_boxplot() + 72 | geom_jitter(size = .5, alpha = .35) + 73 | xlab("Method") + 74 | ylab("Percent Excluded") + 75 | scale_x_discrete(labels = axislabels) + 76 | theme_classic() + 77 | theme(text = element_text(size=16), axis.line.x = element_line(), axis.line.y = element_line(), legend.position='none') 78 | 79 | 80 | method_exclude_age.plot = ggplot(method_exclude_data, aes(x = mean_age_months, y = percent_dropout, color = method)) + 81 | geom_smooth(method = "lm") + 82 | geom_jitter(size = .5, alpha = .35) + 83 | xlab("Age in Months") + 84 | ylab("Percent Excluded") + 85 | labs(color = "Method") + 86 | scale_color_brewer(type = 'div', palette = 'Set2') + 87 | xlim(0, 42) + 88 | theme_classic() + 89 | theme(text = element_text(size=16), axis.line.x = element_line(), axis.line.y = element_line(), legend.position='top') 90 | 91 | 92 | ## EFFECT OF METHOD #### 93 | 94 | # Build model 95 | method.rma <- rma.mv(d_calc, d_var_calc, mods = ~ageC * relevel(method, "central fixation"), random = ~ short_cite | dataset, data = method_data) 96 | 97 | # Save summary of model 98 | method.rma_sum = summary(method.rma) 99 | 100 | method.rma_coef = coef(method.rma_sum) 101 | 102 | ## PLOT OF EFFECT OF METHOD #### 103 | # Make plot 104 | method.plot = ggplot(method_data, aes(x = method, y = d_calc)) + 105 | geom_hline(yintercept = 0, colour = "grey") + 106 | geom_boxplot() + 107 | geom_jitter(size = .5, alpha = .35) + 108 | xlab("Method") + 109 | ylab("Effect size (Cohen's d)") + 110 | #xlim(0, 40) + 111 | ylim(-1.5, 3.3) + 112 | scale_x_discrete(labels = axislabels) + 113 | theme_classic() + 114 | theme(text = element_text(size=16), axis.line.x = element_line(), axis.line.y = element_line(), legend.position='none') 115 | 116 | method_age.plot = ggplot(method_data, aes(x = mean_age_months, y = d_calc, 
color = method)) + 117 | geom_hline(yintercept = 0, colour = "grey") + 118 | geom_smooth(method = "lm") + 119 | geom_jitter(size = .5, alpha = .35) + 120 | xlab("Age in Months") + 121 | ylab("Effect size (Cohen's d)") + 122 | xlim(0, 42) + 123 | ylim(-1.5, 3.3) + 124 | labs(color = "Method") + 125 | scale_color_brewer(type = 'div', palette = 'Set2') + 126 | theme_classic() + 127 | theme(text = element_text(size=16), axis.line.x = element_line(), axis.line.y = element_line(), legend.position='top') -------------------------------------------------------------------------------- /write-ups/childdev_2017/analyses/p_values.R: -------------------------------------------------------------------------------- 1 | #### LOAD DATA AND PACKAGES #### 2 | #source("analyses/initial_data.R") # only do this if running script alone, otherwise leave commented 3 | 4 | 5 | #### Recompute p-values #### 6 | all_data = all_data %>% 7 | mutate(t_calc = ifelse(is.na(t), (r_calc/(sqrt((1-(r_calc^2))/(n-2)))), t)) %>% 8 | mutate(dfs = ifelse(participant_design=="between", (n_1+n_2-2), (n_1-1))) %>% 9 | mutate(p_calc = 2*pt(abs(t_calc), df=dfs, lower = FALSE)) %>% 10 | mutate(sig = ifelse(p_calc < .05, TRUE, FALSE)) 11 | 12 | 13 | #plotting effect sizes against p-values to display their relationship 14 | 15 | sig_plot <- ggplot(all_data, aes(p_calc, abs(d_calc)),group_by(sig)) + 16 | geom_hline(yintercept = 1, size = 8, colour = "lightgrey") + 17 | geom_vline(xintercept = .05) + 18 | geom_point(size = all_data$n/20, alpha = .5) + 19 | xlab("Recalculated p-value") + 20 | ylab("Absolute value of Cohen's d") + 21 | ylim(0, 2) + 22 | theme_classic() + 23 | theme(axis.line.x = element_line(), axis.line.y = element_line()) 24 | 25 | 26 | #### P-curve #### 27 | 28 | p_data = all_data %>% 29 | filter(p_calc < .05) %>% 30 | group_by(dataset) %>% 31 | mutate(number = n()) %>% 32 | mutate(n_below = sum(p_calc<.05))%>% 33 | mutate(n_above = sum(p_calc>.05))%>% 34 | mutate(percent_below = n_below/number) 
%>% 35 | ungroup() %>% 36 | filter(number > 10) 37 | 38 | pcurve.plot <- ggplot(p_data, aes(p_calc)) + 39 | facet_wrap(~dataset, scales = "free") + 40 | geom_vline(xintercept = .05) + 41 | geom_histogram(breaks = seq(0, 0.05,by=0.01)) + 42 | geom_density(adjust = .5) + 43 | xlim(0, .05) + 44 | xlab("p-value (recalculated)") + 45 | ylab("Counts") + 46 | theme_classic() 47 | 48 | -------------------------------------------------------------------------------- /write-ups/childdev_2017/analyses/power.R: -------------------------------------------------------------------------------- 1 | ## LOAD DATA AND PACKAGES #### 2 | #source("analyses/initial_data.R") # only do this if running script alone, otherwise leave commented 3 | 4 | 5 | ## COMPUTE POWER FOR ALL PAPERS #### 6 | 7 | get_power_oldest = function(df){ 8 | pwr.t.test(n = df$n_dataset, d = df$largest_d, sig.level = 0.05) 9 | } 10 | 11 | # Compute oldest paper 12 | oldest = all_data %>% 13 | group_by(dataset, short_cite) %>% 14 | summarise(year = max(year), 15 | largest_d = max(d_calc)) %>% 16 | ungroup() %>% 17 | group_by(dataset) %>% 18 | arrange(year) %>% 19 | filter(row_number() == 1) %>% 20 | ungroup() 21 | 22 | # Combine summary with oldest paper 23 | d_comparison = inner_join(oldest, MA_summary) %>% 24 | select(dataset, largest_d, d, n_dataset, power) 25 | 26 | # Include power 27 | d_comparison_power = d_comparison %>% 28 | nest(-dataset, .key = descriptives) %>% 29 | mutate(power = map(descriptives, get_power_oldest)) %>% 30 | mutate(old_power = map(power, "power")) %>% 31 | select(dataset, old_power) %>% 32 | mutate(old_power = as.numeric(as.character(old_power))) 33 | 34 | # Save overall summary 35 | d_comparison_summary = inner_join(d_comparison, d_comparison_power) %>% 36 | mutate(difference = old_power-as.numeric(power)) %>% 37 | select(-power) 38 | 39 | 40 | ## PLOT OF DIFFERENCE OF D VALUES #### 41 | # Get data ready for figure 42 | d_comparison_full = inner_join(d_comparison, MA_summary) %>% 43 | 
select(dataset, n_dataset, d, largest_d) %>% 44 | mutate(diff_d = abs(largest_d) - abs(d)) 45 | 46 | # Make plot 47 | d_comparison_diff.plot = ggplot(d_comparison_full, aes(x = largest_d, y = d)) + 48 | geom_smooth(method = "lm", color = "black") + 49 | geom_point(colour="grey50", size = 3) + 50 | geom_point(aes(color = dataset), size = 2) + 51 | xlab("Largest Cohen's d for Oldest Paper") + 52 | ylab("Meta-analytic Cohen's d") + 53 | labs(color = "Meta-analysis") + 54 | #scale_color_brewer(type = 'div', palette = 'Paired') + 55 | theme_classic() + 56 | theme(axis.line.x = element_line(), axis.line.y = element_line(), 57 | legend.position = "top") 58 | 59 | 60 | 61 | #### Power / ES over time #### 62 | 63 | 64 | 65 | # Very simplistic, just look for a general effect of year. 66 | power_year = rma.mv(d_calc, d_var_calc, mods = ~year, random = ~ short_cite | dataset, data = all_data) 67 | 68 | # Make plot 69 | 70 | 71 | # Alternative analysis taking into account method and age 72 | 73 | full.model = rma.mv(d_calc, d_var_calc, mods = ~mean_age_1+method, 74 | random = ~ short_cite | dataset, data = all_data) 75 | 76 | predicted = predict(full.model) 77 | 78 | all_data = all_data %>% 79 | bind_cols(as.data.frame(predicted$pred), 80 | as.data.frame(predicted$se)) %>% 81 | rename(predicted_d = `predicted$pred`, 82 | predicted_se = `predicted$se`) 83 | 84 | power_estimate = pwr.t.test(n = all_data$n, d = all_data$predicted_d, sig.level = 0.05)$power 85 | 86 | all_data = cbind(all_data, power_estimate) 87 | 88 | 89 | power_year.plot = ggplot(all_data , aes(x = year, y = power_estimate, color = dataset)) + 90 | geom_smooth(method = "lm", lwd =1.5) + 91 | xlab("Publication year") + 92 | ylab("Estimated Power") + 93 | scale_color_brewer(type = 'div', palette = 'Paired') + 94 | theme_classic() + 95 | theme(axis.line.x = element_line(), axis.line.y = element_line(), 96 | legend.position = "top") 97 | 98 | 99 | 100 | ##### Power per study ##### 101 | 102 | 103 | power_study.plot = 
ggplot(all_data , aes(x = reorder(dataset, power_estimate, FUN=median), y = power_estimate, color = dataset)) + 104 | geom_hline(yintercept = .8) + 105 | geom_boxplot(lwd=1) + 106 | geom_jitter(size = .5, alpha = .35) + 107 | xlab("Meta-Analysis") + 108 | ylab("Estimated Power") + 109 | scale_color_brewer(type = 'div', palette = 'Paired') + 110 | theme_classic() + 111 | theme(axis.line.x = element_line(), axis.line.y = element_line(), axis.text.x=element_blank(), 112 | axis.ticks.x=element_blank(), legend.position = "top") 113 | 114 | 115 | 116 | -------------------------------------------------------------------------------- /write-ups/childdev_2017/analyses/sample_size.R: -------------------------------------------------------------------------------- 1 | ## LOAD DATA AND PACKAGES #### 2 | #source("analyses/initial_data.R") # only do this if running script alone, otherwise leave commented 3 | 4 | 5 | ## GET DESCRIPTIVE INFORMATION ##### 6 | # Make functions - This function is a hack, should update to just use map with MA_descriptives 7 | get_power = function(df){ 8 | pwr.t.test(n = df$n_dataset, d = df$d, sig.level = 0.05) 9 | } 10 | 11 | # Organize data to get descriptors 12 | data_rma = all_data %>% 13 | nest(-dataset, .key = information) %>% 14 | mutate(model = map(information, ~rma.mv(d_calc, d_var_calc, random = ~ study_ID, data=.))) %>% 15 | mutate(d = map(model, "b")) %>% 16 | mutate(se = map(model, "se")) %>% 17 | select(dataset, d, se) %>% 18 | mutate(d = as.numeric(as.character(d))) %>% 19 | mutate(se = as.numeric(as.character(se))) 20 | 21 | # Descriptors 22 | MA_descriptives = all_data %>% 23 | mutate(n_total = n) %>% #ifelse(!is.na(n_2), n_1 + n_2, n_1)) %>% I think n does the same thing 24 | group_by(dataset) %>% 25 | summarise(age_dataset = median(mean_age_months), 26 | age_min = min(mean_age_months), 27 | age_max = max(mean_age_months), 28 | n_dataset = median(n_total), 29 | n_min = min(n_total), 30 | n_max = max(n_total), 31 | n_records = n(), 32 
| n_papers = length(unique(short_cite))) %>% 33 | ungroup() %>% 34 | inner_join(data_rma) 35 | 36 | # Power 37 | MA_power = MA_descriptives %>% 38 | nest(-dataset, .key = descriptives) %>% 39 | mutate(power = map(descriptives, get_power)) %>% 40 | mutate(power = map(power, "power")) %>% 41 | select(dataset, power) %>% 42 | mutate(power = as.numeric(as.character(power))) 43 | 44 | # Summary 45 | MA_summary = inner_join(MA_descriptives, MA_power) 46 | 47 | MA_summary_table = MA_summary %>% 48 | mutate(age = paste(as.character(round(age_dataset, 0)), " (", as.character(round(age_min, 0)), "-", as.character(round(age_max, 0)), ")", sep = "")) %>% 49 | mutate(n = paste(as.character(n_dataset), " (", as.character(n_min), "-", as.character(round(n_max, 0)), ")", sep = "")) %>% 50 | mutate(ES = paste(as.character(round(d, 2)), " (", as.character(round(se, 2)), ")", sep = "")) %>% 51 | select(dataset, age, n, n_records, n_papers, ES, power) 52 | 53 | 54 | ### DATA AVAILABLITY #### 55 | # #From report Reproducibility by ML and CB 56 | # counts = all_data %>% 57 | # mutate(corr_within_two = ifelse(participant_design == "within_two", as.numeric(corr), NA)) %>% # creates a new column where any time design is not within_two "corr" becomes an NA 58 | # summarise(test_statistic = sum(!is.na(t) | !is.na(F) | !is.na(r)), 59 | # means = sum(!is.na(x_1)), 60 | # SD = sum(!is.na(SD_1)), 61 | # d = sum(!is.na(d)), 62 | # #corr = sum(!is.na(corr)), 63 | # corr_within_two = sum(!is.na(corr_within_two)), 64 | # mean_age = sum(!is.na(mean_age_1)), 65 | # age_range = sum(!is.na(age_range_1)), 66 | # gender = sum(!is.na(gender_1))) %>% 67 | # gather("coded_variable", "n_coded") %>% 68 | # #mutate(total = nrow(all_data)) %>% 69 | # mutate(total = ifelse(coded_variable == "corr_within_two", 70 | # nrow(subset(all_data, participant_design == "within_two")), 71 | # nrow(all_data))) %>% 72 | # mutate(coded_variable = factor(coded_variable)) %>% 73 | # mutate(n_uncoded = total - n_coded) %>% 74 | # 
select(coded_variable, n_coded, n_uncoded, total) 75 | -------------------------------------------------------------------------------- /write-ups/childdev_2017/educationpaper_environment.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/childdev_2017/educationpaper_environment.RData -------------------------------------------------------------------------------- /write-ups/childdev_2017/save_analyses.R: -------------------------------------------------------------------------------- 1 | ## LOAD DATA AND PACKAGES #### 2 | source("analyses/initial_data.R") 3 | 4 | 5 | ## RUN ANALYSES #### 6 | source("analyses/sample_size.R") 7 | source("analyses/power.R") 8 | source("analyses/method.R") 9 | source("analyses/p_values.R") 10 | source("analyses/funnel.R") 11 | source("analyses/bias.R") 12 | 13 | ## SAVE ENVIRONMENT FOR USE IN PAPER #### 14 | save.image("educationpaper_environment.RData") 15 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/NHB/SynthesisCoverLetter.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/NHB/SynthesisCoverLetter.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/NHB/figs/fig3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/NHB/figs/fig3.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/NHB/figs/fig3_lab.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/NHB/figs/fig3_lab.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/NHB/figs/fig3legend.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/NHB/figs/fig3legend.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/NHB/figs/fig4.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/NHB/figs/fig4.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/NHB/figs/fig4_lab.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/NHB/figs/fig4_lab.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/NHB/figs/fig4legend.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/NHB/figs/fig4legend.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/NHB/metalab_synthesis.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/NHB/metalab_synthesis.pdf 
-------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/README-steps for converting rmd to latex: -------------------------------------------------------------------------------- 1 | STEPS FOR GOING FROM METALAB_SYNTHESIS.RMD TO LATEX 2 | 3 | PNAS requires latex format but paper is written in rmd. To produce PNAS latex, start with metalab_synthesis.rmd file in psychscience folder and do the following steps. 4 | 5 | Note these steps are for the case where changes are made to the data only and it is not necessary to edit the paper text in the original rmd (I'm not sure why this would be necessary now). Paper text can be edited now directly in pnas_metalab_synthesis.tex. 6 | 7 | (1) Run raw r code to produce: 8 | * Table 2, which needs to be printed in the console as latex code and content pasted into rmd 9 | * save figure 3 and figure 4 to pdfs so can be edited in illustrator (1 for fig; 1 for the legend) 10 | (2) Knit metalab_synthesis.Rmd and then open metalab_synthesis.tex 11 | (3) From metalab_synthesis.tex paste the following into pnas_metalab_synthesis.tex: 12 | * Table 1 (content only) 13 | * Table 2 (content only) -> edit backslashes for "dagger" and "discrim" 14 | * Edit inline r code in text (this occurs in 5 different paragraphs, identifiable by searching "`r" ) and abstract 15 | (4) From "write-ups/synthesis_paper/psychscience/metalab_synthesis_files/figure-latex/" move the following files to "write-ups/synthesis_paper/PNAS/figs/": 16 | * "unnamed-chunk-2-1.pdf" (funnel) 17 | * "p_curve_plots-1.pdf" (p-curve) 18 | (5) From "write-ups/synthesis_paper/psychscience/figs/" move the following files to "write-ups/synthesis_paper/PNAS/figs/": 19 | * fig3.pdf, fig3legend.pdf 20 | * fig4.pdf, fig4legend.pdf 21 | (6) Edit figures 3 and 4 in illustrator (just open both and move text), and save as fig3_lab.pdf/fig4_lab.pdf (font size = 9) 22 | (7) Compile pnas_metalab_synthesis.tex 23 |
-------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/figs/fig3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/PNAS/figs/fig3.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/figs/fig3_lab.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/PNAS/figs/fig3_lab.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/figs/fig3legend.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/PNAS/figs/fig3legend.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/figs/fig4.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/PNAS/figs/fig4.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/figs/fig4_lab.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/PNAS/figs/fig4_lab.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/figs/fig4legend.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/PNAS/figs/fig4legend.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/figs/p_curve_plots-1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/PNAS/figs/p_curve_plots-1.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/figs/unnamed-chunk-2-1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/PNAS/figs/unnamed-chunk-2-1.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/getcitationsfromrmd.R: -------------------------------------------------------------------------------- 1 | ## pulls out all citations from rmd ("@whatever)") and formats them into latex citation command 2 | 3 | 4 | library(stringr) 5 | 6 | 7 | fileName <- "rawmetalabtext.txt" 8 | m = readChar(fileName, file.info(fileName)$size) 9 | 10 | citation <- "@[:alnum:]*" 11 | k = unlist(str_extract_all(m, citation)) 12 | p = sub("@", "", k) 13 | 14 | paste("\nocite{", paste(p, collapse=","), "}" ,sep = "") 15 | 16 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/pnas-metalab-synthesis.blg: -------------------------------------------------------------------------------- 1 | This is BibTeX, Version 0.99d (TeX Live 2015) 2 | Capacity: max_strings=35307, hash_size=35307, hash_prime=30011 3 | The top-level auxiliary file: 
pnas-metalab-synthesis.aux 4 | The style file: pnas-new.bst 5 | Illegal, another \bibstyle command---line 86 of file pnas-metalab-synthesis.aux 6 | : \bibstyle 7 | : {pnas2016} 8 | I'm skipping whatever remains of this command 9 | Database file #1: metalab_synthesis.bib 10 | Warning--empty journal in lfprep 11 | Warning--empty journal in bergmanneducational 12 | Warning--empty journal in lammertink2016 13 | Warning--empty journal in lewisunpublished 14 | Warning--empty journal in cristiastatisticalinprep 15 | You've used 38 entries, 16 | 1837 wiz_defined-function locations, 17 | 685 strings with 10440 characters, 18 | and the built_in function-call counts, 8451 in all, are: 19 | = -- 707 20 | > -- 399 21 | < -- 3 22 | + -- 122 23 | - -- 84 24 | * -- 702 25 | := -- 1259 26 | add.period$ -- 70 27 | call.type$ -- 38 28 | change.case$ -- 37 29 | chr.to.int$ -- 0 30 | cite$ -- 43 31 | duplicate$ -- 409 32 | empty$ -- 822 33 | format.name$ -- 84 34 | if$ -- 1835 35 | int.to.chr$ -- 0 36 | int.to.str$ -- 38 37 | missing$ -- 39 38 | newline$ -- 149 39 | num.names$ -- 38 40 | pop$ -- 88 41 | preamble$ -- 1 42 | purify$ -- 0 43 | quote$ -- 0 44 | skip$ -- 229 45 | stack$ -- 0 46 | substring$ -- 656 47 | swap$ -- 99 48 | text.length$ -- 3 49 | text.prefix$ -- 0 50 | top$ -- 0 51 | type$ -- 0 52 | warning$ -- 5 53 | while$ -- 91 54 | width$ -- 40 55 | write$ -- 361 56 | (There was 1 error message) 57 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/pnas-metalab-synthesis.out: -------------------------------------------------------------------------------- 1 | \BOOKMARK [0][-]{matmethods@section.1}{Materials and Methods}{}% 1 2 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/pnas-metalab-synthesis.synctex.gz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/PNAS/pnas-metalab-synthesis.synctex.gz -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/pnas-metalab-synthesis.xwm: -------------------------------------------------------------------------------- 1 | \relax 2 | \xwmnewlabel{xwmlastpage}{{1}{8}{Materials and Methods\relax }{matmethods@section.1}{}} 3 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/pnas-metalab-synthesis_including_atypical.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/PNAS/pnas-metalab-synthesis_including_atypical.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/pnasresearcharticle.sty: -------------------------------------------------------------------------------- 1 | %%% PNAS two column research article style file 2 | %%% For use with pnas-new.cls 3 | \NeedsTeXFormat{LaTeX2e} 4 | \ProvidesPackage{pnasresearcharticle}[2016/02/28 v1.2 PNAS two column research article style] 5 | 6 | %% Set whether the abstract is set into the first column 7 | \setboolean{shortarticle}{true} 8 | % true = set into first column 9 | % false = spans page width 10 | 11 | %% Set colors 12 | \definecolor{color2}{RGB}{130,0,0} % color 13 | 14 | %% Set up the first page footnote/fact box here 15 | \RequirePackage{float} 16 | \floatstyle{plain} 17 | \newfloat{sigstatement}{b!}{sst} 18 | 19 | \additionalelement{% 20 | \afterpage{\begin{sigstatement} 21 | \sffamily 22 | 
\mdfdefinestyle{pnassigstyle}{linewidth=0.7pt,backgroundcolor=pnasblueback,linecolor=pnasbluetext,fontcolor=pnasbluetext,innertopmargin=6pt,innerrightmargin=6pt,innerbottommargin=6pt,innerleftmargin=6pt} 23 | \@ifundefined{@significancestatement}{}{% 24 | \begin{mdframed}[style=pnassigstyle]% 25 | \section*{Significance Statement}% 26 | \@significancestatement 27 | \end{mdframed}} 28 | % \medskip 29 | \scriptsize 30 | \@ifundefined{@authorcontributions}{}{\@authorcontributions} 31 | \vskip5pt% 32 | \@ifundefined{@authordeclaration}{}{\@authordeclaration} 33 | \vskip5pt% 34 | \@ifundefined{@equalauthors}{}{\@equalauthors} 35 | \vskip5pt% 36 | \@ifundefined{@correspondingauthor}{}{\@correspondingauthor} 37 | \end{sigstatement}} 38 | } 39 | 40 | %% Break at end of article (before references) 41 | % The blank line before the strip command ensures there is nothing placed 42 | % directly before the break (which can cause formatting issues). 43 | \newcommand{\pnasbreak}{ 44 | 45 | \begin{strip} 46 | \vskip-11pt 47 | \end{strip} 48 | } 49 | 50 | \endinput -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/submitted/pnas-metalab-synthesis.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/PNAS/submitted/pnas-metalab-synthesis.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/PNAS/widetext.sty: -------------------------------------------------------------------------------- 1 | \NeedsTeXFormat{LaTeX2e} 2 | \ProvidesPackage{widetext} 3 | 4 | %% Mimics the widetext environment of revtex4 for any other class package 5 | %% Eg: article.cls 6 | %% 7 | %% Compiled by: Anjishnu Sarkar 8 | %% 9 | %% Advantages: 10 | %% *) Supports float (eg: figure) in two column format (Advantage over 11 | %% multicol 
package) 12 | %% *) One and twocolumn exist on the same page 13 | %% *) Flow of text shown via rule 14 | %% *) Equal height of text when in two column format 15 | %% 16 | %% Acknowledgment(s): 17 | %% 1. Instead of re-inventing the wheel, two packages (flushend, cuted) of 18 | %% the sttools bundle are used. The sttools bundle is available from CTAN. 19 | %% Lisence of these packages rests with their corresponding author. 20 | %% Any bug/problem with flushend and cuted should be forwarded to their 21 | %% corresponding package authors. 22 | %% 2. The idea of the rule came from the following latex community website 23 | %% http://www.latex-community.org/forum/viewtopic.php?f=5&t=2770 24 | %% 25 | %% This package just defines the widetext environment and the rules. 26 | %% 27 | %% Usage: 28 | %% \documentclass[a4paper,12pt,twocolumn]{article} 29 | %% \usepackage{widetext} 30 | %% 31 | %% \begin{document} 32 | %% 33 | %% Some text in twocolumn 34 | %% 35 | %% \begin{widetext} 36 | %% Text in onecolumn format. 37 | %% \end{widetext} 38 | %% 39 | %% Some more text in twocolumn 40 | %% 41 | %% \end{document} 42 | %%%%%%%%%%%%%%%%%%%% 43 | 44 | %% Package required for equal height while in 2 columns format 45 | \IfFileExists{flushend.sty} 46 | {\RequirePackage{flushend}} 47 | {\typeout{} 48 | \typeout{Package widetext error: Install the flushend package which is 49 | a part of sttools bundle. Available from CTAN.} 50 | \typeout{} 51 | \stop 52 | } 53 | 54 | %% Package required for onecolumn and twocolumn to exist on the same page. 55 | %% and also required for widetext environment. 56 | \IfFileExists{cuted.sty} 57 | {\RequirePackage{cuted}} 58 | {\typeout{} 59 | \typeout{Package widetext error: Install the cuted package which is 60 | a part of sttools bundle. 
Available from CTAN.} 61 | \typeout{} 62 | \stop 63 | } 64 | 65 | 66 | \newlength\@parindent 67 | \setlength\@parindent{\parindent} 68 | 69 | \if@twocolumn 70 | \newenvironment{widetext} 71 | {% 72 | \begin{strip} 73 | \rule{\dimexpr(0.5\textwidth-0.5\columnsep-0.4pt)}{0.4pt}% 74 | \rule{0.4pt}{6pt} 75 | \par %\vspace{6pt} 76 | \parindent \@parindent 77 | }% 78 | {% 79 | \par 80 | \hfill\rule[-6pt]{0.4pt}{6.4pt}% 81 | \rule{\dimexpr(0.5\textwidth-0.5\columnsep-1pt)}{0.4pt} 82 | \end{strip} 83 | } 84 | \else 85 | \newenvironment{widetext}{}{} 86 | \fi -------------------------------------------------------------------------------- /write-ups/synthesis_paper/SI/synthesis_SI.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/SI/synthesis_SI.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/checking_ML_MA_outliers.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: Double-checking Outliers for MAs coded by Molly 3 | author: "Molly Lewis" 4 | date: "`r Sys.Date()`" 5 | output: 6 | html_document: 7 | toc: true 8 | number_sections: false 9 | theme: cerulean 10 | code_folding: hide 11 | --- 12 | 13 | *** 14 | *** 15 | 16 | Here I want to go back and check the coding for outliers for MAs. Particularly, the ones I coded: Mutual exclusivity, concept-label advantage, online word recognition, and gaze following. 17 | 18 | Method for checking: look at funnel plot for model with age as a moderator and identify outliers. 
19 | 20 | ```{r load data, include = F} 21 | source("../../../dashboard/global.R", chdir = TRUE) 22 | library(metafor) 23 | knitr::opts_chunk$set(cache = T, warning = F, message = F, fig.width = 8, fig.height = 3.5) 24 | 25 | # Remove incomplete MAs 26 | datasets = filter(datasets, filename != "statistical") 27 | all_data = filter(all_data, dataset != "Statistical word segementation") 28 | 29 | # age-moderated funnel plot 30 | age_funnel_plot <- function(raw.d) { 31 | model = rma(d_calc ~ mean_age, vi = d_var_calc, data = raw.d, 32 | method = "REML") 33 | r <- rstandard(model) 34 | d <- data_frame(se = r$se, es = r$resid, lab = paste0(raw.d$study_ID, "_", raw.d$expt_num), cond = raw.d$expt_condition, age = raw.d$mean_age_1) 35 | center <- 0 36 | lower_lim <- max(d$se) + .05 * max(d$se) 37 | funnel95 <- data.frame(x = c(center - lower_lim * CRIT_95, center, 38 | center + lower_lim * CRIT_95), 39 | y = c(-lower_lim, 0, -lower_lim)) 40 | 41 | ggplot(d, aes(x = es, y = -se)) + 42 | scale_colour_solarized(name = "", labels = labels) + 43 | scale_y_continuous(expand = c(0, 0), 44 | breaks = round(seq(0, -max(d$se), length.out = 5), 2), 45 | labels = round(seq(0, max(d$se), length.out = 5), 2)) + 46 | geom_polygon(aes(x = x, y = y), data = funnel95, alpha = .5, 47 | fill = "white") + 48 | geom_vline(xintercept = center, linetype = "dotted", color = "black") + 49 | geom_vline(xintercept = 0, linetype = "dashed", color = "grey") + 50 | geom_text_repel(aes(label = lab), size = 2) + 51 | theme(panel.background = element_rect(fill = "grey"), 52 | panel.grid.major = element_line(colour = "darkgrey", size = 0.2), 53 | panel.grid.minor = element_line(colour = "darkgrey", size = 0.5)) 54 | } 55 | 56 | ``` 57 | 58 | # Concept-label advantage 59 | ```{r} 60 | age_funnel_plot(filter(all_data, 61 | dataset == "Label advantage in concept learning")) 62 | 63 | to_check = c("fulkerson2003_1", "robinson2007_1", "booth2002_1","booth2002_2", "ferry2013_1", "booth2003_2", "balaban1997_1", 
"ferry2010_1") 64 | #f and r seem fine. 65 | ``` 66 | 67 | # Online word recognition 68 | ```{r} 69 | age_funnel_plot(filter(all_data, dataset == "Online word recognition")) 70 | 71 | to_check = c("fernald2013_1", "fernald2013_1") # these actually look fine 72 | ``` 73 | 74 | # Gaze following 75 | ```{r} 76 | age_funnel_plot(filter(all_data, dataset == "Gaze following")) 77 | 78 | to_check = c("butler2009_1", "mundy1998_1", "degroote2006_1") 79 | #butler2009_1 and mundy1998_1 seem fine, checking on degroote 80 | ``` 81 | 82 | 83 | # Mutual exclusivity 84 | ```{r} 85 | age_funnel_plot(filter(all_data, dataset == "Mutual exclusivity")) 86 | # looks fine (frank are outliers) 87 | ``` 88 | 89 | # Sound symbolism 90 | ```{r} 91 | age_funnel_plot(filter(all_data, dataset == "Sound symbolism")) 92 | # looks fine 93 | ``` 94 | 95 | # IDS 96 | ```{r} 97 | age_funnel_plot(filter(all_data, 98 | dataset == "Infant directed speech preference")) 99 | 100 | to_check = c("Trainor1996_2", "Werker1989_1") 101 | # both fine (didn't go to source papers, though) 102 | ``` 103 | 104 | # Phonotactic learning 105 | ```{r} 106 | age_funnel_plot(filter(all_data, dataset == "Phonotactic learning")) 107 | to_check = c("1008_1", "1001_1") 108 | # seems fine 109 | ``` 110 | 111 | # Statistical sound category learning 112 | ```{r} 113 | age_funnel_plot(filter(all_data, dataset == "Statistical sound category learning")) 114 | to_check = c("1036_3", "1043_1") 115 | #1043_1 - data from author and SE seem quite small, but can't directly verify (submitted issue) 116 | ``` 117 | 118 | # Word segementation 119 | ```{r} 120 | age_funnel_plot(filter(all_data, dataset == "Word segmentation")) 121 | # too big! 122 | to_check = c("Flocciaetal2016_10") 123 | #fixed! 
124 | ``` 125 | 126 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/misc_pcurve/effect_size_estimate.R: -------------------------------------------------------------------------------- 1 | 2 | #R Code behind Figure 5C&D in - MANY LABS REPLICATION PROJECT 3 | #Simonsohn, Simmons and Nelson, "P-Curve and Effect Size: Correcting for Publication Bias Using Only Significant Results" 4 | # 5 | #Prepared by Uri Simonsohn, uws@wharton.upenn.edu 6 | # 7 | #Last updated: 2014 05 13 8 | ########################################################## 9 | 10 | 11 | 12 | #This program uses data collected by the 36 labs participating in the Many Labs replication project 13 | #see Klein et al (2014) https://osf.io/wx7ck/ 14 | 15 | #1) It loads the data for two of the studies: sunk cost fallacy and asian disease. 16 | 17 | #2) It drops the two largest studies, with about 1000 subjects each, so that there will be some effect of selective 18 | #reporting (otherwise those two studies drive the effect size estimate. p-curve still does very well, almost exactly 19 | #estimating effect size from significant studies alone, but, the naive estimate is not too biased either, so the 20 | #example is not partiularly informative. 21 | 22 | #3) Computes a meta-analytical effect size based on all studies (earnest estimate) and based only on the significant ones (naive) 23 | # Using the RMA procedure it collects the standard error of the naive and earnest estiamte 24 | #4) Computes a p-curve based effect size 25 | 26 | #5) Computes the standard error of the p-curve estimate via bootstrapping. IN particular, the same number of significnat 27 | # studies is drawn with replacement from teh set that exists, a new estimate is performed, and the task is repeated 28 | # 1000 times. 
The standard deviation of the effect size estimate is used as the standard error for p-curve's estimate 29 | 30 | ################################################################# 31 | 32 | #Load library used for the meta-analysis 33 | library(metafor) 34 | 35 | #Create three functions that will be used in the program below 36 | 37 | #Function 1: variance of a cohen d estimate 38 | vd=function(d,df) 39 | { 40 | n=(df+2)/2 41 | return(2/n +(d**2)/(2*(n-3.94))) 42 | } 43 | 44 | #Function 2: Loss function for effect size estimation based on Kolmogorov-Smirnov test 45 | loss=function(t_obs,df_obs,d_est) { 46 | ncp_est=sqrt((df_obs+2)/4)*d_est 47 | tc=qt(.975,df_obs) 48 | power_est=1-pt(tc,df_obs,ncp_est) 49 | p_larger=pt(t_obs,df=df_obs,ncp=ncp_est) 50 | ppr=(p_larger-(1-power_est))/power_est 51 | KSD=ks.test(ppr,punif)$statistic 52 | return(KSD) 53 | } 54 | 55 | 56 | #Function 3: Simplified sampling function 57 | sub = function(df) return(df[sample(nrow(df),nrow(df),replace=TRUE),]) #take a dataframe and generates another of the same size 58 | 59 | 60 | ####################################################################### 61 | 62 | # significant subset 63 | sunk.d.sig=subset(sunk.d.all, sunk.p.all<.05 & sunk.t.all>0) 64 | sunk.t.sig=subset(sunk.t.all, sunk.p.all<.05 & sunk.t.all>0) 65 | sunk.df.sig=subset(sunk.df.all, sunk.p.all<.05 & sunk.t.all>0) 66 | 67 | # variance of the effect sizes using formula (see Function 1 above) 68 | sunk.vd.sig=vd(d=sunk.d.sig, df=sunk.df.sig) #vd: variance of d 69 | sunk.vd.all=vd(d=sunk.d.all, df=sunk.df.all) 70 | 71 | # estimate effect size with p-curve 72 | sunk.dhat.pc=optimize(loss,c(-.3,2), t_obs=sunk.t.sig, df_obs=sunk.df.sig)$minimum 73 | asian.dhat.pc=optimize(loss,c(-.3,2), t_obs=asian.t.sig, df_obs=asian.df.sig)$minimum 74 | 75 | # bootstrap sd for p-curve 76 | sunk = data.frame(sunk.t.sig, sunk.df.sig, sunk.d.sig, sunk.vd.sig) #puts together all significant result 77 | sunk.dhat.pc.sim=c() #store each simulated effect size 
estimate in vectors. 78 | 79 | for (i in 1:1000){ 80 | sunk.k=sub(sunk)#Draw a sample of significant studies with replacement (otherwise would get the same studies every time)#here i am using the sub(), Function 3 above 81 | sunk.dhat.pc.k = optimize(loss,c(-.2,2),t_obs=sunk.k$sunk.t.sig, df_obs=sunk.k$sunk.df.sig)$minimum 82 | sunk.dhat.pc.sim=c(sunk.dhat.pc.sim, sunk.dhat.pc.k) #add to saved values 83 | } 84 | 85 | sunk.se.pc = sd(sunk.dhat.pc.sim) 86 | 87 | 88 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/misc_pcurve/p2f2.R: -------------------------------------------------------------------------------- 1 | 2 | #R Code behind Figure 5A&B in - CHOICE OVERLOAD 3 | #Simonsohn, Simmons and Nelson, "P-Curve and Effect Size: Correcting for Publication Bias Using Only Significant Results" 4 | # 5 | #Prepared by Uri Simonsohn, uws@wharton.upenn.edu 6 | # 7 | #Last updated: 2014 05 13 8 | ########################################################## 9 | 10 | 11 | #Function 1; loss function for how well expected p-curve fits observed p-curve 12 | loss=function(t_obs,df_obs,d_est) { 13 | ncp_est=sqrt((df_obs+2)/4)*d_est 14 | tc=qt(.975,df_obs) 15 | power_est=1-pt(tc,df_obs,ncp_est) 16 | p_larger=pt(t_obs,df=df_obs,ncp=ncp_est) 17 | ppr=(p_larger-(1-power_est))/power_est 18 | KSD=ks.test(ppr,punif)$statistic 19 | return(KSD) 20 | } 21 | 22 | 23 | #Function 2: Simplified bootstrapping formula 24 | sub = function(df) return(df[sample(nrow(df),nrow(df),replace=TRUE),]) #take a dataframe and generates another of the same size - used for bootstrapping SE 25 | 26 | #Function 3: variance of a cohen d estimate 27 | vd=function(d,df) 28 | { 29 | n=(df+2)/2 30 | return(2/n +(d**2)/(2*(n-3.94))) 31 | } 32 | 33 | 34 | #EFFECT SIZE ESTIMATION PER P-CURVE 35 | #Choice is bad 36 | # THESE ARE THE REPORTED RESULTS USING IN THE META-ANALYSIS 37 | 
#tbad=c(6.0752,3.6016,2.6336,2.7725,2.5614,2.4787,2.3607,2.1793,2.1768,2.1597,2.0911,2.0552,2.0025) 38 | #dfbad=c(247,65,58,19,38,58,41,84,63,67,113,163,67) 39 | 40 | # CORRECTING ERROR WITH JAMS STUDY (barely affects p-curves point estimate, makes estimates more precise, makes naive smaller) 41 | # the meta-analysis uses an effect size that suffers from selection bias for it counts only people who stopped 42 | # and stopping is affected by treatment. This was discovered while Uri taught paper in PhD class and seemed appropriate to correct 43 | # again, makes very little difference; 44 | tbad=c(4.5548,3.6016,2.6336,2.7725,2.5614,2.4787,2.3607,2.1793,2.1768,2.1597,2.0911,2.0552,2.0025) 45 | dfbad=c(753,65,58,19,38,58,41,84,63,67,113,163,67) 46 | 47 | #Choice is good 48 | tgood=c(4.0093,3.1529,3.0129,2.8302,2.4666,2.3193,2.1967) 49 | dfgood=c(16,43,52,87,88,30,41) 50 | 51 | #Point estimates for p-curve 52 | b.pcb=optimize(loss,c(-.3,4),df_obs=dfbad,t_obs=tbad)$minimum #Choice is bad 53 | g.pcb=optimize(loss,c(-.3,4),df_obs=dfgood,t_obs=tgood)$minimum #Choice is good 54 | 55 | 56 | ######################################## 57 | #COMPUTING STANDARD ERROR FOR P-CURVE 58 | ######################################## 59 | #Make them datasets to draw from them easily 60 | good=data.frame(tgood,dfgood) 61 | bad=data.frame(tbad,dfbad) 62 | 63 | #BOOTSTRAP 64 | dgood.sim=c() 65 | dbad.sim=c() 66 | options(warn=-1) #R gives warning for its own noncentral distribution, not my functions 67 | set.seed(778899) #arbitrary number to always get same results 68 | 69 | for (i in 1:1000) #Do 1000 bootstraps: note, when bootstrappingn p-values larger number of bootstraps in often a good idea, 70 | #but for SE() not necessary as we want the stable central value, SD, not the unstable 2.5th percentile. 
71 | { 72 | #Draw a sample of significant studies with replacement (otherwise would get the same studies every time) 73 | good.k=sub(good) 74 | bad.k=sub(bad) 75 | 76 | #Estimate effects for bootstrapped sample 77 | #p-curve 78 | dgood.k=optimize(loss,c(-.3,2),t_obs=good.k$tgood, df_obs=good.k$dfgood)$minimum 79 | dbad.k=optimize(loss,c(-.3,2),t_obs=bad.k$tbad, df_obs=bad.k$dfbad)$minimum 80 | 81 | #add to saved values 82 | dgood.sim=c(dgood.sim,dgood.k) 83 | dbad.sim=c(dbad.sim,dbad.k) 84 | } 85 | 86 | #verify on average I get the right estimate 87 | mean(dgood.sim) #Average bootstrapped mean estiamted with p-curve 88 | mean(dbad.sim) #Average bootstrapped mean estiamted with p-curve 89 | 90 | #compute bootstrapped SE for p-curve 91 | g.pcse=sd(dgood.sim) 92 | b.pcse=sd(dbad.sim) 93 | 94 | #EFFECT SIZE ESTIMATION PER META-ANALYSIS 95 | #Choice is bad 96 | #effect sizes 97 | dbad_all=c(0.77,0.88,0.68,1.21,0.81,0.64,0.72,0.47,0.54,0.52,0.39,0.32,0.48,0.54,0.57,0.22,0.08,0.09,0.06,0.02,0.05,0.08,0.04) 98 | #correct jams study effect size; see long comment in line 42-45 99 | dbad_all[1]=2*tbad[1]/sqrt(dfbad[1]+2) 100 | 101 | #sample sizes 102 | Nbad_all=c(249,67,60,21,40,60,43,86,65,69,115,165,69,52,41,43,82,60,64,504,61,21,68) 103 | #correct jams study sample size; see long comment in line 42-45 104 | Nbad_all[1]=754 105 | 106 | 107 | #Choice is bad 108 | #effect sizes 109 | dgood_all=c(-1.89,-0.94,-0.82,-0.6,-0.52,-0.82,-0.67,-0.26,-0.43,-0.28,-0.31,-0.36,-0.27,-0.17,-0.28,-0.33,-0.2,-0.48,-0.24,-0.16,-0.22,-0.08,-0.17,-0.11,-0.12,-0.09,-0.12,-0.05,-0.05,-0.04,-0.02) 110 | dgood_all=abs(dgood_all) 111 | #sample sizes 112 | Ngood_all=c(18,45,54,89,90,32,43,120,46,81,60,45,66,160,52,34,80,16,52,112,57,280,60,80,56,89,40,174,72,57,40) 113 | 114 | #Compute var(d) 115 | #Choice is bad 116 | nb_all=Nbad_all/2 117 | varb_all=vd(dbad_all,nb_all*2-2) 118 | #Choice is good 119 | ng_all=Ngood_all/2 120 | varg_all=vd(dgood_all,ng_all*2-2) 121 | 122 | #keep only the significant 
ones 123 | dgood=dgood_all[1:7] 124 | varg=dgood_all[1:7] 125 | 126 | dbad=dbad_all[1:13] 127 | varb=varb_all[1:13] 128 | 129 | #EFFECT SIZE 130 | library(metafor) 131 | 132 | #Choice is good 133 | #naive 134 | g.nb=rma(yi=dgood,vi=varg,method="FE")$b 135 | g.nse=rma(yi=dgood,vi=varg,method="FE")$se 136 | #earnest 137 | g.eb=rma(yi=dgood_all,vi=varg_all,method="FE")$b 138 | g.ese=rma(yi=dgood_all,vi=varg_all,method="FE")$se 139 | 140 | #Choice is bad 141 | #naive 142 | b.nb= rma(yi=dbad,vi=varb,method="FE")$b 143 | b.nse=rma(yi=dbad,vi=varb,method="FE")$se 144 | 145 | #Earnest 146 | b.eb=rma(yi=dbad_all,vi=varb_all,method="FE")$b 147 | b.ese=rma(yi=dbad_all,vi=varb_all,method="FE")$se 148 | 149 | results.b=matrix(c(b.nb, b.eb, b.pcb, g.nb, g.eb, g.pcb),nrow=3,ncol=2) 150 | results.se=matrix(c(b.nse, b.ese, b.pcse, g.nse, g.ese, g.pcse),nrow=3,ncol=2) 151 | 152 | 153 | 154 | 155 | 156 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/misc_pcurve/p2power.R: -------------------------------------------------------------------------------- 1 | 2 | #################################################################################################################################### 3 | # (STEP 1) CREATE FUNCTIONS THAT FIND THE NONCENTRALITY PARAMETER FOR THE t,F, associated with a given power, for the observed d.f. 4 | # 5 | #SET OF FUNCTIONS 1. 
#COMPUTE GAP BETWEEN POWER AND DESIRED POWER FOR A GIVEN NCP
# (minimize these in the next step to solve for the ncp that gives the desired power)
# Returns 0 when `delta` is the noncentrality parameter at which an F(df1, df2)
# test with critical value `x` has exactly the requested power.
ncp_error.f = function(delta, power, x, df1, df2) pf(x, df1 = df1, df2 = df2, ncp = delta) - (1 - power)

#SET OF FUNCTIONS 2: MINIMIZE FUNCTIONS ABOVE
#F-test: solve for the noncentrality parameter giving the desired power at alpha = .05
getncp.f = function(df1, df2, power) {
  xc = qf(p = .95, df1 = df1, df2 = df2)  # critical F-value at the .05 significance level
  # uniroot finds the ncp in (0, 37.62) where achieved power equals the requested power
  return(uniroot(ncp_error.f, c(0, 37.62), x = xc, df1 = df1, df2 = df2, power = power)$root)
}


####################################################################################################################################
# (STEP 2) CREATE PP-VALUES FOR EACH OF THE FOUR DISTRIBUTIONS FOR HOW WELL A GIVEN POWER_EST FITS
# For each observed F-statistic, compute its pp-value (the p-value conditional on
# significance) under the hypothesis that every test had power `power_est`.
powerfit.f = function(f_obs, df1_obs, df2_obs, power_est) {
  ncp_est = mapply(getncp.f, df1 = df1_obs, df2 = df2_obs, power = power_est) #find ncp for each that gives each test power.k
  p_larger = pf(f_obs, df1 = df1_obs, df2 = df2_obs, ncp = ncp_est)           #prob t>tobs given ncp_est
  ppr = (p_larger - (1 - power_est)) / power_est                              #condition on p<.05
  return(ppr)
}

####################################################################################################################################
# (STEP 3) STACK-UP ALL THE PP-VALUES INTO A VECTOR
# Loss function for power estimation: Kolmogorov-Smirnov distance between the
# pp-values and the uniform distribution expected if `power_est` were correct.
# NOTE(review): reads the globals f.value.sig / f.df1.sig / f.df2.sig set below.
powerfit.all = function(power_est) {
  ppr.all = c()
  #for each kind of test, check if there are any significant values, if there are, add ppr to overall ppr
  if (length(f.value.sig) > 0) ppr.all = c(ppr.all, powerfit.f(f_obs = f.value.sig, df1_obs = f.df1.sig, df2_obs = f.df2.sig, power_est = power_est))

  KSD = ks.test(ppr.all, punif)$statistic  #KS test on the resulting pprs
  return(KSD)
}

##############################################
#EXAMPLE: F(2,120)=4.8
f.value.sig = 4.8
f.df1.sig = 2
f.df2.sig = 120

optimize(powerfit.all, c(.06, .999))$minimum  # estimate best fitting value
-------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/misc_pcurve/pcurve copy.R: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # get_ncp - functions that find non-centrality parameter for f,chi distributions that gives some level of power 3 | # we don't use chi-square / z-scores, so I only add the getncp for F scores (t will be turned to F in this version of p-curve) 4 | ncp_error = function(ncp_est, power, x, df1, df2) 5 | { 6 | pf(x, df1 = df1, df2 = df2, ncp = ncp_est) - (1 - power) 7 | } 8 | 9 | get_ncp <- function(df1, df2, power) 10 | { 11 | xc = qf(p=.95, df1 = df1, df2 = df2) 12 | root <- uniroot(ncp_error, c(0, 35), x = xc, df1 = df1, df2 = df2, power = power)$root 13 | 14 | return(root) 15 | } 16 | 17 | ############################################################################### 18 | # prop33(pc) - Computes % of p-values that are smaller than pc, for the tests submitted to p-curve, if power is 33% 19 | prop33 <- function(pc, ncp33, df1, df2) 20 | { 21 | #pc: critical p-value 22 | 23 | #Overview: 24 | #Creates a vector of the same length as the number of tests submitted to p-curve, significant and not, 25 | # and computes the proportion of p-values expected to be smaller than {pc} given the d.f. 
26 | # and outputs the entire vector, with NA values where needed 27 | 28 | #F-tests (& thus t-tests) 29 | prop= (1 - pf(qf(1 - pc, df1=df1, df2=df2), df1=df1, df2=df2, ncp=ncp33)) 30 | 31 | #output it 32 | return(prop) 33 | } 34 | 35 | ############################################################################### 36 | # pbound: bound p-values and pp-values by precision of measurement to avoid errors 37 | pbound <- function(p) 38 | { 39 | pmin(pmax(p,2.2e-16),1-2.2e-16) 40 | } 41 | 42 | 43 | ############################################################################### 44 | # p_curve 45 | # main function that does the doing. 46 | p_curve <- function(df){ 47 | 48 | # Recompute Ps and bound to level of precision desired 49 | df <- df %>% 50 | mutate(p = pbound(1 - pf(value, df1 = df1, df2 = df2)), 51 | p_round = ceiling(p * 100) / 100) %>% 52 | rowwise %>% # need to do this rowwise because get_ncp is not vectorized 53 | # NCP33 (noncentrality parameter giving each test in p-curve 33% power given the d.f. of the test) 54 | mutate(ncp33 = get_ncp(df1, df2, power=1/3)) 55 | 56 | pc_data <- data.frame(p = seq(.01,.05,.01)) %>% 57 | group_by(p) %>% 58 | mutate(observed = sum(df$p_round == p) / sum(df$p < .05), 59 | baseline = 1/5) 60 | 61 | # Green line (Expected p-curve for 33% power) 62 | # Proportion of tests expected to get <01, <02... 
63 | gcdf1=prop33(.01, df$ncp33, df$df1, df$df2) #vector with proportion of p-values p<.01, with 33% power 64 | gcdf2=prop33(.02, df$ncp33, df$df1, df$df2) # "" p<.02, " 65 | gcdf3=prop33(.03, df$ncp33, df$df1, df$df2) # "" p<.03, " 66 | gcdf4=prop33(.04, df$ncp33, df$df1, df$df2) # "" p<.04, " 67 | #Note: for p<.05 we know it is 33% power 68 | 69 | #5.1.2 Now compute difference, and divide by 1/3 to get the share of significant p-values in each bin 70 | pc_data$expected <- c(mean(gcdf1)*3, #Average of the vector p<.01 71 | mean(gcdf2-gcdf1)*3, #Difference between .02 and .01 72 | mean(gcdf3-gcdf2)*3, #Difference between .03 and .02 73 | mean(gcdf4-gcdf3)*3, #Difference between .04 and .03 74 | mean(1/3-gcdf4)*3) #Difference between .05 and .04 75 | #Because we have one row per test submitted, the average is weighted average, giving each test equal weight 76 | pc_long <- pc_data %>% 77 | gather(measure, value, observed, baseline, expected) 78 | 79 | return(pc_long) 80 | } 81 | 82 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/misc_pcurve/pcurve.R: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # get_ncp - functions that find non-centrality parameter for f,chi distributions that gives some level of power 3 | # we don't use chi-square / z-scores, so I only add the getncp for F scores (t will be turned to F in this version of p-curve) 4 | ncp_error = function(ncp_est, power, x, df1, df2) 5 | { 6 | pf(x, df1 = df1, df2 = df2, ncp = ncp_est) - (1 - power) 7 | } 8 | 9 | get_ncp <- function(df1, df2, power) 10 | { 11 | xc = qf(p=.95, df1 = df1, df2 = df2) 12 | root <- uniroot(ncp_error, c(0, 35), x = xc, df1 = df1, df2 = df2, power = power)$root 13 | 14 | return(root) 15 | } 16 | 17 | ############################################################################### 18 | # prop33(pc) 
- Computes % of p-values that are smaller than pc, for the tests submitted to p-curve, if power is 33% 19 | prop33 <- function(pc, ncp33, df1, df2) 20 | { 21 | #pc: critical p-value 22 | 23 | #Overview: 24 | #Creates a vector of the same length as the number of tests submitted to p-curve, significant and not, 25 | # and computes the proportion of p-values expected to be smaller than {pc} given the d.f. 26 | # and outputs the entire vector, with NA values where needed 27 | 28 | #F-tests (& thus t-tests) 29 | prop= (1 - pf(qf(1 - pc, df1=df1, df2=df2), df1=df1, df2=df2, ncp=ncp33)) 30 | 31 | #output it 32 | return(prop) 33 | } 34 | 35 | ############################################################################### 36 | # pbound: bound p-values and pp-values by precision of measurement to avoid errors 37 | pbound <- function(p) 38 | { 39 | pmin(pmax(p,2.2e-16),1-2.2e-16) 40 | } 41 | 42 | 43 | ############################################################################### 44 | # p_curve 45 | # main function that does the doing. 46 | p_curve <- function(df){ 47 | 48 | # Recompute Ps and bound to level of precision desired 49 | df <- df %>% 50 | mutate(p = pbound(1 - pf(value, df1 = df1, df2 = df2)), 51 | p_round = ceiling(p * 100) / 100) %>% 52 | rowwise %>% # need to do this rowwise because get_ncp is not vectorized 53 | # NCP33 (noncentrality parameter giving each test in p-curve 33% power given the d.f. of the test) 54 | mutate(ncp33 = get_ncp(df1, df2, power=1/3)) 55 | 56 | pc_data <- data.frame(p = seq(.01,.05,.01)) %>% 57 | group_by(p) %>% 58 | mutate(observed = sum(df$p_round == p) / sum(df$p < .05), 59 | baseline = 1/5) 60 | 61 | # Green line (Expected p-curve for 33% power) 62 | # Proportion of tests expected to get <01, <02... 
63 | gcdf1=prop33(.01, df$ncp33, df$df1, df$df2) #vector with proportion of p-values p<.01, with 33% power 64 | gcdf2=prop33(.02, df$ncp33, df$df1, df$df2) # "" p<.02, " 65 | gcdf3=prop33(.03, df$ncp33, df$df1, df$df2) # "" p<.03, " 66 | gcdf4=prop33(.04, df$ncp33, df$df1, df$df2) # "" p<.04, " 67 | #Note: for p<.05 we know it is 33% power 68 | 69 | #5.1.2 Now compute difference, and divide by 1/3 to get the share of significant p-values in each bin 70 | pc_data$expected <- c(mean(gcdf1)*3, #Average of the vector p<.01 71 | mean(gcdf2-gcdf1)*3, #Difference between .02 and .01 72 | mean(gcdf3-gcdf2)*3, #Difference between .03 and .02 73 | mean(gcdf4-gcdf3)*3, #Difference between .04 and .03 74 | mean(1/3-gcdf4)*3) #Difference between .05 and .04 75 | #Because we have one row per test submitted, the average is weighted average, giving each test equal weight 76 | pc_long <- pc_data %>% 77 | gather(measure, value, observed, baseline, expected) 78 | 79 | return(pc_long) 80 | } 81 | 82 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/Lato.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/Lato.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/LatoBold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/LatoBold.ttf -------------------------------------------------------------------------------- 
/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/LatoItalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/LatoItalic.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/NewsCycle.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/NewsCycle.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/NewsCycleBold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/NewsCycleBold.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSans.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSans.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSansBold.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSansBold.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSansBoldItalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSansBoldItalic.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSansItalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSansItalic.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSansLight.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSansLight.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSansLightItalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/OpenSansLightItalic.ttf 
-------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/Raleway.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/Raleway.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/RalewayBold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/RalewayBold.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/Roboto.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/Roboto.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/RobotoBold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/RobotoBold.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/RobotoLight.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/RobotoLight.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/RobotoMedium.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/RobotoMedium.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/SourceSansPro.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/SourceSansPro.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/SourceSansProBold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/SourceSansProBold.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/SourceSansProItalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/SourceSansProItalic.ttf 
-------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/SourceSansProLight.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/SourceSansProLight.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/Ubuntu.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/css/fonts/Ubuntu.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/fonts/glyphicons-halflings-regular.ttf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/fonts/glyphicons-halflings-regular.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/fonts/glyphicons-halflings-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/fonts/glyphicons-halflings-regular.woff2 -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/js/npm.js: -------------------------------------------------------------------------------- 1 | // This file is autogenerated via the `commonjs` Grunt task. You can require() this file in a CommonJS environment. 
2 | require('../../js/transition.js') 3 | require('../../js/alert.js') 4 | require('../../js/button.js') 5 | require('../../js/carousel.js') 6 | require('../../js/collapse.js') 7 | require('../../js/dropdown.js') 8 | require('../../js/modal.js') 9 | require('../../js/tooltip.js') 10 | require('../../js/popover.js') 11 | require('../../js/scrollspy.js') 12 | require('../../js/tab.js') 13 | require('../../js/affix.js') -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/shim/html5shiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.2 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | // Only run this code in IE 8 5 | if (!!window.navigator.userAgent.match("MSIE 8")) { 6 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var 
n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.2",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b)}(this,document); 7 | }; 8 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/bootstrap-3.3.5/shim/respond.min.js: -------------------------------------------------------------------------------- 1 | /*! 
Respond.js v1.4.2: min/max-width media query polyfill * Copyright 2013 Scott Jehl 2 | * Licensed under https://github.com/scottjehl/Respond/blob/master/LICENSE-MIT 3 | * */ 4 | 5 | // Only run this code in IE 8 6 | if (!!window.navigator.userAgent.match("MSIE 8")) { 7 | !function(a){"use strict";a.matchMedia=a.matchMedia||function(a){var b,c=a.documentElement,d=c.firstElementChild||c.firstChild,e=a.createElement("body"),f=a.createElement("div");return f.id="mq-test-1",f.style.cssText="position:absolute;top:-100em",e.style.background="none",e.appendChild(f),function(a){return f.innerHTML='­',c.insertBefore(e,d),b=42===f.offsetWidth,c.removeChild(e),{matches:b,media:a}}}(a.document)}(this),function(a){"use strict";function b(){u(!0)}var c={};a.respond=c,c.update=function(){};var d=[],e=function(){var b=!1;try{b=new a.XMLHttpRequest}catch(c){b=new a.ActiveXObject("Microsoft.XMLHTTP")}return function(){return b}}(),f=function(a,b){var c=e();c&&(c.open("GET",a,!0),c.onreadystatechange=function(){4!==c.readyState||200!==c.status&&304!==c.status||b(c.responseText)},4!==c.readyState&&c.send(null))};if(c.ajax=f,c.queue=d,c.regex={media:/@media[^\{]+\{([^\{\}]*\{[^\}\{]*\})+/gi,keyframes:/@(?:\-(?:o|moz|webkit)\-)?keyframes[^\{]+\{(?:[^\{\}]*\{[^\}\{]*\})+[^\}]*\}/gi,urls:/(url\()['"]?([^\/\)'"][^:\)'"]+)['"]?(\))/g,findStyles:/@media *([^\{]+)\{([\S\s]+?)$/,only:/(only\s+)?([a-zA-Z]+)\s?/,minw:/\([\s]*min\-width\s*:[\s]*([\s]*[0-9\.]+)(px|em)[\s]*\)/,maxw:/\([\s]*max\-width\s*:[\s]*([\s]*[0-9\.]+)(px|em)[\s]*\)/},c.mediaQueriesSupported=a.matchMedia&&null!==a.matchMedia("only all")&&a.matchMedia("only all").matches,!c.mediaQueriesSupported){var g,h,i,j=a.document,k=j.documentElement,l=[],m=[],n=[],o={},p=30,q=j.getElementsByTagName("head")[0]||k,r=j.getElementsByTagName("base")[0],s=q.getElementsByTagName("link"),t=function(){var a,b=j.createElement("div"),c=j.body,d=k.style.fontSize,e=c&&c.style.fontSize,f=!1;return 
b.style.cssText="position:absolute;font-size:1em;width:1em",c||(c=f=j.createElement("body"),c.style.background="none"),k.style.fontSize="100%",c.style.fontSize="100%",c.appendChild(b),f&&k.insertBefore(c,k.firstChild),a=b.offsetWidth,f?k.removeChild(c):c.removeChild(b),k.style.fontSize=d,e&&(c.style.fontSize=e),a=i=parseFloat(a)},u=function(b){var c="clientWidth",d=k[c],e="CSS1Compat"===j.compatMode&&d||j.body[c]||d,f={},o=s[s.length-1],r=(new Date).getTime();if(b&&g&&p>r-g)return a.clearTimeout(h),h=a.setTimeout(u,p),void 0;g=r;for(var v in l)if(l.hasOwnProperty(v)){var w=l[v],x=w.minw,y=w.maxw,z=null===x,A=null===y,B="em";x&&(x=parseFloat(x)*(x.indexOf(B)>-1?i||t():1)),y&&(y=parseFloat(y)*(y.indexOf(B)>-1?i||t():1)),w.hasquery&&(z&&A||!(z||e>=x)||!(A||y>=e))||(f[w.media]||(f[w.media]=[]),f[w.media].push(m[w.rules]))}for(var C in n)n.hasOwnProperty(C)&&n[C]&&n[C].parentNode===q&&q.removeChild(n[C]);n.length=0;for(var D in f)if(f.hasOwnProperty(D)){var E=j.createElement("style"),F=f[D].join("\n");E.type="text/css",E.media=D,q.insertBefore(E,o.nextSibling),E.styleSheet?E.styleSheet.cssText=F:E.appendChild(j.createTextNode(F)),n.push(E)}},v=function(a,b,d){var e=a.replace(c.regex.keyframes,"").match(c.regex.media),f=e&&e.length||0;b=b.substring(0,b.lastIndexOf("/"));var g=function(a){return a.replace(c.regex.urls,"$1"+b+"$2$3")},h=!f&&d;b.length&&(b+="/"),h&&(f=1);for(var i=0;f>i;i++){var j,k,n,o;h?(j=d,m.push(g(a))):(j=e[i].match(c.regex.findStyles)&&RegExp.$1,m.push(RegExp.$2&&g(RegExp.$2))),n=j.split(","),o=n.length;for(var p=0;o>p;p++)k=n[p],l.push({media:k.split("(")[0].match(c.regex.only)&&RegExp.$2||"all",rules:m.length-1,hasquery:k.indexOf("(")>-1,minw:k.match(c.regex.minw)&&parseFloat(RegExp.$1)+(RegExp.$2||""),maxw:k.match(c.regex.maxw)&&parseFloat(RegExp.$1)+(RegExp.$2||"")})}u()},w=function(){if(d.length){var b=d.shift();f(b.href,function(c){v(c,b.href,b.media),o[b.href]=!0,a.setTimeout(function(){w()},0)})}},x=function(){for(var b=0;b'); 26 | if (show) 
27 | div.addClass('in'); 28 | var id = 'rcode-643E0F36' + currentIndex++; 29 | div.attr('id', id); 30 | $(this).before(div); 31 | $(this).detach().appendTo(div); 32 | 33 | // add a show code button right above 34 | var showCodeText = $('' + (show ? 'Hide' : 'Code') + ''); 35 | var showCodeButton = $(''); 36 | showCodeButton.append(showCodeText); 37 | showCodeButton 38 | .attr('data-toggle', 'collapse') 39 | .attr('data-target', '#' + id) 40 | .attr('aria-expanded', true) 41 | .attr('aria-controls', id) 42 | .css('margin-bottom', '4px'); 43 | 44 | var buttonRow = $('
'); 45 | var buttonCol = $('
'); 46 | 47 | buttonCol.append(showCodeButton); 48 | buttonRow.append(buttonCol); 49 | 50 | div.before(buttonRow); 51 | 52 | // update state of button on show/hide 53 | div.on('hidden.bs.collapse', function () { 54 | showCodeText.text('Code'); 55 | }); 56 | div.on('show.bs.collapse', function () { 57 | showCodeText.text('Hide'); 58 | }); 59 | }); 60 | 61 | } 62 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/navigation-1.0/tabsets.js: -------------------------------------------------------------------------------- 1 | 2 | 3 | window.buildTabsets = function(tocID) { 4 | 5 | // build a tabset from a section div with the .tabset class 6 | function buildTabset(tabset) { 7 | 8 | // check for fade and pills options 9 | var fade = tabset.hasClass("tabset-fade"); 10 | var pills = tabset.hasClass("tabset-pills"); 11 | var navClass = pills ? "nav-pills" : "nav-tabs"; 12 | 13 | // determine the heading level of the tabset and tabs 14 | var match = tabset.attr('class').match(/level(\d) /); 15 | if (match === null) 16 | return; 17 | var tabsetLevel = Number(match[1]); 18 | var tabLevel = tabsetLevel + 1; 19 | 20 | // find all subheadings immediately below 21 | var tabs = tabset.find("div.section.level" + tabLevel); 22 | if (!tabs.length) 23 | return; 24 | 25 | // create tablist and tab-content elements 26 | var tabList = $(''); 27 | $(tabs[0]).before(tabList); 28 | var tabContent = $('
'); 29 | $(tabs[0]).before(tabContent); 30 | 31 | // build the tabset 32 | tabs.each(function(i) { 33 | 34 | // get the tab div 35 | var tab = $(tabs[i]); 36 | 37 | // get the id then sanitize it for use with bootstrap tabs 38 | var id = tab.attr('id'); 39 | 40 | // remove any table of contents entries associated with 41 | // this ID (since we'll be removing the heading element) 42 | $("div#" + tocID + " li a[href='#" + id + "']").parent().remove(); 43 | 44 | // sanitize the id for use with bootstrap tabs 45 | id = id.replace(/[.\/?&!#<>]/g, '').replace(/\s/g, '_'); 46 | tab.attr('id', id); 47 | 48 | // get the heading element within it, grab it's text, then remove it 49 | var heading = tab.find('h' + tabLevel + ':first'); 50 | var headingText = heading.html(); 51 | heading.remove(); 52 | 53 | // build and append the tab list item 54 | var a = $('' + headingText + ''); 55 | a.attr('href', '#' + id); 56 | a.attr('aria-controls', id); 57 | var li = $('
  • '); 58 | li.append(a); 59 | if (i === 0) 60 | li.attr('class', 'active'); 61 | tabList.append(li); 62 | 63 | // set it's attributes 64 | tab.attr('role', 'tabpanel'); 65 | tab.addClass('tab-pane'); 66 | tab.addClass('tabbed-pane'); 67 | if (fade) 68 | tab.addClass('fade'); 69 | if (i === 0) { 70 | tab.addClass('active'); 71 | if (fade) 72 | tab.addClass('in'); 73 | } 74 | 75 | // move it into the tab content div 76 | tab.detach().appendTo(tabContent); 77 | }); 78 | } 79 | 80 | // convert section divs with the .tabset class to tabsets 81 | var tabsets = $("div.section.tabset"); 82 | tabsets.each(function(i) { 83 | buildTabset($(tabsets[i])); 84 | }); 85 | }; 86 | 87 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/navigation-1.1/FileSaver.min.js: -------------------------------------------------------------------------------- 1 | /*! @source http://purl.eligrey.com/github/FileSaver.js/blob/master/FileSaver.js */ 2 | var saveAs=saveAs||function(e){"use strict";if("undefined"==typeof navigator||!/MSIE [1-9]\./.test(navigator.userAgent)){var t=e.document,n=function(){return e.URL||e.webkitURL||e},o=t.createElementNS("http://www.w3.org/1999/xhtml","a"),r="download"in o,i=function(e){var t=new MouseEvent("click");e.dispatchEvent(t)},a=/Version\/[\d\.]+.*Safari/.test(navigator.userAgent),c=e.webkitRequestFileSystem,f=e.requestFileSystem||c||e.mozRequestFileSystem,u=function(t){(e.setImmediate||e.setTimeout)(function(){throw t},0)},d="application/octet-stream",s=0,l=4e4,v=function(e){var t=function(){"string"==typeof e?n().revokeObjectURL(e):e.remove()};setTimeout(t,l)},p=function(e,t,n){t=[].concat(t);for(var o=t.length;o--;){var r=e["on"+t[o]];if("function"==typeof r)try{r.call(e,n||e)}catch(i){u(i)}}},w=function(e){return/^\s*(?:text\/\S*|application\/xml|\S*\/\S*\+xml)\s*;.*charset\s*=\s*utf-8/i.test(e.type)?new Blob(["\ufeff",e],{type:e.type}):e},y=function(t,u,l){l||(t=w(t));var 
y,m,S,h=this,R=t.type,O=!1,g=function(){p(h,"writestart progress write writeend".split(" "))},b=function(){if(m&&a&&"undefined"!=typeof FileReader){var o=new FileReader;return o.onloadend=function(){var e=o.result;m.location.href="data:attachment/file"+e.slice(e.search(/[,;]/)),h.readyState=h.DONE,g()},o.readAsDataURL(t),void(h.readyState=h.INIT)}if((O||!y)&&(y=n().createObjectURL(t)),m)m.location.href=y;else{var r=e.open(y,"_blank");void 0===r&&a&&(e.location.href=y)}h.readyState=h.DONE,g(),v(y)},E=function(e){return function(){return h.readyState!==h.DONE?e.apply(this,arguments):void 0}},N={create:!0,exclusive:!1};return h.readyState=h.INIT,u||(u="download"),r?(y=n().createObjectURL(t),void setTimeout(function(){o.href=y,o.download=u,i(o),g(),v(y),h.readyState=h.DONE})):(e.chrome&&R&&R!==d&&(S=t.slice||t.webkitSlice,t=S.call(t,0,t.size,d),O=!0),c&&"download"!==u&&(u+=".download"),(R===d||c)&&(m=e),f?(s+=t.size,void f(e.TEMPORARY,s,E(function(e){e.root.getDirectory("saved",N,E(function(e){var n=function(){e.getFile(u,N,E(function(e){e.createWriter(E(function(n){n.onwriteend=function(t){m.location.href=e.toURL(),h.readyState=h.DONE,p(h,"writeend",t),v(e)},n.onerror=function(){var e=n.error;e.code!==e.ABORT_ERR&&b()},"writestart progress write abort".split(" ").forEach(function(e){n["on"+e]=h["on"+e]}),n.write(t),h.abort=function(){n.abort(),h.readyState=h.DONE},h.readyState=h.WRITING}),b)}),b)};e.getFile(u,{create:!1},E(function(e){e.remove(),n()}),E(function(e){e.code===e.NOT_FOUND_ERR?n():b()}))}),b)}),b)):void b())},m=y.prototype,S=function(e,t,n){return new y(e,t,n)};return"undefined"!=typeof navigator&&navigator.msSaveOrOpenBlob?function(e,t,n){return n||(e=w(e)),navigator.msSaveOrOpenBlob(e,t||"download")}:(m.abort=function(){var e=this;e.readyState=e.DONE,p(e,"abort")},m.readyState=m.INIT=0,m.WRITING=1,m.DONE=2,m.error=m.onwritestart=m.onprogress=m.onwrite=m.onabort=m.onerror=m.onwriteend=null,S)}}("undefined"!=typeof self&&self||"undefined"!=typeof 
window&&window||this.content);"undefined"!=typeof module&&module.exports?module.exports.saveAs=saveAs:"undefined"!=typeof define&&null!==define&&null!==define.amd&&define([],function(){return saveAs}); -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/navigation-1.1/codefolding.js: -------------------------------------------------------------------------------- 1 | 2 | window.initializeCodeFolding = function(show) { 3 | 4 | // handlers for show-all and hide all 5 | $("#rmd-show-all-code").click(function() { 6 | $('div.r-code-collapse').each(function() { 7 | $(this).collapse('show'); 8 | }); 9 | }); 10 | $("#rmd-hide-all-code").click(function() { 11 | $('div.r-code-collapse').each(function() { 12 | $(this).collapse('hide'); 13 | }); 14 | }); 15 | 16 | // index for unique code element ids 17 | var currentIndex = 1; 18 | 19 | // select all R code blocks 20 | var rCodeBlocks = $('pre.r'); 21 | rCodeBlocks.each(function() { 22 | 23 | // create a collapsable div to wrap the code in 24 | var div = $('
    '); 25 | if (show) 26 | div.addClass('in'); 27 | var id = 'rcode-643E0F36' + currentIndex++; 28 | div.attr('id', id); 29 | $(this).before(div); 30 | $(this).detach().appendTo(div); 31 | 32 | // add a show code button right above 33 | var showCodeText = $('' + (show ? 'Hide' : 'Code') + ''); 34 | var showCodeButton = $(''); 35 | showCodeButton.append(showCodeText); 36 | showCodeButton 37 | .attr('data-toggle', 'collapse') 38 | .attr('data-target', '#' + id) 39 | .attr('aria-expanded', true) 40 | .attr('aria-controls', id) 41 | .css('margin-bottom', '4px'); 42 | 43 | var buttonRow = $('
    '); 44 | var buttonCol = $('
    '); 45 | 46 | buttonCol.append(showCodeButton); 47 | buttonRow.append(buttonCol); 48 | 49 | div.before(buttonRow); 50 | 51 | // update state of button on show/hide 52 | div.on('hidden.bs.collapse', function () { 53 | showCodeText.text('Code'); 54 | }); 55 | div.on('show.bs.collapse', function () { 56 | showCodeText.text('Hide'); 57 | }); 58 | }); 59 | 60 | } 61 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/navigation-1.1/sourceembed.js: -------------------------------------------------------------------------------- 1 | 2 | 3 | window.initializeSourceEmbed = function(filename) { 4 | $("#rmd-download-source").click(function() { 5 | var src = window.atob($("#rmd-source-code").html()); 6 | var blob = new Blob([src], {type: "text/x-r-markdown"}); 7 | saveAs(blob, filename); 8 | }); 9 | }; 10 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/navigation-1.1/tabsets.js: -------------------------------------------------------------------------------- 1 | 2 | 3 | window.buildTabsets = function(tocID) { 4 | 5 | // build a tabset from a section div with the .tabset class 6 | function buildTabset(tabset) { 7 | 8 | // check for fade and pills options 9 | var fade = tabset.hasClass("tabset-fade"); 10 | var pills = tabset.hasClass("tabset-pills"); 11 | var navClass = pills ? "nav-pills" : "nav-tabs"; 12 | 13 | // determine the heading level of the tabset and tabs 14 | var match = tabset.attr('class').match(/level(\d) /); 15 | if (match === null) 16 | return; 17 | var tabsetLevel = Number(match[1]); 18 | var tabLevel = tabsetLevel + 1; 19 | 20 | // find all subheadings immediately below 21 | var tabs = tabset.find("div.section.level" + tabLevel); 22 | if (!tabs.length) 23 | return; 24 | 25 | // create tablist and tab-content elements 26 | var tabList = $(''); 27 | $(tabs[0]).before(tabList); 28 | var tabContent = $('
    '); 29 | $(tabs[0]).before(tabContent); 30 | 31 | // build the tabset 32 | tabs.each(function(i) { 33 | 34 | // get the tab div 35 | var tab = $(tabs[i]); 36 | 37 | // get the id then sanitize it for use with bootstrap tabs 38 | var id = tab.attr('id'); 39 | 40 | // remove any table of contents entries associated with 41 | // this ID (since we'll be removing the heading element) 42 | $("div#" + tocID + " li a[href='#" + id + "']").parent().remove(); 43 | 44 | // sanitize the id for use with bootstrap tabs 45 | id = id.replace(/[.\/?&!#<>]/g, '').replace(/\s/g, '_'); 46 | tab.attr('id', id); 47 | 48 | // get the heading element within it, grab it's text, then remove it 49 | var heading = tab.find('h' + tabLevel + ':first'); 50 | var headingText = heading.html(); 51 | heading.remove(); 52 | 53 | // build and append the tab list item 54 | var a = $('' + headingText + ''); 55 | a.attr('href', '#' + id); 56 | a.attr('aria-controls', id); 57 | var li = $('
  • '); 58 | li.append(a); 59 | if (i === 0) 60 | li.attr('class', 'active'); 61 | tabList.append(li); 62 | 63 | // set it's attributes 64 | tab.attr('role', 'tabpanel'); 65 | tab.addClass('tab-pane'); 66 | tab.addClass('tabbed-pane'); 67 | if (fade) 68 | tab.addClass('fade'); 69 | if (i === 0) { 70 | tab.addClass('active'); 71 | if (fade) 72 | tab.addClass('in'); 73 | } 74 | 75 | // move it into the tab content div 76 | tab.detach().appendTo(tabContent); 77 | }); 78 | } 79 | 80 | // convert section divs with the .tabset class to tabsets 81 | var tabsets = $("div.section.tabset"); 82 | tabsets.each(function(i) { 83 | buildTabset($(tabsets[i])); 84 | }); 85 | }; 86 | 87 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/out/libs/tocify-1.9.1/jquery.tocify.css: -------------------------------------------------------------------------------- 1 | /* 2 | * jquery.tocify.css 1.9.1 3 | * Author: @gregfranko 4 | */ 5 | 6 | /* The Table of Contents container element */ 7 | .tocify { 8 | width: 20%; 9 | max-height: 90%; 10 | overflow: auto; 11 | margin-left: 2%; 12 | position: fixed; 13 | border: 1px solid #ccc; 14 | webkit-border-radius: 6px; 15 | moz-border-radius: 6px; 16 | border-radius: 6px; 17 | } 18 | 19 | /* The Table of Contents is composed of multiple nested unordered lists. These styles remove the default styling of an unordered list because it is ugly. */ 20 | .tocify ul, .tocify li { 21 | list-style: none; 22 | margin: 0; 23 | padding: 0; 24 | border: none; 25 | line-height: 30px; 26 | } 27 | 28 | /* Top level header elements */ 29 | .tocify-header { 30 | text-indent: 10px; 31 | } 32 | 33 | /* Top level subheader elements. These are the first nested items underneath a header element. */ 34 | .tocify-subheader { 35 | text-indent: 20px; 36 | display: none; 37 | } 38 | 39 | /* Makes the font smaller for all subheader elements. 
*/ 40 | .tocify-subheader li { 41 | font-size: 12px; 42 | } 43 | 44 | /* Further indents second level subheader elements. */ 45 | .tocify-subheader .tocify-subheader { 46 | text-indent: 30px; 47 | } 48 | 49 | /* Further indents third level subheader elements. You can continue this pattern if you have more nested elements. */ 50 | .tocify-subheader .tocify-subheader .tocify-subheader { 51 | text-indent: 40px; 52 | } 53 | 54 | /* Twitter Bootstrap Override Style */ 55 | .tocify .tocify-item > a, .tocify .nav-list .nav-header { 56 | margin: 0px; 57 | } 58 | 59 | /* Twitter Bootstrap Override Styles */ 60 | .tocify .tocify-item a, .tocify .list-group-item { 61 | padding: 5px; 62 | } 63 | 64 | .tocify .nav-pills > li { 65 | float: none; 66 | } 67 | 68 | /* We don't override the bootstrap colors because this gives us the 69 | wrong selection colors when using bootstrap themes 70 | 71 | .tocify .list-group-item:hover, .tocify .list-group-item:focus { 72 | background-color: #f5f5f5; 73 | } 74 | 75 | .tocify .list-group-item.active:hover, .tocify .list-group-item.active:focus { 76 | background-color: #428bca; 77 | } 78 | */ 79 | 80 | /* End Twitter Bootstrap Override Styles */ 81 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/misc/resampling/resampling.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Sampling papers for meta-analysis" 3 | date: "`r Sys.Date()`" 4 | output: 5 | html_document: 6 | highlight: tango 7 | theme: spacelab 8 | --- 9 | 10 | ```{r setup, include=FALSE} 11 | library(metafor) 12 | library(langcog) 13 | library(dplyr) 14 | library(purrr) 15 | library(ggplot2) 16 | theme_set(theme_bw()) 17 | font <- "Open Sans" 18 | library(knitr) 19 | opts_chunk$set(cache = T, message = F, warning = F) 20 | source("global.R") 21 | ``` 22 | 23 | Summary functions. 
24 | ```{r summary_funs} 25 | calculate_effect_size <- function(sample_data) { 26 | model <- rma(d, vi = d_var, slab = as.character(short_cite), 27 | data = sample_data, method = "REML", 28 | control = list(maxiter = 1000, stepadj = 0.5)) 29 | #verbose = TRUE) 30 | predict(model)$pred 31 | } 32 | 33 | calculate_age_effect <- function(sample_data) { 34 | model <- rma(d ~ mean_age, vi = d_var, slab = as.character(short_cite), 35 | data = sample_data, method = "REML", 36 | control = list(maxiter = 1000, stepadj = 0.5)) 37 | #verbose = TRUE) 38 | model$b["mean_age", 1] 39 | } 40 | ``` 41 | 42 | Bootstrapping functions. 43 | ```{r bootstrap_funs} 44 | get_paper_age_weights <- function(ma_data) { 45 | years = unlist(lapply(strsplit(ma_data$short_cite, "[()]"), 46 | function(x) x[2])) %>% 47 | as.numeric() %>% 48 | ifelse(is.na(.), max(., na.rm = T),.) 49 | 50 | dif = years - min(years, na.rm = T) # get time since seed year 51 | weights = (max(dif) + 1) - dif # add 1 to get rid of zeros 52 | weights 53 | } 54 | 55 | one_sample <- function(ma_data, summary_function, n, sample_strategy) { 56 | function(k) { 57 | if (sample_strategy == "uniform"){ 58 | do.call(summary_function, list(sample_n(ma_data, size = n, replace = F))) 59 | } else if (sample_strategy == "age") { 60 | age_weights = get_paper_age_weights(ma_data) 61 | do.call(summary_function, list(sample_n(ma_data, size = n, 62 | weight = age_weights, replace = F))) 63 | } 64 | } 65 | } 66 | 67 | all_samples <- function(ma_data, summary_function, n, nboot) { 68 | sample_values <- 1:nboot %>% 69 | map(one_sample(ma_data, summary_function, n, "age")) %>% 70 | unlist() 71 | data.frame(n = n, 72 | mean = mean(sample_values), 73 | ci_lower = ci_lower(sample_values), 74 | ci_upper = ci_upper(sample_values), 75 | row.names = NULL) 76 | } 77 | 78 | dataset_data <- function(ma_data, summary_function, nboot) { 79 | overall <- do.call(summary_function, list(ma_data)) 80 | seq(10, nrow(ma_data), by = 1) %>% 81 | map(function(n) 
all_samples(ma_data, summary_function, n, nboot)) %>% 82 | bind_rows() %>% 83 | mutate(dataset = unique(ma_data$dataset), 84 | overall_est = overall) 85 | } 86 | ``` 87 | 88 | Estimate effect size as a function of number of papers sampled. 89 | ```{r calc_es} 90 | es_data <- all_data %>% 91 | split(.$dataset) %>% 92 | map(function(ma_data) dataset_data(ma_data, calculate_effect_size, 100)) %>% 93 | bind_rows() 94 | ``` 95 | 96 | ```{r plot_es, cache=FALSE, fig.width=10, fig.height=6} 97 | ggplot(es_data, aes(x = n, y = mean, fill = dataset)) + 98 | facet_wrap(~dataset) + 99 | geom_ribbon(aes(ymin = ci_lower, ymax = ci_upper), alpha = 0.5) + 100 | geom_hline(aes(yintercept = overall_est), linetype = "dashed") + 101 | geom_line(weight = 1.5) + 102 | xlab("Number of papers") + 103 | ylab("Effect Size Estimate") + 104 | scale_fill_solarized(guide = FALSE) + 105 | theme(text = element_text(family = font), 106 | panel.grid.minor = element_blank()) 107 | ``` 108 | 109 | Estimate age coefficient as a function of number of papers sampled. 
110 | ```{r calc_age} 111 | age_data <- all_data %>% 112 | split(.$dataset) %>% 113 | map(function(ma_data) dataset_data(ma_data, calculate_age_effect, 100)) %>% 114 | bind_rows() 115 | ``` 116 | 117 | ```{r plot_age, cache=FALSE, fig.width=10, fig.height=6} 118 | ggplot(age_data, aes(x = n, y = mean, fill = dataset)) + 119 | facet_wrap(~dataset) + 120 | geom_ribbon(aes(ymin = ci_lower, ymax = ci_upper), alpha = 0.5) + 121 | geom_hline(aes(yintercept = overall_est), linetype = "dashed") + 122 | geom_line(weight = 1.5) + 123 | xlab("Number of papers") + 124 | ylab("Age Coefficient Estimate") + 125 | scale_fill_solarized(guide = FALSE) + 126 | theme(text = element_text(family = font), 127 | panel.grid.minor = element_blank()) 128 | ``` 129 | 130 | Effect size as a function of year 131 | ```{r, plot_es_year, cache=FALSE, fig.width=10, fig.height=6} 132 | all_data$year = unlist(lapply(strsplit(all_data$short_cite, 133 | "[()]"),function(x) x[2])) %>% 134 | as.numeric() 135 | 136 | ggplot(all_data, aes(x = year, y = d)) + 137 | geom_smooth(method = lm) + 138 | ylab("Effect Size") + 139 | facet_wrap(~dataset) + 140 | theme(text = element_text(family = font), 141 | panel.grid.minor = element_blank()) 142 | ``` 143 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/subselect_age/Subselect_age.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/subselect_age/Subselect_age.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/subselect_age/fig3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/subselect_age/fig3.pdf 
-------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/subselect_age/fig4.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/subselect_age/fig4.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/misc/subselect_age/metalab_synthesis_age_subset.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/misc/subselect_age/metalab_synthesis_age_subset.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/Synthesis Cover Letter.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/Synthesis Cover Letter.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/figs/fig3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/figs/fig3.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/figs/fig3_lab.ai: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/figs/fig3_lab.ai -------------------------------------------------------------------------------- 
/write-ups/synthesis_paper/open_mind/figs/fig3_lab.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/figs/fig3_lab.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/figs/fig3legend.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/figs/fig3legend.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/figs/fig4.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/figs/fig4.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/figs/fig4_lab.ai: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/figs/fig4_lab.ai -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/figs/fig4_lab.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/figs/fig4_lab.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/figs/fig4legend.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/figs/fig4legend.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/metalab_synthesis.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/metalab_synthesis.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/metalab_synthesis_latex_formatted.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/metalab_synthesis_latex_formatted.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/metalab_synthesis_latex_formatted.synctex.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/open_mind/metalab_synthesis_latex_formatted.synctex.gz -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/open_mind.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/open_mind/tmp-pdfcrop-95274.tex: 
-------------------------------------------------------------------------------- 1 | \catcode37 14 % percent 2 | \catcode33 12 % exclam 3 | \catcode34 12 % quote 4 | \catcode35 6 % hash 5 | \catcode39 12 % apostrophe 6 | \catcode40 12 % left parenthesis 7 | \catcode41 12 % right parenthesis 8 | \catcode45 12 % minus 9 | \catcode46 12 % period 10 | \catcode60 12 % less 11 | \catcode61 12 % equals 12 | \catcode62 12 % greater 13 | \catcode64 12 % at 14 | \catcode91 12 % left square 15 | \catcode93 12 % right square 16 | \catcode96 12 % back tick 17 | \catcode123 1 % left curly brace 18 | \catcode125 2 % right curly brace 19 | \catcode126 12 % tilde 20 | \catcode`\#=6 % 21 | \escapechar=92 % 22 | \def\IfUndefined#1#2#3{% 23 | \begingroup\expandafter\expandafter\expandafter\endgroup 24 | \expandafter\ifx\csname#1\endcsname\relax 25 | #2% 26 | \else 27 | #3% 28 | \fi 29 | } 30 | \def\pdffilehex{6D6574616C61625F73796E7468657369735F66696C65732F6669677572652D6C617465782F756E6E616D65642D6368756E6B2D322D312E706466} 31 | \IfUndefined{pdfunescapehex}{% 32 | \begingroup 33 | \gdef\pdffile{}% 34 | \def\do#1#2{% 35 | \ifx\relax#2\relax 36 | \ifx\relax#1\relax 37 | \else 38 | \errmessage{Invalid hex string, should not happen!}% 39 | \fi 40 | \else 41 | \lccode`0="#1#2\relax 42 | \lowercase{% 43 | \xdef\pdffile{\pdffile0}% 44 | }% 45 | \expandafter\do 46 | \fi 47 | }% 48 | \expandafter\do\pdffilehex\relax\relax 49 | \endgroup 50 | }{% 51 | \edef\pdffile{\pdfunescapehex{\pdffilehex}}% 52 | } 53 | \immediate\write-1{Input file: \pdffile} 54 | \pdfoutput=1 % 55 | \pdfcompresslevel=9 % 56 | \csname pdfmapfile\endcsname{} 57 | \def\setpdfversion#1{% 58 | \IfUndefined{pdfobjcompresslevel}{% 59 | }{% 60 | \ifnum#1<5 % 61 | \pdfobjcompresslevel=0 % 62 | \else 63 | \pdfobjcompresslevel=2 % 64 | \fi 65 | }% 66 | \IfUndefined{pdfminorversion}{% 67 | \IfUndefined{pdfoptionpdfminorversion}{% 68 | }{% 69 | \pdfoptionpdfminorversion=#1\relax 70 | }% 71 | }{% 72 | \pdfminorversion=#1\relax 73 | }% 
74 | } 75 | \def\page #1 [#2 #3 #4 #5]{% 76 | \count0=#1\relax 77 | \setbox0=\hbox{% 78 | \pdfximage page #1 mediabox{\pdffile}% 79 | \pdfrefximage\pdflastximage 80 | }% 81 | \pdfhorigin=-#2bp\relax 82 | \pdfvorigin=#3bp\relax 83 | \pdfpagewidth=#4bp\relax 84 | \advance\pdfpagewidth by -#2bp\relax 85 | \pdfpageheight=#5bp\relax 86 | \advance\pdfpageheight by -#3bp\relax 87 | \ht0=\pdfpageheight 88 | \shipout\box0\relax 89 | } 90 | \def\pageclip #1 [#2 #3 #4 #5][#6 #7 #8 #9]{% 91 | \count0=#1\relax 92 | \dimen0=#4bp\relax \advance\dimen0 by -#2bp\relax 93 | \edef\imagewidth{\the\dimen0}% 94 | \dimen0=#5bp\relax \advance\dimen0 by -#3bp\relax 95 | \edef\imageheight{\the\dimen0}% 96 | \pdfximage page #1 mediabox{\pdffile}% 97 | \setbox0=\hbox{% 98 | \kern -#2bp\relax 99 | \lower #3bp\hbox{\pdfrefximage\pdflastximage}% 100 | }% 101 | \wd0=\imagewidth\relax 102 | \ht0=\imageheight\relax 103 | \dp0=0pt\relax 104 | \pdfhorigin=#6pt\relax 105 | \pdfvorigin=#7bp\relax 106 | \pdfpagewidth=\imagewidth 107 | \advance\pdfpagewidth by #6bp\relax 108 | \advance\pdfpagewidth by #8bp\relax 109 | \pdfpageheight=\imageheight\relax 110 | \advance\pdfpageheight by #7bp\relax 111 | \advance\pdfpageheight by #9bp\relax 112 | \pdfxform0\relax 113 | \shipout\hbox{\pdfrefxform\pdflastxform}% 114 | }% 115 | \def\pageinclude#1{% 116 | \pdfhorigin=0pt\relax 117 | \pdfvorigin=0pt\relax 118 | \pdfximage page #1 mediabox{\pdffile}% 119 | \setbox0=\hbox{\pdfrefximage\pdflastximage}% 120 | \pdfpagewidth=\wd0\relax 121 | \pdfpageheight=\ht0\relax 122 | \advance\pdfpageheight by \dp0\relax 123 | \shipout\hbox{% 124 | \raise\dp0\box0\relax 125 | }% 126 | } 127 | \setpdfversion{4} 128 | \page 1 [1 2 571 391] 129 | \csname @@end\endcsname 130 | \end 131 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/paper_data/synthesis_paper_all_data.feather: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/paper_data/synthesis_paper_all_data.feather -------------------------------------------------------------------------------- /write-ups/synthesis_paper/paper_data/synthesis_paper_datasets.feather: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/paper_data/synthesis_paper_datasets.feather -------------------------------------------------------------------------------- /write-ups/synthesis_paper/paper_scripts/cache_paper_data.R: -------------------------------------------------------------------------------- 1 | # This file caches the data to be used in the synthesis paper. This is useful 2 | # because the data in the repository change frequently, and not all MAs in 3 | # metalab are reported in the paper. This file reads in the data that has been 4 | # cached by the scripts/cache_datasets.R script, and caches a second version of 5 | # the data to be used for the paper in ../paper_data/. To run this script, from main 6 | # dir: Rscript write-ups/synthesis_paper/paper_scripts/cache_paper_data.R 7 | # Thus, when data has been updated in the spreadsheets, two scripts need to be 8 | # run in order to include it in the paper (cache_datasets.R and cache_paper_data.R). 
9 | 10 | library(tidyverse) 11 | library(forcats) 12 | library(feather) 13 | 14 | # MAs in synthesis paper 15 | MAS <- c("idspref", "labadv", "gaze_following", "inphondb-native", 16 | "inphondb-nonnative", "phonotactics", "word_recognition", 17 | "mutex", "pointing_concurrent", "sounds", "inworddb", "symbolism") 18 | 19 | # read in all_data 20 | source("dashboard/global.R", chdir = TRUE) 21 | 22 | ######################## ALL_DATA ######################## 23 | # remove inappropriate conditions and rename MAs 24 | synthesis_paper_all_data <- all_data %>% 25 | filter(short_name %in% MAS) %>% 26 | filter(is.na(condition_type) | condition_type == "critical") %>% # remove control conditions from labadv 27 | filter(infant_type == "typical") %>% 28 | filter(coder != "Alvaro Iturralde") %>% # can update to new data later 29 | mutate(dataset = fct_recode(dataset, 30 | "Pointing and vocabulary" = 31 | "Pointing and vocabulary (concurrent)")) 32 | 33 | # write all_data to feather 34 | write_feather(synthesis_paper_all_data, "write-ups/synthesis_paper/paper_data/synthesis_paper_all_data.feather") 35 | 36 | ######################## DATSET METADATA ######################## 37 | # need to recompute datasets summary data, based on filtered all_data 38 | synthesis_paper_studies <- synthesis_paper_all_data %>% 39 | group_by(dataset) %>% 40 | summarise(num_experiments = n(), 41 | num_papers = length(unique(study_ID))) 42 | 43 | synthesis_paper_subjects <- synthesis_paper_all_data %>% 44 | rowwise() %>% 45 | mutate(n_total = sum(c(n_1, n_2), na.rm = TRUE)) %>% 46 | group_by(dataset, study_ID, same_infant) %>% # we want all unique participants (i.e. 
exclude those who are in more than 1 condition (same same_infant value), but include conditions where is.na(same_infant)) 47 | mutate(id = 1:n()) %>% 48 | filter(is.na(same_infant) | id == 1) %>% 49 | select(-id) %>% 50 | group_by(dataset) %>% 51 | summarise(num_subjects = sum(n_total)) 52 | 53 | synthesis_paper_datasets <- datasets %>% 54 | rename(dataset = name) %>% 55 | select(-num_experiments, -num_papers, -num_subjects, -moderators) %>% 56 | filter(short_name %in% MAS) %>% 57 | mutate(dataset = fct_recode(dataset, "Pointing and vocabulary" = 58 | "Pointing and vocabulary (concurrent)")) %>% 59 | left_join(synthesis_paper_studies) %>% # merge in filtered meta-data 60 | left_join(synthesis_paper_subjects) %>% 61 | rename(name = dataset) 62 | 63 | # write datasets to feather 64 | write_feather(synthesis_paper_datasets, "write-ups/synthesis_paper/paper_data/synthesis_paper_datasets.feather") 65 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/paper_scripts/synthesis_helpers.R: -------------------------------------------------------------------------------- 1 | ### Helper functions for synthesis paper ### 2 | 3 | # get MA effect size (random effect or mixed-effect [multilevel = T]) 4 | overall_es <- function(ma_data, multilevel){ 5 | if (multilevel){ 6 | model = metafor::rma.mv(d_calc, V = d_var_calc, 7 | random = ~ 1 | short_cite, data = ma_data) 8 | } else { 9 | model = metafor::rma(d_calc, d_var_calc, data = ma_data) 10 | } 11 | data.frame(dataset = ma_data$short_name[1], 12 | overall.d = model$b, 13 | ci_lower = model$ci.lb, 14 | ci_upper = model$ci.ub) 15 | } 16 | 17 | 18 | # Fail safe N 19 | get_fail_safe_N <- function(dataset, ma_data, targ) { 20 | fsn_string = fsn(d_calc, d_var_calc, data = ma_data, target = targ, type = "Orwin")$fsnum 21 | data.frame(dataset = dataset, fsn_string = fsn_string) 22 | } 23 | 24 | ## Eggers test for funnel assymetry 25 | eggers_tests <- function(ma_data){ 26 | model 
= rma(d_calc, d_var_calc, data = ma_data) # model (mixed-effect model not implemented for eggers test) 27 | egg.random = regtest(model) # Egger's test 28 | data.frame(dataset = ma_data$short_name[1], 29 | egg.random.z = egg.random$zval, 30 | egg.random.p = egg.random$pval) 31 | } 32 | 33 | 34 | ## gets CIs on p-curves 35 | get_all_CIS_multi <- function(df, alpha, increment) { 36 | ps <- seq(increment, alpha, increment) 37 | props = ps %>% 38 | map(function(p,d){sum(d == p)}, df$p_round) %>% 39 | unlist() 40 | cis = MultinomialCI::multinomialCI(props, alpha = alpha) 41 | data.frame(dataset = df$dataset[1], 42 | p = ps, 43 | ci.lower = cis[,1], 44 | ci.upper = cis[,2]) 45 | } 46 | -------------------------------------------------------------------------------- /write-ups/synthesis_paper/psychscience/SynthesisCoverLetter.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/psychscience/SynthesisCoverLetter.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/psychscience/figs/fig3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/psychscience/figs/fig3.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/psychscience/figs/fig3_lab.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/psychscience/figs/fig3_lab.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/psychscience/figs/fig3legend.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/psychscience/figs/fig3legend.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/psychscience/figs/fig4.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/psychscience/figs/fig4.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/psychscience/figs/fig4_lab.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/psychscience/figs/fig4_lab.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/psychscience/figs/fig4legend.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/psychscience/figs/fig4legend.pdf -------------------------------------------------------------------------------- /write-ups/synthesis_paper/psychscience/metalab_synthesis.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/psychscience/metalab_synthesis.docx -------------------------------------------------------------------------------- /write-ups/synthesis_paper/psychscience/metalab_synthesis.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/langcog/metalab-archive/44d4072db267a221c14322da15622eb410bed481/write-ups/synthesis_paper/psychscience/metalab_synthesis.pdf --------------------------------------------------------------------------------