├── .gitignore
├── 1_data
│   ├── 0_resources
│   │   ├── cpi-u-rs_1950-current.xlsx
│   │   ├── geocorr2014_county_to_msa.csv
│   │   └── msa_shortname_brookings.xlsx
│   ├── 2_interim
│   │   ├── acs_data_aggregated_interpolated.rds
│   │   ├── changes_2010t2020_vulnerability.rds
│   │   └── changes_2010t2020_vulnerability_nk.xlsx
│   └── 3_processed
│       ├── 2020_vulnerability_data_portland.geojson
│       ├── 2020_vulnerability_data_portland.xlsx
│       ├── Change in vulnerability memo.pdf
│       ├── acs_vuln_weighting_data.rds
│       ├── acs_vuln_weighting_data_full.rds
│       ├── acs_vuln_weighting_data_interpolated.rds
│       ├── change_vulnerability_score_2010t2020.png
│       ├── vulnerability_score_2010.png
│       ├── vulnerability_score_2015.png
│       └── vulnerability_score_2020.png
├── 2_references
│   └── images
│       ├── weighting_demo.gif
│       └── weighting_demo_low-res.gif
├── 3_functions
│   ├── aggregate_variables.R
│   ├── get_counties_from_cbsa.R
│   ├── get_states_from_stcnty_fips.R
│   ├── notin.R
│   └── range01.R
├── 4_scripts
│   ├── 1_data-processing
│   │   ├── 01_load_spatial_resources.R
│   │   ├── 02_process_acs_data.R
│   │   ├── 02b2_TEST_split_data_by_weights.R
│   │   └── 02b_process_longitudinal_acs_data.R
│   └── 2_analysis
│       ├── 01_generate_vulnerability_data.R
│       └── 01b_generate_vulnerability_data_interpolated.R
├── 5_apps
│   ├── change_explorer
│   │   ├── app.R
│   │   └── changes_2010t2020_vulnerability.rds
│   └── weighting_tool
│       ├── 00_prep_app.R
│       ├── 00b_prep_app_interp.R
│       ├── acs_vuln_weighting_data.rds
│       ├── acs_vuln_weighting_data_imputed.rds
│       ├── app.R
│       ├── base_data_download_labels.xlsx
│       ├── hatch_households_with_limited_english.rds
│       ├── hatch_people_of_color.rds
│       ├── hatch_persons_with_disabilities.rds
│       ├── hatch_retirees_65.rds
│       ├── hatch_vulnerable.rds
│       ├── hatch_youth_0_21.rds
│       └── primary_place_outline.rds
├── 6_reports
│   ├── Change in vulnerability memo.pdf
│   ├── changes_2010t2020_vulnerability_nk.xlsx
│   ├── vulnerability_changes_draft.zip
│   ├── vulnerability_score_2010.png
│   ├── vulnerability_score_2015.png
│   └── vulnerability_score_2020.png
├── GLOBAL_interpolation_addendum.R
├── LICENSE
├── README.md
├── global.R
├── variable_query.png
└── vulnerability_map.Rproj
/.gitignore:
--------------------------------------------------------------------------------
1 | # README preview files
2 | README.html
3 |
4 | # History files
5 | .Rhistory
6 | .Rapp.history
7 |
8 | # Session Data files
9 | .RData
10 | .RDataTmp
11 |
12 | # User-specific files
13 | .Ruserdata
14 |
15 | # Example code in package build process
16 | *-Ex.R
17 |
18 | # Output files from R CMD build
19 | /*.tar.gz
20 |
21 | # Output files from R CMD check
22 | /*.Rcheck/
23 |
24 | # RStudio files
25 | .Rproj.user/
26 |
27 | # produced vignettes
28 | vignettes/*.html
29 | vignettes/*.pdf
30 |
31 | # OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3
32 | .httr-oauth
33 |
34 | # knitr and R markdown default cache directories
35 | *_cache/
36 | /cache/
37 |
38 | # Temporary files created by R markdown
39 | *.utf8.md
40 | *.knit.md
41 |
42 | # R Environment Variables
43 | .Renviron
44 |
45 | # Shiny
46 | rsconnect/
47 | /rsconnect
48 |
49 | # OS generated files
50 | .DS_Store
51 | .DS_Store?
52 | ._*
53 | .Spotlight-V100
54 | .Trashes
55 | ehthumbs.db
56 | Thumbs.db
57 | *.DS_Store
58 | ~*
59 | .*
60 | !.gitignore
61 |
62 | # Private information or large datasets not to be synced to web servers
63 | private/
64 | private/*
65 |
--------------------------------------------------------------------------------
/1_data/0_resources/cpi-u-rs_1950-current.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/0_resources/cpi-u-rs_1950-current.xlsx
--------------------------------------------------------------------------------
/1_data/0_resources/geocorr2014_county_to_msa.csv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/0_resources/geocorr2014_county_to_msa.csv
--------------------------------------------------------------------------------
/1_data/0_resources/msa_shortname_brookings.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/0_resources/msa_shortname_brookings.xlsx
--------------------------------------------------------------------------------
/1_data/2_interim/acs_data_aggregated_interpolated.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/2_interim/acs_data_aggregated_interpolated.rds
--------------------------------------------------------------------------------
/1_data/2_interim/changes_2010t2020_vulnerability.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/2_interim/changes_2010t2020_vulnerability.rds
--------------------------------------------------------------------------------
/1_data/2_interim/changes_2010t2020_vulnerability_nk.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/2_interim/changes_2010t2020_vulnerability_nk.xlsx
--------------------------------------------------------------------------------
/1_data/3_processed/2020_vulnerability_data_portland.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/3_processed/2020_vulnerability_data_portland.xlsx
--------------------------------------------------------------------------------
/1_data/3_processed/Change in vulnerability memo.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/3_processed/Change in vulnerability memo.pdf
--------------------------------------------------------------------------------
/1_data/3_processed/acs_vuln_weighting_data.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/3_processed/acs_vuln_weighting_data.rds
--------------------------------------------------------------------------------
/1_data/3_processed/acs_vuln_weighting_data_full.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/3_processed/acs_vuln_weighting_data_full.rds
--------------------------------------------------------------------------------
/1_data/3_processed/acs_vuln_weighting_data_interpolated.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/3_processed/acs_vuln_weighting_data_interpolated.rds
--------------------------------------------------------------------------------
/1_data/3_processed/change_vulnerability_score_2010t2020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/3_processed/change_vulnerability_score_2010t2020.png
--------------------------------------------------------------------------------
/1_data/3_processed/vulnerability_score_2010.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/3_processed/vulnerability_score_2010.png
--------------------------------------------------------------------------------
/1_data/3_processed/vulnerability_score_2015.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/3_processed/vulnerability_score_2015.png
--------------------------------------------------------------------------------
/1_data/3_processed/vulnerability_score_2020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/1_data/3_processed/vulnerability_score_2020.png
--------------------------------------------------------------------------------
/2_references/images/weighting_demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/2_references/images/weighting_demo.gif
--------------------------------------------------------------------------------
/2_references/images/weighting_demo_low-res.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/2_references/images/weighting_demo_low-res.gif
--------------------------------------------------------------------------------
/3_functions/aggregate_variables.R:
--------------------------------------------------------------------------------
1 | ## Take a "long-format" ACS table, aggregate (sum) across multiple variables,
2 | ## and append a percentage based on an input summary variable
3 | aggregate_variables <- function(long_acs_data, agg_table, varnums,
4 | summary_var = NULL,
5 | varname = "new_var"){
6 |
7 | ## If two or more summary variables are specified, re-create the summary
8 | ## variable by aggregating those variables and row-binding it to the data
9 | if(length(summary_var) > 1){
10 | summarized_summary_var <- long_acs_data %>%
11 | filter(variable %in% summary_var) %>%
12 | group_by(GEOID) %>%
13 | summarize(estimate = sum(estimate, na.rm = T),
14 | moe = moe_sum(moe, estimate, na.rm = T)) %>%
15 | ungroup() %>%
16 | mutate(variable = "summary_var",
17 | agg = "count")
18 |
19 | summary_var <- "summary_var"
20 |
21 | long_acs_data <- long_acs_data %>%
22 | rbind(., summarized_summary_var)
23 | }
24 |
25 | # Separate into a table and variable number field
26 | df <- long_acs_data %>%
27 | separate(., variable, into = c("table", "varnum"),
28 | sep = "_", remove = F) %>%
29 | mutate(varnum = as.numeric(varnum))
30 |
31 | # Create a summary variable temporary df
32 | df_sumvar <- df %>%
33 | filter(variable == summary_var) %>%
34 | select(GEOID, summary_est = estimate, summary_moe = moe)
35 |
36 | # Aggregate the user-provided table
37 | df_counts <- df %>%
38 | filter(table == agg_table, varnum %in% varnums) %>%
39 | arrange(GEOID) %>%
40 | group_by(GEOID, agg) %>%
41 | summarize(variable = varname,
42 | estimate = sum(estimate, na.rm = T),
43 | moe = moe_sum(moe, estimate, na.rm = T)) %>%
44 | ungroup()
45 |
46 | ## TODO Make function return df_counts if no summary variable is passed
47 | # if(is.null(summary_var)){
48 | # return(df_counts)
49 | # }
50 |
51 | # Join summary variable, calculate percents, and rbind to count data
52 | df_counts %>%
53 | left_join(., df_sumvar, by = "GEOID") %>%
54 | group_by(GEOID) %>%
55 | summarize(variable = varname,
56 | pct = estimate / summary_est,
57 | pct_moe = moe_prop(estimate, summary_est, moe, summary_moe),
58 | agg = "percent") %>%
59 | rename(estimate = pct, moe = pct_moe) %>%
60 | rbind(., df_counts) %>%
61 | mutate(table = agg_table, varnum = NA_real_)
62 | }
63 |
64 | # aggregate_variables(acs_table_query,
65 | # agg_table = "B25014",
66 | # varnums = c(5,6,7,11,12,13),
67 | # summary_var = "B25014_001",
68 | # varname = "Overcrowded HH")
69 |
70 | ## With multiple summary variables
71 | # insuff_veh <- aggregate_variables(
72 | # acs_table_query, agg_table = "B08203", varnums = c(14,20,21,26,27,28),
73 | # summary_var = c("B08203_013", "B08203_019", "B08203_025"),
74 | # varname = "Insufficient commuter vehicles")
75 |
--------------------------------------------------------------------------------
/3_functions/get_counties_from_cbsa.R:
--------------------------------------------------------------------------------
1 | ## Get counties from cbsa code
2 | get_counties_from_cbsa <- function(cbsa_code) {
3 | cbsa_code <- as.numeric(cbsa_code)
4 | county2msa %>%
5 | filter(cbsa == cbsa_code) %>%
6 | pull(county)
7 | }
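8 |
9 | ## Illustrative usage (assumptions: `county2msa` has already been built by
10 | ## 4_scripts/1_data-processing/01_load_spatial_resources.R, and 38900 is the
11 | ## Portland-Vancouver-Hillsboro, OR-WA CBSA code -- substitute your own region):
12 | # get_counties_from_cbsa(38900)
13 | # #> e.g. a vector of 5-digit county FIPS codes such as "41051", "41067", ...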
--------------------------------------------------------------------------------
/3_functions/get_states_from_stcnty_fips.R:
--------------------------------------------------------------------------------
1 | ## Get states from vector of 5-digit FIPS
2 | get_states_from_stcnty_fips <- function(counties){
3 | counties %>%
4 | as.data.frame() %>%
5 | setNames("stcnty") %>%
6 | mutate(st = substr(stcnty, 1, 2)) %>%
7 | distinct(st) %>%
8 | pull(st)
9 | }
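10 |
11 | ## Illustrative usage (toy FIPS codes for demonstration only): the first two
12 | ## digits of each 5-digit state+county FIPS identify the state, so a mix of
13 | ## Oregon ("41...") and Washington ("53...") counties returns both state codes.
14 | # get_states_from_stcnty_fips(c("41051", "41067", "53011"))
15 | # #> [1] "41" "53"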
--------------------------------------------------------------------------------
/3_functions/notin.R:
--------------------------------------------------------------------------------
1 | ## opposite of %in%
2 | `%notin%` <- function(lhs, rhs) !(lhs %in% rhs)
3 |
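4 | ## Illustrative usage (toy values only):
5 | # c("41051", "41067") %notin% c("41051")
6 | # #> [1] FALSE  TRUE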
--------------------------------------------------------------------------------
/3_functions/range01.R:
--------------------------------------------------------------------------------
1 | ## Rescale a numeric vector to the 0-1 range (min-max normalization)
2 | range01 <- function(x, ...){(x - min(x, ...)) / (max(x, ...) - min(x, ...))}
3 |
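4 | ## Illustrative usage (toy values only): the minimum maps to 0 and the maximum to 1.
5 | ## Pass na.rm = TRUE through `...` when the vector contains missing values.
6 | # range01(c(2, 5, 10))
7 | # #> [1] 0.000 0.375 1.000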
--------------------------------------------------------------------------------
/4_scripts/1_data-processing/01_load_spatial_resources.R:
--------------------------------------------------------------------------------
1 |
2 | ## List of MSA shortnames
3 | msa_shortname <- rio::import("1_data/0_resources/msa_shortname_brookings.xlsx") %>%
4 | rename(cbsa13 = `CBSA FIPS (2013)`, cbsa_shortname = `CBSA Short Name (2013)`)
5 |
6 | ## County to MSA crosswalk (readLines()[-2] drops geocorr's second header row, which holds column labels rather than data)
7 | county2msa <- read.csv(textConnection(readLines("1_data/0_resources/geocorr2014_county_to_msa.csv")[-2]),
8 | colClasses = c("character", "character", "character", "character", "integer", "integer"),
9 | header = TRUE, sep=",") %>%
10 | left_join(., msa_shortname, by = c("cbsa" = "cbsa13"))
--------------------------------------------------------------------------------
/4_scripts/1_data-processing/02_process_acs_data.R:
--------------------------------------------------------------------------------
1 | ## This script queries American Community Survey data for a user-defined metro area and
2 | ## produces a table with pre-selected vulnerability indicators. You can change the metro
3 | ## area to your own community to download the data and export it for use in the shiny app.
4 | ## This script assumes you will download regional data and also a "core" city for that
5 | ## region. Make sure you adjust the `primary_place` variable if you seek an alternate city
6 | ## within a region.
7 |
8 | library(tidyverse)
9 | library(tidycensus)
10 | library(sf)
11 | library(tigris)
12 | library(furrr)
13 |
14 | options(
15 | scipen = 999,
16 | digits = 4,
17 | tigris_class = "sf",
18 | tigris_use_cache = T
19 | )
20 |
21 | ## Source script that establishes geographic constants for this application
22 | source("GLOBAL.R")
23 | source("3_functions/aggregate_variables.R")
24 |
25 | ## Download data using tidycensus #####
26 | ## View potential variables available to use
27 | acs20 <- load_variables(2020, "acs5", cache = TRUE)
28 | s20 <- load_variables(2020, "acs5/subject", cache = TRUE)
29 |
30 | ## Desired variables ####
31 | # B25106 Housing cost burden
32 | # B19301 Per capita income
33 | # B25010 Average household size
34 | # C17002 Poverty (<1, >1)
35 |
36 | # B03002 People of color
37 | # B03002 Black and Native American people
38 | # C16002 Households with limited English
39 | # B19013 Median household income
40 | # B19058 Food stamps recipients
41 | # B25003 Renter households
42 | # B23025 Unemployed persons
43 | # B25070 Rent cost burdened
44 | # B25091 Mortgage cost burdened
45 | # B08006 Commuters
46 | # B08203 Insufficient commuter vehicles / Vehicles available (0, 1, 2+)
47 | # B18101 Persons with disabilities
48 | # B15002 Adults without a 4-yr degree
49 | # B14001 Students (K-12)
50 | # B01001 Age (18-34, 35-64, 65+)
51 | # B25014 Overcrowded households
52 | # B28001 No computer access
53 | # B28002 No broadband access
54 |
55 | tables_to_download <- c(
56 | "B01001", "B15002", "B25014", "C16002", "B23025", "B08006",
57 | "B14001", "B28002", "B08203", "B25091", "B25070", "B18101",
58 | "B03002", "B25003", "B19013", "B19058", "B28001", "B19301",
59 | "B25010", "B25106", "C17002")
60 |
61 | ## Enable multi-core processing
62 | plan(multisession, workers = availableCores())
63 |
64 | acs_table_query <- future_map_dfr(
65 | .x = tables_to_download,
66 | .f = function(acs_table){
67 | query <- get_acs(geography = "tract",
68 | table = acs_table,
69 | year = acs_year,
70 | state = STATES_TO_DOWNLOAD) %>%
71 | filter(substr(GEOID, 1, 5) %in% COUNTIES_TO_DOWNLOAD) %>%
72 | mutate(agg = "count") %>%
73 | select(-NAME)
74 | })
75 |
76 | # core_tables <- c("B25106", "B03002", "B19013", "B25010", "B15002")
77 | #
78 | # acs_table_query_longitudinal <- future_map_dfr(
79 | # .x = c(2010:2020),
80 | # .f = function(acs_year){
81 | # map_dfr(
82 | # .x = core_tables,
83 | # .f = function(acs_table){
84 | # query <- get_acs(geography = "tract",
85 | # table = acs_table,
86 | # year = acs_year,
87 | # state = STATES_TO_DOWNLOAD) %>%
88 | # filter(substr(GEOID, 1, 5) %in% COUNTIES_TO_DOWNLOAD) %>%
89 | # mutate(agg = "count",
90 | # year = acs_year) %>%
91 | # select(-NAME)
92 | # })
93 | # })
94 |
95 |
96 | ## Aggregate data
97 | hcb <- aggregate_variables(
98 | acs_table_query, agg_table = "B25106", varnums = c(6,10,14,18,22,28,32,36,40,44),
99 | summary_var = "B25106_001", varname = "Housing cost burdened")
100 |
101 | # Special case -- just change it for formatting
102 | pci <- acs_table_query %>% filter(variable == "B19301_001") %>%
103 | mutate(agg = "aggregate", variable = "Per capita income",
104 | table = "B19301", varnum = 1)
105 |
106 | # Special case -- just change it for formatting
107 | hhsize <- acs_table_query %>% filter(variable == "B25010_001") %>%
108 | mutate(agg = "aggregate", variable = "Average household size",
109 | table = "B25010", varnum = 1)
110 |
111 | pov1 <- aggregate_variables(
112 | acs_table_query, agg_table = "C17002", varnums = c(2,3),
113 | summary_var = "C17002_001", varname = "People below 1.0 poverty")
114 |
115 | pov2 <- aggregate_variables(
116 | acs_table_query, agg_table = "C17002", varnums = c(2:7),
117 | summary_var = "C17002_001", varname = "People below 2.0 poverty")
118 |
119 | youth <- aggregate_variables(
120 | acs_table_query, agg_table = "B01001", varnums = c(3:9, 27:33),
121 | summary_var = "B01001_001", varname = "Youth (0-21)")
122 |
123 | elders_and_youth <- aggregate_variables(
124 | acs_table_query, agg_table = "B01001", varnums = c(3:9, 27:33, 20:25, 44:49),
125 | summary_var = "B01001_001", varname = "Youth and retirees")
126 |
127 | poc <- aggregate_variables(
128 | acs_table_query, agg_table = "B03002", varnums = c(4:9,12),
129 | summary_var = "B03002_001", varname = "People of color")
130 |
131 | bna <- aggregate_variables(
132 | acs_table_query, agg_table = "B03002", varnums = c(4,5,14,15),
133 | summary_var = "B03002_001", varname = "Black + Native American")
134 |
135 | lep <- aggregate_variables(
136 | acs_table_query, agg_table = "C16002", varnums = c(4,7,10,13),
137 | summary_var = "C16002_001", varname = "Households with limited English")
138 |
139 | # Special case -- just change it for formatting
140 | mhi <- acs_table_query %>% filter(variable == "B19013_001") %>%
141 | mutate(agg = "median", variable = "Median household income",
142 | table = "B19013", varnum = 1)
143 |
144 | food_stamps <- aggregate_variables(
145 | acs_table_query, agg_table = "B19058", varnums = c(2),
146 | summary_var = "B19058_001", varname = "Food stamps recipients")
147 |
148 | renters <- aggregate_variables(
149 | acs_table_query, agg_table = "B25003", varnums = c(3),
150 | summary_var = "B25003_001", varname = "Renter households")
151 |
152 | unemployed_persons <- aggregate_variables(
153 | acs_table_query, agg_table = "B23025", varnums = c(5),
154 | summary_var = "B23025_001", varname = "Unemployed persons")
155 |
156 | rent_hcb <- aggregate_variables(
157 | acs_table_query, agg_table = "B25070", varnums = c(8,9,10),
158 | summary_var = "B25070_001", varname = "Cost-burdened renters (35%+)")
159 |
160 | mortgage_hcb <- aggregate_variables(
161 | acs_table_query, agg_table = "B25091", varnums = c(9,10,11),
162 | summary_var = "B25091_002", varname = "Cost-burdened owners with mortgage (35%+)")
163 |
164 | commuters <- aggregate_variables(
165 | acs_table_query, agg_table = "B08006", varnums = c(2,8,14,15,16),
166 | summary_var = "B08006_001", varname = "Commuters")
167 |
168 | # Special case -- three summary variables
169 | insuff_veh <- aggregate_variables(
170 | acs_table_query, agg_table = "B08203", varnums = c(14,20,21,26,27,28),
171 | summary_var = c("B08203_013", "B08203_019", "B08203_025"),
172 | varname = "Insufficient commuter vehicles")
173 |
174 | veh0 <- aggregate_variables(
175 | acs_table_query, agg_table = "B08203", varnums = c(2),
176 | summary_var = "B08203_001",
177 | varname = "Households without a vehicle")
178 |
179 | disabled <- aggregate_variables(
180 | acs_table_query, agg_table = "B18101", varnums = c(4,7,10,13,16,19,23,26,29,32,35,38),
181 | summary_var = "B18101_001", varname = "Persons with disabilities")
182 |
183 | no4yr <- aggregate_variables(
184 | acs_table_query, agg_table = "B15002", varnums = c(3:14, 20:31),
185 | summary_var = "B15002_001", varname = "Adults without 4-yr degree")
186 |
187 | students <- aggregate_variables(
188 | acs_table_query, agg_table = "B14001", varnums = c(3:7),
189 | summary_var = "B14001_001", varname = "Students (K-12)")
190 |
191 | retirees <- aggregate_variables(
192 | acs_table_query, agg_table = "B01001", varnums = c(20:25, 44:49),
193 | summary_var = "B01001_001", varname = "Retirees (65+)")
194 |
195 | overcrowded_hh <- aggregate_variables(
196 | acs_table_query, agg_table = "B25014", varnums = c(5,6,7,11,12,13),
197 | summary_var = "B25014_001", varname = "Overcrowded households")
198 |
199 | no_computer <- aggregate_variables(
200 | acs_table_query, agg_table = "B28001", varnums = c(11),
201 | summary_var = "B28001_001", varname = "No computer access")
202 |
203 | no_broadband <- aggregate_variables(
204 | acs_table_query, agg_table = "B28002", varnums = c(3,6,12,13),
205 | summary_var = "B28002_001", varname = "No broadband access")
206 |
207 | ## Combine all the aggregated data together
208 | acs_data_aggregated <- rbind(
209 | poc, bna, lep, mhi, food_stamps, renters, unemployed_persons,
210 | rent_hcb, mortgage_hcb, commuters, insuff_veh, disabled, no4yr,
211 | students, retirees, overcrowded_hh, no_computer, no_broadband,
212 | hcb, pci, hhsize, veh0, pov1, pov2, youth, elders_and_youth)
213 |
214 |
215 | ## Clean up original query (reconcile names) and rbind to aggregated data
216 | all_data <- acs_table_query %>%
217 | separate(., variable, into = c("table", "varnum"),
218 | sep = "_", remove = F) %>%
219 | mutate(varnum = as.numeric(varnum)) %>%
220 | rbind(acs_data_aggregated, .)
221 |
222 | ## For the app, transform the long data to wide format and export
223 | wide_data <- acs_data_aggregated %>%
224 | filter(agg %in% c("percent", "median", "aggregate")) %>%
225 | select(GEOID, variable, E = estimate) %>% #, M = moe) %>%
226 | pivot_wider(id_cols = GEOID, names_from = variable, values_from = c(E)) %>%
227 | janitor::clean_names() %>% rename(GEOID = geoid) %>%
228 | left_join(., TRACTS.SF) %>%
229 | st_as_sf() %>%
230 | select(everything(), county = NAMELSADCO, -c(STATEFP:STUSPS, STATE_NAME:AWATER)) %>%
231 | mutate(county = str_replace(county, " County", " Co."),
232 | renter_households = ifelse(is.na(renter_households), 0, renter_households),
233 | housing_cost_burdened = ifelse(is.na(housing_cost_burdened), 0, housing_cost_burdened),
234 | cost_burdened_renters_35_percent = ifelse(is.na(cost_burdened_renters_35_percent), 0, cost_burdened_renters_35_percent),
235 | cost_burdened_owners_with_mortgage_35_percent = ifelse(is.na(cost_burdened_owners_with_mortgage_35_percent), 0, cost_burdened_owners_with_mortgage_35_percent),
236 | adjusted_household_income = median_household_income / average_household_size ^ 0.5) %>%
237 | select(GEOID, people_of_color, black_native_american, adults_without_4_yr_degree,
238 | adjusted_household_income, housing_cost_burdened, households_with_limited_english,
239 | persons_with_disabilities, commuters, unemployed_persons, insufficient_commuter_vehicles,
240 | households_without_a_vehicle, no_computer_access, no_broadband_access,
241 | students_k_12, youth_0_21, retirees_65, median_household_income, per_capita_income,
242 | people_below_1_0_poverty, people_below_2_0_poverty, food_stamps_recipients,
243 | renter_households, cost_burdened_renters_35_percent,
244 | cost_burdened_owners_with_mortgage_35_percent,
245 | overcrowded_households, average_household_size, youth_and_retirees,
246 | county, in_primary_place, geometry)
247 |
248 |
249 | ## Export data #####
250 | saveRDS(wide_data, "1_data/3_processed/acs_vuln_weighting_data.rds")
251 | saveRDS(all_data, "1_data/3_processed/acs_vuln_weighting_data_full.rds")
252 |
253 | # wide_data <- readRDS("1_data/3_processed/acs_vuln_weighting_data.rds")
254 | # all_data <- readRDS("1_data/3_processed/acs_vuln_weighting_data_full.rds")
255 |
256 | # ## Optionally view map
257 | # library(mapview)
258 | # mapview::mapview(wide_data, zcol = "in_primary_place") +
259 | # mapview::mapview(PRIMARY_PLACE)
260 |
261 | # wide_data %>%
262 | # st_transform(WEB_EPSG) %>%
263 | # mapview(zcol = "in_primary_place")
264 |
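265 | ## The header comment notes that GLOBAL.R establishes the geographic constants this
266 | ## script depends on (acs_year, STATES_TO_DOWNLOAD, COUNTIES_TO_DOWNLOAD, TRACTS.SF,
267 | ## PRIMARY_PLACE, etc.). GLOBAL.R is not reproduced in this excerpt; the commented
268 | ## sketch below shows roughly what the non-spatial constants could look like. The
269 | ## year and the 38900 CBSA code (Portland-Vancouver-Hillsboro, OR-WA) are
270 | ## illustrative assumptions -- substitute your own region as described in the header.
271 | # source("4_scripts/1_data-processing/01_load_spatial_resources.R")  # builds county2msa
272 | # source("3_functions/get_counties_from_cbsa.R")
273 | # source("3_functions/get_states_from_stcnty_fips.R")
274 | # acs_year <- 2020
275 | # COUNTIES_TO_DOWNLOAD <- get_counties_from_cbsa(38900)
276 | # STATES_TO_DOWNLOAD <- get_states_from_stcnty_fips(COUNTIES_TO_DOWNLOAD)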
--------------------------------------------------------------------------------
/4_scripts/1_data-processing/02b2_TEST_split_data_by_weights.R:
--------------------------------------------------------------------------------
1 | ## Determine weighting methods -------------------------------------------------
2 | ## Variables that are population-based should be weighted by population
3 | ## Variables that are unit-based should be weighted by households
4 | ## Variables that are aggregate may need to be re-weighted???
5 | # c("B25106", "B03002", "B19013", "B25010", "B15002")
6 |
7 | ## Population-based tables:
8 | # B03002
9 | # B15002
10 |
11 | ## Unit-based tables:
12 | # B25010
13 | # B25106
14 | # B19013
15 |
16 | ## Extensive
17 | # B03002
18 | # B15002
19 | # B25106
20 |
21 | ## Intensive (extensive == FALSE)
22 | # B25010
23 | # B19013
24 |
25 | prep_interp <- acs_table_query_longitudinal %>%
26 | separate(., variable, into = c("table", "varnum"),
27 | sep = "_", remove = F) %>%
28 | mutate(varnum = as.numeric(varnum),
29 | extensive = ifelse(table %in% c("B03002", "B15002", "B25106"), TRUE, FALSE),
30 | weight_column = ifelse(table %in% c("B03002", "B15002"), "POP20", "HOUSING20"),
31 | group = paste0(extensive, weight_column)) %>%
32 | left_join(., TRACTS_2010t2020.SF, by = c("GEOID", "year")) %>%
33 | st_as_sf()
34 |
35 | # prep_interpL <- split(prep_interp, prep_interp$group) %>%
36 | # enframe() %>% select(-name) #%>%
37 | # mutate(ext = c(FALSE, TRUE, TRUE),
38 | # wgt = c("HOUSING20", "HOUSING20", "POP20")) %>%
39 | # select(-name) %>%
40 | # mutate(n = pmap(list(value, ext, wgt),
41 | # .f = interpolate_pw(
42 | # from = value %>% as.data.frame() %>% filter(year == 2015) %>% select(estimate),
43 | # to = value %>% as.data.frame() %>% filter(year == 2020),
44 | # to_id = "GEOID",
45 | # weights = STATE_BLOCKS.2020,
46 | # weight_column = wgt,
47 | # crs = TARGET_EPSG,
48 | # extensive = ext
49 | # )))
50 |
51 | # prep_interpL %>%
52 | # as.list() %>%
53 | # pmap_dfr(.f = function(data, ext, wgt){
54 | # df <- data$value
55 | # interpolate_pw(
56 | # from = df %>% filter(year == 2015) %>% select(estimate),
57 | # to = df %>% filter(year == 2020),
58 | # to_id = "GEOID",
59 | # weights = STATE_BLOCKS.2020,
60 | # weight_column = wgt,
61 | # crs = TARGET_EPSG,
62 | # extensive = ext
63 | # )
64 | # })
65 |
66 | ext <- c(FALSE, TRUE, TRUE)
67 | wgt <- c("HOUSING20", "HOUSING20", "POP20")
68 |
69 | tmp <- split(prep_interp, prep_interp$group)[[1]] %>%
70 | split(.$variable)
71 |
72 | test <- tmp %>%
73 | future_map(.f = function(.x){
74 | wgt <- .x$weight_column[1]
75 | ext <- .x$extensive[1]
76 |
77 | interpolate_pw(
78 | from = .x %>% filter(year == 2015) %>% select(estimate),
79 | to = .x %>% filter(year == 2020),
80 | to_id = "GEOID",
81 | weights = STATE_BLOCKS.2020,
82 | weight_column = wgt,
83 | crs = TARGET_EPSG,
84 | extensive = ext)
85 |
86 | })
87 |
88 | tmp$B19013_001$weight_column[1]
89 |
90 | crosswalk()
91 |
92 | tmp <- pmap(
93 | .l = list(split(prep_interp, prep_interp$group), ext, wgt),
94 | .f = function(df, ext, wgt){
95 |
96 | ## Split each group by variable, then interpolate each variable separately
97 | df %>%
98 | split(.$variable) %>%
99 | map(~ interpolate_pw(
100 | from = .x %>% filter(year == 2015) %>% select(estimate),
101 | to = .x %>% filter(year == 2020),
102 | to_id = "GEOID",
103 | weights = STATE_BLOCKS.2020,
104 | weight_column = wgt,
105 | crs = TARGET_EPSG,
106 | extensive = ext))
107 |
108 | }
109 | )
110 |
111 |
112 | acs_table_query_longitudinal %>%
113 | separate(., variable, into = c("table", "varnum"),
114 | sep = "_", remove = F) %>%
115 | mutate(varnum = as.numeric(varnum))
116 |
117 |
118 |
119 | # left_join(., TRACTS.2019.SF, by = "GEOID") %>%
120 | # st_as_sf() %>%
121 | # filter(agg == "count", year == 2015) %>%
122 | # split(.$variable) %>%
123 | # map(~select(.x, estimate))
124 |
125 |
126 | hhs <- acs_table_query_longitudinal %>%
127 | group_by(GEOID, year) %>%
128 | mutate(hh = estimate[variable == "B25106_001"],
129 | hhsize = estimate[variable == "B25010_001"],
130 | hhswgt = hhsize * hh) %>%
131 | distinct(GEOID, year, hh, hhsize, hhswgt) %>% ungroup()
132 |
133 |
134 | hhs <- acs_table_query_longitudinal %>% filter(variable == "B25010_001") %>%
135 | left_join(., TRACTS_2010t2020.SF, by = c("GEOID", "year")) %>%
136 | st_as_sf()
137 |
138 | new_hhs <- interpolate_pw(
139 | from = hhs %>% filter(year == 2015) %>% select(estimate),
140 | to = hhs %>% filter(year == 2020),
141 | to_id = "GEOID",
142 | weights = STATE_BLOCKS.2020,
143 | weight_column = "HOUSING20",
144 | crs = TARGET_EPSG,
145 | extensive = FALSE
146 | )
147 |
148 | hhs %>% filter(year == 2020) %>% mapview(zcol = "estimate", layer.name = "2020 estimate") +
149 | new_hhs %>% mapview(zcol = "estimate", layer.name = "2015 allocation") +
150 | acs_table_query_longitudinal %>%
151 | filter(variable == "B25010_001", year == 2015) %>%
152 | left_join(., TRACTS_2010t2020.SF, by = c("GEOID", "year")) %>%
153 | st_as_sf() %>% mapview(zcol = "estimate", layer.name = "2015 actual")
154 |
155 |
--------------------------------------------------------------------------------
/4_scripts/1_data-processing/02b_process_longitudinal_acs_data.R:
--------------------------------------------------------------------------------
1 | ## Load libraries, set options, source scripts ---------------------------------
2 | library(tidyverse)
3 | library(tidycensus)
4 | library(sf)
5 | library(tigris)
6 | library(furrr)
7 |
8 | options(
9 | scipen = 999,
10 | digits = 4,
11 | tigris_class = "sf",
12 | tigris_use_cache = T
13 | )
14 |
15 | ## Source script that establishes geographic constants for this application
16 | source("GLOBAL.R")
17 | source("GLOBAL_interpolation_addendum.R") # Addendum to add additional spatial resources
18 | source("3_functions/aggregate_variables.R")
19 |
20 | ## Load CPI inflation adjustments (CPI-U-RS)
21 | cpi <- rio::import("1_data/0_resources/cpi-u-rs_1950-current.xlsx") %>%
22 | select(year, inflation_factor = inflation_factor_2020)
23 |
24 | ## Download data using multi-threading -----------------------------------------
25 |
26 | ## Enable multi-core processing
27 | plan(multisession, workers = availableCores())
28 | set.seed(123)
29 |
30 | core_tables <- c("B25106", "B03002", "B19013", "B25010", "B15002")
31 |
32 | ## Grab data for selected years and selected tables
33 | acs_table_query_longitudinal <- future_map_dfr(
34 | .x = c(2010, 2015, 2020),
35 | .f = function(acs_year){
36 | map_dfr(
37 | .x = core_tables,
38 | .f = function(acs_table){
39 | query <- get_acs(geography = "tract",
40 | table = acs_table,
41 | year = acs_year,
42 | state = STATES_TO_DOWNLOAD) %>%
43 | filter(substr(GEOID, 1, 5) %in% COUNTIES_TO_DOWNLOAD) %>%
44 | mutate(agg = "count",
45 | year = acs_year) %>%
46 | select(-NAME)
47 | })
48 | })
49 |
50 | ## Determine weighting methods -------------------------------------------------
51 |
52 | ## Variables that are population-based should be weighted by population
53 | ## Variables that are unit-based should be weighted by households
54 | ## Variables that are aggregate may need to be re-weighted???
55 | # c("B25106", "B03002", "B19013", "B25010", "B15002")
56 |
57 | ## Population-based tables:
58 | # B03002
59 | # B15002
60 |
61 | ## Unit-based tables:
62 | # B25010
63 | # B25106
64 | # B19013
65 |
66 | ## Extensive
67 | # B03002
68 | # B15002
69 | # B25106
70 |
71 | ## Intensive (extensive == FALSE)
72 | # B25010
73 | # B19013
74 |
75 | ## Interpolate results ---------------------------------------------------------
76 | ## In this section, we interpolate data from 2010 and 2015 to 2020 census tracts.
77 | ## The weighting variable (population vs. housing units) and the extensive/intensive
78 | ## setting differ by table (counts vs. aggregates such as medians), so we must pair
79 | ## each set of tables with the right weighting column. We do this by carefully
80 | ## constructing the list arguments passed to pmap() below.
81 |
82 | ## First we transform our long data to wide data and append spatial data
83 | wide_query <- acs_table_query_longitudinal %>%
84 | pivot_wider(id_cols = c(GEOID, year), names_from = variable, values_from = estimate) %>%
85 | left_join(., TRACTS_2010t2020.SF, by = c("GEOID", "year")) %>%
86 | st_as_sf()
87 |
88 | ## Set up list arguments for pmap() function
89 | years <- c(rep(2015, 3),
90 | rep(2010, 3))
91 |
92 | tables <- list(c("B25106"), c("B25010", "B19013"), c("B03002", "B15002"),
93 | c("B25106"), c("B25010", "B19013"), c("B03002", "B15002"))
94 |
95 | weightcols <- c("HOUSING20", "HOUSING20", "POP20",
96 | "HOUSING20", "HOUSING20", "POP20")
97 |
98 | extensives <- c(TRUE, FALSE, TRUE,
99 | TRUE, FALSE, TRUE)
100 |
101 | ## Generate list of filtered data based on the years and tables
102 | dfs <- pmap(.l = list(years, tables),
103 | .f = function(.year, .table){
104 | # Expand .table in case of multiple tables
105 | .table <- paste(.table)
106 | wide_query %>%
107 | filter(year == .year) %>%
108 | select(GEOID, starts_with(c({{.table}})))
109 | })
110 |
111 | ## Interpolate all of the data
112 | interpolated <- future_pmap(
113 | .l = list(dfs, years, weightcols, extensives),
114 | .f = function(.dat, .year, .weightcol, .ext){
115 | interpolate_pw(from = .dat,
116 | to = wide_query %>%
117 | filter(year == 2020) %>%
118 | select(GEOID),
119 | to_id = "GEOID",
120 | weights = STATE_BLOCKS.2020,
121 | weight_column = .weightcol,
122 | crs = TARGET_EPSG,
123 | extensive = .ext) %>%
124 | mutate(year = .year) %>%
125 | relocate(year, .after = GEOID) %>%
126 | st_drop_geometry()
127 | },
128 | .options = furrr_options(seed = TRUE))
129 |
130 | ## Bind the list together, cast to long format, and standardize the format
131 | ## TODO there has to be a more elegant way to do this step...
132 | interpolated.df <- rbind(
133 | bind_cols(interpolated[1:3], .name_repair = "unique") %>%
134 | select(-c(GEOID...49, GEOID...55, year...50, year...56)) %>%
135 | rename(GEOID = GEOID...1, year = year...2),
136 | bind_cols(interpolated[4:6], .name_repair = "unique") %>%
137 | select(-c(GEOID...49, GEOID...55, year...50, year...56)) %>%
138 | rename(GEOID = GEOID...1, year = year...2)
139 | ) %>%
140 | pivot_longer(cols = -c(GEOID, year),
141 | names_to = "variable",
142 | values_to = "estimate") %>%
143 | mutate(moe = 0,
144 | agg = "count") %>%
145 | select(names(acs_table_query_longitudinal))
146 |
147 | ## Optionally add spatial data and generate a quick map
148 | # interpolated.sf <- interpolated.df %>%
149 | # left_join(., TRACTS.SF, by = "GEOID") %>%
150 | # st_as_sf()
151 | #
152 | # interpolated.sf %>% filter(year == 2015, variable == "B25106_001") %>% mapview(zcol = "estimate")
153 |
154 | ## Aggregate variables ---------------------------------------------------------
155 | ## With the data for 2010 and 2015 interpolated to 2020 census tracts, we can
156 | ## perform aggregations on the data after binding it to 2020 datasets
157 |
158 | ## Create input dataset for the aggregations by rbinding the interpolated dataset to 2020
159 | acs_all_years <- rbind(filter(acs_table_query_longitudinal, year == 2020), interpolated.df)
160 |
161 | hcb <- acs_all_years %>%
162 | split(.$year) %>%
163 | map_dfr(.id = "year",
164 | .f = ~aggregate_variables(
165 | long_acs_data = select(.x, -year),
166 | agg_table = "B25106", varnums = c(6,10,14,18,22,28,32,36,40,44),
167 | summary_var = "B25106_001", varname = "Housing cost burdened")
168 | )
169 |
170 | poc <- acs_all_years %>%
171 | split(.$year) %>%
172 | map_dfr(.id = "year",
173 | .f = ~aggregate_variables(
174 | long_acs_data = select(.x, -year),
175 | agg_table = "B03002", varnums = c(4:9,12),
176 | summary_var = "B03002_001", varname = "People of color")
177 | )
178 |
179 | bna <- acs_all_years %>%
180 | split(.$year) %>%
181 | map_dfr(.id = "year",
182 | .f = ~aggregate_variables(
183 | long_acs_data = select(.x, -year),
184 | agg_table = "B03002", varnums = c(4,5,14,15),
185 | summary_var = "B03002_001", varname = "Black + Native American")
186 | )
187 |
188 | no4yr <- acs_all_years %>%
189 | split(.$year) %>%
190 | map_dfr(.id = "year",
191 | .f = ~aggregate_variables(
192 | long_acs_data = select(.x, -year),
193 | agg_table = "B15002", varnums = c(3:14, 20:31),
194 | summary_var = "B15002_001", varname = "Adults without 4-yr degree")
195 | )
196 |
197 | # Special case -- change it for formatting and adjust for inflation
198 | mhi <- acs_all_years %>%
199 | filter(variable == "B19013_001") %>%
200 | mutate(agg = "median", variable = "Median household income",
201 | table = "B19013", varnum = 1) %>%
202 | left_join(., cpi, by = "year") %>%
203 | mutate(across(c(estimate, moe), ~ . * inflation_factor)) %>%
204 | select(-inflation_factor)
205 |
206 | # Special case -- just change it for formatting
207 | hhsize <- acs_all_years %>%
208 | filter(variable == "B25010_001") %>%
209 | mutate(agg = "aggregate", variable = "Average household size",
210 | table = "B25010", varnum = 1)
211 |
212 | ## Combine all the aggregated data together
213 | acs_data_aggregated <- rbind(hcb, poc, bna, no4yr, mhi, hhsize)
214 | saveRDS(acs_data_aggregated, "1_data/2_interim/acs_data_aggregated_interpolated.rds")
215 |
216 |
217 | ## Cast and reformat data ------------------------------------------------------
218 |
219 | ## Wide data to be used in the shiny app
220 | wide_data <- acs_data_aggregated %>%
221 | filter(agg %in% c("percent", "median", "aggregate")) %>%
222 | select(GEOID, year, agg, variable, E = estimate) %>%
223 | pivot_wider(id_cols = c(GEOID, year), names_from = variable, values_from = c(E)) %>%
224 | left_join(., TRACTS.SF, by = "GEOID") %>%
225 | janitor::clean_names() %>% rename(GEOID = geoid) %>%
226 | select(everything(), county = namelsadco, -c(statefp:stusps, state_name:awater)) %>%
227 | mutate(county = str_replace(county, " County", " Co."),
228 | housing_cost_burdened = ifelse(is.na(housing_cost_burdened), 0, housing_cost_burdened),
229 | adjusted_household_income = median_household_income / average_household_size ^ 0.5) %>%
230 | select(GEOID, year, people_of_color, black_native_american, adults_without_4_yr_degree,
231 | housing_cost_burdened, median_household_income, adjusted_household_income,
232 | average_household_size, county, in_primary_place, geometry) %>%
233 | st_as_sf()
234 |
235 | saveRDS(wide_data, "1_data/3_processed/acs_vuln_weighting_data_interpolated.rds")
236 |
237 |
238 |
239 | # saveRDS(changes, "1_data/3_processed/2010t2020_change_summary")
240 |
241 |
242 | changes %>%
243 | filter(
244 | agg == "median",
245 | variable == "Adjusted household income",
246 | in_primary_place == TRUE) %>%
247 | mapview(zcol = "pct_change_10t20")
248 |
249 |
250 |
251 |
252 |
253 |
254 |
255 |
256 | ## ARCHIVE -- OLD CODE ---------------------------------------------------------
257 |
258 | ## For the app, transform the long data to wide format and export
259 | # wide_data <- acs_allocation_to_2020_tracts %>%
260 | # st_drop_geometry() %>%
261 | # filter(agg %in% c("percent", "median", "aggregate")) %>%
262 | # select(GEOID, year, agg, variable, E = estimate) %>% #, M = moe) %>%
263 | # pivot_wider(id_cols = c(GEOID, year), names_from = variable, values_from = c(E)) %>%
264 | # janitor::clean_names() %>% rename(GEOID = geoid) #%>%
265 | # select(everything(), county = NAMELSADCO, -c(STATEFP:STUSPS, STATE_NAME:AWATER)) %>%
266 | # mutate(county = str_replace(county, " County", " Co."),
267 | # housing_cost_burdened = ifelse(is.na(housing_cost_burdened), 0, housing_cost_burdened),
268 | # adjusted_household_income = median_household_income / average_household_size ^ 0.5) %>%
269 | # select(GEOID, people_of_color, black_native_american, adults_without_4_yr_degree,
270 | # adjusted_household_income, housing_cost_burdened, average_household_size,
271 | # county, in_primary_place, geometry)
272 |
273 | ## alternate code july 18 807am
274 |
275 | # tmp <- acs_data_aggregated %>%
276 | # filter(agg == "count") %>%
277 | # pivot_wider(id_cols = c(GEOID, year),
278 | # names_from = variable,
279 | # values_from = c(estimate)) %>%
280 | # janitor::clean_names() %>% rename(GEOID = geoid) %>%
281 | # mutate(year = as.numeric(year)) %>%
282 | # left_join(., TRACTS_2010t2020.SF, by = c("GEOID", "year")) %>%
283 | # st_as_sf()
284 |
285 | ## This appears to work. Try to transform all of my variables into wide format
286 | ## and run the code again. by "all variables" I mean the raw acs query
287 | # tmp2 <- interpolate_pw(
288 | # from = tmp %>% filter(year == 2015) %>% select(-year),
289 | # to = tmp %>% filter(year == 2020),
290 | # to_id = "GEOID",
291 | # weights = STATE_BLOCKS.2020,
292 | # weight_column = "HOUSING20",
293 | # crs = TARGET_EPSG,
294 | # extensive = TRUE)
295 |
296 |
297 |
298 | ## Determine weighting methods -------------------------------------------------
299 | ## Variables that are population-based should be weighted by population
300 | ## Variables that are unit-based should be weighted by households
301 | ## Variables that are aggregate may need to be re-weighted???
302 | # c("B25106", "B03002", "B19013", "B25010", "B15002")
303 |
304 | ## Population-based tables:
305 | # B03002
306 | # B15002
307 |
308 | ## Unit-based tables:
309 | # B25010
310 | # B25106
311 | # B19013
312 |
313 | ## Extensive
314 | # B03002
315 | # B15002
316 | # B25106
317 |
318 | ## Intensive (extensive == FALSE)
319 | # B25010
320 | # B19013
321 |
322 | ## Interpolate results ---------------------------------------------------------
323 |
324 | ## Prep data for pw_interpolate by specifying extensive and weight columns based
325 | ## off the kinds of variables
326 | pw_acs_prep <- acs_data_aggregated %>%
327 | mutate(extensive = ifelse(table %in% c("B03002", "B15002", "B25106"), TRUE, FALSE),
328 | extensive = ifelse(agg == "percent", FALSE, extensive),
329 | weight_column = ifelse(table %in% c("B03002", "B15002"), "POP20", "HOUSING20"),
330 | group = paste(extensive, weight_column, variable, sep = "_"),
331 | year = as.numeric(year)) %>%
332 | left_join(., TRACTS_2010t2020.SF, by = c("GEOID", "year")) %>%
333 | st_as_sf()
334 |
335 |
336 | ## Call names to determine the order of the extensive/weights in order to feed
337 | ## it into the pmap() function below.
338 | # acs_groups <- split(pw_acs_prep, pw_acs_prep$group)
339 | # names(acs_groups)
340 | #
341 | # extensives <- c(
342 | # rep(FALSE, 6),
343 | # rep(TRUE, 4))
344 | #
345 | # weights <- c(
346 | # rep("HOUSING20", 3),
347 | # rep("POP20", 3),
348 | # "HOUSING20",
349 | # rep("POP20", 3))
350 | #
351 | # years <- c(rep(2015,10), rep(2010,10))
352 | #
353 | # ## Apply population-weighted interpolation across the grouped ACS data using a
354 | # ## pmap function. Be sure to append the aggregation type (pct vs count, etc.)
355 | # acs.pw <- future_pmap(.l = list(rep(acs_groups,2),
356 | # rep(extensives,2),
357 | # rep(weights,2),
358 | # years),
359 | # .f = function(.x, ext, wgt, allocation_year){
360 | # interpolate_pw(
361 | # from = .x %>% filter(year == allocation_year) %>% select(estimate),
362 | # to = .x %>% filter(year == 2020),
363 | # to_id = "GEOID",
364 | # weights = STATE_BLOCKS.2020,
365 | # weight_column = wgt,
366 | # crs = TARGET_EPSG,
367 | # extensive = ext) %>%
368 | # mutate(agg = .x$agg[[1]],
369 | # year = allocation_year)
370 | # },
371 | # .options = furrr_options(seed = TRUE))
372 | #
373 | # ## Clean up interpolated data for 2015 data allocated to 2020 tracts
374 | # acs_allocation_to_2020_tracts <- acs.pw %>%
375 | # enframe() %>%
376 | # unnest(cols = c(value)) %>%
377 | # separate(name, into = c("extensive", "weight", "variable"), sep = "_") %>%
378 | # relocate(GEOID, year, variable, agg, estimate, .before = extensive) %>%
379 | # st_as_sf()
380 | #
381 | # ## Quick map of the data
382 | # acs_allocation_to_2020_tracts %>%
383 | # filter(year == 2010,
384 | # variable == "Median household income") %>%
385 | # mapview(zcol = "estimate")
386 | #
387 | # ## Subset 2020 and match schema to new allocation data
388 | # acs2020_selection <- acs_data_aggregated %>%
389 | # filter(year == 2020) %>%
390 | # mutate(extensive = NA_character_, weight = NA_character_, year = as.numeric(year)) %>%
391 | # left_join(., TRACTS_2010t2020.SF, by = c("GEOID", "year")) %>%
392 | # st_as_sf() %>%
393 | # select(names(acs_allocation_to_2020_tracts))
394 | #
395 | # ## Bind 2020 data to allocation data and calculate change between years
396 | # acs_allocation_to_2020_tracts %>%
397 | # rbind(acs2020_selection, .) %>%
398 | # group_by(GEOID, agg, variable) %>%
399 | # mutate(change_10t20 = estimate[year == 2020] - estimate[year == 2010],
400 | # change_15t20 = estimate[year == 2020] - estimate[year == 2015]) %>%
401 | # ungroup() %>%
402 | # filter(year == 2020,
403 | # variable == "Housing cost burdened",
404 | # agg == "percent") %>%
405 | # mapview(zcol = "change_15t20")
406 | #
407 | # ## Transform data --------------------------------------------------------------
408 | # ## For the app, transform the long data to wide format and export
409 | # wide_data <- acs_allocation_to_2020_tracts %>%
410 | # st_drop_geometry() %>%
411 | # filter(agg %in% c("percent", "median", "aggregate")) %>%
412 | # select(GEOID, year, agg, variable, E = estimate) %>% #, M = moe) %>%
413 | # pivot_wider(id_cols = c(GEOID, year), names_from = variable, values_from = c(E)) %>%
414 | # janitor::clean_names() %>% rename(GEOID = geoid) #%>%
415 | #
416 | #
417 |
418 |
419 |
420 |
421 |
422 |
--------------------------------------------------------------------------------
/4_scripts/2_analysis/01_generate_vulnerability_data.R:
--------------------------------------------------------------------------------
1 | # Must first run 5_apps/weighting_tool/00_prep_app.R to generate imputed data
2 | source("GLOBAL.R")
3 | range01 <- function(x, ...){(x - min(x, ...)) / (max(x, ...) - min(x, ...))}
4 |
5 | imputed_data <- readRDS("5_apps/weighting_tool/acs_vuln_weighting_data.rds")
6 |
7 | ## Create working dataset of variables we wish to create yes/no flags for
8 | vars_to_flag <- imputed_data %>%
9 | st_drop_geometry() %>%
10 | select(GEOID, in_primary_place, people_of_color, black_native_american,
11 | adults_without_4_yr_degree, adjusted_household_income, housing_cost_burdened,
12 | households_with_limited_english, persons_with_disabilities,
13 | retirees_65, youth_0_21)
14 |
15 | ## Generate the yes/no flags based on percentile rank of .6 or more
16 | flagged_tracts <- vars_to_flag %>%
17 | pivot_longer(-c(GEOID, in_primary_place),
18 | names_to = "variable", values_to = "estimate") %>%
19 | filter(in_primary_place) %>%
20 | group_by(variable) %>%
21 | mutate(higher_proportion = ifelse(cume_dist(estimate) >= 0.6, TRUE, FALSE)) %>%
22 | ungroup() %>%
23 | select(GEOID, variable, higher_proportion) %>%
24 | pivot_wider(names_from = c(variable), values_from = c(higher_proportion),
25 | names_prefix = "hi_")
26 |
27 | ## Separate task: generate hatch overlays for select variables and save individually as RDS files
28 | hatch_vars <- c("people_of_color", "households_with_limited_english", "persons_with_disabilities", "retirees_65", "youth_0_21")
29 |
30 | map(hatch_vars, function(hatch_var){
31 | imputed_data %>%
32 | filter(in_primary_place) %>%
33 | mutate(flag_hatch = ifelse(cume_dist(.data[[hatch_var]]) >= 0.6, TRUE, FALSE)) %>%
34 | filter(flag_hatch) %>%
35 | HatchedPolygons::hatched.SpatialPolygons(., density = 400, angle = 45) %>% ## spatial projection matters for density argument
36 | mutate(col = 1,
37 | hatch_var = hatch_var) %>%
38 | saveRDS(paste0("5_apps/weighting_tool/hatch_", hatch_var, ".rds"))
39 | })
40 |
41 | ## Generate a vulnerability score based off the desired methodology
42 | vulnerability_results <- imputed_data %>%
43 | filter(in_primary_place) %>%
44 | mutate(across(.cols = c(people_of_color:youth_and_retirees),
45 | .fns = list(rnk = ~cume_dist(.),
46 | z = ~scale(.))),
47 | across(ends_with("_z"), ~ case_when(. > 3 ~ 3,
48 | . < -3 ~ -3,
49 | T ~ .)),
50 | adjusted_household_income_rnk = 1 - adjusted_household_income_rnk,
51 | adjusted_household_income_z = -adjusted_household_income_z,
52 |
53 | composite_score = people_of_color_rnk + black_native_american_rnk +
54 | adjusted_household_income_rnk + housing_cost_burdened_rnk +
55 | adults_without_4_yr_degree_rnk,
56 |
57 | indexed_score = round(range01(composite_score, na.rm = T) * 100, 0),
58 | flag_vulnerable = ifelse(indexed_score >= 60, TRUE, FALSE))
59 |
60 | ## Save the vulnerability results as a hatch overlay RDS file
61 | vulnerability_results %>%
62 | filter(flag_vulnerable) %>%
63 | HatchedPolygons::hatched.SpatialPolygons(., density = 400, angle = 135) %>% ## spatial projection matters for density argument
64 | mutate(col = 1,
65 | hatch_var = "vulnerable") %>%
66 | saveRDS("5_apps/weighting_tool/hatch_vulnerable.rds")
67 |
68 | ## Create new vulnerability data based on methodology and include the yes/no overlays
69 | ## generated in the first step above. Save as GEOJSON and as an XLSX file
70 | vulnerability_data <- vulnerability_results %>%
71 | select(GEOID, vulnerability_score = indexed_score, flag_vulnerable) %>%
72 | left_join(., select(vars_to_flag, -in_primary_place), by = "GEOID") %>%
73 | left_join(., flagged_tracts, by = "GEOID") %>%
74 | relocate(-geometry) %>%
75 | st_drop_geometry() %>%
76 | left_join(., TRACTS_HIRES.SF %>% select(GEOID)) %>%
77 | st_as_sf() %>% st_transform(TARGET_EPSG)
78 |
79 | st_write(vulnerability_data, dsn = "1_data/3_processed/2020_vulnerability_data_portland.geojson")
80 | vulnerability_data %>%
81 | st_drop_geometry() %>% rio::export("1_data/3_processed/2020_vulnerability_data_portland.xlsx")
82 |
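83 | ## To make the scoring arithmetic above concrete, here is a toy example with made-up
84 | ## composite scores (assumption: illustrative values only, not real tract data).
85 | ## cume_dist() gives each tract a percentile rank in (0, 1]; the five ranks are summed
86 | ## into composite_score; range01() rescales the sums so the least-vulnerable tract
87 | ## scores 0 and the most-vulnerable scores 100; tracts at or above 60 are flagged.
88 | # toy <- c(1.2, 2.0, 3.1, 4.4, 4.9)                 # hypothetical composite scores
89 | # round(range01(toy, na.rm = TRUE) * 100, 0)        # 0  22  51  86 100
90 | # round(range01(toy, na.rm = TRUE) * 100, 0) >= 60  # FALSE FALSE FALSE TRUE TRUE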
--------------------------------------------------------------------------------
/4_scripts/2_analysis/01b_generate_vulnerability_data_interpolated.R:
--------------------------------------------------------------------------------
1 | # Must first run 5_apps/weighting_tool/00b_prep_app_interp.R to generate imputed data
2 | library(RColorBrewer)
3 | library(leaflet.extras2)
4 | library(mapview)
5 | library(sf)
6 | library(tidyverse)
7 | library(cowplot)
8 | source("GLOBAL.R")
9 | source("GLOBAL_interpolation_addendum.R")
10 | range01 <- function(x, ...){(x - min(x, ...)) / (max(x, ...) - min(x, ...))}
11 |
12 | imputed_data <- readRDS("5_apps/weighting_tool/acs_vuln_weighting_data_imputed.rds")
13 |
14 | acs_data_aggregated <- readRDS("1_data/2_interim/acs_data_aggregated_interpolated.rds")
15 |
16 | ## Generate a vulnerability score based off the desired methodology
17 | vulnerability_results <- imputed_data %>%
18 | st_drop_geometry() %>%
19 | filter(in_primary_place) %>%
20 | group_by(year) %>%
21 | mutate(across(.cols = c(people_of_color:average_household_size),
22 | .fns = list(rnk = ~cume_dist(.),
23 | z = ~scale(.))),
24 | across(ends_with("_z"), ~ case_when(. > 3 ~ 3,
25 | . < -3 ~ -3,
26 | T ~ .)),
27 | adjusted_household_income_rnk = 1 - adjusted_household_income_rnk,
28 | adjusted_household_income_z = -adjusted_household_income_z,
29 |
30 | composite_score = people_of_color_rnk + black_native_american_rnk +
31 | adjusted_household_income_rnk + housing_cost_burdened_rnk +
32 | adults_without_4_yr_degree_rnk,
33 |
34 | indexed_score = round(range01(composite_score, na.rm = T) * 100, 0),
35 | flag_vulnerable = ifelse(indexed_score >= 60, TRUE, FALSE))
36 |
37 | vulnerability_results %>%
38 | select(GEOID, year, composite_score, people_of_color_rnk, black_native_american_rnk,
39 | adjusted_household_income_rnk, housing_cost_burdened_rnk, adults_without_4_yr_degree_rnk) %>%
40 | mutate(across(ends_with("rnk"),
41 | .fns = list(contrib = ~ . / composite_score))) %>%
42 | select(GEOID, year, ends_with("contrib")) %>%
43 | sample_n(6) %>%
44 | pivot_longer(-c(GEOID, year), names_to = "metric", values_to = "contribution") %>%
45 | arrange(GEOID, year, metric) %>%
46 | filter(year == 2010) %>% #, GEOID == "41051000101") %>%
47 | ggplot(aes(x="", y=contribution, fill=metric)) +
48 | geom_bar(stat="identity", width=1, color="white") +
49 | coord_polar("y", start=0) +
50 | theme_void() +
51 | facet_wrap(~GEOID)
52 |
53 |
54 | ## Calculate changes -----------------------------------------------------------
55 |
56 | ahi <- acs_data_aggregated %>%
57 | filter(agg %in% c("percent", "aggregate", "median")) %>%
58 | pivot_wider(id_cols = c(GEOID, year), names_from = variable, values_from = estimate) %>%
59 | janitor::clean_names() %>% rename(GEOID = geoid) %>%
60 | group_by(year) %>%
61 | simputation::impute_lm(median_household_income ~ people_of_color + adults_without_4_yr_degree) %>%
62 | simputation::impute_lm(average_household_size ~ people_of_color + adults_without_4_yr_degree) %>%
63 | mutate(adjusted_household_income = median_household_income / average_household_size ^ 0.5) %>%
64 | pivot_longer(cols = -c(GEOID, year), names_to = "variable", values_to = "E") %>%
65 | filter(variable == "adjusted_household_income") %>%
66 | mutate(variable = "Adjusted household income") %>%
67 | mutate(agg = "median")
68 |
69 |
70 | vulnerability_data <- vulnerability_results %>%
71 | ungroup() %>%
72 | mutate(agg = "aggregate",
73 | variable = "Vulnerability score") %>%
74 | select(GEOID, year, agg, variable, E = indexed_score) %>%
75 | pivot_wider(id_cols = c(GEOID, agg, variable),
76 | names_from = c(year),
77 | values_from = E) %>%
78 | rename(est_2010 = `2010`, est_2015 = `2015`, est_2020 = `2020`)
79 |
80 | vulnerability_data <- vulnerability_data %>%
81 | mutate(across(c(est_2010:est_2020), .fns = ~ifelse(. >= 60, TRUE, FALSE))) %>%
82 | mutate(agg = "flag") %>%
83 | rbind(., vulnerability_data)
84 |
85 |
86 | ## Calculate changes between years
87 | changes <- acs_data_aggregated %>%
88 | select(GEOID, year, variable, E = estimate, agg) %>%
89 | rbind(., ahi) %>%
90 | pivot_wider(id_cols = c(GEOID, agg, variable), names_from = c(year), values_from = E) %>%
91 | rename(est_2010 = `2010`, est_2015 = `2015`, est_2020 = `2020`) %>%
92 | rbind(., vulnerability_data) %>%
93 | mutate(abs_change_10t20 = est_2020 - est_2010,
94 | pct_change_10t20 = abs_change_10t20 / est_2010,
95 | abs_change_15t20 = est_2020 - est_2015,
96 | pct_change_15t20 = abs_change_15t20 / est_2015) %>%
97 | ungroup() %>%
98 | group_by(agg, variable) %>%
99 | mutate(across(c(est_2010:pct_change_15t20),
100 | .fns = list(z = ~(. - mean(., na.rm=TRUE)) / sd(., na.rm=TRUE)))) %>%
101 | ungroup() %>%
102 | left_join(., select(TRACTS_HIRES.SF, GEOID), by = "GEOID") %>%
103 | st_as_sf() %>% st_transform(TARGET_EPSG)
104 |
105 | saveRDS(changes, "1_data/2_interim/changes_2010t2020_vulnerability.rds")
106 |
107 | rio::export(st_drop_geometry(changes), "1_data/2_interim/changes_2010t2020_vulnerability.xlsx")
108 |
109 | changes %>%
110 | ungroup() %>%
111 | group_by(agg, variable) %>%
112 | mutate(across(c(est_2010:pct_change_15t20),
113 | .fns = list(z = ~base::scale(.))))
114 |
115 |
116 |
117 | ## Memo figures ----------------------------------------------------------------
118 |
119 | chg10t20 <- changes %>%
120 | filter(agg == "flag",
121 | variable == "Vulnerability score") %>%
122 | mutate(across(c(abs_change_10t20, abs_change_15t20),
123 | .fns = list(desc = ~ case_when(
124 | . == -1 ~ "No longer vulnerable",
125 | . == 0 ~ "No change",
126 | . == 1 ~ "Newly vulnerable"))),
127 | across(ends_with("_desc"),
128 | .fns = ~ factor(., levels = c(
129 | "No longer vulnerable", "No change", "Newly vulnerable")
130 | ))) %>% #View()
131 | mapview(zcol = "abs_change_10t20_desc", layer.name = "Change in vulnerability 2010 to 2020",
132 | map.types = "CartoDB.Positron",
133 |           col.region = RColorBrewer::brewer.pal(3, "PiYG")) ; chg10t20  # Other diverging palette options: BrBG PiYG PRGn PuOr RdBu RdGy RdYlBu RdYlGn Spectral
134 |
135 | mapshot(chg10t20, file = "1_data/3_processed/change_vulnerability_score_2010t2020.png",
136 | remove_controls = c("zoomControl", "layersControl", "homeButton","drawToolbar", "easyButton", "control"))
137 |
138 |
139 | changes %>%
140 | filter(agg == "aggregate",
141 | variable == "Vulnerability score") %>%
142 | mapview(zcol = "abs_change_10t20")#, layer.name = "Change in vulnerability 2015 to 2020",
143 | # col.region = RColorBrewer::brewer.pal(3, "PiYG")) #BrBG PiYG PRGn PuOr RdBu RdGy RdYlBu RdYlGn Spectral
144 |
145 |
146 | vscore10 <- changes %>%
147 | filter(agg == "aggregate",
148 | variable == "Vulnerability score") %>%
149 | mapview(zcol = "est_2010", col.region = viridis::plasma(6),
150 | alpha.region = 0.55, color = "white", lwd = 1.4,
151 | layer.name = "2010 Vulnerability Score",
152 | at = c(0, 20, 40, 60, 80, 100)) ; vscore10
153 |
154 | mapshot(vscore10, file = "1_data/3_processed/vulnerability_score_2010.png",
155 | remove_controls = c("zoomControl", "layersControl", "homeButton","drawToolbar", "easyButton", "control"))
156 |
157 | vscore15 <- changes %>%
158 | filter(agg == "aggregate",
159 | variable == "Vulnerability score") %>%
160 | mapview(zcol = "est_2015", col.region = viridis::plasma(6),
161 | alpha.region = 0.55, color = "white", lwd = 1.4,
162 | layer.name = "2015 Vulnerability Score",
163 | at = c(0, 20, 40, 60, 80, 100)) ; vscore15
164 |
165 | mapshot(vscore15, file = "1_data/3_processed/vulnerability_score_2015.png",
166 | remove_controls = c("zoomControl", "layersControl", "homeButton","drawToolbar", "easyButton", "control"))
167 |
168 | vscore20 <- changes %>%
169 | filter(agg == "aggregate",
170 | variable == "Vulnerability score") %>%
171 | mapview(zcol = "est_2020", col.region = viridis::plasma(6),
172 | alpha.region = 0.55, color = "white", lwd = 1.4,
173 | layer.name = "2020 Vulnerability Score",
174 | at = c(0, 20, 40, 60, 80, 100)) ; vscore20
175 |
176 | mapshot(vscore20, file = "1_data/3_processed/vulnerability_score_2020.png",
177 | remove_controls = c("zoomControl", "layersControl", "homeButton","drawToolbar", "easyButton", "control"))
178 |
179 |
180 | m3 <- changes %>%
181 | filter(agg == "count",
182 | GEOID %in% TRACTS_IN_PLACE,
183 | # abs_change_10t20 < 0,
184 | variable == "People of color") %>%
185 | mapview(zcol = "abs_change_10t20", layer.name = "Abs Change in # People of Color 2010-2020",
186 |           col.region = RColorBrewer::brewer.pal(8, "PuOr"), # PuOr supports at most 11 colors; 8 covers the breaks below
187 | at = c(-1000, -500, 0, 500, 1000, 1500, 2000, 2500))
188 |
189 |
190 | m4 <- changes %>%
191 | filter(agg == "count",
192 | GEOID %in% TRACTS_IN_PLACE,
193 | # abs_change_10t20 < 0,
194 | variable == "People of color") %>%
195 | mapview(zcol = "pct_change_10t20", layer.name = "Pct Change in # People of Color 2010-2020",
196 | col.region = RColorBrewer::brewer.pal(8, "PuOr"),
197 | at = c(-0.55, -0.2, 0, .2, .4, .6, 7)
198 | )
199 |
200 | m5 <- changes %>%
201 | filter(agg == "percent",
202 | GEOID %in% TRACTS_IN_PLACE,
203 | # abs_change_10t20 < 0,
204 | variable == "People of color") %>%
205 | mapview(zcol = "abs_change_10t20", layer.name = "Pct Point Change in % People of Color 2010-2020",
206 | col.region = RColorBrewer::brewer.pal(8, "PuOr"),
207 | at = c(-0.25, -0.12, 0, .12, .25, .33)
208 | )
209 |
210 | # Use pipe to do side-by-side map thanks to leaflet.extras2
211 | m3 | m4
212 |
213 | changes %>%
214 | filter(agg == "count",
215 | GEOID %in% TRACTS_IN_PLACE,
216 | # abs_change_10t20 < 0,
217 | variable == "Housing cost burdened") %>%
218 | mapview(zcol = "abs_change_10t20", layer.name = "Abs Change in # Cost-Burdened Households 2010-2020",
219 | col.region = RColorBrewer::brewer.pal(5, "BrBG"),
220 | at = c(-500, -250, 0, 250, 500)
221 | )
222 |
223 |
224 | changes %>%
225 | filter(agg == "percent",
226 | GEOID %in% TRACTS_IN_PLACE,
227 | # abs_change_10t20 < 0,
228 | variable == "Housing cost burdened") %>%
229 | mapview(zcol = "abs_change_10t20", layer.name = "Pct Change in # Cost-Burdened Households 2010-2020",
230 | col.region = RColorBrewer::brewer.pal(5, "BrBG"),
231 | at = c(-0.33, -0.15, 0, 0.1, 0.2)
232 | )
233 |
234 |
235 | changes %>%
236 | filter(agg == "median",
237 | GEOID %in% TRACTS_IN_PLACE,
238 | # abs_change_10t20 < 0,
239 | variable == "Adjusted household income") %>%
240 | mapview(zcol = "abs_change_10t20", layer.name = "Abs Change in Adjusted HH Income 2010-2020",
241 | col.region = RColorBrewer::brewer.pal(5, "RdYlGn"),
242 | at = c(-15000, -7000, 0, 7000, 15000, 30000, 46000)
243 | )
244 |
245 | changes %>%
246 | filter(agg == "median",
247 | GEOID %in% TRACTS_IN_PLACE,
248 | # abs_change_10t20 < 0,
249 | variable == "Median household income") %>%
250 | mapview(zcol = "pct_change_10t20", layer.name = "Pct Change in Adjusted HH Income 2010-2020",
251 | col.region = RColorBrewer::brewer.pal(10, "RdYlGn"),
252 | at = c(-0.36, -0.2, -0.1, 0, 0.15, 0.30, 0.45, 0.6, 1.7)
253 | )
254 |
255 |
256 | changes %>%
257 | filter(agg == "aggregate",
258 | GEOID %in% TRACTS_IN_PLACE,
259 | # abs_change_10t20 < 0,
260 | variable == "Average household size") %>%
261 | mapview(zcol = "pct_change_10t20", layer.name = "Pct Change in Avg HH Size 2010-2020",
262 | col.region = RColorBrewer::brewer.pal(10, "RdYlGn"),
263 | at = c(-Inf, -0.1, 0, 0.1, Inf)
264 | )
265 |
266 | changes %>%
267 | filter(agg == "aggregate",
268 | GEOID %in% TRACTS_IN_PLACE,
269 | # abs_change_10t20 < 0,
270 | variable == "Average household size") %>%
271 | mapview(zcol = "abs_change_10t20", layer.name = "Abs Change in Avg HH Size 2010-2020",
272 | col.region = RColorBrewer::brewer.pal(10, "RdYlGn"),
273 |           at = c(-Inf, -0.2, -0.1, 0, 0.1, 0.2, Inf)
274 | )
275 |
276 | changes %>%
277 | filter(agg == "aggregate",
278 | GEOID %in% TRACTS_IN_PLACE,
279 | # abs_change_10t20 < 0,
280 | variable == "Average household size") %>%
281 | mapview(zcol = "pct_change_10t20_z", layer.name = "Change in Avg HH Size 2010-2020 (Z-Score)",
282 | col.region = RColorBrewer::brewer.pal(7, "RdBu"),
283 | at = c(-Inf, -2, -1, 0, 1, 2, 3, Inf)
284 | )
285 |
286 | changes %>%
287 | filter(agg == "median",
288 | GEOID %in% TRACTS_IN_PLACE,
289 | # abs_change_10t20 < 0,
290 | variable == "Median household income") %>%
291 | mapview(zcol = "pct_change_10t20_z", layer.name = "Change in Adj HH Inc 2010-2020 (Z-Score)",
292 | col.region = RColorBrewer::brewer.pal(7, "RdBu"),
293 | at = c(-Inf, -2, -1, 0, 1, 2, 3, Inf)
294 | )
295 |
296 |
297 | get_decennial(geography = "block", table = "H11I", year = 2010, state = "OR", county = "Multnomah")
298 |
299 | library(cowplot) ## NOTE: generate_plot() is defined further below; run that definition before these example calls.
300 |
301 | generate_plot(changes, c("41051003302"))
302 |
303 | generate_plot(changes, c("41051003603"))
304 |
305 | generate_plot(changes, c("41051004200"))
306 |
307 | generate_plot(changes, c("41051003301"))
308 |
309 | generate_plot(changes, c("41051001702")) # 41051001702 # 41051002903
310 |
311 | generate_plot <- function(df, TRACTS_TO_GRAPH){
312 |
313 | df_abs <- df %>%
314 | filter(GEOID %in% TRACTS_TO_GRAPH,
315 | agg != "percent")
316 |
317 | df_pct <- df %>%
318 | filter(GEOID %in% TRACTS_TO_GRAPH,
319 | agg == "percent")
320 |
321 | p1 <- df_abs %>%
322 | filter(GEOID %in% TRACTS_TO_GRAPH,
323 | !(variable %in% c(
324 | "Adjusted household income", "Average household size", "Median household income"))
325 | ) %>%
326 | ggplot(aes(x = abs_change_10t20, y = variable)) +
327 | geom_col() + theme_minimal() + labs(x = "", y = "") +
328 | geom_vline(xintercept = 0) +
329 | scale_x_continuous(labels = scales::comma_format(accuracy = 1))
330 |
331 | p1b <- df_pct %>%
332 | filter(GEOID %in% TRACTS_TO_GRAPH,
333 | !(variable %in% c(
334 | "Adjusted household income", "Average household size", "Median household income"))
335 | ) %>%
336 | ggplot(aes(x = abs_change_10t20, y = variable)) +
337 | geom_col() + theme_minimal() + labs(x = "", y = "") +
338 | geom_vline(xintercept = 0) +
339 | scale_x_continuous(labels = scales::comma_format(accuracy = 1))
340 |
341 | p2 <- df_abs %>%
342 | filter(GEOID %in% TRACTS_TO_GRAPH,
343 | !(variable %in% c(
344 | "Adjusted household income", "Average household size", "Median household income"))
345 | ) %>%
346 | ggplot(aes(x = pct_change_10t20, y = variable)) +
347 | geom_col() + theme_minimal() + labs(x = "", y = "") +
348 | geom_vline(xintercept = 0) +
349 | scale_x_continuous(labels = scales::percent_format(accuracy = 1))
350 |
351 | p2b <- df_pct %>%
352 | filter(GEOID %in% TRACTS_TO_GRAPH,
353 | !(variable %in% c(
354 | "Adjusted household income", "Average household size", "Median household income"))
355 | ) %>%
356 | ggplot(aes(x = pct_change_10t20, y = variable)) +
357 | geom_col() + theme_minimal() + labs(x = "", y = "") +
358 | geom_vline(xintercept = 0) +
359 |     scale_x_continuous(labels = scales::percent_format(accuracy = 1))
360 |
361 | p3 <- df_abs %>%
362 | filter(GEOID %in% TRACTS_TO_GRAPH,
363 | variable %in% c( "Adjusted household income")) %>%
364 | ggplot(aes(x = abs_change_10t20, y = variable)) +
365 | geom_col() + theme_minimal() + labs(x = "", y = "") +
366 | geom_vline(xintercept = 0) +
367 | scale_x_continuous(labels = scales::dollar_format(accuracy = 1, scale = 1/1000, suffix = "K"))
368 |
369 | p3b <- df_pct %>%
370 | filter(GEOID %in% TRACTS_TO_GRAPH,
371 | variable %in% c( "Adjusted household income")) %>%
372 | ggplot(aes(x = abs_change_10t20, y = variable)) +
373 | geom_col() + theme_minimal() + labs(x = "", y = "") +
374 | geom_vline(xintercept = 0) +
375 | scale_x_continuous(labels = scales::dollar_format(accuracy = 1, scale = 1/1000, suffix = "K"))
376 |
377 | p4 <- df_abs %>%
378 | filter(GEOID %in% TRACTS_TO_GRAPH,
379 | variable %in% c("Adjusted household income")) %>%
380 | ggplot(aes(x = pct_change_10t20, y = variable)) +
381 | geom_col() + theme_minimal() + labs(x = "", y = "") +
382 | geom_vline(xintercept = 0) +
383 | scale_x_continuous(labels = scales::percent_format(accuracy = 1))
384 |
385 | plot_grid(p1, p2, p3, p4,
386 | labels = c('Absolute', 'Percentage', '', ''),
387 | label_size = 12,
388 | rel_heights = c(8,3))
389 | }
390 |
391 | generate_plot(changes, c("41051001702"))
392 |
393 | interstate_corridor <- c("41051003801", "41051003802", "41051003803", "41051003501",
394 | "41051003502", "41051003701", "41051003702", "41051003401",
395 | "41051003502", "41051003402", "41051002203", "41051002303")
396 |
397 | stjblocks <- STATE_BLOCKS.2020 %>%
398 | mutate(tractid = substr(GEOID20,1,11) ) %>%
399 | filter(tractid %in% c("41051004104", "41051004103", "41051004200", "41051004102"))
400 |
401 |
402 |
403 | # mapview(stjblocks, lwd = 0.8, alpha.region = 0.2, zcol = "tractid", layer.name = "Blocks by Tract") +
404 | mapview(st_point_on_surface(stjblocks), cex = 1.5, zcol = "tractid", lwd = 0,
405 | layer.name = "Block Centroids") +
406 |     mapview(filter(TRACTS_2010t2020.SF, year == 2010, GEOID %in% c("41051004101", "41051004102", "41051004103", "41051004104", "41051004200")),
407 | layer.name = "2010 Tracts", alpha.region = 0, col.region = "white", color = "black")
408 |
409 |
410 | # st_write(vulnerability_data, dsn = "1_data/3_processed/2020_vulnerability_data_portland.geojson")
411 | # vulnerability_data %>%
412 | # st_drop_geometry() %>% rio::export("1_data/3_processed/2020_vulnerability_data_portland.xlsx")
413 |
--------------------------------------------------------------------------------
/5_apps/change_explorer/app.R:
--------------------------------------------------------------------------------
1 | library(shiny)
2 | library(tidyverse)
3 | library(leaflet)
4 | library(DT)
5 | library(sf)
6 | df <- readRDS("changes_2010t2020_vulnerability.rds") %>%
7 | filter(variable == "People of color", agg == "percent") %>%
8 |   rmapshaper::ms_simplify(.) %>% st_transform(4326) # WGS84 for leaflet; WEB_EPSG is only defined in global.R
9 |
10 |
11 | ui <- fluidPage(
12 | leafletOutput("tracts", height = "550px")
13 | # fluidRow(
14 | # column(8, leafletOutput("tracts", height = "550px") ),
15 | # column(4,
16 | # span("Select a tract")#, span( style="color:green", "Origin"), span(" and "),
17 | # # span( style="color:red", "Destination"),
18 | # # span(" from the map:"),
19 | # # br(),br(),
20 | # # htmlOutput("od_info")%>% withSpinner(color="#0dc5c1"),
21 | # # hr(),
22 | # # htmlOutput("od_total")%>% withSpinner(color="#0dc5c1"),
23 | # # hr(),
24 | # # htmlOutput("od_total_5")%>% withSpinner(color="#0dc5c1")
25 | # ))
26 | # ),
27 | # br(),br(),
28 | # fluidRow(
29 | # column(9, div(DT::dataTableOutput("od_vol"), width = "100%", style = "font-size:100%"))
30 | # ),
31 | # fluidRow(
32 | # column(5, plotlyOutput("od_ton_chart", width = "100%", height = "350px")%>%
33 | # withSpinner(color="#0dc5c1")),
34 | # column(3, plotlyOutput("od_ton_pie", width = "100%", height = "250px")%>%
35 | # withSpinner(color="#0dc5c1")),
36 | # column(3, plotlyOutput("od_ton_pie_5", width = "100%", height = "250px")%>%
37 | # withSpinner(color="#0dc5c1"))
38 | # ),
39 | # hr(),
40 | # fluidRow(
41 | # column(5, plotlyOutput("od_value_chart", width = "100%", height = "350px")%>%
42 | # withSpinner(color="#0dc5c1")),
43 | # column(3, plotlyOutput("od_value_pie", width = "100%", height = "250px")%>%
44 | # withSpinner(color="#0dc5c1")),
45 | # column(3, plotlyOutput("od_value_pie_5", width = "100%", height = "250px")%>%
46 | # withSpinner(color="#0dc5c1"))
47 | # )
48 | )
49 |
50 | server <- function(input, output, session) {
51 |
52 | output$tracts <- renderLeaflet({
53 | leaflet() %>%
54 | addProviderTiles('CartoDB.Positron') %>%
55 | setView(lng = -122.65, lat = 45.52, zoom = 11) #%>%
56 | # addLegend("topright", pal = pal,
57 | # values = bins,
58 | # title = "Vulnerability Index",
59 | # opacity = 0.6) %>%
60 | #
61 |
62 | })
63 |
64 | observe({
65 | proxy <- leafletProxy("tracts", data = df)
66 |
67 | proxy %>% clearShapes() %>%
68 | addPolygons(
69 | fillColor = "gray",
70 | weight = 1,
71 | opacity = 1,
72 | color = "white",
73 | dashArray = "3",
74 | # popup = popup_context,
75 | fillOpacity = 0.5)
76 | })
77 |
78 | }
79 |
80 | shinyApp(ui, server)
--------------------------------------------------------------------------------
/5_apps/change_explorer/changes_2010t2020_vulnerability.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/5_apps/change_explorer/changes_2010t2020_vulnerability.rds
--------------------------------------------------------------------------------
/5_apps/weighting_tool/00_prep_app.R:
--------------------------------------------------------------------------------
1 | ## TODO Explore simputation package to impute missing values simply
2 | ## https://cran.r-project.org/web/packages/simputation/vignettes/intro.html
3 |
4 | library(naniar)
5 | library(simputation)
6 | # setwd("~/projects/repos/vulnerability_weighting_map/")
7 | source("global.R")
8 | range01 <- function(x, ...){(x - min(x, ...)) / (max(x, ...) - min(x, ...))}
9 |
10 | # file.copy("1_data/3_processed/acs_vuln_weighting_data.rds",
11 | # to = "5_apps/weighting_tool/",
12 | # overwrite = TRUE)
13 |
14 | wide_data <- readRDS("1_data/3_processed/acs_vuln_weighting_data.rds") %>%
15 | st_transform(WEB_EPSG) %>%
16 | rmapshaper::ms_simplify(., keep = 0.5)
17 |
18 | PRIMARY_PLACE %>%
19 | st_transform(WEB_EPSG) %>%
20 | rmapshaper::ms_simplify(., keep = 0.3) %>%
21 | saveRDS("5_apps/weighting_tool/primary_place_outline.rds")
22 |
23 | vis_miss(wide_data)
24 |
25 | imputed <- wide_data %>%
26 | st_drop_geometry() %>%
27 | impute_lm(median_household_income ~ people_of_color + adults_without_4_yr_degree) %>%
28 | impute_lm(average_household_size ~ students_k_12 + overcrowded_households) %>%
29 | mutate(adjusted_household_income = median_household_income / average_household_size ^ 0.5)
30 |
31 | final_data <- imputed %>%
32 | left_join(., wide_data %>% select(GEOID), by = "GEOID") %>%
33 | st_as_sf()
34 |
35 | vis_miss(final_data)
36 |
37 | saveRDS(final_data, "5_apps/weighting_tool/acs_vuln_weighting_data.rds")
38 |
39 | ## TODO: Run 4_scripts/2_analysis/01_generate_vulnerability_data.R after completing this
40 |
41 | ##### Test models - Do this once for each imputed variable ---------------------
42 | ## Use a linear model to impute median household income based off race and education
43 | # create model object
44 | # mhi.fit <- lm(median_household_income ~ people_of_color + adults_without_4_yr_degree,
45 | # data = wide_data)
46 | #
47 | # # see model fit... it's a good fit
48 | # summary(mhi.fit)
49 | #
50 | # avghhsize.fit <- lm(average_household_size ~ students_k_12 + overcrowded_households,
51 | # data = wide_data)
52 | # summary(avghhsize.fit)
53 |
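54 | ## Optional sanity check (a quick sketch): confirm impute_lm() filled the NAs
55 | ## in the two imputed columns by comparing missing counts before and after.
56 | sapply(list(before = wide_data, after = final_data), function(d)
57 |   c(mhi_NAs    = sum(is.na(d$median_household_income)),
58 |     hhsize_NAs = sum(is.na(d$average_household_size))))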
--------------------------------------------------------------------------------
/5_apps/weighting_tool/00b_prep_app_interp.R:
--------------------------------------------------------------------------------
1 | ## TODO Explore simputation package to impute missing values simply
2 | ## https://cran.r-project.org/web/packages/simputation/vignettes/intro.html
3 |
4 | library(naniar)
5 | library(simputation)
6 | # setwd("~/projects/repos/vulnerability_weighting_map/")
7 | source("global.R")
8 | source("GLOBAL_interpolation_addendum.R")
9 | range01 <- function(x, ...){(x - min(x, ...)) / (max(x, ...) - min(x, ...))}
10 |
11 | # file.copy("1_data/3_processed/acs_vuln_weighting_data.rds",
12 | # to = "5_apps/weighting_tool/",
13 | # overwrite = TRUE)
14 |
15 | wide_data <- readRDS("1_data/3_processed/acs_vuln_weighting_data_interpolated.rds") %>%
16 | st_transform(WEB_EPSG) %>%
17 | rmapshaper::ms_simplify(., keep = 0.5)
18 |
19 | vis_miss(wide_data)
20 |
21 | imputed <- wide_data %>%
22 | st_drop_geometry() %>%
23 | group_by(year) %>%
24 | impute_lm(median_household_income ~ people_of_color + adults_without_4_yr_degree) %>%
25 | impute_lm(average_household_size ~ people_of_color + adults_without_4_yr_degree) %>%
26 | mutate(adjusted_household_income = median_household_income / average_household_size ^ 0.5)
27 |
28 | final_data <- imputed %>%
29 | left_join(., wide_data %>% select(GEOID, year), by = c("GEOID", "year")) %>%
30 | st_as_sf()
31 |
32 | vis_miss(final_data)
33 |
34 | saveRDS(final_data, "5_apps/weighting_tool/acs_vuln_weighting_data_imputed.rds")
35 |
36 | ## TODO: Run 4_scripts/2_analysis/01_generate_vulnerability_data.R after completing this
37 |
38 | ##### Test models - Do this once for each imputed variable ---------------------
39 | ## Use a linear model to impute median household income based off race and education
40 | # create model object
41 | # mhi.fit <- lm(median_household_income ~ people_of_color + adults_without_4_yr_degree,
42 | # data = wide_data)
43 | #
44 | # # see model fit... it's a good fit
45 | # summary(mhi.fit)
46 | #
47 | # avghhsize.fit <- lm(average_household_size ~ people_of_color + adults_without_4_yr_degree,
48 | # data = wide_data)
49 | # summary(avghhsize.fit)
50 |
--------------------------------------------------------------------------------
/5_apps/weighting_tool/acs_vuln_weighting_data.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/5_apps/weighting_tool/acs_vuln_weighting_data.rds
--------------------------------------------------------------------------------
/5_apps/weighting_tool/acs_vuln_weighting_data_imputed.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/5_apps/weighting_tool/acs_vuln_weighting_data_imputed.rds
--------------------------------------------------------------------------------
/5_apps/weighting_tool/app.R:
--------------------------------------------------------------------------------
1 | library(shiny)
2 | library(tidyverse)
3 | library(leaflet)
4 | library(RColorBrewer)
5 | library(sf)
6 | library(rlang)
7 | library(scales)
8 | library(shinythemes)
9 | library(rio)
10 | library(shinyBS)
11 |
12 | #### Utility functions ####
13 | range01 <- function(x, ...){(x - min(x, ...)) / (max(x, ...) - min(x, ...))}
14 | TARGET_EPSG <- 2913
15 |
16 | # setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
17 |
18 | vulnerability_data <- readRDS("acs_vuln_weighting_data.rds") %>%
19 | mutate(NAME = substr(GEOID, 6, 11) %>% as.numeric(),
20 | NAME = paste0(NAME / 100, ", ", county))
21 |
22 | hatch_people_of_color <- readRDS("hatch_people_of_color.rds")
23 | hatch_households_with_limited_english <- readRDS("hatch_households_with_limited_english.rds")
24 | hatch_persons_with_disabilities <- readRDS("hatch_persons_with_disabilities.rds")
25 | hatch_retirees_65 <- readRDS("hatch_retirees_65.rds")
26 | hatch_youth_0_21 <- readRDS("hatch_youth_0_21.rds")
27 | hatch_vulnerable <- readRDS("hatch_vulnerable.rds")
28 |
29 | base_data_labels <- rio::import("base_data_download_labels.xlsx")
30 |
31 | usb <- readRDS("primary_place_outline.rds")
32 |
33 | ui <-
34 | fluidPage(
35 | theme = shinytheme("lumen"), ##(lumen, paper, simplex, flatly, yeti) ## See interactive themes: https://gallery.shinyapps.io/117-shinythemes/
36 | div(# Main Panel
37 | titlePanel(title = "Portland Vulnerability Weighting Tool"),
38 | style = "flex-grow:1; resize:horizontal; overflow: hidden; position:relative; margin-right: 310px ",
39 | tags$style(type = "text/css", "#vulnerabilitymap {height: calc(90vh) !important;}"),
40 | leafletOutput("vulnerabilitymap")
41 | ),
42 | wellPanel( # Sidebar
43 | style = "overflow-y: auto; position:fixed; width:300px; top:0; bottom:0;; right:0",
44 | shiny::helpText("Explore the demographic variables that go into calculating",
45 | "an economic vulnerability index."),
46 | radioButtons(inputId = "geo_filter", label = "Filter for Region or Place:", choices = c("Portland Region" = "Region", "City of Portland" = "Primary Place"), selected = "Primary Place"),
47 | # bsButton(inputId = "geo_filter_button", label = "?", style = "info",
48 | # size = "extra-small", type = "action"),
49 |   bsTooltip("geo_filter", "Select the extent of the mapping results: Census tracts within the 7-county Portland region, or tracts within the City of Portland. NOTE: The extent of your selection influences the outcome of the mapping results, as each tract is compared to the entire selection.",
50 | placement = "bottom", trigger = "hover", options = list(container = "body")),
51 | radioButtons(inputId = "calc_method", label = "Weighting Method:", choices = c("Percentile Rank" = "Percentile", "Z-Score" = "Z-Score"), selected = "Percentile"),
52 |   bsTooltip("calc_method", "Percentile rank sums together the raw rank of the variables (e.g., a tract with 80% renters might rank in the 90th percentile). The Z-score method sums together the number of standard deviations a tract is above or below the average (e.g., 80% renters might be 1.8 standard deviations above the mean).",
53 | placement = "bottom", trigger = "hover", options = list(container = "body")),
54 | # sliderInput("z_tolerance", "Z-Tolerance Bottom Code:", min = 2, max = 7, value = 3),
55 | hr(),
56 |     shiny::helpText("Slide the bar to the desired weight for each variable",
57 |                     "you wish to include in the vulnerability model.",
58 |                     "The weight corresponds to the relative importance",
59 |                     "of that variable: 5 = very important, 0 = turned off.",
60 |                     "Download the results using the buttons at the bottom."),
61 | mainPanel(),
62 |
63 | sliderInput("wt_people_of_color", "RACE: People of color", min = 0, max = 5, value = 1),
64 | sliderInput("wt_black_native_american", "RACE: Black+Indigenous people", min = 0, max = 5, value = 1),
65 | sliderInput("wt_adults_without_4_yr_degree", "EDUCATION: Adults w/o 4-yr degree", min = 0, max = 5, value = 1),
66 | sliderInput("wt_adjusted_household_income", "INCOME: Adjusted household income", min = 0, max = 5, value = 1),
67 | sliderInput("wt_housing_cost_burdened", "HOUSING: Housing cost burden >30%", min = 0, max = 5, value = 1),
68 |
69 | hr(),
70 |
71 |     sliderInput("wt_households_with_limited_english", "LEP: Limited English-proficient households", min = 0, max = 5, value = 0),
72 | sliderInput("wt_persons_with_disabilities", "DISABILITY: Persons w disabilities", min = 0, max = 5, value = 0),
73 | sliderInput("wt_commuters", "WORK: Commuters", min = 0, max = 5, value = 0),
74 | sliderInput("wt_unemployed_persons", "WORK: Unemployed persons", min = 0, max = 5, value = 0),
75 | sliderInput("wt_insufficient_commuter_vehicles", "WORK: Insufficient commuter vehicles", min = 0, max = 5, value = 0),
76 | sliderInput("wt_households_without_a_vehicle", "ACCESS: Households without a vehicle", min = 0, max = 5, value = 0),
77 | sliderInput("wt_no_computer_access", "ACCESS: No computer access", min = 0, max = 5, value = 0),
78 | sliderInput("wt_no_broadband_access", "ACCESS: No broadband access", min = 0, max = 5, value = 0),
79 | sliderInput("wt_students_k_12", "AGE: Students (K-12)", min = 0, max = 5, value = 0),
80 | sliderInput("wt_youth_0_21", "AGE: Youth (0-21)", min = 0, max = 5, value = 0),
81 | sliderInput("wt_retirees_65", "AGE: Retirees (65+)", min = 0, max = 5, value = 0),
82 | sliderInput("wt_median_household_income", "INCOME: Median household income", min = 0, max = 5, value = 0),
83 | sliderInput("wt_people_below_1_0_poverty", "INCOME: People below 1x poverty", min = 0, max = 5, value = 0),
84 | sliderInput("wt_people_below_2_0_poverty", "INCOME: People below 2x poverty", min = 0, max = 5, value = 0),
85 | sliderInput("wt_food_stamps_recipients", "HOUSEHOLDS: Food stamps recipients", min = 0, max = 5, value = 0),
86 | sliderInput("wt_renter_households", "HOUSEHOLDS: Renter households", min = 0, max = 5, value = 0),
87 | sliderInput("wt_cost_burdened_renters_35_percent", "HOUSEHOLDS: Renters paying 35%+ on rent", min = 0, max = 5, value = 0),
88 | sliderInput("wt_cost_burdened_owners_with_mortgage_35_percent", "HOUSEHOLDS: Owners paying 35%+ on mortgage", min = 0, max = 5, value = 0),
89 | sliderInput("wt_overcrowded_households", "HOUSEHOLDS: Overcrowded households", min = 0, max = 5, value = 0),
90 |
91 |
92 | actionButton("tabBut", label = "View Table"),
93 | br(),
94 | downloadButton("download_geojson", label = "Download Weighted Results"),
95 | br(),
96 | downloadButton("download_basedata", label = "Download Base Data"),
97 | hr(),
98 | div("View code on ", tags$a(href="https://github.com/BPSTechServices/pdx_vulnerability_map_app","GitHub."))
99 | ),
100 | bsModal(id = "modalTable", title = "Data Table", trigger = "tabBut",
101 | size = "large",
102 | dataTableOutput("resultsTable"))
103 | )
104 |
105 |
106 |
107 |
108 |
109 | ##### Begin server #####
110 | server <- function(input, output, session) {
111 |
112 | filtered_vulnerability_data <- reactive({
113 | if(input$geo_filter == "Primary Place"){
114 | vulnerability_data <- vulnerability_data %>%
115 | filter(in_primary_place == TRUE)
116 | }
117 |
118 | filtered_data <- vulnerability_data %>%
119 | mutate(across(.cols = c(people_of_color:youth_and_retirees),
120 | .fns = list(rnk = ~cume_dist(.),
121 | z = ~scale(.))),
122 | across(ends_with("_z"), ~ case_when(. > 3 ~ 3,
123 | . < -3 ~ -3,
124 | T ~ .)),
125 | median_household_income_rnk = 1 - median_household_income_rnk,
126 | median_household_income_z = -median_household_income_z,
127 | adjusted_household_income_rnk = 1 - adjusted_household_income_rnk,
128 |              adjusted_household_income_z = -adjusted_household_income_z, # flip sign so lower income => higher vulnerability
129 | per_capita_income_rnk = 1 - per_capita_income_rnk,
130 | per_capita_income_z = -per_capita_income_z)
131 |
132 | filtered_data
133 | })
134 |
135 | reweighted <- reactive({
136 |
137 | if(input$calc_method == "Z-Score"){
138 | reweighted_data <- filtered_vulnerability_data() %>%
139 | mutate(
140 | weighted_people_of_color = people_of_color_z * input$wt_people_of_color,
141 | weighted_black_native_american = black_native_american_z * input$wt_black_native_american,
142 | weighted_adults_without_4_yr_degree = adults_without_4_yr_degree_z * input$wt_adults_without_4_yr_degree,
143 | weighted_adjusted_household_income = adjusted_household_income_z * input$wt_adjusted_household_income,
144 | weighted_housing_cost_burdened = housing_cost_burdened_z * input$wt_housing_cost_burdened,
145 | weighted_households_with_limited_english = households_with_limited_english_z * input$wt_households_with_limited_english,
146 | weighted_persons_with_disabilities = persons_with_disabilities_z * input$wt_persons_with_disabilities,
147 | weighted_commuters = commuters_z * input$wt_commuters,
148 | weighted_unemployed_persons = unemployed_persons_z * input$wt_unemployed_persons,
149 | weighted_insufficient_commuter_vehicles = insufficient_commuter_vehicles_z * input$wt_insufficient_commuter_vehicles,
150 | weighted_households_without_a_vehicle = households_without_a_vehicle_z * input$wt_households_without_a_vehicle,
151 | weighted_no_computer_access = no_computer_access_z * input$wt_no_computer_access,
152 | weighted_no_broadband_access = no_broadband_access_z * input$wt_no_broadband_access,
153 | weighted_students_k_12 = students_k_12_z * input$wt_students_k_12,
154 | weighted_youth_0_21 = youth_0_21_z * input$wt_youth_0_21,
155 | weighted_retirees_65 = retirees_65_z * input$wt_retirees_65,
156 | weighted_median_household_income = median_household_income_z * input$wt_median_household_income,
157 | weighted_people_below_1_0_poverty = people_below_1_0_poverty_z * input$wt_people_below_1_0_poverty,
158 | weighted_people_below_2_0_poverty = people_below_2_0_poverty_z * input$wt_people_below_2_0_poverty,
159 | weighted_food_stamps_recipients = food_stamps_recipients_z * input$wt_food_stamps_recipients,
160 | weighted_renter_households = renter_households_z * input$wt_renter_households,
161 | weighted_cost_burdened_renters_35_percent = cost_burdened_renters_35_percent_z * input$wt_cost_burdened_renters_35_percent,
162 | weighted_cost_burdened_owners_with_mortgage_35_percent = cost_burdened_owners_with_mortgage_35_percent_z * input$wt_cost_burdened_owners_with_mortgage_35_percent,
163 | weighted_overcrowded_households = overcrowded_households_z * input$wt_overcrowded_households,
164 |
165 | composite_score = weighted_people_of_color + weighted_black_native_american +
166 | weighted_adults_without_4_yr_degree + weighted_adjusted_household_income +
167 | weighted_housing_cost_burdened + weighted_households_with_limited_english +
168 | weighted_persons_with_disabilities + weighted_commuters +
169 | weighted_unemployed_persons + weighted_insufficient_commuter_vehicles +
170 | weighted_households_without_a_vehicle + weighted_no_computer_access +
171 | weighted_no_broadband_access + weighted_students_k_12 + weighted_youth_0_21 +
172 | weighted_retirees_65 + weighted_median_household_income +
173 | weighted_people_below_1_0_poverty + weighted_people_below_2_0_poverty +
174 | weighted_food_stamps_recipients + weighted_renter_households +
175 | weighted_cost_burdened_renters_35_percent +
176 | weighted_cost_burdened_owners_with_mortgage_35_percent +
177 | weighted_overcrowded_households,# + weighted_user_data,
178 |
179 | indexed_score = round(range01(composite_score, na.rm = T) * 100, 0)
180 | ) %>%
181 | arrange(desc(indexed_score))
182 | }
183 |
184 | if(input$calc_method == "Percentile"){
185 | reweighted_data <- filtered_vulnerability_data() %>%
186 | mutate(
187 | weighted_people_of_color = people_of_color_rnk * input$wt_people_of_color,
188 | weighted_black_native_american = black_native_american_rnk * input$wt_black_native_american,
189 | weighted_adults_without_4_yr_degree = adults_without_4_yr_degree_rnk * input$wt_adults_without_4_yr_degree,
190 | weighted_adjusted_household_income = adjusted_household_income_rnk * input$wt_adjusted_household_income,
191 | weighted_housing_cost_burdened = housing_cost_burdened_rnk * input$wt_housing_cost_burdened,
192 | weighted_households_with_limited_english = households_with_limited_english_rnk * input$wt_households_with_limited_english,
193 | weighted_persons_with_disabilities = persons_with_disabilities_rnk * input$wt_persons_with_disabilities,
194 | weighted_commuters = commuters_rnk * input$wt_commuters,
195 | weighted_unemployed_persons = unemployed_persons_rnk * input$wt_unemployed_persons,
196 | weighted_insufficient_commuter_vehicles = insufficient_commuter_vehicles_rnk * input$wt_insufficient_commuter_vehicles,
197 | weighted_households_without_a_vehicle = households_without_a_vehicle_rnk * input$wt_households_without_a_vehicle,
198 | weighted_no_computer_access = no_computer_access_rnk * input$wt_no_computer_access,
199 | weighted_no_broadband_access = no_broadband_access_rnk * input$wt_no_broadband_access,
200 | weighted_students_k_12 = students_k_12_rnk * input$wt_students_k_12,
201 | weighted_youth_0_21 = youth_0_21_rnk * input$wt_youth_0_21,
202 | weighted_retirees_65 = retirees_65_rnk * input$wt_retirees_65,
203 | weighted_median_household_income = median_household_income_rnk * input$wt_median_household_income,
204 | weighted_people_below_1_0_poverty = people_below_1_0_poverty_rnk * input$wt_people_below_1_0_poverty,
205 | weighted_people_below_2_0_poverty = people_below_2_0_poverty_rnk * input$wt_people_below_2_0_poverty,
206 | weighted_food_stamps_recipients = food_stamps_recipients_rnk * input$wt_food_stamps_recipients,
207 | weighted_renter_households = renter_households_rnk * input$wt_renter_households,
208 | weighted_cost_burdened_renters_35_percent = cost_burdened_renters_35_percent_rnk * input$wt_cost_burdened_renters_35_percent,
209 | weighted_cost_burdened_owners_with_mortgage_35_percent = cost_burdened_owners_with_mortgage_35_percent_rnk * input$wt_cost_burdened_owners_with_mortgage_35_percent,
210 | weighted_overcrowded_households = overcrowded_households_rnk * input$wt_overcrowded_households,
211 |
212 | composite_score = weighted_people_of_color + weighted_black_native_american +
213 | weighted_adults_without_4_yr_degree + weighted_adjusted_household_income +
214 | weighted_housing_cost_burdened + weighted_households_with_limited_english +
215 | weighted_persons_with_disabilities + weighted_commuters +
216 | weighted_unemployed_persons + weighted_insufficient_commuter_vehicles +
217 | weighted_households_without_a_vehicle + weighted_no_computer_access +
218 | weighted_no_broadband_access + weighted_students_k_12 + weighted_youth_0_21 +
219 | weighted_retirees_65 + weighted_median_household_income +
220 | weighted_people_below_1_0_poverty + weighted_people_below_2_0_poverty +
221 | weighted_food_stamps_recipients + weighted_renter_households +
222 | weighted_cost_burdened_renters_35_percent +
223 | weighted_cost_burdened_owners_with_mortgage_35_percent +
224 | weighted_overcrowded_households,# + weighted_user_data,
225 |
226 | indexed_score = round(range01(composite_score, na.rm = T) * 100, 0)) %>%
227 | arrange(desc(indexed_score))
228 | }
229 | reweighted_data
230 | })
231 |
232 | output$download_geojson <- downloadHandler(
233 | filename = "custom_weight_export-geojson.geojson",
234 | content = function(file) {
235 | sf::st_write(obj = reweighted() %>% st_transform(TARGET_EPSG), dsn = file)
236 | }
237 | )
238 |
239 | output$download_basedata <- downloadHandler(
240 | filename = "base_demographic_data.xlsx",
241 | content = function(file) {
242 | datalist <- list("data" = st_drop_geometry(filtered_vulnerability_data()), "labels" = base_data_labels)
243 | rio::export(datalist, file = file)
244 | }
245 | )
246 |
247 |
248 |
249 | bins <- c(0, 20, 40, 60, 80, 100)
250 | pal <- leaflet::colorBin(viridis_pal(option = "C")(length(bins)), bins = bins)
251 |
252 | output$vulnerabilitymap <- renderLeaflet({
253 | leaflet() %>%
254 | addProviderTiles('CartoDB.Positron') %>%
255 | setView(lng = -122.65, lat = 45.52, zoom = 11) %>%
256 | addLegend("topright", pal = pal,
257 | values = bins,
258 | title = "Vulnerability Index",
259 | opacity = 0.6) %>%
260 |
261 | addPolylines(
262 | data = usb,
263 | weight = 2,
264 | opacity = 0.5,
265 | fillOpacity = 0,
266 | color = "#FF0000",
267 | group = "USB Overlay") %>%
268 |
269 | addPolylines(
270 | data = hatch_vulnerable,
271 | color = "black",
272 | weight = 0.6,
273 | group = "Vulnerability Overlay"
274 | ) %>%
275 |
276 | addPolylines(
277 | data = hatch_people_of_color,
278 | color = "black",
279 | weight = 0.6,
280 | group = "BIPOC Overlay"
281 | ) %>%
282 |
283 | addPolylines(
284 | data = hatch_households_with_limited_english,
285 | color = "black",
286 | weight = 0.6,
287 | group = "LEP Overlay"
288 | ) %>%
289 | addPolylines(
290 | data = hatch_persons_with_disabilities,
291 | color = "black",
292 | weight = 0.6,
293 | group = "Disability Overlay"
294 | ) %>%
295 | addPolylines(
296 | data = hatch_retirees_65,
297 | color = "black",
298 | weight = 0.6,
299 | group = "Retiree (65+) Overlay"
300 | ) %>%
301 | addPolylines(
302 | data = hatch_youth_0_21,
303 | color = "black",
304 | weight = 0.6,
305 | group = "Youth (0-21) Overlay"
306 | ) %>%
307 | ## Toggle group layers: https://rstudio.github.io/leaflet/showhide.html
308 | addLayersControl(
309 | overlayGroups = c("USB Overlay", "Vulnerability Overlay", "BIPOC Overlay",
310 | "LEP Overlay", "Disability Overlay", "Retiree (65+) Overlay",
311 | "Youth (0-21) Overlay"),
312 | options = layersControlOptions(collapsed = FALSE)
313 | ) %>%
314 | hideGroup(c("Vulnerability Overlay", "BIPOC Overlay",
315 | "LEP Overlay", "Disability Overlay", "Retiree (65+) Overlay",
316 | "Youth (0-21) Overlay"))
317 | })
318 |
319 | ## https://stackoverflow.com/questions/46186014/changing-leaflet-map-according-to-input-without-redrawing-multiple-polygons
320 | observe({
321 | popup_context <- paste0("Tract: ", reweighted()$NAME,
322 |                             "<br>Vulnerability score: ", scales::comma(reweighted()$indexed_score, accuracy = 1),
323 |                             "<br>People of color: ", scales::percent(reweighted()$people_of_color, accuracy = 0.1),
324 |                             "<br>Black + Indigenous: ", scales::percent(reweighted()$black_native_american, accuracy = 0.1),
325 |                             "<br>Adults without 4-yr degree: ", scales::percent(reweighted()$adults_without_4_yr_degree, accuracy = 0.1),
326 |                             "<br>Adjusted household income: ", scales::dollar(reweighted()$adjusted_household_income, accuracy = 1),
327 |                             "<br>Housing cost burdened: ", scales::percent(reweighted()$housing_cost_burdened, accuracy = 0.1),
328 |                             "<br>Households with limited English: ", scales::percent(reweighted()$households_with_limited_english, accuracy = 0.1),
329 |                             "<br>Persons with disabilities: ", scales::percent(reweighted()$persons_with_disabilities, accuracy = 0.1),
330 |                             "<br>Commuters: ", scales::percent(reweighted()$commuters, accuracy = 0.1),
331 |                             "<br>Unemployed persons: ", scales::percent(reweighted()$unemployed_persons, accuracy = 0.1),
332 |                             "<br>Insufficient commuter vehicles: ", scales::percent(reweighted()$insufficient_commuter_vehicles, accuracy = 0.1),
333 |                             "<br>Households without a vehicle: ", scales::percent(reweighted()$households_without_a_vehicle, accuracy = 0.1),
334 |                             "<br>No computer access: ", scales::percent(reweighted()$no_computer_access, accuracy = 0.1),
335 |                             "<br>No broadband access: ", scales::percent(reweighted()$no_broadband_access, accuracy = 0.1),
336 |                             "<br>Students (K-12): ", scales::percent(reweighted()$students_k_12, accuracy = 0.1),
337 |                             "<br>Youth (0-21): ", scales::percent(reweighted()$youth_0_21, accuracy = 0.1),
338 |                             "<br>Retirees (65+): ", scales::percent(reweighted()$retirees_65, accuracy = 0.1),
339 |                             "<br>Median household income: ", scales::dollar(reweighted()$median_household_income, accuracy = 1),
340 |                             "<br>People below 1x poverty: ", scales::percent(reweighted()$people_below_1_0_poverty, accuracy = 0.1),
341 |                             "<br>People below 2x poverty: ", scales::percent(reweighted()$people_below_2_0_poverty, accuracy = 0.1),
342 |                             "<br>Food stamp recipients: ", scales::percent(reweighted()$food_stamps_recipients, accuracy = 0.1),
343 |                             "<br>Renter households: ", scales::percent(reweighted()$renter_households, accuracy = 0.1),
344 |                             "<br>Cost-burdened renters: ", scales::percent(reweighted()$cost_burdened_renters_35_percent, accuracy = 0.1),
345 |                             "<br>Cost-burdened w mortgage: ", scales::percent(reweighted()$cost_burdened_owners_with_mortgage_35_percent, accuracy = 0.1),
346 |                             "<br>Overcrowded households: ", scales::percent(reweighted()$overcrowded_households, accuracy = 0.1)
347 | )
348 |
349 | proxy <- leafletProxy("vulnerabilitymap", data = reweighted())
350 |
351 | proxy %>% clearShapes() %>%
352 | addPolygons(
353 | fillColor = ~pal(indexed_score),
354 | weight = 1,
355 | opacity = 1,
356 | color = "white",
357 | dashArray = "3",
358 | popup = popup_context,
359 | fillOpacity = 0.5) %>%
360 | addPolylines(
361 | data = usb,
362 | weight = 2,
363 | opacity = 0.5,
364 | fillOpacity = 0,
365 | color = "#FF0000",
366 | group = "USB Overlay") %>%
367 |
368 | addPolylines(
369 | data = hatch_vulnerable,
370 | color = "black",
371 | weight = 0.6,
372 | group = "Vulnerability Overlay"
373 | ) %>%
374 |
375 | addPolylines(
376 | data = hatch_people_of_color,
377 | color = "black",
378 | weight = 0.6,
379 | group = "BIPOC Overlay"
380 | ) %>%
381 |
382 | addPolylines(
383 | data = hatch_households_with_limited_english,
384 | color = "black",
385 | weight = 0.6,
386 | group = "LEP Overlay"
387 | ) %>%
388 | addPolylines(
389 | data = hatch_persons_with_disabilities,
390 | color = "black",
391 | weight = 0.6,
392 | group = "Disability Overlay"
393 | ) %>%
394 | addPolylines(
395 | data = hatch_retirees_65,
396 | color = "black",
397 | weight = 0.6,
398 | group = "Retiree (65+) Overlay"
399 | ) %>%
400 | addPolylines(
401 | data = hatch_youth_0_21,
402 | color = "black",
403 | weight = 0.6,
404 | group = "Youth (0-21) Overlay"
405 | ) %>%
406 | ## Toggle group layers: https://rstudio.github.io/leaflet/showhide.html
407 | addLayersControl(
408 | overlayGroups = c("USB Overlay", "Vulnerability Overlay", "BIPOC Overlay",
409 | "LEP Overlay", "Disability Overlay", "Retiree (65+) Overlay",
410 | "Youth (0-21) Overlay"),
411 | options = layersControlOptions(collapsed = TRUE)
412 | )
413 | })
414 |
415 |
416 |
417 | output$resultsTable <- renderDataTable({
418 |
419 | reweighted() %>%
420 | arrange(desc(indexed_score)) %>%
421 | st_drop_geometry() %>%
422 | select(GEOID,
423 | `Vulnerability Score` = indexed_score,
424 | `Adj. HH Income` = adjusted_household_income ,
425 | `Avg. HH Size` = average_household_size ,
426 | `Med. HH Income` = median_household_income ,
427 | `% POC` = people_of_color ,
428 | `% Black + Indigenous` = black_native_american ,
429 | `% < 4yr Deg.` = adults_without_4_yr_degree ,
430 | `% Cost Burdened HH` = housing_cost_burdened ,
431 | `% LEP HH` = households_with_limited_english ,
432 | `% Disability` = persons_with_disabilities ,
433 | `% Commuters` = commuters ,
434 | `% Unemployed` = unemployed_persons ,
435 | `% Insufficient Vehicles` = insufficient_commuter_vehicles ,
436 | `% Zero Vehicles` = households_without_a_vehicle ,
437 | `% No Computer Access` = no_computer_access ,
438 | `% No Broadband Access` = no_broadband_access ,
439 | `% K-12 Students` = students_k_12 ,
440 | `% Age 0-21` = youth_0_21 ,
441 | `% Age 65+` = retirees_65 ,
442 | `% < 1x Poverty` = people_below_1_0_poverty ,
443 | `% < 2x Poverty` = people_below_2_0_poverty ,
444 | `% Food Stamps` = food_stamps_recipients ,
445 | `% Renters` = renter_households ,
446 | `% Cost-Burdened Renters` = cost_burdened_renters_35_percent ,
447 | `% Cost-Burdened Owners` = cost_burdened_owners_with_mortgage_35_percent ,
448 |            `% Overcrowded HH` = overcrowded_households
449 | ) %>%
450 | mutate(across(c(`% POC`:`% Overcrowded HH`),
451 | ~scales::percent(round(., digits = 3), accuracy = 0.1)),
452 | across(c(`Adj. HH Income`, `Med. HH Income`),
453 | ~scales::dollar(.)))
454 | },
455 |
456 |   # Use autoWidth and scrollX to handle the many-column overflow problem in the table modal popup
457 | # https://stackoverflow.com/questions/34850382/setting-column-width-in-r-shiny-datatable-does-not-work-in-case-of-lots-of-colum
458 | options = list(pageLength=10, autoWidth = TRUE, scrollX = TRUE))
459 |
460 | }
461 |
462 |
463 | # Run the application
464 | # options(browser = "/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary")
465 | # runApp(list(ui = ui, server = server, display.mode = "showcase"), host = "127.0.0.1", port = 7002, launch.browser = T) # 192.168.1.170 # 127.0.0.1
466 | shinyApp(ui = ui, server = server)
467 |
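468 | ## Illustration (commented-out sketch) of the two weighting methods offered in
469 | ## the sidebar, using a made-up vector of tract shares. Percentile rank comes
470 | ## from dplyr::cume_dist(); the Z-score method standardizes with scale() and is
471 | ## capped at +/- 3, mirroring filtered_vulnerability_data() above.
472 | # x <- c(0.80, 0.35, 0.55, 0.20, 0.10)
473 | # dplyr::cume_dist(x)                      # share of tracts at or below each value
474 | # pmin(pmax(as.numeric(scale(x)), -3), 3)  # standard deviations from the mean, capped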
--------------------------------------------------------------------------------
/5_apps/weighting_tool/base_data_download_labels.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/5_apps/weighting_tool/base_data_download_labels.xlsx
--------------------------------------------------------------------------------
/5_apps/weighting_tool/hatch_households_with_limited_english.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/5_apps/weighting_tool/hatch_households_with_limited_english.rds
--------------------------------------------------------------------------------
/5_apps/weighting_tool/hatch_people_of_color.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/5_apps/weighting_tool/hatch_people_of_color.rds
--------------------------------------------------------------------------------
/5_apps/weighting_tool/hatch_persons_with_disabilities.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/5_apps/weighting_tool/hatch_persons_with_disabilities.rds
--------------------------------------------------------------------------------
/5_apps/weighting_tool/hatch_retirees_65.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/5_apps/weighting_tool/hatch_retirees_65.rds
--------------------------------------------------------------------------------
/5_apps/weighting_tool/hatch_vulnerable.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/5_apps/weighting_tool/hatch_vulnerable.rds
--------------------------------------------------------------------------------
/5_apps/weighting_tool/hatch_youth_0_21.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/5_apps/weighting_tool/hatch_youth_0_21.rds
--------------------------------------------------------------------------------
/5_apps/weighting_tool/primary_place_outline.rds:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/5_apps/weighting_tool/primary_place_outline.rds
--------------------------------------------------------------------------------
/6_reports/Change in vulnerability memo.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/6_reports/Change in vulnerability memo.pdf
--------------------------------------------------------------------------------
/6_reports/changes_2010t2020_vulnerability_nk.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/6_reports/changes_2010t2020_vulnerability_nk.xlsx
--------------------------------------------------------------------------------
/6_reports/vulnerability_changes_draft.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/6_reports/vulnerability_changes_draft.zip
--------------------------------------------------------------------------------
/6_reports/vulnerability_score_2010.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/6_reports/vulnerability_score_2010.png
--------------------------------------------------------------------------------
/6_reports/vulnerability_score_2015.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/6_reports/vulnerability_score_2015.png
--------------------------------------------------------------------------------
/6_reports/vulnerability_score_2020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/6_reports/vulnerability_score_2020.png
--------------------------------------------------------------------------------
/GLOBAL_interpolation_addendum.R:
--------------------------------------------------------------------------------
1 | TRACTS.2019.SF <- map_dfr(STATES_TO_DOWNLOAD, .f = function(STATE){
2 | tigris::tracts(STATE, cb = TRUE, year = 2019) %>%
3 | filter(substr(GEOID, 1, 5) %in% COUNTIES_TO_DOWNLOAD)}) %>%
4 | st_transform(TARGET_EPSG)
5 |
6 | TRACTS.2019_IN_PLACE <- TRACTS.2019.SF %>%
7 | st_filter(., st_buffer(PRIMARY_PLACE, dist = BUFFER_DISTANCE)) %>%
8 | pull(GEOID)
9 |
10 | tracts.2019_to_remove <- c('41005022208', '41051010200')
11 |
12 | TRACTS.2019_IN_PLACE <- setdiff(TRACTS.2019_IN_PLACE, tracts.2019_to_remove)
13 |
14 | TRACTS.2019.SF <- TRACTS.2019.SF %>%
15 | mutate(in_primary_place = GEOID %in% TRACTS.2019_IN_PLACE) %>%
16 | select(GEOID, in_primary_place)
17 |
18 | # TRACTS.SF <- TRACTS.SF %>% select(GEOID, in_primary_place)
19 |
20 | TRACTS_2010t2020.SF <- rbind(
21 | mutate(TRACTS.2019.SF, year = 2010),
22 | mutate(TRACTS.2019.SF, year = 2015),
23 | TRACTS.SF %>% select(GEOID, in_primary_place) %>% mutate(year = 2020)
24 | )
25 |
26 | STATE_BLOCKS.2020 <- map_dfr(STATES_TO_DOWNLOAD, .f = function(STATE){
27 | tigris::blocks(STATE, year = 2020) %>%
28 | filter(substr(GEOID20, 1, 5) %in% COUNTIES_TO_DOWNLOAD)}) %>%
29 | st_transform(TARGET_EPSG)
30 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 BPSTechServices
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Vulnerability Weighting Map Tool
2 | This repository hosts a free and open-source mapping tool that allows users to map socioeconomic vulnerability indicators in their community. Social vulnerability is a salient but complex topic, and many studies have assessed different indicator metrics for their appropriateness.
3 |
4 | This tool is built to let users experiment with different indicators and their weights to see how changing the weight of one metric affects the outcome on a map (a short sketch of the weighting arithmetic appears at the end of this README). For example, how well would a census tract "perform" if computer access became a more heavily weighted consideration? Would the tract be considered more or less vulnerable with increased focus on that metric?
5 |
6 | 
7 |
8 | The intended purpose of this tool is to:
9 |
10 | 1. Demonstrate how many social vulnerability indicators co-vary with one another. In other words, the map results for some communities change little when the weighting mechanism is modified.
11 | 2. Quickly assess the geographic distribution of an indexed vulnerability indicator across a few metrics. For example, if a community organization wants to prioritize areas that lack computer access, the tool allows it to focus on computer access as a key metric and add in supplemental vulnerability metrics.
12 | 3. Serve as an educational tool for understanding the geographic distribution of intersectional equity questions. How does race intersect with income? Or disability with age?
13 | 4. Allow analysts to break down an indexed or composite indicator into its constituent parts to see how each component impacts the index.
14 |
15 | ## Required and recommended software
16 | To run the app in your community, you will need a few pieces of software and libraries:
17 | * [R](https://cran.r-project.org/)
18 | * [RStudio](https://www.rstudio.com/products/rstudio/)
19 | * Kyle Walker's [tidycensus](https://github.com/walkerke/tidycensus) package
20 | * [Census Bureau API key](https://api.census.gov/data/key_signup.html)
21 | * [Shinyapps.io](https://www.shinyapps.io/) account (free with up to 5 web apps)
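22 |
23 | ## How the weighting works (sketch)
24 | At its core, the weighting tool converts each selected indicator to a percentile rank (or a z-score), multiplies it by the user's 0-5 weight, sums the weighted values into a composite score, and rescales that composite to a 0-100 index. The snippet below is a minimal illustration of that arithmetic with made-up numbers and arbitrary weights; the app applies the same idea across all tracts and indicators in `5_apps/weighting_tool/app.R`.
25 |
26 | ```r
27 | library(dplyr)
28 |
29 | range01 <- function(x, ...) (x - min(x, ...)) / (max(x, ...) - min(x, ...))
30 |
31 | # Hypothetical tract-level shares for two indicators
32 | toy <- tibble(
33 |   tract       = c("A", "B", "C", "D"),
34 |   renters     = c(0.80, 0.35, 0.55, 0.20),
35 |   no_computer = c(0.10, 0.02, 0.06, 0.01)
36 | )
37 |
38 | toy %>%
39 |   mutate(across(c(renters, no_computer), .fns = list(rnk = cume_dist)),  # percentile ranks
40 |          composite_score = renters_rnk * 3 + no_computer_rnk * 1,        # example weights of 3 and 1
41 |          indexed_score   = round(range01(composite_score) * 100))        # rescale to 0-100
42 | ```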
--------------------------------------------------------------------------------
/global.R:
--------------------------------------------------------------------------------
1 | ## 01. Load libraries, set options, source functions and resources -------------
2 | require(tidyverse)
3 | require(tidycensus)
4 | require(sf)
5 | require(tigris)
6 | require(mapview)
7 |
8 | options(
9 | scipen = 999,
10 | digits = 4,
11 | tigris_class = "sf",
12 | tigris_use_cache = T
13 | )
14 |
15 | source("4_scripts/1_data-processing/01_load_spatial_resources.R")
16 | source("3_functions/get_counties_from_cbsa.R")
17 | source("3_functions/get_states_from_stcnty_fips.R")
18 |
19 | ## 02. Determine and input target region's FIPS/CBSA codes and places ----------
20 |
21 | # TODO: Enter a region to look up in the line below to search for the correct CBSA FIPS
22 | REGION_OF_INTEREST <- "portland"
23 |
24 | # Use the code below to look up the FIPS code for the region of interest (a CBSA is a core-based statistical area).
25 | # Review the results and note the region's CBSA FIPS code
26 | county2msa %>%
27 | filter(str_detect(cbsaname15, regex(REGION_OF_INTEREST, ignore_case = TRUE)))
28 |
29 | # TODO: Enter the correct CBSA FIPS code found in the lookup above to determine
30 | # the counties that are part of that CBSA
31 | COUNTIES_TO_DOWNLOAD <- get_counties_from_cbsa(38900)
32 |
33 | ## 03. Automatically generate other variables based on the counties selected above ---------------
34 |
35 | ## Determine states from counties
36 | STATES_TO_DOWNLOAD <- get_states_from_stcnty_fips(COUNTIES_TO_DOWNLOAD)
37 |
38 | TRACTS.SF <- map_dfr(STATES_TO_DOWNLOAD, .f = function(STATE){
39 | tigris::tracts(STATE, cb = TRUE, year = 2020) %>%
40 | filter(substr(GEOID, 1, 5) %in% COUNTIES_TO_DOWNLOAD)})
41 |
42 | TRACTS_HIRES.SF <- map_dfr(STATES_TO_DOWNLOAD, .f = function(STATE){
43 | tigris::tracts(STATE, cb = FALSE, year = 2020) %>%
44 | filter(substr(GEOID, 1, 5) %in% COUNTIES_TO_DOWNLOAD)})
45 |
46 | # TARGET_EPSG <- crsuggest::suggest_top_crs(TRACTS.SF, units = "us-ft")
47 | TARGET_EPSG <- 2913 # NAD83(HARN) / Oregon North (ft); Portland-specific, so use the crsuggest line above for other regions
48 | WEB_EPSG <- 4326
49 |
50 | ## Main places in CBSA
51 | PLACES_IN_CBSA <- tigris::places(STATES_TO_DOWNLOAD) %>%
52 | st_filter(., filter(tigris::counties(state = STATES_TO_DOWNLOAD, cb = T), GEOID %in% COUNTIES_TO_DOWNLOAD)) %>%
53 | st_transform(TARGET_EPSG) %>%
54 | arrange(desc(ALAND)) %>%
55 | head() # keep the six largest places by land area (head() default)
56 |
57 | ## Grab the primary place of interest (including its GEOID/FIPS code),
58 | ## which is USUALLY the largest place by land area.
59 | PRIMARY_PLACE <- PLACES_IN_CBSA[1,]
60 |
61 | TRACTS.SF <- st_transform(TRACTS.SF, TARGET_EPSG)
62 | TRACTS_HIRES.SF <- st_transform(TRACTS_HIRES.SF, TARGET_EPSG)
63 |
64 | ## Identify a list of tracts that are within the primary place of interest
65 | ## Adjust negative distance buffer to fit desired tolerance
66 | BUFFER_DISTANCE <- -1320 # a quarter mile inward, in the units of TARGET_EPSG (feet for EPSG 2913)
67 |
68 | TRACTS_IN_PLACE <- TRACTS.SF %>%
69 | st_filter(., st_buffer(PRIMARY_PLACE, dist = BUFFER_DISTANCE)) %>%
70 | pull(GEOID)
71 |
72 | ## Quick view of the tracts in place
73 | TRACTS.SF %>%
74 | mutate(in_primary_place = GEOID %in% TRACTS_IN_PLACE) %>%
75 | mapview(zcol = 'in_primary_place')
76 |
77 | tracts_to_remove <- c('41005022208', '41051010200') # tracts to manually exclude after reviewing the map above
78 |
79 | TRACTS_IN_PLACE <- setdiff(TRACTS_IN_PLACE, tracts_to_remove)
80 |
81 | ## Create a new variable indicating whether each tract is in the primary place of interest
82 | TRACTS.SF <- TRACTS.SF %>%
83 | mutate(in_primary_place = GEOID %in% TRACTS_IN_PLACE)
84 |
85 | TRACTS_HIRES.SF <- TRACTS_HIRES.SF %>%
86 | mutate(in_primary_place = GEOID %in% TRACTS_IN_PLACE)
87 |
--------------------------------------------------------------------------------
/variable_query.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BPSTechServices/pdx_vulnerability_map_app/72b3c1b82356634f666085f0b0afec838416e8ac/variable_query.png
--------------------------------------------------------------------------------
/vulnerability_map.Rproj:
--------------------------------------------------------------------------------
1 | Version: 1.0
2 |
3 | RestoreWorkspace: Default
4 | SaveWorkspace: Default
5 | AlwaysSaveHistory: Default
6 |
7 | EnableCodeIndexing: Yes
8 | UseSpacesForTab: Yes
9 | NumSpacesForTab: 2
10 | Encoding: UTF-8
11 |
12 | RnwWeave: Sweave
13 | LaTeX: pdfLaTeX
14 |
--------------------------------------------------------------------------------