├── Data └── data.txt ├── Outputs └── outputs.txt ├── github-training.Rproj ├── .gitmessage ├── save_ppt_slides.vbs ├── .github ├── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md └── pull_request_template.md ├── .gitignore ├── index.Rmd ├── read_ons_bulletin.R ├── README.md ├── functions.R ├── email_search.R ├── jupyter-notebook.ipynb └── report_functions.R /Data/data.txt: -------------------------------------------------------------------------------- 1 | All input data should be stored in this folder. 2 | 3 | No files in this folder will be picked up by GitHub. -------------------------------------------------------------------------------- /Outputs/outputs.txt: -------------------------------------------------------------------------------- 1 | All sensitive outputs (e.g. HTML files, data tables, etc.) should be stored in this folder. 2 | 3 | No files in this folder will be picked up by GitHub. -------------------------------------------------------------------------------- /github-training.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | -------------------------------------------------------------------------------- /.gitmessage: -------------------------------------------------------------------------------- 1 | # Ensure you have not committed any secrets: 2 | # e.g. passwords, API keys, data files or OFFSEN outputs. 3 | 4 | # Make sure you include a clear commit message, 5 | # including the reason for any change. 6 | 7 | # Once you have completed your message, use CTRL+X to exit, 8 | # then press Y and Enter to save the message. 9 | 10 | # Write your git commit message below this line: 11 | -------------------------------------------------------------------------------- /save_ppt_slides.vbs: -------------------------------------------------------------------------------- 1 | '[PowerPointExport.vbs] - exports each slide of the presentation passed as the first argument to a numbered PNG file in the folder passed as the second argument' 2 | 3 | With CreateObject("PowerPoint.Application") 4 | 5 | Set p = .Presentations.Open(WScript.Arguments(0)) 6 | local = true 7 | 8 | With CreateObject("Scripting.FileSystemObject") 9 | 10 | For Each s In p.Slides 11 | 12 | s.Export .BuildPath(WScript.Arguments(1), s.SlideNumber & ".png"), "png" 13 | 14 | Next 15 | 16 | End With 17 | 18 | .Quit 19 | End With -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: 'enhancement' 6 | assignees: '' 7 | 8 | --- 9 | 10 | * **Is your feature request related to a problem? Please describe.** 11 | 12 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 13 | 14 | * **Describe the solution you'd like** 15 | 16 | A clear and concise description of what you want to happen. 17 | 18 | * **Describe alternatives you've considered** 19 | 20 | A clear and concise description of any alternative solutions or features you've considered. 21 | 22 | * **Additional context** 23 | 24 | Add any other context or screenshots about the feature request here. 
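The `.gitmessage` file earlier in this repository is a commit message template, but nothing in the repo shows how it gets registered with Git. One way to wire it up from the R console is sketched below; the `system2()` wrapper and the `--local` (per-repository) scope are illustrative assumptions rather than something this repo sets up for you.

```r
# Register .gitmessage as the commit message template for this repository only.
# Run from the repository root after cloning (assumed setup, not part of the repo).
system2("git", c("config", "--local", "commit.template", ".gitmessage"))

# From then on, running `git commit` without -m opens your editor
# pre-filled with the reminders from .gitmessage.
```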
25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Report an error in this code 4 | title: '' 5 | labels: 'bug' 6 | assignees: '' 7 | 8 | --- 9 | 10 | * **Describe the bug** 11 | 12 | A clear and concise description of what the bug is. 13 | 14 | * **Tell us how to reproduce the error** 15 | 16 | Steps to reproduce the behavior: 17 | 1. Go to '...' 18 | 2. Click on '....' 19 | 3. Scroll down to '....' 20 | 4. See error 21 | 22 | * **Expected behavior** 23 | 24 | A clear and concise description of what you expected to happen. 25 | 26 | * **Platform** 27 | 28 | Were you running the code on: 29 | 30 | - [ ] Local R 31 | - [ ] Citrix 32 | - [ ] Non-network laptop 33 | 34 | * **Screenshots** 35 | 36 | If applicable, add screenshots to help explain your problem. 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # History files 2 | .Rhistory 3 | .Rapp.history 4 | 5 | # Session Data files 6 | .RData 7 | 8 | # User-specific files 9 | .Ruserdata 10 | 11 | # Example code in package build process 12 | *-Ex.R 13 | 14 | # Output files from R CMD build 15 | /*.tar.gz 16 | 17 | # Output files from R CMD check 18 | /*.Rcheck/ 19 | 20 | # RStudio files 21 | .Rproj.user/ 22 | 23 | # produced vignettes 24 | vignettes/*.html 25 | vignettes/*.pdf 26 | 27 | # OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3 28 | .httr-oauth 29 | 30 | # knitr and R markdown default cache directories 31 | *_cache/ 32 | /cache/ 33 | 34 | # Temporary files created by R markdown 35 | *.utf8.md 36 | *.knit.md 37 | 38 | # R Environment Variables 39 | .Renviron 40 | 41 | # Data and Outputs folders 42 | Data/* 43 | Outputs/ 44 | 45 | # Common data filetypes 46 | *.xlsx 47 | *.xls 48 | *.csv 49 | *.ods 50 | *.png 51 | .Rproj.user 52 | # Exception: re-include any lookup_table.csv despite the *.csv rule above 53 | !*/lookup_table.csv -------------------------------------------------------------------------------- /index.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "This is my test pages page" 3 | output: html_document 4 | date: '2023-07-24' 5 | --- 6 | 7 | ```{r setup, include=FALSE} 8 | knitr::opts_chunk$set(echo = TRUE) 9 | ``` 10 | 11 | ## R Markdown 12 | 13 | testing testing testing 14 | 15 | This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see <http://rmarkdown.rstudio.com>. 16 | 17 | When you click the **Knit** button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. You can embed an R code chunk like this: 18 | 19 | ```{r cars} 20 | summary(cars) 21 | ``` 22 | 23 | ## Including Plots 24 | 25 | You can also embed plots, for example: 26 | 27 | ```{r pressure, echo=FALSE} 28 | plot(pressure) 29 | ``` 30 | 31 | Note that the `echo = FALSE` parameter was added to the code chunk to prevent printing of the R code that generated the plot. 
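If you would rather knit `index.Rmd` from the R console than the **Knit** button, a minimal sketch (assuming the rmarkdown package is installed and the working directory is the repository root):

```r
# Render index.Rmd to the html_document declared in its YAML header;
# the output file lands next to the .Rmd unless output_dir is set.
rmarkdown::render("index.Rmd")
```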
32 | -------------------------------------------------------------------------------- /read_ons_bulletin.R: -------------------------------------------------------------------------------- 1 | ##Get latest ONS HTML release------------------------------------------------ 2 | library(rvest) #Needed for read_html(), html_nodes() and html_text() 3 | #Set up a function to neatly read the file in 4 | read_release <- function(){ 5 | #Read all HTML 6 | pg <- read_html("https://www.ons.gov.uk/peoplepopulationandcommunity/") 7 | 8 | ##Keep the text content only 9 | text_only <- html_text(html_nodes(pg,'p')) 10 | 11 | ##Search for term to identify sentence about period, sample, etc. 12 | text_only <- text_only[grepl("sample size|sampled", text_only)] 13 | 14 | ##Return as a vector of sentences 15 | strsplit(text_only, "[.]")[[1]] 16 | 17 | } 18 | 19 | ##Get release dates from the latest ONS HTML release------------------------- 20 | 21 | #Set up a function to neatly read the dates in 22 | read_dates <- function(){ 23 | 24 | #Read all HTML 25 | 26 | pg <- read_html("https://www.ons.gov.uk/peoplepopulationandcommunity/") 27 | 28 | 29 | ##Keep the text content only 30 | text_only <- html_text(html_nodes(pg,'p')) 31 | 32 | ##Search for release date and next release, and bind into a list 33 | dates <- list(text_only[grepl("Release date", text_only)], 34 | text_only[grepl("Next release", text_only)]) 35 | 36 | #Keep dates only 37 | lapply(dates, FUN = gsub, pattern = ".*: |\n", replacement = "") 38 | 39 | } 40 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Git and GitHub Training Repository 2 | 3 | This is a GitHub repository designed for beginner training in Git and GitHub for colleagues in DfT. 4 | 5 | ## To use 6 | 7 | To clone this repository, select the green "Code" button and ensure you have selected the SSH URL. You may then clone this repo as normal. 8 | 9 | ## Features 10 | 11 | ### Raising issues 12 | 13 | The repository contains two issue templates which are loaded automatically; one for bug reporting, and one for feature suggestions. These can be used to record issues and planned improvements within your code, and the standardised templates ensure you capture all of the required information every time. 14 | 15 | ### Pull requests 16 | 17 | The repository contains a pull request template which loads automatically. This standardised form ensures pull requests are reviewed appropriately and provides a QA record of code changes. 18 | 19 | 20 | ### Gitignore 21 | 22 | The .gitignore file is set to ignore common data formats such as xlsx, csv and ods files. It also ignores the .Renviron file, allowing you to store secrets such as API keys securely in your local environment. 23 | 24 | The repository also includes Data and Outputs folders. Putting data inputs and outputs into these folders ensures they will not be committed or pushed to GitHub, regardless of format. This is ideal when you have a project with a large number of varied inputs or outputs (e.g. XML files, or HTML outputs). 25 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Proposed changes 2 | 3 | Describe the big picture of your changes here to make it clear what your pull request will change. Make sure to explain the **what**, e.g. the functionality it changes, not the **how**, e.g.
what code you have written. If it fixes a bug or resolves a feature request, be sure to link to that issue. 4 | 5 | ## Types of changes 6 | 7 | What types of changes does your code introduce? 8 | _Put an `x` in the boxes that apply_ 9 | 10 | - [ ] Bugfix (change which fixes an issue) 11 | - [ ] New feature (change which adds functionality) 12 | - [ ] Documentation update (change to naming or other documentation) 13 | 14 | ## Checklist 15 | 16 | _Put an `x` in the boxes that apply. You can also fill these out after creating the PR._ 17 | 18 | - [ ] I have added unit tests that prove my fix is effective or that my feature works 19 | - [ ] I have added necessary documentation (if appropriate) 20 | - [ ] I have checked that my changes have not broken any other functionality 21 | - [ ] I have linked to any issues this PR fixes 22 | 23 | ## Points for review 24 | 25 | Add checkboxes here for any specific aspects of your PR you would like to be checked in peer review. If you don't specify anything here, the reviewer will check that: 26 | 27 | - [ ] All of your unit tests pass 28 | - [ ] Your code is clear and easy to understand 29 | - [ ] You have documented any new features 30 | - [ ] They can run the code you have written 31 | - [ ] Your changes do not break any other functionality 32 | 33 | ## Who is reviewing this PR? 34 | 35 | Tag your reviewer here to ensure they get a notification! 36 | -------------------------------------------------------------------------------- /functions.R: -------------------------------------------------------------------------------- 1 | ##Function to find a file with a specific word in the title 2 | find_file <- function(name){ 3 | file <- list.files("G:/AFP/IHACAll/IHAC/015 DDU/005 Covid reporting/0001 R Projects/transport_modes_table/Data", recursive = TRUE, full.names = TRUE) 4 | file <- file[!grepl("~$", file, fixed = TRUE)] #Drop Excel temporary files (names starting "~$") 5 | file <- file[grepl("xlsx", file, fixed = TRUE)] #Keep xlsx files only 6 | file <- file[grepl(name, file, ignore.case = TRUE)] 7 | file 8 | } 9 | 10 | ##Function to round 0.5 up (instead of sometimes down) 11 | round2 <- function(x, n) { 12 | posneg <- sign(x) 13 | z <- abs(x)*10^n 14 | z <- z + 0.5 + sqrt(.Machine$double.eps) 15 | z <- trunc(z) 16 | z <- z/10^n 17 | z*posneg 18 | } 19 | 20 | 21 | ##Define a "not in" operator, %ni% 22 | "%ni%" <- Negate("%in%") 23 | 24 | #Function to build a linear regression equation label for plot annotation 25 | #Note: x and y should be the predictor and response vectors; lm() finds them in the formula environment unless df contains columns named x and y 26 | lm_eqn <- function(df, x, y){ 27 | m <- lm(y ~ x, df) 28 | eq <- substitute(italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2, 29 | list(a = format(unname(coef(m)[1]), digits = 2), 30 | b = format(unname(coef(m)[2]), digits = 2), 31 | r2 = format(summary(m)$r.squared, digits = 3))) 32 | as.character(as.expression(eq)) 33 | } 34 | 35 | 36 | ##Function to write data while replacing NA values with ".." 37 | write.replace.data <- function(wb, 38 | sheet, 39 | x, 40 | colNames = TRUE, 41 | startCol = 1, 42 | startRow = 1, 43 | rep.NA = '..', 44 | ...){ 45 | openxlsx::writeData(wb, 46 | sheet, 47 | x, 48 | startCol = startCol, 49 | startRow = startRow, 50 | colNames = colNames, 51 | ...)
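##The lines below are the NA-replacement step: record the data row numbers, then scan each written column and overwrite any NA cell with the rep.NA placeholder, one cell at a time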
52 | 53 | row_values <- 1:nrow(x) 54 | 55 | if (!is.null(rep.NA)) { 56 | for(c in 1:ncol(x)){ 57 | for (r in row_values[is.na(x[,c])]){ 58 | openxlsx::writeData(wb, 59 | sheet, 60 | rep.NA, 61 | startRow = startRow + r - 1 + colNames, #Shift down one row if column names were written 62 | startCol = startCol + c - 1, 63 | colNames = FALSE)} 64 | } 65 | } 66 | 67 | } -------------------------------------------------------------------------------- /email_search.R: -------------------------------------------------------------------------------- 1 | library(RDCOMClient) 2 | 3 | ##Set up Outlook application 4 | outlook_app <- COMCreate("Outlook.Application") 5 | 6 | ##Create email search and save function 7 | save_attachments <- function(search_string, file_type, folder){ 8 | 9 | ##Search in CM Analytics inbox for email subjects containing given string 10 | search <- outlook_app$AdvancedSearch( 11 | "'\\CM Analytics\\Inbox\\000 COVID-19\\transport modes'", 12 | paste0("urn:schemas:httpmail:subject LIKE '%", search_string, "%'") 13 | ) 14 | 15 | ##Sleep to allow search to run; this happens slowly in the background! 16 | Sys.sleep(5) 17 | 18 | #Count number of emails returned 19 | search$Results()$Count() 20 | 21 | ##Save all returned emails into a list 22 | results <- search$Results() 23 | emails <- list() 24 | 25 | for(i in seq_len(results$Count())){ 26 | emails <- append(emails, results$Item(i)) 27 | } 28 | 29 | ##Return a message with number of emails found 30 | message(paste(length(emails), "relevant emails found")) 31 | 32 | ##Pull attachments from all emails 33 | for(i in seq_along(emails)){ 34 | attachment_names <- c() 35 | email <- emails[[i]] 36 | for(j in seq_len(email$Attachments()$Count())){ 37 | attachment_names <- c(attachment_names, 38 | email$Attachments(j)$DisplayName()) 39 | } 40 | 41 | 42 | ##Keep only attachments of interest 43 | attachments_to_keep <- grep(file_type, attachment_names, fixed = TRUE) 44 | 45 | ##Return number of relevant attachments per email 46 | message(paste("Email", i, length(attachments_to_keep), "relevant attachments found")) 47 | 48 | ##Skip this email if no relevant attachments were found 49 | if(length(attachments_to_keep) != 0){ 50 | 51 | for(k in attachments_to_keep){ 52 | ##Create a filename for the attachment of interest 53 | filename <- paste(normalizePath(folder), 54 | attachment_names[k], sep = "\\") 55 | 56 | #Save the attachment to the specified folder 57 | email$Attachments(k)$SaveAsFile(filename) 58 | } 59 | } 60 | } 61 | } 62 | ##Save BTP attachments 63 | save_attachments(search_string = "BTP", 64 | file_type = "docx", 65 | folder = "G:/AFP/IHACAll/IHAC/015 DDU/005 Covid reporting/0001 R Projects/transport_modes_table/Data/Face coverings/BTP") 66 | 67 | ##Save YOYallmodes file 68 | save_attachments(search_string = "Passenger Demand Report", 69 | file_type = "xlsx", 70 | folder = "G:/AFP/IHACAll/IHAC/015 DDU/005 Covid reporting/0001 R Projects/transport_modes_table/Data/TfL") 71 | 72 | ##Save absence dashboard file 73 | save_attachments(search_string = "Absence Dashboard", 74 | file_type = "pptx", 75 | folder = "G:/AFP/IHACAll/IHAC/015 DDU/005 Covid reporting/0001 R Projects/transport_modes_table/Data/Staff absence") 76 | 77 | ##Save TfL Face Covering File 78 | save_attachments(search_string = "TfL COVID-19 update report", 79 | file_type = "docx", 80 | folder = "G:/AFP/IHACAll/IHAC/015 DDU/005 Covid reporting/0001 R Projects/transport_modes_table/Data/Face coverings/TfL") 81 | -------------------------------------------------------------------------------- /jupyter-notebook.ipynb: 
-------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Jupyter Notebook\n", 8 | "\n", 9 | "This notebook contains some basic python code." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "print(\"hello git and github training\")" 19 | ] 20 | }, 21 | { 22 | "cell_type": "markdown", 23 | "metadata": {}, 24 | "source": [ 25 | "## Maths" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": null, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "# program two numbers labelled n1 and n2\n", 35 | "n1 = 2\n", 36 | "n2 = 3\n", 37 | "# add the two number together under the variable sum\n", 38 | "sum = n1 + n2\n", 39 | "# display the sum\n", 40 | "print(sum)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "# Python Program to calculate the square root\n", 50 | "\n", 51 | "# Note: change this value for a different result\n", 52 | "num = 8 \n", 53 | "\n", 54 | "# To take the input from the user\n", 55 | "#num = float(input('Enter a number: '))\n", 56 | "\n", 57 | "num_sqrt = num ** 0.5\n", 58 | "print('The square root of %0.3f is %0.3f'%(num ,num_sqrt))" 59 | ] 60 | }, 61 | { 62 | "cell_type": "markdown", 63 | "metadata": {}, 64 | "source": [ 65 | "## Working With Dates" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": null, 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | "# using datetime\n", 75 | "from datetime import datetime\n", 76 | "\n", 77 | "my_date_string = \"Mar 11 2011 11:31AM\"\n", 78 | "\n", 79 | "datetime_object = datetime.strptime(my_date_string, '%b %d %Y %I:%M%p')\n", 80 | "\n", 81 | "print(type(datetime_object))\n", 82 | "print(datetime_object)" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": null, 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [ 91 | "# using dateutil\n", 92 | "from dateutil import parser\n", 93 | "\n", 94 | "date_time = parser.parse(\"Mar 11 2011 11:31AM\")\n", 95 | "\n", 96 | "print(date_time)\n", 97 | "print(type(date_time))" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": null, 103 | "metadata": {}, 104 | "outputs": [], 105 | "source": [] 106 | } 107 | ], 108 | "metadata": { 109 | "kernelspec": { 110 | "display_name": "Python 3", 111 | "language": "python", 112 | "name": "python3" 113 | }, 114 | "language_info": { 115 | "codemirror_mode": { 116 | "name": "ipython", 117 | "version": 3 118 | }, 119 | "file_extension": ".py", 120 | "mimetype": "text/x-python", 121 | "name": "python", 122 | "nbconvert_exporter": "python", 123 | "pygments_lexer": "ipython3", 124 | "version": "3.6.5" 125 | } 126 | }, 127 | "nbformat": 4, 128 | "nbformat_minor": 2 129 | } 130 | -------------------------------------------------------------------------------- /report_functions.R: -------------------------------------------------------------------------------- 1 | 2 | ##Format dates nicely 3 | date_formatter <- function(dates, abbr_day = TRUE, abbr_month = TRUE, include_year = FALSE){ 4 | dayy <- lubridate::day(dates) 5 | suff <- dplyr::case_when(dayy %in% c(11,12,13) ~ "th", 6 | dayy %% 10 == 1 ~ 'st', 7 | dayy %% 10 == 2 ~ 'nd', 8 | dayy %% 10 == 3 ~'rd', 9 | TRUE ~ "th") 10 | 11 | if(include_year == FALSE){ 12 | paste0(lubridate::wday(dates, label = 
TRUE, abbr = abbr_day), " ", dayy, suff, " ", lubridate::month(dates, label = TRUE, abbr = abbr_month)) 13 | } else { paste0(lubridate::wday(dates, label = TRUE, abbr = abbr_day), " ", dayy, suff, " ", lubridate::month(dates, label = TRUE, abbr = abbr_month), " ", lubridate::year(dates)) } ##Append the year when include_year = TRUE 14 | 15 | } 16 | 17 | ##Function to find a value that is neither the max nor the min 18 | mid <- function(data){ 19 | #Keep only unique values 20 | data <- unique(data) 21 | 22 | #Find min and max 23 | mx <- max(data) 24 | mn <- min(data) 25 | 26 | data[!data %in% c(mn, mx)] 27 | } 28 | 29 | ##Function to interrogate QHR data (a data.table with date, hour and count columns) to find the difference between the earliest and latest dates by hour 30 | hourly_diff <- function(data, hour_1, hour_2){ 31 | #Get individual maximum and comparator dates 32 | mx <- max(data$date) 33 | md <- min(data$date) 34 | 35 | ##Filter data to just those dates and the hours given 36 | compare <- data[date %in% c(md, mx) & hour >= hour_1 & hour < hour_2, 37 | ##Summarise by date to add all volumes together 38 | .(count = sum(count, na.rm = TRUE)), 39 | by = list(date)] 40 | 41 | ##Find the percentage change from the earliest date to the latest date 42 | change <- round( 43 | ( 44 | (compare[date == mx, count]/compare[date == md, count]) - 1) 45 | * 100) 46 | 47 | 48 | #Format as percentage with positive or negative sign 49 | if(hour_1 == 00 & hour_2 == 24){ 50 | paste0("(", sprintf("%+2g%%", change),")") 51 | } 52 | else if(hour_2 == 24){ 53 | paste0("from ", hour_format(hour_1), " onwards "," (", sprintf("%+2g%%", change),")") 54 | }else{ 55 | paste0("between ", hour_format(hour_1), " - ", hour_format(hour_2), " (", sprintf("%+2g%%", change),")") 56 | } 57 | } 58 | 59 | ##Format the hours into times 60 | hour_format <- function(x){ 61 | 62 | #Split out hour and minute 63 | hour <- gsub("[.].*", "", x) 64 | mins <- gsub("[[:digit:]]*[.]", "", format(x, nsmall = 2)) 65 | #Turn minutes into actual minutes 66 | mins <- if(mins == "00") mins else(as.numeric(mins) * 0.6) 67 | 68 | #Return pasted time 69 | paste(hour, mins, sep = ":") 70 | } 71 | 72 | 73 | #Find the comparison percentage value from days_diff days before the latest date (default one week) 74 | comparison_percent <- function(transport_mode, days_diff = 7){ 75 | 76 | data <- all_data %>% 77 | dplyr::filter(transport_type == transport_mode) %>% 78 | dplyr::filter(date == (max(date, na.rm = TRUE) - days_diff)) 79 | 80 | scales::percent(data$dash_value) 81 | 82 | } 83 | 84 | ##Function to pull most recent data and day 85 | extract_current_data <- function(transport_mode){ 86 | 87 | data <- all_data %>% 88 | dplyr::filter(transport_type == transport_mode) %>% 89 | dplyr::filter(date == max(date, na.rm = TRUE)) 90 | 91 | ##Format day 92 | if(transport_mode == "national_rail"){ 93 | day <- paste("w/e", date_formatter(data$date)) 94 | } else{ 95 | day <- date_formatter(data$date) 96 | } 97 | 98 | paste0(scales::percent(data$dash_value), "
(", day, ")") 99 | 100 | } 101 | 102 | ##Function to pull the year-ago comparison to the most recent data and day (364 days = 52 weeks, so the comparison falls on the same weekday) 103 | extract_last_year_data <- function(transport_mode, date_minus = 364){ 104 | 105 | data <- all_data %>% 106 | dplyr::filter(transport_type == transport_mode) %>% 107 | dplyr::filter(date == max(date, na.rm = TRUE) - date_minus) 108 | 109 | ##Format day 110 | if(transport_mode == "national_rail"){ 111 | day <- paste("w/e", date_formatter(data$date, include_year = TRUE)) 112 | } else{ 113 | day <- date_formatter(data$date, include_year = TRUE) 114 | } 115 | 116 | paste0(scales::percent(data$dash_value), "
(", day, ")") 117 | 118 | } 119 | 120 | ##Function to pull lowest weekday value and date 121 | extract_min_data <- function(transport_mode){ 122 | 123 | data <- all_data %>% 124 | dplyr::mutate(weekday = lubridate::wday(date)) %>% 125 | dplyr::filter(transport_type == transport_mode & 126 | weekday %in% c(2, 3, 4, 5, 6) & #Weekdays only (wday: 1 = Sunday, 7 = Saturday) 127 | ##Remove bank holidays 128 | !date %in% as.Date(c("2020-01-01", "2020-04-10", "2020-04-13", 129 | "2020-05-04", "2020-05-08", "2020-05-25", 130 | "2020-08-31", "2020-12-25", "2020-12-28", 131 | "2021-01-01", "2021-04-02", "2021-04-05", 132 | "2021-05-03", "2021-05-31", "2021-08-30", 133 | "2021-12-27", "2021-12-28"))) %>% 134 | dplyr::filter(dash_value == min(dash_value, na.rm = TRUE)) 135 | 136 | scales::percent(unique(data$dash_value)) 137 | 138 | } 139 | 140 | ##Function to pull most recent date from data 141 | current_date <- function(transport_mode){ 142 | 143 | ##Filter data to the given mode 144 | data <- all_data %>% 145 | dplyr::filter(transport_type == transport_mode) 146 | 147 | date_formatter(max(data$date, na.rm = TRUE), abbr_day = FALSE, abbr_month = FALSE) 148 | } 149 | 150 | ##Function to pull the comparison date (default 7 days before the most recent date) 151 | comparison_date <- function(transport_mode, days_diff = 7){ 152 | 153 | ##Filter data to the given mode 154 | data <- all_data %>% 155 | dplyr::filter(transport_type == transport_mode) 156 | 157 | date_formatter((max(data$date, na.rm = TRUE) - days_diff), abbr_day = FALSE, abbr_month = FALSE) 158 | } 159 | 160 | ##Function to pull out current cycling data 161 | current_cycling <- function(){ 162 | 163 | ##Filter cycling data to the most recent few days 164 | data <- all_data %>% 165 | dplyr::filter(transport_type == "cycling") %>% 166 | dplyr::filter(date > (max(date, na.rm = TRUE) - 5)) 167 | 168 | paste0(scales::percent(min(data$dash_value, na.rm = TRUE)), "-", scales::percent(max(data$dash_value, na.rm = TRUE))) 169 | } 170 | 171 | ##Function to pull out current cycling dates 172 | date_cycling <- function(format = "full_month"){ 173 | 174 | ##Filter cycling data to the most recent few days 175 | data <- all_data %>% 176 | dplyr::filter(transport_type == "cycling") %>% 177 | dplyr::filter(date > (max(date, na.rm = TRUE) - 5)) 178 | 179 | 180 | if(format == "full_month"){ 181 | 182 | paste0(date_formatter(min(data$date, na.rm = TRUE), abbr_day = FALSE, abbr_month = FALSE), " and ", date_formatter(max(data$date, na.rm = TRUE), abbr_day = FALSE, abbr_month = FALSE)) 183 | } else{ 184 | paste0(date_formatter(min(data$date, na.rm = TRUE)), " - ", date_formatter(max(data$date, na.rm = TRUE))) 185 | 186 | } 187 | } 188 | 189 | ##Function to pull out previous cycling data 190 | previous_cycling <- function(){ 191 | 192 | ##Filter cycling data to the previous week 193 | data <- all_data %>% 194 | dplyr::filter(transport_type == "cycling") %>% 195 | dplyr::filter(date > (max(date, na.rm = TRUE) - 12) & date < (max(date, na.rm = TRUE) - 6)) 196 | 197 | paste0(scales::percent(min(data$dash_value, na.rm = TRUE)), "-", scales::percent(max(data$dash_value, na.rm = TRUE))) 198 | } 199 | 200 | ##Function to pull out previous cycling dates 201 | previous_date_cycling <- function(){ 202 | 203 | ##Filter cycling data to the previous week 204 | data <- all_data %>% 205 | dplyr::filter(transport_type == "cycling") %>% 206 | dplyr::filter(date > (max(date, na.rm = TRUE) - 12) & date < (max(date, na.rm = TRUE) - 6)) 207 | 208 | paste0(date_formatter(min(data$date, na.rm = TRUE), abbr_day = FALSE, abbr_month = FALSE), " and ", 
date_formatter(max(data$date, na.rm = TRUE), abbr_day = FALSE, abbr_month = FALSE, include_year = TRUE)) 209 | 210 | } 211 | 212 | ##Function to extract commentary for a specific mode from the table 213 | extract_commentary <- function(data, mode){ 214 | data %>% 215 | dplyr::filter(Mode == mode) %>% 216 | dplyr::pull(Commentary) 217 | } 218 | --------------------------------------------------------------------------------
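Most of the helpers above lean on a global `all_data` table, so they are hard to run in isolation, but `date_formatter()` is self-contained. A minimal usage sketch (the date is illustrative, not taken from the project data, and the printed results are indicative):

```r
#date_formatter() calls lubridate and dplyr with :: prefixes, so the packages
#only need to be installed, not attached

#Abbreviated weekday and month by default
date_formatter(as.Date("2021-03-01"))
#> e.g. "Mon 1st Mar"

#Spelled out in full, with the year appended
date_formatter(as.Date("2021-03-01"),
               abbr_day = FALSE, abbr_month = FALSE, include_year = TRUE)
#> e.g. "Monday 1st March 2021"
```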