├── .dockerignore
├── .gitignore
├── COPYRIGHT NOTICE.txt
├── Dockerfile
├── README.md
├── VERSION
├── docker-entrypoint.sh
├── install.sh
├── lib
│   ├── config.sh
│   ├── datalib.sh
│   ├── docker.sh
│   ├── excep.sh
│   └── utils.sh
├── license
├── rc-cli.sh
├── scoring
│   ├── Dockerfile
│   ├── README.md
│   ├── docker-entrypoint.sh
│   ├── main.py
│   ├── requirements.txt
│   └── score.py
└── templates
    ├── README.md
    ├── custom_dev_stack.md
    ├── data_structures.md
    ├── rc_base
    │   ├── .dockerignore
    │   ├── Dockerfile
    │   ├── model_apply.sh
    │   ├── model_build.sh
    │   ├── snapshots
    │   │   ├── .gitignore
    │   │   └── README.md
    │   └── src
    │       ├── model_apply.py
    │       ├── model_apply.sh
    │       └── model_build.sh
    ├── rc_python
    │   ├── .dockerignore
    │   ├── Dockerfile
    │   ├── model_apply.sh
    │   ├── model_build.sh
    │   ├── requirements.txt
    │   ├── snapshots
    │   │   ├── .gitignore
    │   │   └── README.md
    │   └── src
    │       ├── model_apply.py
    │       └── model_build.py
    ├── rc_python_lite
    │   ├── .dockerignore
    │   ├── Dockerfile
    │   ├── model_apply.sh
    │   ├── model_build.sh
    │   ├── requirements.txt
    │   ├── snapshots
    │   │   ├── .gitignore
    │   │   └── README.md
    │   └── src
    │       ├── model_apply.py
    │       └── model_build.py
    └── rc_r
        ├── .dockerignore
        ├── Dockerfile
        ├── model_apply.sh
        ├── model_build.sh
        ├── requirements.txt
        ├── snapshots
        │   ├── .gitignore
        │   └── README.md
        └── src
            ├── model_apply.r
            └── model_build.r

/.dockerignore:
--------------------------------------------------------------------------------
1 | # Git
2 | .git
3 | .gitignore
4 | 
5 | # Docker-related files
6 | Dockerfile
7 | .dockerignore
8 | 
9 | # Markdown files
10 | *.md
11 | 
12 | # Logs
13 | logs
14 | *.log
15 | 
16 | # dotenv environment variables file
17 | # **/.env
18 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.log
2 | *.tar.gz
3 | data/
4 | CONFIG
5 | logs/
6 | 
--------------------------------------------------------------------------------
/COPYRIGHT NOTICE.txt:
--------------------------------------------------------------------------------
1 | (c) Copyright 2021 Massachusetts Institute of Technology Center for Transportation & Logistics. All rights reserved.
2 | 
3 | This repository contains proprietary information of the Massachusetts Institute of Technology Center for Transportation & Logistics and is protected under U.S. and international copyright and other intellectual property laws.
4 | 
5 | THIS COPYRIGHT NOTICE MUST BE RETAINED AS PART OF THIS REPOSITORY AT ALL TIMES.
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # syntax = docker/dockerfile:1.2
2 | ARG DOCKER_VERSION=20.10.21
3 | 
4 | FROM docker:${DOCKER_VERSION}-dind
5 | LABEL edu.mit.cave.tester.image.vendor="MIT CTL Computational and Visual Education Lab"
6 | LABEL edu.mit.cave.tester.image.authors="Connor Makowski , Luis Vasquez , Willem Guter "
7 | LABEL edu.mit.cave.tester.image.title="Routing Challenge Tester"
8 | LABEL edu.mit.cave.tester.image.licenses="Copyright (c) 2021 MIT CTL CAVE Lab"
9 | LABEL edu.mit.cave.tester.image.created="2022-12-02 07:19:23-05:00"
10 | LABEL edu.mit.cave.tester.image.version="0.1.3"
11 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint-alt.sh
12 | ENTRYPOINT ["docker-entrypoint-alt.sh"]
13 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RC CLI
2 | ## Introduction
3 | This repository houses all the code needed to set up, evaluate, and test code for the Amazon Routing Challenge.
4 | 
5 | Using the `rc-cli`, participants of the Amazon Routing Challenge will be able to:
6 | - Create a new app
7 | - Run local development code with competition data
8 | - Run environment-agnostic code with competition data
9 | - Save solution files (files that can be submitted for evaluation)
10 | - Test local code and solutions with the official scoring algorithm
11 | 
12 | Saved solutions that have been tested can be uploaded to the competition site: [routingchallenge.io](https://routingchallenge.io)
13 | 
14 | ## Mac and Unix Setup
15 | 1. Install [Docker](https://docs.docker.com/get-docker/):
16 |    - Note: Make sure to install Docker Engine v18.09 or later
17 |    - If you have an older version of Docker, make sure [BuildKit is enabled](https://docs.docker.com/develop/develop-images/build_enhancements/#to-enable-buildkit-builds)
18 |    - On Linux-based systems you may need to follow the post-installation setup instructions for Docker:
19 |      - `sudo groupadd docker`
20 |      - `sudo usermod -aG docker $USER`
21 |    - Validate the installation:
22 |      - `docker run hello-world`
23 | 
24 | 2. Install [Git](https://git-scm.com)
25 |    - It is likely `git` is already installed. You can check with:
26 |      ```sh
27 |      git --version
28 |      ```
29 | 
30 | 3. Install the `rc-cli`
31 |    - Run the following command to install the `rc-cli`:
32 |      ```sh
33 |      bash <(curl -s https://raw.githubusercontent.com/MIT-CAVE/rc-cli/main/install.sh)
34 |      ```
35 |    - Follow the prompts to finish the installation process
36 | 
37 | 4. Validate that the installation was successful
38 |    - Run the following command:
39 |      ```sh
40 |      rc-cli version
41 |      ```
42 |    - If successful, the output should look something like:
43 |      ```
44 |      Routing Challenge CLI 0.2.0
45 |      ```
46 |    - If unsuccessful, you may get something like:
47 |      ```
48 |      rc-cli: command not found
49 |      ```
50 | 
51 | 5. Continue to the [Download Your Dataset section](#download-your-dataset) below
52 | 
53 | ## Windows 10 Setup
54 | 1. Install [Docker](https://hub.docker.com/editions/community/docker-ce-desktop-windows/)
55 |    - Install the WSL2 update during the Docker installation
56 |    - Update your WSL2 Kernel (if you are prompted during installation)
57 |      - Click the link to the Windows documentation about WSL2
58 |      - Download the file to update the WSL kernel package to WSL2
59 |      - Use the downloaded package to install the WSL2 Kernel
60 |      - Reboot your system
61 | 
62 | 2. Open **PowerShell** as **Administrator**
63 |    - In PowerShell type:
64 |      ```
65 |      wsl --set-default-version 2
66 |      ```
67 |    - Press `Enter`
68 |    - Exit PowerShell
69 | 
70 | 3. Install Ubuntu 20.04
71 |    - In the Microsoft Store, search for `Ubuntu 20.04`
72 |    - Install the Ubuntu 20.04 app
73 | 
74 | 4. Open the `Ubuntu 20.04` app
75 |    - This may take a while the first time
76 |    - You will be prompted for a username and password
77 |      - Set your username and password
78 |      - **MAKE SURE TO REMEMBER THESE**
79 |    - Close the app
80 | 
81 | 5. Open the Docker Desktop app
82 |    - In Settings > Resources > WSL Integration
83 |      - Allow Ubuntu 20.04
84 |      - Restart Docker
85 | 
86 | 6. Open the `Ubuntu 20.04` app
87 |    - Run the following commands to finish setting up Docker:
88 |      - Note: You may be prompted for your password
89 |        - This is your Ubuntu password
90 |      ```sh
91 |      sudo groupadd docker
92 |      ```
93 |      ```sh
94 |      sudo usermod -aG docker $USER
95 |      ```
96 |    - Validate Docker is working with the following command:
97 |      ```sh
98 |      docker run hello-world
99 |      ```
100 |      - This may not work until you close and re-open Docker.
101 | 
102 | 7. Install the `rc-cli` in the `Ubuntu 20.04` app
103 |    - Run the following command to install the `rc-cli`:
104 |      ```sh
105 |      bash <(curl -s https://raw.githubusercontent.com/MIT-CAVE/rc-cli/main/install.sh)
106 |      ```
107 |    - Follow the prompts to finish the installation process
108 | 
109 | 8. Validate the installation was successful in the `Ubuntu 20.04` app
110 |    - Run the following command:
111 |      ```sh
112 |      rc-cli version
113 |      ```
114 | 
115 | 9. Continue to the [Download Your Dataset section](#download-your-dataset) below
116 | 
117 | ## Download Your Dataset
118 | As of version `0.2.x`, RC CLI does not download the data sources for you during the installation process. Please follow the steps below to download your dataset:
119 | 
120 | 1. Install the [AWS CLI tool](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)
121 | 
122 | 2. Explore the datasets available for the [2021 Amazon Last Mile Routing Research Challenge](https://registry.opendata.aws/amazon-last-mile-challenges/):
123 |    ```sh
124 |    aws s3 ls --no-sign-request s3://amazon-last-mile-challenges/almrrc2021/
125 |    ```
126 | 
127 | 3. Select the desired dataset (**evaluation** or **training**) and copy it to the `data` directory in the `rc-cli` installation path
128 |    - Evaluation dataset:
129 |      ```
130 |      aws s3 sync --no-sign-request s3://amazon-last-mile-challenges/almrrc2021/almrrc2021-data-evaluation/ ~/.rc-cli/data/
131 |      ```
132 |    - Training dataset:
133 |      ```
134 |      aws s3 sync --no-sign-request s3://amazon-last-mile-challenges/almrrc2021/almrrc2021-data-training/ ~/.rc-cli/data/
135 |      ```
136 | 
137 | 4. For more information, see the official [Registry of Open Data on AWS](https://registry.opendata.aws/amazon-last-mile-challenges/)
138 | 
139 | 5. Continue to the [Create Your App section](#create-your-app) below
140 | 
141 | ## Create Your App
142 | 1. Get available commands
143 |    ```sh
144 |    rc-cli help
145 |    ```
146 | 
147 | 2. Create an app in your current directory
148 |    - Note: Feel free to change `my-app` to any name you want
149 |      ```sh
150 |      rc-cli new-app my-app
151 |      ```
152 | 
153 | 3. Enter the app directory
154 |    ```sh
155 |    cd my-app
156 |    ```
157 | 
158 | 4. Get the folder location on your machine so you can open the app in your favorite editor.
159 |    - On Mac:
160 |      - Open your current directory in Finder:
161 |        ```sh
162 |        open .
163 |        ```
164 |      - Display your current directory as a path:
165 |        ```sh
166 |        echo $PWD
167 |        ```
168 |    - On Linux:
169 |      - Display your current directory as a path:
170 |        ```sh
171 |        echo $PWD
172 |        ```
173 |    - On Windows 10 (using WSL Ubuntu 20.04):
174 |      - Open Explorer from your current directory:
175 |        ```sh
176 |        explorer.exe .
177 |        ```
178 |      - Alternatively, your `Ubuntu 20.04` app stores files on your local operating system at:
179 |        - `\\wsl$` > `Ubuntu-20.04` > `home` > `your-username`
180 | 
181 | 5. All `rc-cli` commands and their usage are documented in the `README.md` of your newly created app
--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
1 | 0.2.0
2 | 
--------------------------------------------------------------------------------
/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -u
3 | 
4 | readonly CHARS_LINE="============================"
5 | readonly RC_IMAGE_TAG="rc-cli"
6 | readonly MODEL_BUILD_TIMEOUT=$((12*60*60))
7 | readonly MODEL_APPLY_TIMEOUT=$((4*60*60))
8 | readonly APP_DEST_MNT="/home/app/data"
9 | 
10 | wait_for_docker() {
11 |   while ! docker ps; do
12 |     sleep 1
13 |   done
14 | }
15 | 
16 | #######################################
17 | # Load a Docker image created by rc-cli.
18 | # Globals:
19 | #   None
20 | # Arguments:
21 | #   image_file
22 | # Returns:
23 | #   None
24 | #######################################
25 | load_image() {
26 |   image_file=$1
27 |   printf "Loading the Image... "
28 |   load_stdout=$(docker load --quiet --input "/mnt/${image_file}" 2> /dev/null)
29 |   old_image_tag="${load_stdout:14}"
30 |   new_image_tag="${image_file:0:-7}:${RC_IMAGE_TAG}"
31 |   # Force the image tag to be that of the tar archive filename.
32 |   if [ "${old_image_tag}" != "${new_image_tag}" ]; then
33 |     docker tag ${old_image_tag} ${new_image_tag}
34 |     docker rmi ${old_image_tag} > /dev/null 2>&1
35 |   fi
36 |   printf "done\n"
37 | }
38 | 
39 | # Convert a number of seconds to the ISO 8601 standard.
40 | secs_to_iso_8601() {
41 |   printf "%dh:%dm:%ds" $(($1 / 3600)) $(($1 % 3600 / 60)) $(($1 % 60))
42 | }
43 | 
44 | # Get a status message from a given stderr value
45 | get_status() {
46 |   error=$1
47 |   case ${error} in
48 |     Killed)
49 |       printf "\nWARNING! production-test: Timeout has occurred when running the app container\n" >&2
50 |       printf "timeout"
51 |       ;;
52 |     "")
53 |       printf "success"
54 |       ;;
55 |     *)
56 |       printf "\n${error}\n" >&2
57 |       printf "failure"
58 |       ;;
59 |   esac
60 | }
61 | 
62 | #######################################
63 | # Send the output and time stats of the running app container
64 | # to the standard output and a given output file.
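# Sketch of the contract assumed here (hypothetical values): when a run is
# ended by 'timeout -s KILL', the captured stderr is 'Killed', so a call like
# 'print_stdout_stats 43200 "Killed" out.json' would write
# '{ "time": 43200, "status": "timeout" }' to out.json.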
65 | # Globals: 66 | # None 67 | # Arguments: 68 | # secs, error, out_file 69 | # Returns: 70 | # None 71 | ####################################### 72 | print_stdout_stats() { 73 | secs=$1 74 | error=$2 75 | out_file=$3 76 | printf "{ \"time\": ${secs}, \"status\": \"$(get_status "${error}")\" }" > ${out_file} 77 | printf "\nTime Elapsed: $(secs_to_iso_8601 ${secs})\n" 78 | } 79 | 80 | ####################################### 81 | # Run a snapshot (Docker image) for a given 'model-*' command 82 | # Globals: 83 | # None 84 | # Arguments: 85 | # cmd, image_name, timeout_in_secs, run_opts 86 | # Returns: 87 | # None 88 | ####################################### 89 | run_app_image() { 90 | cmd=$1 91 | image_name=$2 92 | timeout_in_secs=$3 93 | run_opts=$4 94 | 95 | printf "\n${CHARS_LINE}\n" 96 | printf "Running the Image [${image_name}] (${cmd}):\n\n" 97 | 98 | start_time=$(date +%s) 99 | # TODO: Improve redirection to avoid using a file for stderr 100 | timeout -s KILL ${timeout_in_secs} \ 101 | docker run --rm --entrypoint "${cmd}.sh" ${run_opts} \ 102 | --volume "/data/${cmd}_inputs:${APP_DEST_MNT}/${cmd}_inputs:ro" \ 103 | --volume "/data/${cmd}_outputs:${APP_DEST_MNT}/${cmd}_outputs" \ 104 | ${image_name}:${RC_IMAGE_TAG} 2>/var/tmp/error 105 | secs=$(($(date +%s) - start_time)) 106 | 107 | [ -f /var/tmp/error ] && error=$(cat /var/tmp/error) || error="" 108 | print_stdout_stats "${secs}" "${error}" \ 109 | "/data/model_score_timings/${cmd}_time.json" 110 | } 111 | 112 | printf "Starting the Docker daemon... " 113 | /usr/local/bin/dockerd-entrypoint.sh dockerd > /dev/null 2>&1 & 114 | wait_for_docker > /dev/null 2>&1 115 | printf "done\n" 116 | 117 | load_image ${IMAGE_FILE} 118 | image_name=${IMAGE_FILE:0:-7} # Remove the '.tar.gz' extension 119 | run_app_image "model_build" ${image_name} ${MODEL_BUILD_TIMEOUT} "" 120 | run_app_image "model_apply" ${image_name} ${MODEL_APPLY_TIMEOUT} \ 121 | "--volume /data/model_build_outputs:${APP_DEST_MNT}/model_build_outputs:ro" 122 | 123 | printf "\n" 124 | load_image ${SCORING_IMAGE} 125 | scoring_name=${SCORING_IMAGE:0:-7} 126 | printf "\n${CHARS_LINE}\n" 127 | printf "Running the Scoring Image [${scoring_name}]:\n\n" 128 | # The time stats file is mounted in a different directory 129 | docker run --rm \ 130 | --volume "/data/model_apply_inputs:${APP_DEST_MNT}/model_apply_inputs:ro" \ 131 | --volume "/data/model_apply_outputs:${APP_DEST_MNT}/model_apply_outputs:ro" \ 132 | --volume "/data/model_score_inputs:${APP_DEST_MNT}/model_score_inputs:ro" \ 133 | --volume "/data/model_score_timings:${APP_DEST_MNT}/model_score_timings:ro" \ 134 | --volume "/data/model_score_outputs:${APP_DEST_MNT}/model_score_outputs" \ 135 | ${scoring_name}:${RC_IMAGE_TAG} 136 | 137 | exec "$@" 138 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Install the RC CLI on Unix Systems 4 | 5 | # Constants 6 | readonly CHARS_LINE="============================" 7 | readonly RC_CLI_PATH="${HOME}/.rc-cli" 8 | readonly RC_CLI_SHORT_NAME="RC CLI" 9 | readonly RC_CLI_COMMAND="rc-cli" 10 | readonly RC_CLI_VERSION="0.2.0" 11 | readonly BIN_DIR="/usr/local/bin" 12 | readonly DATA_DIR="data" 13 | readonly SSH_CLONE_URL="git@github.com:MIT-CAVE/rc-cli.git" 14 | readonly HTTPS_CLONE_URL="https://github.com/MIT-CAVE/rc-cli.git" 15 | readonly MIN_DOCKER_VERSION="18.09.00" 16 | readonly MIN_TAR_VERSION="1.22" 17 | readonly 
MIN_BSDTAR_VERSION="0" # FIXME: Minimum version for which 'xz' compression is supported 18 | 19 | err() { # Display an error message 20 | printf "$0: $1\n" >&2 21 | } 22 | 23 | # Get the current version of the 'tar' archiving utility 24 | get_tar_version() { 25 | if [[ -n $(which bsdtar) ]]; then 26 | printf "$(bsdtar --version | sed 's/\([a-z]\+\s\)\(.*\)\-.*/\2/g')" 27 | elif [[ -n $(which tar) ]]; then 28 | printf "$(tar --version | grep -m1 -o ").*" | sed "s/) //")" 29 | fi 30 | } 31 | 32 | check_os() { # Validate that the current OS 33 | case "$(uname -s)" in 34 | Linux*) machine="Linux";; 35 | Darwin*) machine="Mac";; 36 | *) machine="UNKNOWN" 37 | esac 38 | if [ $machine = "UNKNOWN" ]; then 39 | printf "Error: Unknown operating system.\n" 40 | printf "Please run this command on one of the following:\n" 41 | printf "- MacOS\n- Linux\n- Windows (Using Ubuntu 20.04 on Windows Subsystem for Linux 2 - WSL2)" 42 | exit 1 43 | fi 44 | } 45 | 46 | get_compressed_data_info() { # Get information on compressed data to download 47 | DATA_URL="${1:-''}" 48 | compressed_file_name=$(printf "$(basename $DATA_URL)" | sed 's/?.*//') 49 | compressed_file_path="${RC_CLI_PATH}/${compressed_file_name}" 50 | compressed_file_type="${compressed_file_path##*.}" 51 | if [[ "$compressed_file_type" = "xz" ]]; then 52 | compressed_file_name_no_ext==${compressed_file_name%.*.*} 53 | compressed_folder_name=${compressed_file_name%.*.*} 54 | else 55 | compressed_file_name_no_ext==${compressed_file_name%.*} 56 | compressed_folder_name=${compressed_file_name%.*} 57 | fi 58 | 59 | } 60 | 61 | validate_install() { 62 | local PROGRAM_NAME="$1" 63 | local EXIT_BOOL="$2" 64 | local ERROR_STRING="$3" 65 | if [ "$($PROGRAM_NAME --version)" = "" ]; then 66 | err "${PROGRAM_NAME} is not installed. ${ERROR_STRING}" 67 | if [ "${EXIT_BOOL}" = "1" ]; then 68 | exit 1 69 | fi 70 | fi 71 | } 72 | 73 | validate_version() { 74 | local PROGRAM_NAME="$1" 75 | local EXIT_BOOL="$2" 76 | local ERROR_STRING="$3" 77 | local MIN_VERSION="$4" 78 | local CURRENT_VERSION="$5" 79 | if [ ! "$(printf '%s\n' "$MIN_VERSION" "$CURRENT_VERSION" | sort -V | head -n1)" = "$MIN_VERSION" ]; then 80 | err "Your current $PROGRAM_NAME version ($CURRENT_VERSION) is too old. ${ERROR_STRING}" 81 | if [ "${EXIT_BOOL}" = "1" ]; then 82 | exit 1 83 | fi 84 | fi 85 | 86 | } 87 | 88 | check_compression() { # Validate tar compression command is installed 89 | if [[ "${compressed_file_type}" == "xz" ]]; then 90 | [[ -n $(which bsdtar) ]] \ 91 | && min_tar_ver=${MIN_TAR_VERSION} \ 92 | || min_tar_ver=${MIN_BSDTAR_VERSION} 93 | install_tar="\nPlease install version ${min_tar_ver} or greater. \nIf your machine does not support tar, you may consider installing {$RC_CLI_SHORT_NAME} using a zip folder. \nThis requires the unzip function to be installed locally.\n" 94 | validate_install "tar" "1" "${install_tar}" 95 | CURRENT_TAR_VERSION=$(get_tar_version) 96 | validate_version "tar" "1" "${install_tar}" "${min_tar_ver}" "${CURRENT_TAR_VERSION}" 97 | # Can not validate unzip as version pipes out to stderr 98 | elif [[ "${compressed_file_type}" == "zip" ]]; then 99 | : # Do nothing 100 | # install_unzip="\nPlease install unzip." 101 | # validate_install "unzip" "1" "$install_unzip" 102 | else 103 | err "The data file you are installing with is not recognized. \nPlease install the $RC_CLI_SHORT_NAME with a tar.xz or zip file." 
104 |     exit 1
105 |   fi
106 | }
107 | 
108 | check_docker() { # Validate that docker is installed
109 |   install_docker="\nPlease install version ${MIN_DOCKER_VERSION} or greater. \nFor more information see: 'https://docs.docker.com/get-docker/'"
110 |   validate_install "docker" "1" "$install_docker"
111 |   CURRENT_DOCKER_VERSION=$(docker --version | sed -e 's/Docker version \(.*\), build.*/\1/')
112 |   validate_version "docker" "1" "$install_docker" "$MIN_DOCKER_VERSION" "$CURRENT_DOCKER_VERSION"
113 | }
114 | 
115 | check_git() { # Validate that git is installed
116 |   install_git="\nPlease install git. \nFor more information see: 'https://git-scm.com'"
117 |   validate_install "git" "1" "$install_git"
118 | }
119 | 
120 | check_previous_installation() { # Make sure previous installations are removed before continuing
121 |   if [ -d "${RC_CLI_PATH}" ]; then
122 |     LOCAL_CLI_VERSION=$(<${RC_CLI_PATH}/VERSION)
123 |     printf "An existing installation of ${RC_CLI_SHORT_NAME} ($LOCAL_CLI_VERSION) was found.\nLocation: ${RC_CLI_PATH}\n"
124 |     printf "You are installing ${RC_CLI_SHORT_NAME} ($RC_CLI_VERSION)\n"
125 |     if [ "$LOCAL_CLI_VERSION" = "$RC_CLI_VERSION" ] ; then
126 |       read -r -p "Would you like to reinstall ${RC_CLI_SHORT_NAME} ($RC_CLI_VERSION)? [y/N] " input
127 |     else
128 |       data_warn="WARNING! As of version 0.2.0, ${RC_CLI_SHORT_NAME} does not download the dataset for you in the installation process. The data in your current version of ${RC_CLI_SHORT_NAME} will be backed up temporarily and restored once the update process completes. If you want to update your data sources manually please refer to:\n1. https://github.com/MIT-CAVE/rc-cli#download-your-dataset\n2. https://registry.opendata.aws/amazon-last-mile-challenges/\n\n"
129 |       [ "${RC_CLI_VERSION:2:1}" = 2 ] && printf "${data_warn}"
130 |       read -r -p "Would you like to update to ${RC_CLI_SHORT_NAME} ($RC_CLI_VERSION)? [y/N] " input
131 |     fi
132 |     case ${input} in
133 |       [yY][eE][sS] | [yY])
134 |         printf "Moving your dataset to a temporary location... "
135 |         data_path_tmp="${HOME}/${RC_CLI_COMMAND}-data-$(uuidgen)"
136 |         mv "${RC_CLI_PATH}/${DATA_DIR}" "${data_path_tmp}"
137 |         printf "done\n"
138 |         printf "Removing old installation... "
139 |         rm -rf "${RC_CLI_PATH}"
140 |         printf "done\n"
141 |         ;;
142 |       [nN][oO] | [nN] | "")
143 |         err "Installation canceled"
144 |         exit 1
145 |         ;;
146 |       *)
147 |         err "Invalid input: Installation canceled."
148 |         exit 1
149 |         ;;
150 |     esac
151 |   fi
152 | }
153 | 
154 | install_new() { # Copy the needed files locally
155 |   printf "Creating application folder at '${RC_CLI_PATH}'... "
156 |   mkdir -p "${RC_CLI_PATH}"
157 |   printf "done\n"
158 |   printf "${CHARS_LINE}\n"
159 |   if [[ $1 = "--dev" ]]; then
160 |     CLONE_URL="$SSH_CLONE_URL"
161 |     INSTALL_PARAM="--dev"
162 |   else
163 |     clone_opts="--depth=1"
164 |     CLONE_URL="$HTTPS_CLONE_URL"
165 |     INSTALL_PARAM=""
166 |   fi
167 |   git clone "${CLONE_URL}" \
168 |     ${clone_opts} \
169 |     "${RC_CLI_PATH}" > /dev/null
170 |   if [ ! -d "${RC_CLI_PATH}" ]; then
171 |     err "Git Clone Failed. Installation Canceled"
172 |     [ -n "${data_path_tmp}" ] && printf "Your data was backed up to '${data_path_tmp}'.\n"
173 |     exit 1
174 |   else
175 |     if [ -n "${data_path_tmp}" ]; then
176 |       printf "Restoring data from previous installation... "
" 177 | mv "${data_path_tmp}" "${RC_CLI_PATH}/${DATA_DIR}" 178 | printf "done\n" 179 | fi 180 | printf "INSTALL_PARAM=\"${INSTALL_PARAM}\"\n" > "${RC_CLI_PATH}/CONFIG" 181 | fi 182 | } 183 | 184 | copy_compressed_data_down() { # Copy the needed data files locally 185 | # Takes three optional parameters (order matters) 186 | # EG: 187 | # copy_compressed_data_down URL LOCAL_PATH NEW_DIR_NAME 188 | new_dir_name="${3:-$compressed_folder_name}" 189 | printf "Copying data down from $1...\n" 190 | curl -L -o "${compressed_file_path}" "$1" --progress-bar 191 | printf "done\n" 192 | printf "Decompressing downloaded data...\n" 193 | if [[ "${compressed_file_type}" = "xz" ]]; then 194 | tar -xf "${compressed_file_path}" -C "$2" 195 | elif [[ "${compressed_file_type}" = "zip" ]]; then 196 | unzip -qq "${compressed_file_path}" -d "$2" 197 | fi 198 | rm "${compressed_file_path}" 199 | if [[ ! "${2}/${compressed_folder_name}" = "${2}/${new_dir_name}" ]]; then 200 | mv "${2}/${zip_folder_name}" "${2}/${new_dir_name}" 201 | fi 202 | if [ ! -d "${2}/${new_dir_name}" ]; then 203 | err "Unable to access data from ${1}. Installation Canceled" 204 | exit 1 205 | fi 206 | printf "done\n" 207 | 208 | } 209 | 210 | get_data() { # Copy the needed data files locally 211 | copy_compressed_data_down "$DATA_URL" "${RC_CLI_PATH}" "$DATA_DIR" 212 | printf "Setting data URL locally for future CLI Updates... " 213 | printf "DATA_URL=\"${DATA_URL}\"\n" >> "${RC_CLI_PATH}/CONFIG" 214 | printf "done\n" 215 | } 216 | 217 | check_args() { 218 | local cmd_ex="bash <(curl -s https://raw.githubusercontent.com/MIT-CAVE/rc-cli/main/install.sh)" 219 | if [[ $# -gt 0 && $1 != "--dev" ]]; then 220 | err "Too many arguments for CLI installation. Please only specify a '--dev' option if you want to work on the development of ${RC_CLI_SHORT_NAME}. Otherwise:\n${cmd_ex}" 221 | exit 1 222 | fi 223 | } 224 | 225 | add_to_path() { # Add the cli to a globally accessable path 226 | printf "${CHARS_LINE}\n" 227 | printf "Making '${RC_CLI_COMMAND}' globally accessable: \nCreating link from '${RC_CLI_PATH}/${RC_CLI_COMMAND}.sh' as '${BIN_DIR}/${RC_CLI_COMMAND}':\n" 228 | if [ ! $(ln -sf "${RC_CLI_PATH}/${RC_CLI_COMMAND}.sh" "${BIN_DIR}/${RC_CLI_COMMAND}") ]; then 229 | printf "WARNING! Super User privileges required to complete link! 
Using 'sudo'.\n" 230 | sudo ln -sf "${RC_CLI_PATH}/${RC_CLI_COMMAND}.sh" "${BIN_DIR}/${RC_CLI_COMMAND}" 231 | fi 232 | printf "done\n" 233 | } 234 | 235 | success_message() { # Send a success message to the user on successful installation 236 | printf "${CHARS_LINE}\n" 237 | printf "${RC_CLI_SHORT_NAME} (${RC_CLI_COMMAND}) has been successfully installed \n" 238 | printf "You can verify the installation with '${RC_CLI_COMMAND} version'\n" 239 | printf "To get started use '${RC_CLI_COMMAND} help'\n" 240 | } 241 | 242 | main() { 243 | check_args "$@" 244 | check_os 245 | # get_compressed_data_info "$@" 246 | # check_compression 247 | check_docker 248 | check_git 249 | check_previous_installation 250 | install_new "$@" 251 | # get_data 252 | add_to_path 253 | success_message 254 | } 255 | 256 | main "$@" 257 | -------------------------------------------------------------------------------- /lib/config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | readonly CHARS_LINE="============================" 3 | readonly RC_CLI_LONG_NAME="Routing Challenge CLI" 4 | readonly RC_CLI_SHORT_NAME="RC CLI" 5 | readonly RC_CLI_VERSION=$(<${RC_CLI_PATH}/VERSION) 6 | readonly RC_IMAGE_TAG="rc-cli" 7 | readonly TMP_DIR="/tmp" 8 | 9 | readonly APP_DEST_MNT="/home/app/data" 10 | 11 | readonly DATA_DIR="data" 12 | -------------------------------------------------------------------------------- /lib/datalib.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | readonly BSDTAR_BIN="bsdtar" 3 | readonly TAR_BIN="tar" 4 | readonly UNZIP_BIN="unzip" 5 | readonly MIN_TAR_VERSION="1.22" 6 | readonly MIN_BSDTAR_VERSION="0" # FIXME: Minimum version for which 'xz' compression is supported 7 | 8 | # Gets the available file archiver installed on 9 | # the system in a specific order of preference. 10 | # bsdtar (Mac) > tar (Unix) > (un)zip 11 | get_file_archiver() { 12 | if [[ -n $(which ${BSDTAR_BIN}) ]]; then 13 | printf "bsdtar" 14 | elif [[ -n $(which ${TAR_BIN}) ]]; then 15 | printf "tar" 16 | elif [[ -n $(which ${UNZIP_BIN}) ]]; then 17 | printf "unzip" 18 | fi 19 | } 20 | 21 | # Gets the URL of the data file to download. 22 | get_data_url() { 23 | local file_arch=$1 24 | case ${file_arch} in 25 | ${BSDTAR_BIN} | ${TAR_BIN}) 26 | printf "${DATA_URL_XZ}" 27 | ;; 28 | ${UNZIP_BIN}) 29 | printf "${DATA_URL_ZIP}" 30 | ;; 31 | *) 32 | excep::err "Could not find a URL compatible with the provided file archiver" 33 | ;; 34 | esac 35 | } 36 | 37 | # Gets the version of the given file archiver name. 38 | get_file_arch_version() { 39 | local file_arch=$1 40 | case ${file_arch} in 41 | ${BSDTAR_BIN}) 42 | printf "$(bsdtar --version | sed 's/\([a-z]\+\s\)\(.*\)\-.*/\2/g')" 43 | ;; 44 | ${TAR_BIN}) 45 | printf "$(tar --version | grep -m1 -o ").*" | sed "s/) //")" 46 | ;; 47 | ${UNZIP_BIN}) 48 | # TODO 49 | ;; 50 | *) 51 | excep::err "Error" 52 | ;; 53 | esac 54 | } 55 | 56 | check_version() { 57 | local prog_name="$1" 58 | local exit_code="$2" 59 | local err_str="$3" 60 | local min_ver="$4" 61 | local current_ver="$5" 62 | if [[ ! "$(printf '%s\n' "${min_ver}" "${current_ver}" | sort -V | head -n1)" == "${min_ver}" ]]; then 63 | excep::err "Your current ${prog_name} version (${current_ver}) is too old. 
${err_str}" 64 | [[ ${exit_code} -eq 1 ]] && exit 1 65 | fi 66 | } 67 | 68 | check_file_archiver() { 69 | local file_arch 70 | local file_arch_ver 71 | local min_ver 72 | local install_msg 73 | file_arch=$(get_file_archiver) 74 | if [[ -z ${file_arch} ]]; then 75 | excep::err "There is no compatible file archiver installed on your system.\nPlease install tar (preferably) or zip." 76 | exit 1 77 | fi 78 | # Validate file archiver version 79 | file_arch_ver=$(get_file_arch_version ${file_arch}) 80 | case ${file_arch} in 81 | ${BSDTAR_BIN} | ${TAR_BIN}) 82 | [[ ${file_arch} == "${TAR_BIN}" ]] \ 83 | && min_ver=${MIN_TAR_VERSION} \ 84 | || min_ver=${MIN_BSDTAR_VERSION} 85 | ;; 86 | ${UNZIP_BIN}) 87 | # TODO: 88 | # Check the compatibility with the unzip version 89 | # and the compression level used for the data. 90 | # excep::err "The data file you are installing with is not recognized. \nPlease install the ${RC_CLI_SHORT_NAME} with a 'xz' or 'ZIP' file." 91 | ;; 92 | *) # This should not happen unless there's a bug in get_file_archiver 93 | excep::err "The file archiver is not recognized" 94 | exit 1 95 | ;; 96 | esac 97 | install_msg="\nPlease install ${file_arch} version ${min_ver} or greater.\n" 98 | check_version ${file_arch} 1 "${install_msg}" ${min_ver} ${file_arch_ver} 99 | } 100 | 101 | # Shoutout to: 102 | # https://unix.stackexchange.com/a/450405 103 | # https://stackoverflow.com/a/39615292 104 | datalib::get_content_length() { 105 | local url=$1 106 | local redirect_sizes 107 | local size 108 | redirect_sizes="$(curl -sLI "${url}" | awk -v IGNORECASE=1 '/^Content-Length/ { print $2 }')" 109 | size=$(echo ${redirect_sizes##*$'\n'} | sed 's/\r$//') 110 | printf "${size}" 111 | } 112 | 113 | # Download the data file(s) from the given URL 114 | download_data() { 115 | local data_url=$1 116 | local f_name 117 | f_name="$(basename ${data_url})" 118 | # TODO: save to a rc-cli-$(uuidgen) directory 119 | local f_path="${TMP_DIR}/${f_name}" 120 | 121 | local tmp_dl_path="${TMP_DIR}/${f_name}" 122 | printf "Downloading data from ${data_url}...\n" >&2 123 | curl -L -o "${tmp_dl_path}" --progress-bar ${data_url} 124 | printf ${tmp_dl_path} 125 | } 126 | 127 | # Checks if the integrity of a downloaded file is compromised. 128 | # TODO 129 | check_file_integrity() { 130 | local f_path=$1 131 | if [[ -n "" ]]; then 132 | excep::err "The file '${f_path}' is corrupted" 133 | exit 1 134 | fi 135 | } 136 | 137 | # Decompress a given file and move its contents to a destination directory. 138 | decompress_and_load() { 139 | local f_path=$1 140 | local dest_path=$2 141 | 142 | if [[ ! ${dest_path} -ef ${RC_CLI_PATH} ]]; then 143 | rm -rf ${dest_path} # Remove old data 144 | mkdir -p ${dest_path} 145 | fi 146 | # Since the compressed file contains a 'data' directory 147 | local base_path 148 | base_path=$(dirname ${dest_path}) 149 | printf "\nDecompressing data... " 150 | case "${f_path##*.}" in 151 | xz) 152 | tar -xf ${f_path} -C ${base_path} 153 | ;; 154 | zip) 155 | unzip -qq ${f_path} -d ${base_path} 156 | ;; 157 | esac 158 | printf "done\n\n" 159 | rm ${f_path} 160 | } 161 | 162 | # Validate the data URL. 163 | # BUG: Check return value. See: https://stackoverflow.com/q/5431909 164 | valid_data_url() { 165 | local data_url=$1 166 | local size 167 | size=$(datalib::get_content_length ${data_url}) 168 | [[ $((${size})) -gt 0 ]] 169 | } 170 | 171 | # Check that the given URL is valid. 
172 | datalib::check_data_url() { 173 | local data_url=$1 174 | size=$(datalib::get_content_length ${data_url}) 175 | if [[ $((${size})) -eq 0 ]]; then 176 | excep::err "The URL '${data_url}' is invalid" 177 | exit 1 178 | fi 179 | } 180 | 181 | # Prompts for a DATA_URL if the given URL is invalid. 182 | data_url_prompt() { 183 | local src_cmd=$1 184 | local data_url=$2 185 | local input=${data_url} 186 | 187 | local size 188 | size=$(datalib::get_content_length ${input}) 189 | # [[ $(valid_data_url ${input}) -eq 0 ]] && echo "Yes" >&2 || echo "No" >&2 190 | while [[ -n ${input} && $((${size})) -eq 0 ]]; do 191 | # Prompt confirmation to overwrite or rename image 192 | printf "WARNING! ${src_cmd}: could not find a valid data in '${input}'\n" >&2 193 | read -r -p "Enter a new URL (or a blank input to cancel): " input 194 | size=$(datalib::get_content_length ${input}) 195 | [[ $((${size})) -gt 0 ]] && data_url=${input} || printf "\n" >&2 196 | done 197 | printf "${data_url}" 198 | } 199 | 200 | # Saves the DATA_URL and INSTALL_PARAM to the CONFIG file. 201 | save_config() { 202 | local data_url=$1 203 | printf "Setting data URL locally for future CLI Updates... " 204 | printf "DATA_URL=\"${data_url}\"\n" > "${RC_CLI_PATH}/CONFIG" 205 | [[ -n ${INSTALL_PARAM} ]] \ 206 | && printf "INSTALL_PARAM=\"${INSTALL_PARAM}\"\n" >> "${RC_CLI_PATH}/CONFIG" 207 | printf "done\n" 208 | } 209 | 210 | # Load the CONFIG file. If it doesn't exist, it is created. 211 | datalib::load_or_create_config() { 212 | local file_arch 213 | file_arch=$(get_file_archiver) 214 | check_file_archiver ${file_arch} 215 | local data_url 216 | data_url=$(get_data_url ${file_arch}) 217 | if [[ ! -f "${RC_CLI_PATH}/CONFIG" ]]; then 218 | printf "\nWARNING! Could not find a CONFIG file.\n" 219 | printf "A CONFIG file will be created now... " 220 | printf "DATA_URL=\"${data_url}\"\n" > "${RC_CLI_PATH}/CONFIG" 221 | printf "done\n" 222 | fi 223 | # shellcheck source=./CONFIG 224 | . "${RC_CLI_PATH}/CONFIG" 225 | } 226 | 227 | datalib::update_data() { 228 | local data_url=$1 229 | local dest_path=$2 230 | 231 | local file_arch 232 | file_arch=$(get_file_archiver) 233 | check_file_archiver ${file_arch} 234 | 235 | local f_path 236 | f_path=$(download_data ${data_url}) 237 | check_file_integrity ${f_path} 238 | decompress_and_load ${f_path} ${dest_path} 239 | 240 | save_config ${data_url} 241 | } 242 | -------------------------------------------------------------------------------- /lib/docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # A library of Docker-related functions. 4 | 5 | # Check if the Docker daemon is running. 6 | docker::check_status() { 7 | if ! docker ps > /dev/null; then 8 | excep::err "cannot connect to the Docker daemon. Is the Docker daemon running?" 9 | exit 1 10 | fi 11 | } 12 | 13 | # Check if the given Docker image is already built in the host 14 | docker::is_image_built() { 15 | local image_and_tag=$1 16 | docker image inspect ${image_and_tag} &> /dev/null 17 | } 18 | -------------------------------------------------------------------------------- /lib/excep.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ####################################### 4 | # Display an error message when the user input is invalid. 
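# For example, 'excep::err "missing command operand"' writes
# "rc-cli: missing command operand" to stderr when $0 resolves to the
# 'rc-cli' symlink.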
5 | # Globals: 6 | # None 7 | # Arguments: 8 | # None 9 | # Returns: 10 | # None 11 | ####################################### 12 | excep::err() { 13 | printf "$(basename $0): $1\n" >&2 14 | } 15 | -------------------------------------------------------------------------------- /lib/utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # A library of util functions. 4 | 5 | # Convert string from kebab case to snake case. 6 | utils::kebab_to_snake() { 7 | echo $1 | sed s/-/_/ 8 | } 9 | 10 | # Get the current date and time expressed according to ISO 8601. 11 | utils::timestamp() { 12 | date +"%Y-%m-%dT%H:%M:%S" 13 | } 14 | 15 | # Convert a number of seconds to the ISO 8601 standard. 16 | utils::secs_to_iso_8601() { 17 | printf "%dh:%dm:%ds" $(($1 / 3600)) $(($1 % 3600 / 60)) $(($1 % 60)) 18 | } 19 | -------------------------------------------------------------------------------- /license: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 MIT Center for Transportation & Logistics 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /rc-cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # A CLI for the Routing Challenge. 4 | 5 | # TODO(luisvasq): Set -u globally and fix all unbound variables 6 | 7 | # Constants 8 | readonly VALID_NAME_PATTERN="^[abcdefghijklmnopqrstuvwxyz0-9_-]+$" 9 | readonly INVALID_NAME_PATTERN_1="^[-_]+.*$" 10 | readonly INVALID_NAME_PATTERN_2="^.*[-_]+$" 11 | readonly INVALID_NAME_PATTERN_3="(-_)+" 12 | readonly INVALID_NAME_PATTERN_4="(_-)+" 13 | readonly RC_CLI_DEFAULT_TEMPLATE="rc_python" 14 | readonly RC_CONFIGURE_APP_NAME="configure_app" 15 | readonly RC_SCORING_IMAGE="rc-scoring" 16 | readonly RC_TEST_IMAGE="rc-test" 17 | readonly NO_LOGS="no_logs" 18 | readonly ROOT_LOGS="root_logs" 19 | # Both constant and environment 20 | declare -xr RC_CLI_PATH="${HOME}/.rc-cli" 21 | 22 | # Import libraries 23 | # shellcheck source=lib/config.sh 24 | . ${RC_CLI_PATH}/lib/config.sh 25 | # shellcheck source=lib/excep.sh 26 | . ${RC_CLI_PATH}/lib/excep.sh 27 | # shellcheck source=lib/docker.sh 28 | . ${RC_CLI_PATH}/lib/docker.sh 29 | # shellcheck source=lib/utils.sh 30 | . 
${RC_CLI_PATH}/lib/utils.sh 31 | 32 | # Determine if the current directory contains a valid RC app 33 | valid_app_dir() { 34 | [[ 35 | -f Dockerfile \ 36 | && -f model_apply.sh \ 37 | && -f model_build.sh \ 38 | && -d src \ 39 | && -d snapshots \ 40 | && -d data/model_apply_inputs \ 41 | && -d data/model_apply_outputs \ 42 | && -d data/model_build_inputs \ 43 | && -d data/model_build_outputs \ 44 | && -d data/model_score_inputs \ 45 | && -d data/model_score_outputs \ 46 | && -d data/model_score_timings 47 | ]] 48 | } 49 | 50 | is_rc_image_built() { 51 | docker::is_image_built $1:${RC_IMAGE_TAG} 52 | } 53 | 54 | get_app_name() { 55 | printf "$(basename "$(pwd)")" 56 | } 57 | 58 | valid_app_name() { 59 | local app_name=$1 60 | if [[ ${#app_name} -lt 2 || ${#app_name} -gt 255 ]]; then 61 | printf "The app name needs to be two to 255 characters" 62 | elif [[ ! ${app_name} =~ ${VALID_NAME_PATTERN} ]]; then 63 | printf "The app name can only contain lowercase letters, numbers, hyphens (-), and underscores (_)" 64 | elif [[ ${app_name} =~ ${INVALID_NAME_PATTERN_1} ]]; then 65 | printf "The app name cannot start with a hyphen (-) or an underscore (_)" 66 | elif [[ ${app_name} =~ ${INVALID_NAME_PATTERN_2} ]]; then 67 | printf "The app name cannot end with a hyphen (-) or an underscore (_)" 68 | elif [[ ${app_name} =~ ${INVALID_NAME_PATTERN_3} ]]; then 69 | printf "The app name cannot contain a hyphen (-) followed by an underscore (_)" 70 | elif [[ ${app_name} =~ ${INVALID_NAME_PATTERN_4} ]]; then 71 | printf "The app name cannot contain an underscore (_) followed by a hyphen (-)" 72 | fi 73 | } 74 | 75 | # Determine if the given app name complies with Docker repository names. 76 | check_app_name() { 77 | local app_name_err 78 | app_name_err=$(valid_app_name $1) 79 | if [[ -n ${app_name_err} ]]; then 80 | excep::err "${app_name_err}" 81 | exit 1 82 | fi 83 | } 84 | 85 | # Check that the CLI is run from a valid app directory. 86 | check_app_dir() { 87 | if ! valid_app_dir; then 88 | excep::err "Error: You are not in a valid app directory. Make sure to cd into an app directory that you created with the rc-cli." 89 | exit 1 90 | fi 91 | } 92 | 93 | # Foolproof basic setup to minimize user-side errors 94 | foolproof_setup() { 95 | local scripts 96 | scripts="$(ls *.sh) $(find src/ -type f -name "*.sh")" 97 | for sh_file in ${scripts}; do 98 | # Force chmod to 755 99 | chmod +x ${sh_file} 100 | # Force line endings to LF 101 | awk 'BEGIN{RS="^$";ORS="";getline;gsub("\r","");print>ARGV[1]}' ${sh_file} 102 | done 103 | } 104 | 105 | # Run basic checks on requirements for some commands. 106 | basic_checks() { 107 | check_app_dir 108 | check_app_name "$(get_app_name)" 109 | docker::check_status 110 | foolproof_setup 111 | } 112 | 113 | get_templates() { 114 | printf "$(ls -d ${RC_CLI_PATH}/templates/*/ | awk -F'/' ' {print $(NF-1)} ')" 115 | } 116 | 117 | get_new_template_string() { 118 | printf "$(get_templates)" | sed 's/\([^\n]*\)/- \1/' 119 | } 120 | 121 | get_help_template_string() { 122 | printf "$(get_templates)" | sed 's/\([^\n]*\)/ - \1/' 123 | } 124 | 125 | # Strips off any leading directory components. 126 | get_snapshot() { 127 | # Allows easy autocompletion in bash using created folder names 128 | # Example: my-image/ -> my-image, path/to/snapshot 129 | printf "$(basename ${1:-''})" 130 | } 131 | 132 | check_snapshot() { 133 | local snapshot=$1 134 | local f_name 135 | f_name="$(get_snapshot ${snapshot})" 136 | if [[ ! 
-f "snapshots/${f_name}/${f_name}.tar.gz" ]]; then 137 | excep::err "${f_name}: snapshot not found" 138 | exit 1 139 | fi 140 | } 141 | 142 | # Prompts for a 'snapshot' name if the given snapshot exists 143 | image_name_prompt() { 144 | local src_cmd=$1 145 | local snapshot=$2 146 | 147 | local app_name_err 148 | local input=${snapshot} 149 | app_name_err=$(valid_app_name ${input}) 150 | while [[ -n ${app_name_err} || -f "snapshots/${input}/${input}.tar.gz" ]]; do 151 | if [[ -z ${app_name_err} ]]; then 152 | # Prompt confirmation to overwrite or rename image 153 | printf "WARNING! ${src_cmd}: Snapshot with name '${snapshot}' exists\n" >&2 154 | read -r -p "Enter a new name or overwrite [${snapshot}]: " input 155 | else 156 | printf "WARNING! ${src_cmd}: ${app_name_err}\n" >&2 157 | read -r -p "Enter a new name: " input 158 | fi 159 | app_name_err=$(valid_app_name ${input}) 160 | [[ -z ${app_name_err} && -n ${input} ]] && snapshot=${input} 161 | printf "\n" >&2 162 | done 163 | printf ${snapshot} 164 | } 165 | 166 | select_template() { 167 | local template=$1 168 | 169 | local rc_templates 170 | rc_templates="$(get_templates)" 171 | while ! printf "${rc_templates}" | grep -w -q "${template}"; do 172 | # Prompt confirmation to select proper template 173 | if [[ -z ${template} ]]; then 174 | printf "WARNING! new: A template was not provided:\n" >&2 175 | else 176 | printf "WARNING! new: The supplied template (${template}) does not exist.\n" >&2 177 | fi 178 | printf "The following are valid templates:\n$(get_new_template_string)\n" >&2 179 | template="${RC_CLI_DEFAULT_TEMPLATE}" 180 | read -r -p "Enter your selection [${template}]: " input 181 | [[ -n ${input} ]] && template=${input} 182 | printf "\n" >&2 183 | done 184 | printf ${template} 185 | } 186 | 187 | save_scoring_image() { 188 | printf "Saving the '${RC_SCORING_IMAGE}' image... " 189 | docker save ${RC_SCORING_IMAGE}:${RC_IMAGE_TAG} | gzip > "${RC_CLI_PATH}/scoring/${RC_SCORING_IMAGE}.tar.gz" 190 | printf "done\n\n" 191 | } 192 | 193 | ####################################### 194 | # Build a Docker image based on the given arguments. 195 | # Globals: 196 | # None 197 | # Arguments: 198 | # src_cmd, image_name, context, build_opts 199 | # Returns: 200 | # None 201 | ####################################### 202 | configure_image() { 203 | local src_cmd=$1 204 | local image_name=$2 205 | local context="${3:-.}" 206 | local build_opts=${@:4} # FIXME 207 | 208 | local f_name 209 | local out_file 210 | f_name="$(utils::kebab_to_snake ${src_cmd})" 211 | if [[ ${f_name} == "${ROOT_LOGS}" ]]; then 212 | make_root_logs 213 | [[ -d "${RC_CLI_PATH}/logs/" ]] \ 214 | && out_file="${RC_CLI_PATH}/logs/${image_name}_configure_$(utils::timestamp).log" \ 215 | || out_file="/dev/null" 216 | elif [[ ${f_name} != "${NO_LOGS}" ]]; then 217 | make_logs ${f_name} 218 | [[ -d "logs/${f_name}" ]] \ 219 | && out_file="logs/${f_name}/${image_name}_configure_$(utils::timestamp).log" \ 220 | || out_file="/dev/null" 221 | else 222 | out_file="/dev/null" 223 | fi 224 | printf "${CHARS_LINE}\n" 225 | printf "Configure Image [${image_name}]:\n\n" 226 | printf "Configuring the '${image_name}' image... " 227 | docker rmi ${image_name}:${RC_IMAGE_TAG} &> /dev/null 228 | docker build --file ${context}/Dockerfile --tag ${image_name}:${RC_IMAGE_TAG} \ 229 | ${build_opts} ${context} &> ${out_file} 230 | printf "done\n\n" 231 | } 232 | 233 | # Load the Docker image for a given snapshot name. 
234 | load_snapshot() {
235 |   local snapshot=$1
236 |   local old_image_tag
237 |   docker rmi ${snapshot}:${RC_IMAGE_TAG} &> /dev/null
238 |   load_stdout=$(docker load --quiet --input "snapshots/${snapshot}/${snapshot}.tar.gz" 2> /dev/null)
239 |   old_image_tag="${load_stdout:14}"
240 |   # Force the image tag to be that of the tar archive filename.
241 |   if [[ "${old_image_tag}" != "${snapshot}:${RC_IMAGE_TAG}" ]]; then
242 |     docker tag ${old_image_tag} ${snapshot}:${RC_IMAGE_TAG}
243 |     docker rmi ${old_image_tag} &> /dev/null
244 |   fi
245 | }
246 | 
247 | # Get the relative path of the data directory based
248 | # on the existence or not of a given 'snapshot' arg.
249 | get_data_context() {
250 |   local snapshot=$1
251 |   [[ -z ${snapshot} ]] && printf "data" || printf "snapshots/${snapshot}/data"
252 | }
253 | 
254 | # Same as 'get_data_context', but returns the absolute path.
255 | get_data_context_abs() {
256 |   printf "$(pwd)/$(get_data_context $1)"
257 | }
258 | 
259 | # Save a Docker image to the 'snapshots' directory.
260 | save_image() {
261 |   local image_name=$1
262 | 
263 |   printf "${CHARS_LINE}\n"
264 |   printf "Save Image [${image_name}]:\n\n"
265 |   printf "Saving the '${image_name}' image to 'snapshots'... "
266 |   snapshot_path="snapshots/${image_name}"
267 |   mkdir -p ${snapshot_path}
268 |   cp -R "${RC_CLI_PATH}/data" "${snapshot_path}/data"
269 |   docker save ${image_name}:${RC_IMAGE_TAG} \
270 |     | gzip > "${snapshot_path}/${image_name}.tar.gz"
271 |   printf "done\n\n"
272 | }
273 | 
274 | build_if_missing() { # Build the image if it is missing, using the 'configure-app' command
275 |   if ! is_rc_image_built $1; then
276 |     printf "${CHARS_LINE}\n"
277 |     printf "No prebuilt image exists yet. Configuring Image with 'configure-app'\n\n"
278 |     configure_image ${RC_CONFIGURE_APP_NAME} ${1}
279 |   fi
280 | }
281 | 
282 | #######################################
283 | # Retrieve a clean copy of 'data' from the 'rc-cli' sources.
284 | # Globals:
285 | #   None
286 | # Arguments:
287 | #   src_cmd, data_path
288 | # Returns:
289 | #   None
290 | #######################################
291 | reset_data_prompt() {
292 |   local src_cmd=$1
293 |   local data_path=$2
294 | 
295 |   printf "WARNING! ${src_cmd}: This will reset the data directory at '${data_path}' to the initial data state\n"
296 |   read -r -p "Are you sure you want to continue? [y/N] " input
297 |   case ${input} in
298 |     [yY][eE][sS] | [yY])
299 |       printf "Resetting the data... "
300 |       rm -rf "${data_path}"
301 |       cp -R "${RC_CLI_PATH}/data" "${data_path}"
302 |       printf "done\n"
303 |       ;;
304 |     [nN][oO] | [nN] | "")
305 |       printf "${src_cmd} was canceled by the user\n"
306 |       exit # Required to prevent subsequent script commands from running
307 |       ;;
308 |     *)
309 |       excep::err "invalid input: The ${src_cmd} was canceled"
310 |       exit 1
311 |       ;;
312 |   esac
313 | }
314 | 
315 | get_status() {
316 |   [[ -z $1 ]] \
317 |     && printf "success" \
318 |     || printf "failure" # : $(printf $1 | sed s/\"/\"/)" # TODO: handle newlines
319 | }
320 | 
321 | #######################################
322 | # Send the output and time stats of the running app container
323 | # to the standard output and a given output file.
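# For example (hypothetical values), 'print_stdout_stats 90 "" out.json'
# writes '{ "time": 90, "status": "success" }' to out.json and prints
# "Time Elapsed: 0h:1m:30s" to stdout.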
324 | # Globals: 325 | # None 326 | # Arguments: 327 | # secs, error, out_file 328 | # Returns: 329 | # None 330 | ####################################### 331 | print_stdout_stats() { 332 | local secs=$1 333 | local error=$2 334 | local out_file=$3 335 | printf "{ \"time\": ${secs}, \"status\": \"$(get_status ${error})\" }" > ${out_file} 336 | printf "Time Elapsed: $(utils::secs_to_iso_8601 ${secs})\n" 337 | printf "\n${CHARS_LINE}\n" 338 | } 339 | 340 | ####################################### 341 | # Run a Docker image for the specified 'model-*' command 342 | # Globals: 343 | # None 344 | # Arguments: 345 | # src_cmd, image_type, image_name, src_mnt, run_opts 346 | # Returns: 347 | # None 348 | ####################################### 349 | run_app_image() { 350 | local src_cmd=$1 351 | local image_type=$2 352 | local image_name=$3 353 | local src_mnt=$4 354 | local run_opts=${@:5} 355 | 356 | local f_name 357 | local entrypoint 358 | local cmd 359 | f_name="$(utils::kebab_to_snake ${src_cmd})" 360 | local script="${f_name}.sh" 361 | if [[ ${image_type} == "Snapshot" ]]; then 362 | entrypoint="--entrypoint ${script}" 363 | cmd="" 364 | else 365 | entrypoint="" 366 | cmd="${script}" 367 | run_opts="${run_opts} --volume $(pwd)/src:/home/app/src --volume $(pwd)/${script}:/home/app/${script}" 368 | fi 369 | 370 | printf "${CHARS_LINE}\n" 371 | printf "Running ${image_type} [${image_name}] (${src_cmd}):\n\n" 372 | start_time=$(date +%s) 373 | local log_file 374 | log_file="logs/${f_name}/${image_name}_$(utils::timestamp).log" 375 | # TODO: save to a rc-cli-$(uuidgen) directory 376 | local stderr_file="${TMP_DIR}/rc_cli_${f_name}_error" 377 | 378 | docker run --rm ${entrypoint} ${run_opts} \ 379 | --volume ${src_mnt}/${f_name}_inputs:${APP_DEST_MNT}/${f_name}_inputs:ro \ 380 | --volume ${src_mnt}/${f_name}_outputs:${APP_DEST_MNT}/${f_name}_outputs \ 381 | ${image_name}:${RC_IMAGE_TAG} ${cmd} 2>${stderr_file} | tee ${log_file} 382 | error=$(<${stderr_file}) 383 | echo ${error} | tee -a ${log_file} 384 | secs=$(($(date +%s) - start_time)) 385 | print_stdout_stats "${secs}" "${error}" \ 386 | "${src_mnt}/model_score_timings/${f_name}_time.json" 387 | } 388 | 389 | ####################################### 390 | # Run a production test with the '${RC_TEST_IMAGE}' 391 | # Globals: 392 | # None 393 | # Arguments: 394 | # src_cmd, image_name, data_path 395 | # Returns: 396 | # None 397 | ####################################### 398 | run_test_image() { 399 | local src_cmd=$1 400 | local image_name=$2 401 | local data_path=$3 402 | 403 | local src_mnt 404 | local src_mnt_image 405 | local image_file="${image_name}.tar.gz" 406 | local scoring_image="${RC_SCORING_IMAGE}.tar.gz" 407 | src_mnt="$(pwd)/${data_path}" 408 | 409 | # Check if the 'snapshot' argument was not specified, i.e. 410 | # "get_data_context $2" in 'production-test' returned 'data'. 
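  # e.g. a bare 'rc-cli production-test' mounts the freshly saved image from
  # ${TMP_DIR}, whereas 'rc-cli production-test my-snapshot' (hypothetical
  # snapshot name) mounts 'snapshots/my-snapshot/my-snapshot.tar.gz' instead.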
411 | [[ ${data_path} == 'data' ]] \ 412 | && src_mnt_image="${TMP_DIR}/${image_file}" \ 413 | || src_mnt_image="$(pwd)/snapshots/${image_name}/${image_file}" 414 | printf "${src_cmd}: The data at '${data_path}' has been reset to the initial state\n\n" 415 | printf "${CHARS_LINE}\n" 416 | printf "Preparing Image [${image_name}] to Run With [${RC_TEST_IMAGE}]:\n\n" 417 | 418 | docker run --privileged --rm \ 419 | --env IMAGE_FILE=${image_file} \ 420 | --env SCORING_IMAGE=${scoring_image} \ 421 | --volume "${RC_CLI_PATH}/scoring/${scoring_image}:/mnt/${scoring_image}:ro" \ 422 | --volume "${src_mnt_image}:/mnt/${image_file}:ro" \ 423 | --volume "${src_mnt}/model_build_inputs:/data/model_build_inputs:ro" \ 424 | --volume "${src_mnt}/model_build_outputs:/data/model_build_outputs" \ 425 | --volume "${src_mnt}/model_apply_inputs:/data/model_apply_inputs:ro" \ 426 | --volume "${src_mnt}/model_apply_outputs:/data/model_apply_outputs" \ 427 | --volume "${src_mnt}/model_score_inputs:/data/model_score_inputs:ro" \ 428 | --volume "${src_mnt}/model_score_outputs:/data/model_score_outputs" \ 429 | --volume "${src_mnt}/model_score_timings:/data/model_score_timings" \ 430 | ${RC_TEST_IMAGE}:${RC_IMAGE_TAG} 2>&1 \ 431 | | tee "logs/$(utils::kebab_to_snake ${src_cmd})/${image_name}_run_$(utils::timestamp).log" 432 | } 433 | 434 | ####################################### 435 | # Run the scoring Docker image for the model. 436 | # Globals: 437 | # None 438 | # Arguments: 439 | # src_cmd, image_name, data_path 440 | # Returns: 441 | # None 442 | ####################################### 443 | run_scoring_image() { 444 | local src_cmd=$1 445 | local app_name=$2 446 | local src_mnt=$3 447 | 448 | printf "${CHARS_LINE}\n" 449 | printf "Running the Scoring Image [${RC_SCORING_IMAGE}]:\n\n" 450 | docker run --rm \ 451 | --volume "${src_mnt}/model_apply_inputs:${APP_DEST_MNT}/model_apply_inputs:ro" \ 452 | --volume "${src_mnt}/model_apply_outputs:${APP_DEST_MNT}/model_apply_outputs:ro" \ 453 | --volume "${src_mnt}/model_score_inputs:${APP_DEST_MNT}/model_score_inputs:ro" \ 454 | --volume "${src_mnt}/model_score_timings:${APP_DEST_MNT}/model_score_timings:ro" \ 455 | --volume "${src_mnt}/model_score_outputs:${APP_DEST_MNT}/model_score_outputs" \ 456 | ${RC_SCORING_IMAGE}:${RC_IMAGE_TAG} 2>&1 \ 457 | | tee "logs/$(utils::kebab_to_snake ${src_cmd})/${app_name}_$(utils::timestamp).log" 458 | printf "\n${CHARS_LINE}\n" 459 | } 460 | 461 | make_logs() { # Ensure the necessary log file structure for the calling command 462 | mkdir -p "logs/$(utils::kebab_to_snake $1)" 463 | } 464 | 465 | make_root_logs() { # Ensure the necessary log file structure for the calling command 466 | mkdir -p "${RC_CLI_PATH}/logs/" 467 | } 468 | 469 | # Single main function 470 | main() { 471 | if [[ $# -lt 1 ]]; then 472 | excep::err "missing command operand" 473 | exit 1 474 | elif [[ 475 | $# -gt 2 \ 476 | && $1 != "new-app" \ 477 | && $1 != "new" \ 478 | && $1 != "app" \ 479 | && $1 != 'na' \ 480 | ]]; then 481 | excep::err "Too many arguments" 482 | exit 1 483 | fi 484 | 485 | # Select the command 486 | case $1 in 487 | new-app | new | app | na) 488 | # Create a new app based on a template 489 | if [[ $# -lt 2 ]]; then 490 | excep::err "Missing arguments. 
Try using:\nrc-cli help" 491 | exit 1 492 | elif [[ -d "$2" ]]; then 493 | excep::err "Cannot create app '$2': This folder already exists in the current directory" 494 | exit 1 495 | fi 496 | check_app_name $2 497 | 498 | template=$(select_template ${3:-"None Provided"}) 499 | template_path="${RC_CLI_PATH}/templates/${template}" 500 | cp -R "${template_path}" "$2" 501 | cp "${RC_CLI_PATH}/templates/README.md" "$2" 502 | cp "${RC_CLI_PATH}/templates/custom_dev_stack.md" "$2" 503 | cp "${RC_CLI_PATH}/templates/data_structures.md" "$2" 504 | cp -R "${RC_CLI_PATH}/data" "$2" 505 | chmod +x $(echo "$2/*.sh") 506 | [[ -z $3 ]] && optional="by default " 507 | printf "the '${template}' template has been created ${optional}at '$(pwd)/$2'\n" 508 | ;; 509 | 510 | save-snapshot | save | snapshot | ss) 511 | # Build the app image and save it to the 'snapshots' directory 512 | cmd="save-snapshot" 513 | basic_checks 514 | snapshot="$(basename ${2:-''})" 515 | [[ -z ${snapshot} ]] && tmp_name=$(get_app_name) || tmp_name=${snapshot} 516 | printf "${CHARS_LINE}\n" 517 | printf "Save Precheck for App [${tmp_name}]:\n\n" 518 | image_name=$(image_name_prompt ${cmd} ${tmp_name}) 519 | printf "Save Precheck Complete\n\n" 520 | configure_image ${RC_CONFIGURE_APP_NAME} ${image_name} 521 | save_image ${image_name} 522 | printf "${CHARS_LINE}\n" 523 | ;; 524 | 525 | model-build | build | mb | model-apply | apply | ma) 526 | # Build and run the 'model-[build,apply].sh' script 527 | [[ $1 == "model-build" || $1 == "build" || $1 == "mb" ]] \ 528 | && cmd="model-build" \ 529 | || cmd="model-apply" 530 | make_logs ${cmd} 531 | basic_checks 532 | 533 | if [[ -z $2 ]]; then 534 | local app_name 535 | app_name=$(get_app_name) 536 | build_if_missing "${app_name}" 537 | image_name="${app_name}" 538 | image_type="App" 539 | src_mnt="$(pwd)/data" 540 | else 541 | check_snapshot $2 542 | image_name=$(get_snapshot $2) 543 | load_snapshot ${image_name} 544 | image_type="Snapshot" 545 | src_mnt=$(get_data_context_abs $2) 546 | fi 547 | [[ ${cmd} == "model-apply" ]] \ 548 | && run_opts="--volume ${src_mnt}/model_build_outputs:${APP_DEST_MNT}/model_build_outputs:ro" 549 | run_app_image ${cmd} ${image_type} ${image_name} ${src_mnt} ${run_opts} 550 | ;; 551 | 552 | configure-app | configure | ca) 553 | # Rebuild a Docker image for the current app 554 | if [[ $# -gt 1 ]]; then 555 | excep::err "Too many arguments" 556 | exit 1 557 | fi 558 | cmd="configure-app" 559 | basic_checks 560 | configure_image ${RC_CONFIGURE_APP_NAME} "$(get_app_name)" 561 | printf "${CHARS_LINE}\n" 562 | ;; 563 | 564 | production-test | production | test | pt) 565 | # Run the tests with the '${RC_TEST_IMAGE}' 566 | basic_checks 567 | [[ -n $2 ]] && check_snapshot $2 # Sanity check 568 | 569 | cmd="production-test" 570 | data_path=$(get_data_context $2) 571 | reset_data_prompt ${cmd} ${data_path} 572 | printf '\n' # Improve formatting 573 | 574 | make_logs ${cmd} 575 | 576 | if [[ -z $2 ]]; then 577 | image_name=$(get_app_name) 578 | configure_image ${RC_CONFIGURE_APP_NAME} ${image_name} 579 | docker save ${image_name}:${RC_IMAGE_TAG} | gzip > "${TMP_DIR}/${image_name}.tar.gz" 580 | else 581 | image_name=$(get_snapshot $2) 582 | load_snapshot ${image_name} 583 | fi 584 | 585 | # Saving time if some images exist. 586 | if ! is_rc_image_built ${RC_TEST_IMAGE}; then 587 | configure_image ${NO_LOGS} ${RC_TEST_IMAGE} ${RC_CLI_PATH} 588 | fi 589 | if ! 
is_rc_image_built ${RC_SCORING_IMAGE}; then 590 | configure_image ${NO_LOGS} ${RC_SCORING_IMAGE} ${RC_CLI_PATH}/scoring 591 | fi 592 | if [[ ! -f "${RC_CLI_PATH}/scoring/${RC_SCORING_IMAGE}.tar.gz" ]]; then 593 | save_scoring_image 594 | fi 595 | run_test_image ${cmd} ${image_name} ${data_path} 596 | printf "\n${CHARS_LINE}\n" 597 | ;; 598 | 599 | model-score | score | ms) 600 | # Calculate the score for the app or the specified snapshot. 601 | basic_checks 602 | [[ -z $2 ]] \ 603 | && image_name=$(get_app_name) \ 604 | || image_name=$(get_snapshot $2) 605 | # Validate that build and apply have happened by checking for timings. 606 | src_mnt=$(get_data_context_abs $2) 607 | model_build_time="${src_mnt}/model_score_timings/model_build_time.json" 608 | model_apply_time="${src_mnt}/model_score_timings/model_apply_time.json" 609 | if [[ ! -f "${model_build_time}" ]]; then 610 | excep::err "'${model_build_time}': file not found" 611 | exit 1 612 | elif [[ ! -f "${model_apply_time}" ]]; then 613 | excep::err "'${model_apply_time}': file not found" 614 | exit 1 615 | fi 616 | cmd="model-score" 617 | make_logs ${cmd} 618 | 619 | if ! is_rc_image_built ${RC_SCORING_IMAGE}; then 620 | configure_image ${NO_LOGS} ${RC_SCORING_IMAGE} ${RC_CLI_PATH}/scoring 621 | fi 622 | run_scoring_image ${cmd} ${image_name} ${src_mnt} 623 | ;; 624 | 625 | enter-app | model-debug | debug | md | ea) 626 | # Enable an interactive shell at runtime to debug the app container. 627 | cmd="enter-app" 628 | # make_logs ${cmd} 629 | basic_checks 630 | if [[ -z $2 ]]; then 631 | local app_name 632 | app_name=$(get_app_name) 633 | build_if_missing "${app_name}" 634 | image_name="${app_name}" 635 | run_opts="--volume $(pwd)/src:/home/app/src" 636 | for f in $(pwd)/*.sh; do 637 | run_opts="$run_opts --volume $(pwd)/$(basename ${f}):/home/app/$(basename ${f})" 638 | done 639 | else 640 | check_snapshot $2 641 | image_name=$(get_snapshot $2) 642 | load_snapshot ${image_name} 643 | run_opts="" 644 | fi 645 | # Find all available shells in the container and choose bash if available 646 | valid_sh=$(docker run --rm --entrypoint="" ${image_name}:${RC_IMAGE_TAG} cat /etc/shells) 647 | [[ -n $(echo ${valid_sh} | grep "/bin/bash") ]] \ 648 | && app_sh="/bin/bash" || app_sh="/bin/sh" 649 | printf "${CHARS_LINE}\n" 650 | printf "Entering your app:\n" 651 | printf " - You are in the equivalent of your current app directory inside of your app's Docker container\n" 652 | printf " - You can test your code directly in this environment\n" 653 | printf " - E.g. try running:\n" 654 | printf " ${CHARS_LINE}\n" 655 | printf " ./model_build.sh\n" 656 | printf " ${CHARS_LINE}\n" 657 | printf " - Use the 'exit' command to exit the current shell\n" 658 | printf "\nEnabling an interactive shell in the Docker image...\n" 659 | src_mnt=$(get_data_context_abs $2) 660 | docker run --rm --entrypoint="" --user root ${run_opts} \ 661 | --volume "${src_mnt}/model_build_inputs:${APP_DEST_MNT}/model_build_inputs:ro" \ 662 | --volume "${src_mnt}/model_build_outputs:${APP_DEST_MNT}/model_build_outputs" \ 663 | --volume "${src_mnt}/model_apply_inputs:${APP_DEST_MNT}/model_apply_inputs:ro" \ 664 | --volume "${src_mnt}/model_apply_outputs:${APP_DEST_MNT}/model_apply_outputs" \ 665 | --interactive --tty ${image_name}:${RC_IMAGE_TAG} ${app_sh} 666 | printf "${CHARS_LINE}\n" 667 | ;; 668 | 669 | purge) 670 | # Remove all the logs, images and snapshots created by 'rc-cli'. 
671 | if [[ $# -gt 1 ]]; then 672 | excep::err "Too many arguments" 673 | exit 1 674 | fi 675 | # Prompt confirmation to delete user 676 | printf "${CHARS_LINE}\n" 677 | printf "WARNING! purge: This will remove all logs, Docker images and snapshots created by ${RC_CLI_SHORT_NAME}\n" 678 | read -r -p "Are you sure you want to continue? [y/N] " input 679 | case ${input} in 680 | [yY][eE][sS] | [yY]) 681 | printf "Removing logs... " 682 | rm -rf "logs/" 683 | printf "done\n" 684 | printf "Removing images... " 685 | rc_images=$(docker images --all --filter reference="*:${RC_IMAGE_TAG}" --quiet) 686 | if [[ ${rc_images} ]]; then 687 | docker rmi --force ${rc_images} &> /dev/null 688 | fi 689 | printf "done\n" 690 | 691 | printf "Removing snapshots... " 692 | rm -rf snapshots/*/ # Remove only directories 693 | printf "done\n" 694 | printf "Finished!\n" 695 | ;; 696 | [nN][oO] | [nN] | "") 697 | printf "$1 was canceled by the user\n" 698 | ;; 699 | *) 700 | excep::err "invalid input: The $1 was canceled" 701 | exit 1 702 | ;; 703 | esac 704 | printf "${CHARS_LINE}\n" 705 | ;; 706 | 707 | reset-data | reset | rd) 708 | # Flush the output data in the directories 709 | data_path=$(get_data_context $2) 710 | reset_data_prompt $1 ${data_path} 711 | ;; 712 | 713 | configure-utils | cu) # Run maintenance commands to configure the utility images during development 714 | printf "${CHARS_LINE}\n" 715 | printf "Configuring Utility Images\n" 716 | docker::check_status 717 | configure_image ${ROOT_LOGS} ${RC_TEST_IMAGE} ${RC_CLI_PATH} 718 | configure_image ${ROOT_LOGS} ${RC_SCORING_IMAGE} ${RC_CLI_PATH}/scoring 719 | save_scoring_image 720 | 721 | printf "${CHARS_LINE}\n" 722 | ;; 723 | 724 | update) 725 | # Update rc-cli & run maintenance commands after breaking changes on the framework. 726 | if [[ $# -gt 1 ]]; then 727 | excep::err "Too many arguments" 728 | exit 1 729 | fi 730 | printf "${CHARS_LINE}\n" 731 | printf "Checking for updates...\n" 732 | docker::check_status 733 | local_rc_cli_ver=$(<${RC_CLI_PATH}/VERSION) 734 | latest_rc_cli_ver=$(curl -s https://raw.githubusercontent.com/MIT-CAVE/rc-cli/main/VERSION) 735 | if [[ "${local_rc_cli_ver}" == "${latest_rc_cli_ver}" ]]; then 736 | printf "\nYou already have the latest version of ${RC_CLI_SHORT_NAME} (${latest_rc_cli_ver}).\n" 737 | read -r -p "Would you like to reinstall this version? [y/N] " input 738 | else 739 | printf "A new version of ${RC_CLI_SHORT_NAME} (${latest_rc_cli_ver}) is available.\n" 740 | read -r -p "Would you like to update now? [y/N] " input 741 | fi 742 | case ${input} in 743 | [yY][eE][sS] | [yY]) 744 | printf "\nUpdating ${RC_CLI_SHORT_NAME} (${local_rc_cli_ver} -> ${latest_rc_cli_ver})... " 745 | git -C ${RC_CLI_PATH} reset --hard origin/main > /dev/null 746 | git -C ${RC_CLI_PATH} checkout main > /dev/null 747 | git -C ${RC_CLI_PATH} pull > /dev/null 748 | printf "done\n" 749 | 750 | printf "\n${CHARS_LINE}\n" 751 | printf "Running other update maintenance tasks\n" 752 | configure_image ${NO_LOGS} ${RC_TEST_IMAGE} ${RC_CLI_PATH} 753 | configure_image ${NO_LOGS} ${RC_SCORING_IMAGE} ${RC_CLI_PATH}/scoring 754 | save_scoring_image 755 | 756 | printf "${CHARS_LINE}\n" 757 | printf "\n${RC_CLI_SHORT_NAME} was updated successfully.\n" 758 | ;; 759 | [nN][oO] | [nN] | "") 760 | excep::err "Update canceled" 761 | exit 1 762 | ;; 763 | *) 764 | excep::err "Invalid input: Update canceled." 
765 | exit 1 766 | ;; 767 | esac 768 | ;; 769 | 770 | uninstall) 771 | if [[ $# -gt 1 ]]; then 772 | excep::err "Too many arguments" 773 | exit 1 774 | fi 775 | printf "${CHARS_LINE}\n" 776 | # Prompt confirmation to delete 777 | printf "WARNING! uninstall: This will remove: \ 778 | \n- ${RC_CLI_SHORT_NAME} (${RC_CLI_VERSION}) \ 779 | \n- All associated Docker images.\n" 780 | read -r -p "Are you sure you want to continue? [y/N] " input 781 | case ${input} in 782 | [yY][eE][sS] | [yY]) 783 | printf "Removing all Docker images..." 784 | rc_images=$(docker images --all --filter reference="*:${RC_IMAGE_TAG}" --quiet) 785 | if [[ ${rc_images} ]]; then 786 | docker rmi --force ${rc_images} &> /dev/null 787 | fi 788 | printf "done\n" 789 | 790 | printf "Uninstalling ${RC_CLI_SHORT_NAME} (${RC_CLI_VERSION})\n" 791 | rm -rf "${RC_CLI_PATH}" 792 | printf "Uninstall Complete!\n" 793 | ;; 794 | [nN][oO] | [nN] | "") 795 | printf "$1 was canceled by the user\n" 796 | ;; 797 | *) 798 | excep::err "invalid input: The $1 was canceled" 799 | exit 1 800 | ;; 801 | esac 802 | ;; 803 | 804 | help | --help) # Display the help 805 | cat 1>&2 < 8 | 9 | score(A, B) = SD(B, A) * ERP_norm(A, B) / ERP_e(A, B), where sequence A is the historically realized sequence of deliveries, sequence B is the algorithm-produced sequence of deliveries, SD(B, A) denotes the Sequence Deviation of B with respect to A, ERP_norm(A, B) denotes the Edit Distance with Real Penalty applied to sequences A and B with normalized travel times, and ERP_e(A, B) denotes the number of edits prescribed by the ERP algorithm on sequence B with respect to A. If the edit distance with real penalty prescribes 0 edits, the formula is replaced by the sequence deviation multiplied by 0; that is, the route receives a score of 0. 10 | 11 | If a user-submitted route is deemed invalid -- meaning it does not provide a valid sequence that contains the station (as Stop 0) and all stops -- the route is given a score equivalent to a typical complete random perturbation of the driver-taken sequence. 12 | 13 | User-submitted sequences that perfectly match the driver-taken sequence are given a score of 0. Scores increase as the user-submitted sequence differs more and more from the driver-taken sequence. Complete random shuffles of all the stops in the driver-taken route typically receive scores between 0.8 and 1.2. 14 | 15 | The score for an entire submission is the simple, unweighted average of all route scores within the submission. 16 | 17 | # 'Evaluate' Function 18 | To incorporate the scoring logic into your own models, call the function 'evaluate' located within the score.py script and provide it the following inputs: 19 | 20 | ## Inputs 21 | - actual_routes_json: the filepath of the JSON object that contains the sequence of historically-realized stops in the route, given as a string 22 | - submission_json: the filepath of the JSON object that contains the sequence of stops generated by the user's model, given as a string 23 | - cost_matrices_json: the filepath of the JSON object that contains the transit times between each stop in the route, given as a string 24 | - invalid_scores_json: the filepath of the JSON object that contains the scores assigned to the user-submitted routes if they are deemed invalid, given as a string 25 | 26 | All JSON files above may contain the necessary information corresponding to one or more routes. 27 | 28 | ## Outputs 29 | - scores: a Python dictionary that contains the submission score, the scores assigned to each route, the feasibility of each route, and any inputted kwargs. 
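For reference, here is a minimal sketch of calling `evaluate` from your own Python code. It mirrors the call made in `main.py` in this directory; the file paths are illustrative and should point at your own data directory, and the optional kwarg shown is just an example:

```python
import score  # scoring/score.py

# Illustrative paths; adjust them to wherever your JSON inputs live.
scores = score.evaluate(
    actual_routes_json='data/model_score_inputs/new_actual_sequences.json',
    submission_json='data/model_apply_outputs/proposed_sequences.json',
    cost_matrices_json='data/model_apply_inputs/new_travel_times.json',
    invalid_scores_json='data/model_score_inputs/new_invalid_sequence_scores.json',
    model_apply_time=123.4,  # any extra kwargs are echoed back in the output
)

print(scores['submission_score'])   # unweighted average of the route scores
print(scores['route_scores'])       # per-route scores
print(scores['route_feasibility'])  # per-route validity flags
```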
30 | 31 | ## Specifications of submission_json 32 | - The submission_json file must have the same format as the actual_routes_json file (e.g. actual_sequences.json), but it should have 'proposed' everywhere actual_routes_json has 'actual'. 33 | - The route sequences your model outputs in submission_json should begin but not end at the station. In other words, the station's stop number should be 0 and only 0. To match reality, in which a driver must return to the station after completing a route, the station will be automatically appended to the end of the route during the scoring process. -------------------------------------------------------------------------------- /scoring/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | exec python -u main.py "$@" 5 | -------------------------------------------------------------------------------- /scoring/main.py: -------------------------------------------------------------------------------- 1 | import os, json, time 2 | # Import local score file 3 | import score 4 | 5 | # Read JSON data from the given filepath 6 | def read_json_data(filepath): 7 | try: 8 | with open(filepath, newline = '') as in_file: 9 | return json.load(in_file) 10 | except FileNotFoundError: 11 | print("The '{}' file is missing!".format(filepath)) 12 | except json.JSONDecodeError: 13 | print("Error in the '{}' JSON data!".format(filepath)) 14 | except Exception as e: 15 | print("Error when reading the '{}' file!".format(filepath)) 16 | print(e) 17 | return None 18 | 19 | if __name__ == '__main__': 20 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 21 | 22 | # Read JSON time inputs 23 | model_build_time = read_json_data(os.path.join(BASE_DIR,'data/model_score_timings/model_build_time.json')) 24 | model_apply_time = read_json_data(os.path.join(BASE_DIR,'data/model_score_timings/model_apply_time.json')) 25 | 26 | print('Beginning Score Evaluation... ', end='') 27 | output = score.evaluate( 28 | actual_routes_json = os.path.join(BASE_DIR,'data/model_score_inputs/new_actual_sequences.json'), 29 | invalid_scores_json = os.path.join(BASE_DIR,'data/model_score_inputs/new_invalid_sequence_scores.json'), 30 | submission_json = os.path.join(BASE_DIR,'data/model_apply_outputs/proposed_sequences.json'), 31 | cost_matrices_json = os.path.join(BASE_DIR,'data/model_apply_inputs/new_travel_times.json'), 32 | model_apply_time = model_apply_time.get("time"), 33 | model_build_time = model_build_time.get("time") 34 | ) 35 | print('done') 36 | 37 | # Write Outputs to File 38 | output_path = os.path.join(BASE_DIR,'data/model_score_outputs/scores.json') 39 | with open(output_path, 'w') as out_file: 40 | json.dump(output, out_file) 41 | 42 | # Print Pretty Output 43 | print("\nsubmission_score:", output.get('submission_score')) 44 | rt_show=output.get('route_scores') 45 | extra_str=None 46 | if len(rt_show.keys())>5: 47 | rt_show=dict(list(rt_show.items())[:5]) 48 | extra_str="..." 
49 | print("\nFirst five route_scores:") 50 | else: 51 | print("\nAll route_scores:") 52 | for rt_key, rt_score in rt_show.items(): 53 | print(rt_key,": ",rt_score) 54 | if extra_str: 55 | print(extra_str) 56 | -------------------------------------------------------------------------------- /scoring/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.20.1 2 | -------------------------------------------------------------------------------- /scoring/score.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import json 3 | import sys 4 | 5 | def read_json_data(filepath): 6 | ''' 7 | Loads JSON file and generates a dictionary from it. 8 | 9 | Parameters 10 | ---------- 11 | filepath : str 12 | Path of desired file. 13 | 14 | Raises 15 | ------ 16 | JSONDecodeError 17 | The file exists and is readable, but it does not have the proper 18 | formatting for its place in the inputs of evaluate. 19 | 20 | Returns 21 | ------- 22 | file : dict 23 | Dictionary form of the JSON file to which filepath points. 24 | 25 | ''' 26 | try: 27 | with open(filepath, newline = '') as in_file: 28 | file=json.load(in_file) 29 | in_file.close() 30 | except FileNotFoundError: 31 | print("The '{}' file is missing!".format(filepath)) 32 | sys.exit() 33 | except Exception as e: 34 | print("Error when reading the '{}' file!".format(filepath)) 35 | print(e) 36 | sys.exit() 37 | return file 38 | 39 | def good_format(file,input_type,filepath): 40 | ''' 41 | Checks if input dictionary has proper formatting. 42 | 43 | Parameters 44 | ---------- 45 | file : dict 46 | Dictionary loaded from evaluate input file. 47 | input_type : str 48 | Indicates which input of evaluate the current file is. Can be 49 | "actual," "proposed," "costs," or "invalids." 50 | filepath : str 51 | Path from which file was loaded. 52 | 53 | Raises 54 | ------ 55 | JSONDecodeError 56 | The file exists and is readable, but it does not have the proper 57 | formatting for its place in the inputs of evaluate. 58 | 59 | Returns 60 | ------- 61 | None. 62 | 63 | ''' 64 | 65 | for route in file: 66 | if route[:8]!='RouteID_': 67 | raise JSONDecodeError('Improper route ID in {}. Every route must be denoted by a string that begins with "RouteID_".'.format(filepath)) 68 | if input_type=='proposed' or input_type=='actual': 69 | for route in file: 70 | if type(file[route])!=dict or len(file[route])!=1: 71 | raise JSONDecodeError('Improper route in {}. Each route ID must map to a dictionary with a single key.'.format(filepath)) 72 | if input_type not in file[route]: 73 | if input_type=='proposed': 74 | raise JSONDecodeError('Improper route in {}. Each route\'s dictionary in a proposed sequence file must have the key, "proposed".'.format(filepath)) 75 | else: 76 | raise JSONDecodeError('Improper route in {}. Each route\'s dictionary in an actual sequence file must have the key, "actual".'.format(filepath)) 77 | if type(file[route][input_type])!=dict: 78 | raise JSONDecodeError('Improper route in {}. Each sequence must be in the form of a dictionary.'.format(filepath)) 79 | num_stops=len(file[route][input_type]) 80 | for stop in file[route][input_type]: 81 | if type(stop)!=str or len(stop)!=2: 82 | raise JSONDecodeError('Improper stop ID in {}. 
Each stop must be denoted by a two-letter ID string.'.format(filepath)) 83 | stop_num=file[route][input_type][stop] 84 | if type(stop_num)!=int or stop_num>=num_stops: 85 | file[route][input_type][stop]='invalid' 86 | if input_type=='costs': 87 | for route in file: 88 | if type(file[route])!=dict: 89 | raise JSONDecodeError('Improper matrix in {}. Each cost matrix must be a dictionary.'.format(filepath)) 90 | for origin in file[route]: 91 | if type(origin)!=str or len(origin)!=2: 92 | raise JSONDecodeError('Improper stop ID in {}. Each stop must be denoted by a two-letter ID string.'.format(filepath)) 93 | if type(file[route][origin])!=dict: 94 | raise JSONDecodeError('Improper matrix in {}. Each origin in a cost matrix must map to a dictionary of destinations'.format(filepath)) 95 | for dest in file[route][origin]: 96 | if type(dest)!=str or len(dest)!=2: 97 | raise JSONDecodeError('Improper stop ID in {}. Each stop must be denoted by a two-letter ID string.'.format(filepath)) 98 | if not(type(file[route][origin][dest])==float or type(file[route][origin][dest])==int): 99 | raise JSONDecodeError('Improper time in {}. Every travel time must be a float or int.'.format(filepath)) 100 | if input_type=='invalids': 101 | for route in file: 102 | if not(type(file[route])==float or type(file[route])==int): 103 | raise JSONDecodeError('Improper score in {}. Every score in an invalid score file must be a float or int.'.format(filepath)) 104 | 105 | class JSONDecodeError(Exception): 106 | pass 107 | 108 | def evaluate(actual_routes_json,submission_json,cost_matrices_json, invalid_scores_json,**kwargs): 109 | ''' 110 | Calculates score for a submission. 111 | 112 | Parameters 113 | ---------- 114 | actual_routes_json : str 115 | filepath of JSON of actual routes. 116 | submission_json : str 117 | filepath of JSON of participant-created routes. 118 | cost_matrices_json : str 119 | filepath of JSON of estimated times to travel between stops of routes. 120 | invalid_scores_json : str 121 | filepath of JSON of scores assigned to routes if they are invalid. 122 | **kwargs : 123 | Inputs placed in output. Intended for testing_time_seconds and 124 | training_time_seconds 125 | 126 | Returns 127 | ------- 128 | scores : dict 129 | Dictionary containing submission score, individual route scores, feasibility 130 | of routes, and kwargs. 
131 | 132 | ''' 133 | actual_routes=read_json_data(actual_routes_json) 134 | good_format(actual_routes,'actual',actual_routes_json) 135 | submission=read_json_data(submission_json) 136 | good_format(submission,'proposed',submission_json) 137 | cost_matrices=read_json_data(cost_matrices_json) 138 | good_format(cost_matrices,'costs',cost_matrices_json) 139 | invalid_scores=read_json_data(invalid_scores_json) 140 | good_format(invalid_scores,'invalids',invalid_scores_json) 141 | scores={'submission_score':'x','route_scores':{},'route_feasibility':{}} 142 | for kwarg in kwargs: 143 | scores[kwarg]=kwargs[kwarg] 144 | for route in actual_routes: 145 | if route not in submission: 146 | scores['route_scores'][route]=invalid_scores[route] 147 | scores['route_feasibility'][route]=False 148 | else: 149 | actual_dict=actual_routes[route] 150 | actual=route2list(actual_dict) 151 | try: 152 | sub_dict=submission[route] 153 | sub=route2list(sub_dict) 154 | except: 155 | scores['route_scores'][route]=invalid_scores[route] 156 | scores['route_feasibility'][route]=False 157 | else: 158 | if isinvalid(actual,sub): 159 | scores['route_scores'][route]=invalid_scores[route] 160 | scores['route_feasibility'][route]=False 161 | else: 162 | cost_mat=cost_matrices[route] 163 | scores['route_scores'][route]=score(actual,sub,cost_mat) 164 | scores['route_feasibility'][route]=True 165 | submission_score=np.mean(list(scores['route_scores'].values())) 166 | scores['submission_score']=submission_score 167 | return scores 168 | 169 | def score(actual,sub,cost_mat,g=1000): 170 | ''' 171 | Scores individual routes. 172 | 173 | Parameters 174 | ---------- 175 | actual : list 176 | Actual route. 177 | sub : list 178 | Submitted route. 179 | cost_mat : dict 180 | Cost matrix. 181 | g : int/float, optional 182 | ERP gap penalty. Irrelevant if large and len(actual)==len(sub). The 183 | default is 1000. 184 | 185 | Returns 186 | ------- 187 | float 188 | Accuracy score from comparing sub to actual. 189 | 190 | ''' 191 | norm_mat=normalize_matrix(cost_mat) 192 | return seq_dev(actual,sub)*erp_per_edit(actual,sub,norm_mat,g) 193 | 194 | def erp_per_edit(actual,sub,matrix,g=1000): 195 | ''' 196 | Outputs ERP of comparing sub to actual divided by the number of edits involved 197 | in the ERP. If there are 0 edits, returns 0 instead. 198 | 199 | Parameters 200 | ---------- 201 | actual : list 202 | Actual route. 203 | sub : list 204 | Submitted route. 205 | matrix : dict 206 | Normalized cost matrix. 207 | g : int/float, optional 208 | ERP gap penalty. The default is 1000. 209 | 210 | Returns 211 | ------- 212 | int/float 213 | ERP divided by number of ERP edits or 0 if there are 0 edits. 214 | 215 | ''' 216 | total,count=erp_per_edit_helper(actual,sub,matrix,g) 217 | if count==0: 218 | return 0 219 | else: 220 | return total/count 221 | 222 | def erp_per_edit_helper(actual,sub,matrix,g=1000,memo=None): 223 | ''' 224 | Calculates ERP and counts number of edits in the process. 225 | 226 | Parameters 227 | ---------- 228 | actual : list 229 | Actual route. 230 | sub : list 231 | Submitted route. 232 | matrix : dict 233 | Normalized cost matrix. 234 | g : int/float, optional 235 | Gap penalty. The default is 1000. 236 | memo : dict, optional 237 | For memoization. The default is None. 238 | 239 | Returns 240 | ------- 241 | d : float 242 | ERP from comparing sub to actual. 243 | count : int 244 | Number of edits in ERP. 
245 | 246 | ''' 247 | if memo==None: 248 | memo={} 249 | actual_tuple=tuple(actual) 250 | sub_tuple=tuple(sub) 251 | if (actual_tuple,sub_tuple) in memo: 252 | d,count=memo[(actual_tuple,sub_tuple)] 253 | return d,count 254 | if len(sub)==0: 255 | d=gap_sum(actual,g) 256 | count=len(actual) 257 | elif len(actual)==0: 258 | d=gap_sum(sub,g) 259 | count=len(sub) 260 | else: 261 | head_actual=actual[0] 262 | head_sub=sub[0] 263 | rest_actual=actual[1:] 264 | rest_sub=sub[1:] 265 | score1,count1=erp_per_edit_helper(rest_actual,rest_sub,matrix,g,memo) 266 | score2,count2=erp_per_edit_helper(rest_actual,sub,matrix,g,memo) 267 | score3,count3=erp_per_edit_helper(actual,rest_sub,matrix,g,memo) 268 | option_1=score1+dist_erp(head_actual,head_sub,matrix,g) 269 | option_2=score2+dist_erp(head_actual,'gap',matrix,g) 270 | option_3=score3+dist_erp(head_sub,'gap',matrix,g) 271 | d=min(option_1,option_2,option_3) 272 | if d==option_1: 273 | if head_actual==head_sub: 274 | count=count1 275 | else: 276 | count=count1+1 277 | elif d==option_2: 278 | count=count2+1 279 | else: 280 | count=count3+1 281 | memo[(actual_tuple,sub_tuple)]=(d,count) 282 | return d,count 283 | 284 | def normalize_matrix(mat): 285 | ''' 286 | Normalizes cost matrix. 287 | 288 | Parameters 289 | ---------- 290 | mat : dict 291 | Cost matrix. 292 | 293 | Returns 294 | ------- 295 | new_mat : dict 296 | Normalized cost matrix. 297 | 298 | ''' 299 | new_mat=mat.copy() 300 | time_list=[] 301 | for origin in mat: 302 | for destination in mat[origin]: 303 | time_list.append(mat[origin][destination]) 304 | avg_time=np.mean(time_list) 305 | std_time=np.std(time_list) 306 | min_new_time=np.inf 307 | for origin in mat: 308 | for destination in mat[origin]: 309 | old_time=mat[origin][destination] 310 | new_time=(old_time-avg_time)/std_time 311 | if new_time 62 | ├── Dockerfile 63 | ├── model_build.sh 64 | └── model_apply.sh 65 | ``` 66 | 67 | When developing your model, you can run any code from the `src/` directory and read from the `data/` directory matching the phase you are executing. We suggest you develop your code in the `src/` folder and use relative paths to reference the datasets in the `data/` folders. 68 | 69 | **NOTE:** The `data/` directory will not be included as part of your submission. During submission scoring, clean evaluation data matching this structure will be mounted in its place. 70 | 71 | For additional details on Dockerfile setup, please refer to [custom_dev_stack.md](custom_dev_stack.md). 72 | 73 | To see a more detailed example file structure, expand the Python example below: 74 | 75 | ### An example Python-based project structure 76 |
77 | Details 78 | 79 | There are templates available for Python, a Unix shell, and R. See the [Create your Project section](../README.md#create-your-project) of the [RC-CLI readme](../README.md) for more information. This is an example file structure for a Python-based solution. 80 | 81 | When you create a `new-app`, the RC-CLI creates a Docker image. In this Python example, the `Dockerfile` installs packages from `requirements.txt` needed for your dockerized Python environment. 82 | 83 | The `model_build.sh` and `model_apply.sh` scripts are called by the RC-CLI inside the Docker image and serve as the entry point to your code. 84 | 85 | The folders listed below are used for logging, storing saved models, and scoring; they are not required for submission. The `new-app` command creates these folders. 86 | - `data/model_build_outputs` would contain a trained model created from the "build inputs" dataset. 87 | - `data/model_apply_outputs` would contain the predicted routes based on your model and the "apply inputs" dataset. 88 | - The `data/model_score_inputs`, `data/model_score_outputs`, and `data/model_score_timings` directories are utilized by the RC-CLI when scoring your application and are not necessary for submission. After scoring your model, find the results in `data/model_score_outputs/scores.json`. 89 | - `snapshots` contains saved Docker images and their corresponding data files. 90 | - `logs` contains folders created by the RC-CLI while running commands. Logs are kept for `configure-app`, `enter-app`, `save-snapshot`, etc. 91 | 92 | ``` 93 | ├── data 94 | │ ├── model_build_inputs 95 | │ │ ├── actual_sequences.json 96 | │ │ ├── invalid_sequence_scores.json 97 | │ │ ├── package_data.json 98 | │ │ ├── route_data.json 99 | │ │ └── travel_times.json 100 | │ ├── model_build_outputs 101 | │ │ └── <your model files> 102 | │ ├── model_apply_inputs 103 | │ │ ├── new_package_data.json 104 | │ │ ├── new_route_data.json 105 | │ │ └── new_travel_times.json 106 | │ ├── model_apply_outputs 107 | │ │ └── proposed_sequences.json 108 | │ ├── model_score_inputs 109 | │ │ ├── new_actual_sequences.json 110 | │ │ └── new_invalid_sequence_scores.json 111 | │ ├── model_score_outputs 112 | │ │ └── scores.json 113 | │ └── model_score_timings 114 | │ ├── model_apply_time.json 115 | │ └── model_build_time.json 116 | ├── src 117 | │ ├── model_build.py 118 | │ └── model_apply.py 119 | ├── snapshots 120 | │ └── test_model 121 | │ ├── data 122 | │ └── test_model.tar.gz 123 | ├── logs 124 | │ └── save_snapshot 125 | │ └── test_model_configure_2021-03-15T00:00:00.log 126 | ├── .dockerignore 127 | ├── Dockerfile 128 | ├── model_build.sh 129 | ├── model_apply.sh 130 | └── requirements.txt 131 | ``` 132 |
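To make the handshake between the two phases concrete, here is a minimal sketch (not part of the templates; the file name `model.json` and the stored contents are purely illustrative) of a `src/model_build.py` that persists an artifact for the apply phase:

```python
# src/model_build.py (sketch): read a build input, derive something from it,
# and persist the result to data/model_build_outputs for model-apply to use.
import json
from os import path

BASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))

with open(path.join(BASE_DIR, 'data/model_build_inputs/travel_times.json')) as f:
    travel_times = json.load(f)

model = {'num_training_routes': len(travel_times)}  # placeholder "model"

with open(path.join(BASE_DIR, 'data/model_build_outputs/model.json'), 'w') as f:
    json.dump(model, f)
```

A matching `src/model_apply.py` would read `data/model_build_outputs/model.json` back in the same way and write its predictions to `data/model_apply_outputs/proposed_sequences.json`.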
133 | 134 | ### Data 135 | Please refer to [data_structures.md](data_structures.md) for more details on the format of each of the data files contained in the `data` folder. 136 | 137 | ## Managing your Docker environment 138 | If you have ever tried to distribute code for others to run, you know that it can be frustrating when others try to run your code and it fails because they do not have the same setup as you do. 139 | 140 | The RC-CLI avoids this issue by ensuring that you include everything needed to run your program in your Docker image. All the system settings, libraries, and packages need to be specified so that when it comes time to evaluate your submission, we can build your Docker image and know that things will work. 141 | 142 | We suggest contestants use an environment to ensure they have included everything necessary in their Docker image. Using an environment starts with a clean slate and forces you to install packages in your environment to use them. After you have your code running, you can query your active environment to list the required packages to include in your Docker image. 143 | 144 | For Python, there are two main environment managers - `virtualenv` and `conda`. There are other environment managers, such as renv for R and conan for C/C++, but we will not be covering those. 145 | 146 |
147 | Virtualenv Example 148 | 149 | When using `virtualenv`, you will usually have a few Python packages installed globally on your system. One of these will be `virtualenv` itself. This example shows how to create a virtual environment and capture its requirements for use in RC-CLI. 150 | 151 | To start, you navigate to your project directory. Create a virtual environment and activate it. 152 | ```sh 153 | $ virtualenv -p python3 venv 154 | Created virtual environment in venv/bin/python 155 | $ source venv/bin/activate 156 | (venv) $ 157 | ``` 158 | 159 | Next, install a package that your program will need. Then export the virtual environment's requirements to include in your Docker image. Last, use the RC-CLI to update the Docker image. The RC-CLI builds the Docker image by importing packages using `pip` and `requirements.txt`. 160 | ```sh 161 | (venv) $ pip install numpy 162 | Successfully installed numpy-1.20.1 163 | (venv) $ pip freeze > requirements.txt 164 | (venv) $ more requirements.txt 165 | numpy==1.20.1 166 | (venv) $ rc-cli configure-app 167 | ``` 168 |
169 | 170 |
171 | Conda Example 172 | 173 | In this example, we create an empty environment, activate it, install a package, and export the environment. 174 | ```sh 175 | $ conda create --name example_env python=3.9 176 | $ source activate example_env 177 | (example_env) $ conda install numpy 178 | (example_env) $ conda env export > environment.yaml 179 | (example_env) $ more environment.yaml 180 | name: example_env 181 | channels: 182 | - defaults 183 | dependencies: 184 | - libcxx=10.0.0 185 | - libedit=3.1.20191231 186 | - libffi=3.3 187 | - ncurses=6.2 188 | - pip=21.0.1 189 | - python=3.9.2 190 | - readline=8.1 191 | - setuptools=52.0.0 192 | - sqlite=3.33.0 193 | - tk=8.6.10 194 | - tzdata=2020f 195 | - wheel=0.36.2 196 | - xz=5.2.5 197 | - zlib=1.2.11 198 | - pip 199 | - numpy==1.20.1 200 | ``` 201 | 202 | The `environment.yaml` file lists the `conda` dependencies and pip dependencies (with version numbers) that need to be included to match this environment. The RC-CLI sample Python template's Dockerfile uses `pip` to update the image by installing the packages in `requirements.txt`. 203 | 204 | At this point, you have two options: 205 | 1. Copy the `pip` lines from the `environment.yaml` file into `requirements.txt`. This only works if you use `pip` to install packages while your `conda` environment is active. 206 | 2. Edit the Dockerfile to specify a Base Image that includes `conda`. You can then import the `environment.yaml` file directly into the Dockerfile image. The example we provided does not include `conda`. 207 | 208 | If you choose option 2, we recommend you read [custom_dev_stack.md](custom_dev_stack.md) to learn more about creating a custom development stack. 209 |
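If you choose option 1, the `pip` section can also be extracted programmatically. Below is a small helper sketch; it assumes the standard `conda env export` layout, in which pip packages appear under a nested `pip:` key, and that PyYAML is installed (neither the helper nor PyYAML is part of the RC-CLI):

```python
# extract_pip_deps.py (sketch): copy the pip entries of a conda
# environment.yaml into requirements.txt for the Python template.
import yaml  # PyYAML

with open('environment.yaml') as f:
    env = yaml.safe_load(f)

pip_deps = []
for dep in env.get('dependencies', []):
    # conda lists pip packages as a nested {'pip': [...]} entry
    if isinstance(dep, dict) and 'pip' in dep:
        pip_deps.extend(dep['pip'])

with open('requirements.txt', 'w') as f:
    f.write('\n'.join(pip_deps) + '\n')
```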
210 | 211 | ## Routing Challenge CLI Commands 212 | General Usage: `rc-cli COMMAND [options]` 213 | 214 | The RC-CLI commands will be presented in the usual order for the phases of analysis. Additional commands will be covered at the end. 215 | 216 | ### new-app 217 | ```sh 218 | rc-cli new-app [app-name] [template-name] 219 | ``` 220 | Create an application directory containing training data and an example Dockerfile. The following templates are available: 221 | - `rc_base`: `Ubuntu 20.04` Docker image. It is a lightweight Unix version with a bash shell. Good option if you plan to customize your environment. 222 | - `rc_python`: Alpine Docker image with Python 3.9.1 installed. The place to start if you are coding in Python. 223 | - `rc_r`: `R 4.0.4` Docker image. A simple R example to get you started. 224 | 225 | This command copies all of the data and creates a Docker image. This process can take several minutes, as both the dataset and the Docker image are large files and creating an image takes time. 226 | 227 | ### configure-app 228 | ```sh 229 | rc-cli configure-app 230 | ``` 231 | **NOTE:** Upon running `rc-cli new-app`, a default Docker image was created. 232 | 233 | Configure your app's current Docker image using your local Dockerfile. Every time you update your project root (shell scripts, requirements, or Dockerfile), you should run `rc-cli configure-app` again. This overwrites the previous image, giving you an updated image that your model will run in. 234 | 235 | **Example:** To add necessary Python packages to your Docker image, use `pip freeze` or `conda list --export` to generate a list of requirements for your environment. Only install the packages needed for your scripts, not everything listed in your default environment. These commands will show you which package versions you need. 236 | 237 | If running `pip freeze` lists `numpy==1.20.1`, add this version information to `requirements.txt` and use `rc-cli configure-app` to configure your Docker image. If you are using `conda list --export`, make sure you change the output to pip-style formatting before updating `requirements.txt`. 238 | 239 | ### model-build 240 | ```sh 241 | rc-cli model-build [snapshot-name] 242 | ``` 243 | Execute the `model_build.sh` script inside of your app's Docker image. During the `model-build` phase you will have access to the following `data/` directories: 244 | - `data/model_build_inputs` (read) 245 | - `data/model_build_outputs` (read/write) 246 | 247 | In `data/model_build_inputs`, you will have access to historical data of known routes. During this phase, you will use that data to create a model that can predict a proposed route sequence based on the new data made available in the `model-apply` phase. You can save any models, graphs, variables, or data that you generate during this phase in the `data/model_build_outputs` directory to be used in the `model-apply` phase. 248 | 249 | If you do not specify a snapshot name, the `model-build` phase is run on the app in the current directory. 250 | 251 | > **NOTE: The maximum duration allowed for the `model-build` phase is exactly 12 hours**; otherwise, a timeout will stop the process and the build phase of your model will not complete. 252 | 253 | ### model-apply 254 | ```sh 255 | rc-cli model-apply [snapshot-name] 256 | ``` 257 | Execute the `model_apply.sh` script inside of your app's Docker image. Run after the `model-build` phase. 
258 | 259 | During the `model-apply` phase you will have access to the following `data/` directories: 260 | - `data/model_build_outputs` (read) 261 | - `data/model_apply_inputs` (read) 262 | - `data/model_apply_outputs` (read/write) 263 | 264 | You do not have access to the historical data at this phase, but there is a new dataset provided in `data/model_apply_inputs` that will be used by the model created in the `model-build` phase to generate predicted routes. The predicted routes should be saved in `data/model_apply_outputs/proposed_sequences.json`. 265 | 266 | > **NOTE: The maximum duration allowed for the `model-apply` phase is exactly 4 hours**; otherwise, a timeout will stop the process and the apply phase of your model will not complete. 267 | 268 | ### model-score 269 | ```sh 270 | rc-cli model-score [snapshot-name] 271 | ``` 272 | Apply the scoring algorithm using `data/model_apply_outputs/proposed_sequences.json` created during the `model-apply` phase. The scoring algorithm compares your proposed route sequences against the actual sequences for the same set of stops. It outputs a numerical score that quantifies the similarity of the two sequences. This algorithm will be the same one used when evaluating submissions at the end of the competition. The only difference will be the dataset provided during the `model-apply` phase. 273 | 274 | ### enter-app 275 | ```sh 276 | rc-cli enter-app [snapshot-name] 277 | ``` 278 | Use this command to enter your current app's Docker image. This will start the Docker image for your project, where you can run shell scripts, execute files, and test whether your Docker image has the correct environment to run your source code. The `enter-app` command provides the following directory access: 279 | - `data/model_build_inputs` (read) 280 | - `data/model_build_outputs` (read/write) 281 | - `data/model_apply_inputs` (read) 282 | - `data/model_apply_outputs` (read/write) 283 | 284 | ### save-snapshot 285 | ```sh 286 | rc-cli save-snapshot [snapshot-name] 287 | ``` 288 | Save the current app as a snapshot with the same name as your app or with a specified name. This command copies the current data into a folder that will be referenced by this snapshot and saves a Docker image of your current app. Most commands accept a snapshot name as an argument. 289 | 290 | To create a submission for this challenge, create a snapshot and upload the `[snapshot-name].tar.gz` to https://routingchallenge.io/. Please note that after saving the Docker image, all participants are strongly encouraged to validate their solutions using `rc-cli production-test [snapshot-name]` before submitting the Docker image files through the platform. 291 | 292 | ### production-test 293 | ```sh 294 | rc-cli production-test [snapshot-name] 295 | ``` 296 | This command tests the complete scoring process on your app. 297 | **WARNING:** This command resets your data directory. We recommend creating a snapshot and running this command against the snapshot. 298 | 299 | The following commands are run in order: 300 | 1. `rc-cli reset-data` - Reset the data folder 301 | 2. `rc-cli model-build` - Build a model using provided training data 302 | 3. `rc-cli model-apply` - Apply your model to a new dataset 303 | 4. `rc-cli model-score` 304 | - Score your model against actual sequences for the stop data in the new dataset 305 | 306 | ### reset-data 307 | ```sh 308 | rc-cli reset-data [snapshot-name] 309 | ``` 310 | This command resets all of the files in `data/`. 
Any files saved in the output directories, such as models or predicted sequences, will be lost. 311 | 312 | ### update 313 | ```sh 314 | rc-cli update 315 | ``` 316 | This command pulls down the latest data and executables for RC-CLI. It also configures Docker images for testing and scoring. 317 | -------------------------------------------------------------------------------- /templates/custom_dev_stack.md: -------------------------------------------------------------------------------- 1 | # Dockerfile setup 2 | ## Introduction 3 | Although we provide [Python](https://www.python.org/) and [R](https://www.r-project.org/) templates, teams can choose any programming language and create their own custom app template. 4 | 5 | This document will help you correctly set up a functional custom app template. 6 | 7 | ## Initial setup 8 | 1. Make sure the [rc-cli](https://github.com/mit-cave/rc-cli) is installed locally on your system 9 | - You can check with: 10 | ``` 11 | rc-cli --version 12 | ``` 13 | 14 | 2. Folder structure is very important when using the `rc-cli` 15 | - To get started on any custom template, you should create a base template 16 | - This includes some starter code as well as all the necessary files and folder structures to properly set up your template 17 | - You can bootstrap a new (and fully functional) app named `my-app` in your current directory using the `rc_base` template by running: 18 | ``` 19 | rc-cli new-app my-app rc_base 20 | ``` 21 | - You should now be ready to begin setting up your development stack with a `Dockerfile` 22 | 23 | ## Custom Development Stack Using Docker 24 | Please follow these guidelines to ensure the `rc-cli` will work with your development stack as you set up your `Dockerfile`: 25 | 26 | 1. Select the appropriate Docker image(s) for your desired development stack 27 | - A good place to start looking is [Docker Hub](https://hub.docker.com/search?q=&type=image&category=languages) 28 | - Note: If your development stack does not have much Docker support or if you prefer to create a custom image for your environment, you may want to extend the Ubuntu-based `Dockerfile` in the `rc_base` template. 29 | 30 | 2. In order for your `Dockerfile` to work with the `rc-cli`, it should meet the following minimum requirements: 31 | - Create an `app` user and `app` group for the container 32 | - Create a `/home/app/` directory 33 | - The directory `src` along with the `model_build.sh` and `model_apply.sh` script files in your template must be copied to `/home/app/` 34 | - `model_build.sh` and `model_apply.sh` should both be executable by the file owners 35 | - This should be set on your local OS before configuring your app image 36 | - You can do this on most Unix systems with: `sudo chmod 755 model_build.sh && sudo chmod 755 model_apply.sh` 37 | - `model_build.sh` and `model_apply.sh` should both be owned by the Docker `app` user 38 | - This can happen in the Dockerfile when executing a `COPY` command 39 | - The `/home/app/` directory must be included in the `PATH` environment variable, i.e. 
`/home/app/` must be part of the list of executable directories 40 | - This allows `rc-cli` to execute the `model_build.sh` and `model_apply.sh` files from anywhere 41 | - The default `USER` for your Docker image must be `app` 42 | - You should not define an `ENTRYPOINT` instruction within your custom Dockerfile 43 | - This will be overwritten by `rc-cli` to run the `model_build.sh` and `model_apply.sh` scripts 44 | - Instead, add any shell commands that you want to execute at run-time to the relevant `sh` files 45 | - Place a `CMD` instruction at the end of the Dockerfile to run the default shell of the image, e.g. `CMD ["/bin/bash"]` or `CMD ["/bin/sh"]` 46 | - Only the `src` directory, `model_build.sh` and `model_apply.sh` should be copied in your `Dockerfile`: 47 | - Code in your `src` directory can pull data from the relative app `data` directory path during local development and `rc-cli` testing 48 | - During local development: 49 | - You will have unrestricted access to the `data` directory as you will be executing code locally 50 | - During `rc-cli` development (`model-build`, `model-apply`, `enter-app`): 51 | - The `rc-cli` will mount the needed data for each command from your local `data` folder as it exists at run time 52 | - Changes to this data during run time will be reflected in your local `data` folder 53 | - Data is restricted to the current phase 54 | 1. `model-build`: 55 | - `data/model_build_inputs` (read) 56 | - `data/model_build_outputs` (read/write) 57 | 2. `model-apply`: 58 | - `data/model_build_outputs` (read) 59 | - `data/model_apply_inputs` (read) 60 | - `data/model_apply_outputs` (read/write) 61 | - `enter-app`: 62 | - `data/model_build_inputs` (read/write) 63 | - `data/model_build_outputs` (read/write) 64 | - `data/model_apply_inputs` (read/write) 65 | - `data/model_apply_outputs` (read/write) 66 | - During `rc-cli` production testing (`production-test`): 67 | - The `rc-cli` will first reset your local `data` folder to the initial data state 68 | - This is exactly how the data will be formatted during official scoring 69 | - This will remove any local changes you have made in the `data` folder 70 | - Remember that none of your local data will ever get sent for scoring 71 | - If you want to manipulate the data, you need to adjust this data accordingly during the `model-build` and `model-apply` phases 72 | - The `rc-cli` will then mount the data for each command from your local `data` folder at run time 73 | - During the official scoring: 74 | - The `rc-cli` will mount different evaluation data in the exact same manner as production testing 75 | - This data will have the exact same structure 76 | 77 | 3. Once you have finished setting up or made any changes to your `Dockerfile` or `*.sh` scripts: 78 | - Run `configure-app` 79 | `rc-cli configure-app` 80 | - The output Docker logs can be found at: 81 | `logs/configure_app/` 82 | 83 | 84 | ## Example 85 | ### Linux-based image with a Bash shell 86 |
87 | Dockerfile 88 | 89 | ```Dockerfile 90 | # syntax = docker/dockerfile:1.2 91 | ARG SOURCE_DIR=/home/app/ 92 | # base image - replace <image>:<tag> with your values 93 | FROM <image>:<tag> 94 | ARG SOURCE_DIR 95 | ENV SOURCE_DIR $SOURCE_DIR 96 | ENV PATH $PATH:$SOURCE_DIR 97 | RUN mkdir -p $SOURCE_DIR 98 | RUN groupadd --gid 1000 app \ 99 | && useradd --uid 1000 --gid app --shell /bin/bash --create-home app 100 | WORKDIR $SOURCE_DIR 101 | # TODO: install dependencies (optional) 102 | COPY --chown=app:app ./*.sh ./ 103 | COPY --chown=app:app ./src/ ./src/ 104 | USER app 105 | CMD ["/bin/bash"] 106 | ``` 107 |
108 | 109 | ## References 110 | - If you need more information about the syntax of **Dockerfile**, check the [official documentation](https://docs.docker.com/engine/reference/builder/) 111 | - [Best practices](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/) for writing Dockerfiles 112 | -------------------------------------------------------------------------------- /templates/data_structures.md: -------------------------------------------------------------------------------- 1 | # Data Structures 2 | ## Introduction 3 | Data is at the very heart of your project. In order for you to correctly parse the input files in your program and generate the correct format for the output files in each phase (`model-build` and `model-apply`), you must become familiar with the input and output data structures. 4 | 5 | In this document, we describe the format of each input and output file included in the `data` folder. 6 | 7 | ## Placeholder Elements 8 | In each "Data Format" section, some placeholder elements are specified to provide information about the property regarding its context or value type. All placeholders are enclosed in double quotes and angle brackets `"<>"`. If you are not sure about the value of a property, please expand the "Example" below the data structure. 9 | - `<date>`: an ISO 8601 compliant date format. 10 | - `<datetime>`: an ISO 8601 compliant datetime format that typically represents a timestamp. 11 | - `<boolean>`: a boolean value {`false`, `true`}. 12 | - `<float>`: a decimal number. 13 | - `<hash>`: a unique identifier appended to the `RouteID` or `PackageID` property. 14 | - `<time>`: a time format in hours, minutes, and seconds. 15 | - `<status>`: status of a `model-build` or `model-apply` run {`success` | `failure` | `timeout`}. 16 | - `<uint16>`: an integer number contained in the `[0, 65535]` range. 17 | - `<uint32>`: an integer number contained in the `[0, 4294967295]` range. 18 | 19 | ## Data Field Definitions 20 | The data fields you will encounter in the provided `model_build_inputs`, `model_apply_inputs`, and `model_score_inputs` files are defined below. 21 | - `RouteID_<hash>`: an alphanumeric string that uniquely identifies each route. 22 | - `<stop-id>`: an identifier code for each stop within a route {`AA` | `AB` | ... | `ZZ`}. Stop identifier codes may be shared among routes. Do not assume, however, that stop identifiers shared by multiple routes refer to the same stop. 23 | - `PackageID_<hash>`: an alphanumeric string that uniquely identifies each package within a route. Package identifiers are not shared between routes. 24 | - `scan_status`: categorical variable denoting the delivery status of a package {`DELIVERED` | `DELIVERY_ATTEMPTED` | `REJECTED`}. If a package’s delivery was attempted but not successful, delivery may be reattempted later in the route. 25 | - `time_window`: the interval of time in which package delivery is acceptable, defined by `start_time_utc` and `end_time_utc`, both specified in Coordinated Universal Time (UTC). If a package’s `start_time_utc` and `end_time_utc` fields are `NaN`, no time window was specified. 26 | - `planned_service_time_seconds`: the duration of time expected to deliver the package once the delivery person has arrived at the package’s delivery location, specified in seconds. Service time may include time required to park and hand off the package at the drop-off location. 27 | - `dimensions`: the approximate depth, height, and width of the package {`depth_cm`, `height_cm`, and `width_cm`}, specified in centimeters. 
28 | - `station_code`: an alphanumeric string that uniquely identifies the delivery station (or depot) at which the route began. 29 | - `date_YYYY_MM_DD`: the date the delivery vehicle departed from the station. 30 | - `departure_time_utc`: the time the delivery vehicle departed from the station, specified in UTC. 31 | - `executor_capacity_cm3`: the volume capacity of the delivery vehicle, specified in cm^3. 32 | - `lat`, `lng`: the latitude and longitude of each stop specified via the WGS 84 projection system. 33 | - `route_score`: categorical variable denoting the quality of the observed stop sequence {`High` | `Medium` | `Low`}. The quality score is based both on the level of time window adherence and the amount of backtracking in the observed sequence. Backtracking occurs when a delivery vehicle delivers packages within some neighborhood or geographical area, leaves the neighborhood or geographical area, then returns later during the route. Backtracking is inefficient and should be limited when possible. 34 | - `type`: categorical variable denoting the type of stop {`Station` | `Dropoff`}. The delivery vehicle acquires all packages at the station and delivers them at subsequent drop-off locations. 35 | - `zone_id`: a unique identifier denoting the geographical planning area into which the stop falls. The numeral before the dash denotes a high-level planning zone. The text after the dash denotes the subzone within the high-level zone. 36 | - Travel times provided in the `travel_times.json` and `new_travel_times.json` files are, for a given pair of stops, the average of historically realized travel times between all combinations of package delivery locations between those stops, specified in seconds. (A short Python sketch for parsing these files follows the file list below.) 37 | 38 | ## Templates 39 | ### `model_build_inputs`: 40 | 1. `actual_sequences.json` 41 | 42 | Data format: 43 | ```json 44 | { 45 | "RouteID_<hash>": { 46 | "actual": { 47 | "<stop-id>": "<uint16>", 48 | "..." 49 | } 50 | }, 51 | "..." 52 | } 53 | ``` 54 | 55 |
56 | Example 57 | 58 | ```json 59 | { 60 | "RouteID_1a279ac2-41aa-4a5e-85de-061f3475896c": { 61 | "actual": { 62 | "AA": 182, 63 | "AC": 168, 64 | "AD": 35, 65 | "AJ": 22 66 | } 67 | }, 68 | "RouteID_1a2bfa66-0a93-4e55-a261-0e341b6f05b6": { 69 | "actual": { 70 | "AA": 137, 71 | "AM": 173 72 | } 73 | }, 74 | "RouteID_1a48f1f3-d2a2-4431-a078-18f832745700": { 75 | "actual": { 76 | "AA": 65, 77 | "AD": 127, 78 | "AG": 48 79 | } 80 | } 81 | } 82 | ``` 83 |
84 | 85 | 86 | 2. `invalid_sequence_scores.json` 87 | 88 | Data format: 89 | ```json 90 | { 91 | "RouteID_<hash>": "<float>", 92 | "..." 93 | } 94 | ``` 95 | 96 |
97 | Example 98 | 99 | ```json 100 | { 101 | "RouteID_1a279ac2-41aa-4a5e-85de-061f3475896c": 1.396440219978261, 102 | "RouteID_1a2bfa66-0a93-4e55-a261-0e341b6f05b6": 1.2147466479550677, 103 | "RouteID_1a48f1f3-d2a2-4431-a078-18f832745700": 1.2812543267775713, 104 | "RouteID_1a4903de-1a85-4bca-921a-f746c68fbf7a": 0.9012432765379605, 105 | "RouteID_1a4e2edf-3fde-409f-8bf6-f01ff98d5afa": 0.7468055008554786 106 | } 107 | ``` 108 |
109 | 110 | 111 | 3. `package_data.json` 112 | 113 | Data format: 114 | ```json 115 | { 116 | "RouteID_<hash>": { 117 | "<stop-id>": { 118 | "PackageID_<hash>": { 119 | "scan_status": "<scan-status>", 120 | "time_window": { 121 | "start_time_utc": "<datetime>", 122 | "end_time_utc": "<datetime>" 123 | }, 124 | "planned_service_time_seconds": "<float>", 125 | "dimensions": { 126 | "depth_cm": "<float>", 127 | "height_cm": "<float>", 128 | "width_cm": "<float>" 129 | } 130 | }, 131 | "..." 132 | }, 133 | "..." 134 | }, 135 | "..." 136 | } 137 | ``` 138 | 139 |
140 | Example 141 | 142 | ```json 143 | { 144 | "RouteID_1a279ac2-41aa-4a5e-85de-061f3475896c": { 145 | "AA": { 146 | "PackageID_ad0f6eb7-8498-4c71-b1f1-dcd81e4bde9f": { 147 | "scan_status": "DELIVERED", 148 | "time_window": { 149 | "start_time_utc": "2018-07-23 14:00:00", 150 | "end_time_utc": "2018-07-23 21:00:00" 151 | }, 152 | "planned_service_time_seconds": 84, 153 | "dimensions": { 154 | "depth_cm": 48.3, 155 | "height_cm": 15.2, 156 | "width_cm": 33 157 | } 158 | } 159 | }, 160 | "AY": { 161 | "PackageID_0ed922ae-d59e-49c1-b71b-827e4353ffed": { 162 | "scan_status": "DELIVERED", 163 | "time_window": { 164 | "start_time_utc": "2018-07-23 15:00:00", 165 | "end_time_utc": "2018-07-23 22:00:00" 166 | }, 167 | "planned_service_time_seconds": 18, 168 | "dimensions": { 169 | "depth_cm": 30.7, 170 | "height_cm": 2.3, 171 | "width_cm": 28.2 172 | } 173 | }, 174 | "PackageID_254e2768-2f30-4731-a3e4-2e0f43268b25": { 175 | "scan_status": "DELIVERED", 176 | "time_window": { 177 | "start_time_utc": "2018-07-23 11:00:00", 178 | "end_time_utc": "2018-07-23 12:00:00" 179 | }, 180 | "planned_service_time_seconds": 18, 181 | "dimensions": { 182 | "depth_cm": 35.8, 183 | "height_cm": 11.9, 184 | "width_cm": 32.5 185 | } 186 | } 187 | } 188 | } 189 | } 190 | ``` 191 |
192 | 193 | 194 | 4. `route_data.json` 195 | 196 | Data format: 197 | ```json 198 | { 199 | "RouteID_<hash>": { 200 | "station_code": "<station-code>", 201 | "date_YYYY_MM_DD": "<date>", 202 | "departure_time_utc": "<time>", 203 | "executor_capacity_cm3": "<uint32>", 204 | "route_score": "<route-score>", 205 | "stops": { 206 | "<stop-id>": { 207 | "lat": "<float>", 208 | "lng": "<float>", 209 | "type": "<stop-type>", 210 | "zone_id": "<zone-id>" 211 | }, 212 | "..." 213 | }, 214 | "..." 215 | }, 216 | "..." 217 | } 218 | ``` 219 | 220 |
221 | Example 222 | 223 | ```json 224 | { 225 | "RouteID_1a279ac2-41aa-4a5e-85de-061f3475896c": { 226 | "station_code": "DAU1", 227 | "date_YYYY_MM_DD": "2018-07-23", 228 | "departure_time_utc": "15:36:07", 229 | "executor_capacity_cm3": 4247527, 230 | "route_score": "Medium", 231 | "stops": { 232 | "AA": { 233 | "lat": 30.396307, 234 | "lng": -97.691442, 235 | "type": "Dropoff", 236 | "zone_id": "E-20.2H" 237 | }, 238 | "AC": { 239 | "lat": 30.399494, 240 | "lng": -97.692166, 241 | "type": "Dropoff", 242 | "zone_id": "E-19.3H" 243 | }, 244 | "AD": { 245 | "lat": 30.393832, 246 | "lng": -97.69988, 247 | "type": "Dropoff", 248 | "zone_id": "E-19.3J" 249 | } 250 | } 251 | }, 252 | "RouteID_1a2bfa66-0a93-4e55-a261-0e341b6f05b6": { 253 | "station_code": "DLA5", 254 | "date_YYYY_MM_DD": "2018-08-07", 255 | "departure_time_utc": "16:00:58", 256 | "executor_capacity_cm3": 3313071, 257 | "route_score": "High", 258 | "stops": { 259 | "AA": { 260 | "lat": 33.958825, 261 | "lng": -117.41668, 262 | "type": "Dropoff", 263 | "zone_id": "C-18.1C" 264 | }, 265 | "AD": { 266 | "lat": 33.96314, 267 | "lng": -117.401855, 268 | "type": "Dropoff", 269 | "zone_id": "C-18.2A" 270 | } 271 | } 272 | } 273 | } 274 | ``` 275 |
276 | 277 | 278 | 5. `travel_times.json` 279 | 280 | Data format: 281 | ```json 282 | { 283 | "RouteID_<hash>": { 284 | "<stop-id-1>": { 285 | "<stop-id-1>": 0, 286 | "<stop-id-2>": "<float>", 287 | "<stop-id-3>": "<float>", 288 | "..." 289 | }, 290 | "<stop-id-2>": { 291 | "<stop-id-1>": "<float>", 292 | "<stop-id-2>": 0, 293 | "<stop-id-3>": "<float>", 294 | "..." 295 | }, 296 | "..." 297 | }, 298 | "..." 299 | } 300 | ``` 301 | 302 |
303 | Example 304 | 305 | ```json 306 | { 307 | "RouteID_1a279ac2-41aa-4a5e-85de-061f3475896c": { 308 | "AA": { 309 | "AA": 0, 310 | "AC": 211.5, 311 | "AD": 258.7, 312 | "AJ": 244.9 313 | }, 314 | "AC": { 315 | "AA": 219.3, 316 | "AC": 0, 317 | "AD": 233, 318 | "AJ": 235.7 319 | } 320 | }, 321 | "RouteID_1a2bfa66-0a93-4e55-a261-0e341b6f05b6": { 322 | "AA": { 323 | "AA": 0, 324 | "AD": 370.4 325 | }, 326 | "AD": { 327 | "AA": 452.9, 328 | "AD": 0 329 | } 330 | } 331 | } 332 | ``` 333 |
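All of the `model_build_inputs` files above are plain JSON, so they can be loaded with the Python standard library. Here is a short parsing sketch (paths assume you run it from the app root, as in the templates): it recovers each route's realized stop order from `actual_sequences.json` and sums the corresponding transit times from `travel_times.json`:

```python
import json

def load_build_input(name):
    with open('data/model_build_inputs/' + name) as f:
        return json.load(f)

actual_sequences = load_build_input('actual_sequences.json')
travel_times = load_build_input('travel_times.json')

for route_id, body in actual_sequences.items():
    order = body['actual']  # {stop_id: position in the realized sequence}
    sequence = sorted(order, key=order.get)
    # Sum the travel times between consecutive stops in the realized order.
    matrix = travel_times[route_id]
    total_seconds = sum(matrix[a][b] for a, b in zip(sequence, sequence[1:]))
    print(route_id, len(sequence), round(total_seconds, 1))
```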
334 | 335 | 336 | ### `model_build_outputs`: 337 | As for the model build output data, the file(s) generated in the `model-build` process can be in whatever format(s) best suit your needs. Please note that the output file(s) will be used by your `model-apply` implementation. (A short sketch of writing `proposed_sequences.json` during the `model-apply` phase follows the input formats below.) 338 | 339 | ### `model_apply_inputs`: 340 | 1. `new_package_data.json` 341 | 342 | Data format: 343 | ```json 344 | { 345 | "RouteID_<hash>": { 346 | "<stop-id>": { 347 | "PackageID_<hash>": { 348 | "time_window": { 349 | "start_time_utc": "<datetime>", 350 | "end_time_utc": "<datetime>" 351 | }, 352 | "planned_service_time_seconds": "<float>", 353 | "dimensions": { 354 | "depth_cm": "<float>", 355 | "height_cm": "<float>", 356 | "width_cm": "<float>" 357 | } 358 | } 359 | }, 360 | "..." 361 | }, 362 | "..." 363 | } 364 | ``` 365 | 366 |
367 | Example 368 | 369 | ```json 370 | { 371 | "RouteID_1a4903de-1a85-4bca-921a-f746c68fbf7a": { 372 | "AD": { 373 | "PackageID_e28a5205-bc08-4757-af6f-635946cd0551": { 374 | "time_window": { 375 | "start_time_utc": "2018-08-12 12:00:00", 376 | "end_time_utc": "2018-08-12 15:00:00" 377 | }, 378 | "planned_service_time_seconds": 42, 379 | "dimensions": { 380 | "depth_cm": 31.8, 381 | "height_cm": 3.8, 382 | "width_cm": 19.1 383 | } 384 | } 385 | }, 386 | "AR": { 387 | "PackageID_ef435cac-7555-4989-84da-330adae351b5": { 388 | "time_window": { 389 | "start_time_utc": "2018-08-13 1:00:00", 390 | "end_time_utc": "2018-08-13 2:30:00" 391 | }, 392 | "planned_service_time_seconds": 36, 393 | "dimensions": { 394 | "depth_cm": 34.3, 395 | "height_cm": 11.4, 396 | "width_cm": 26.7 397 | } 398 | } 399 | }, 400 | "AX": { 401 | "PackageID_4b50af8d-0fb8-49ed-ac5a-31ed1e19bce6": { 402 | "time_window": { 403 | "start_time_utc": "2018-08-14 14:00:00", 404 | "end_time_utc": "2018-08-14 21:00:00" 405 | }, 406 | "planned_service_time_seconds": 14.5, 407 | "dimensions": { 408 | "depth_cm": 24.1, 409 | "height_cm": 3.6, 410 | "width_cm": 16.5 411 | } 412 | }, 413 | "PackageID_844d116d-fba0-4567-94b6-d9a9b2158136": { 414 | "time_window": { 415 | "start_time_utc": "2018-08-14 14:00:00", 416 | "end_time_utc": "2018-08-14 21:00:00" 417 | }, 418 | "planned_service_time_seconds": 14.5, 419 | "dimensions": { 420 | "depth_cm": 36.2, 421 | "height_cm": 27.9, 422 | "width_cm": 28.6 423 | } 424 | } 425 | } 426 | } 427 | } 428 | ``` 429 |
430 | 
431 | 
432 | 2. `new_route_data.json`
433 | 
434 | Data format:
435 | ```json
436 | {
437 |   "RouteID_<hash>": {
438 |     "station_code": "<String>",
439 |     "date_YYYY_MM_DD": "<YYYY-MM-DD>",
440 |     "departure_time_utc": "<hh:mm:ss>",
441 |     "executor_capacity_cm3": <Number>,
442 |     "stops": {
443 |       "<stop id>": {
444 |         "lat": <Number>,
445 |         "lng": <Number>,
446 |         "type": "<String>",
447 |         "zone_id": "<String>"
448 |       },
449 |       "..."
450 |     }
451 |   },
452 |   "..."
453 | }
454 | ```
455 | 
456 | 
457 | Example 458 | 459 | ```json 460 | { 461 | "RouteID_1a4903de-1a85-4bca-921a-f746c68fbf7a": { 462 | "station_code": "DCH4", 463 | "date_YYYY_MM_DD": "2018-08-14", 464 | "departure_time_utc": "14:06:53", 465 | "executor_capacity_cm3": 4247527, 466 | "stops": { 467 | "AD": { 468 | "lat": 42.078681, 469 | "lng": -88.171583, 470 | "type": "Dropoff", 471 | "zone_id": "D-16.2C" 472 | }, 473 | "AR": { 474 | "lat": 42.076736, 475 | "lng": -88.164158, 476 | "type": "Dropoff", 477 | "zone_id": "D-16.3D" 478 | } 479 | } 480 | }, 481 | "RouteID_1a4e2edf-3fde-409f-8bf6-f01ff98d5afa": { 482 | "station_code": "DLA7", 483 | "date_YYYY_MM_DD": "2018-08-09", 484 | "departure_time_utc": "16:07:13", 485 | "executor_capacity_cm3": 3313071, 486 | "stops": { 487 | "AC": { 488 | "lat": 34.119254, 489 | "lng": -117.614684, 490 | "type": "Dropoff", 491 | "zone_id": "G-25.2E" 492 | } 493 | } 494 | } 495 | } 496 | ``` 497 |
498 | 
499 | 
500 | 3. `new_travel_times.json`
501 | 
502 | Data format:
503 | ```json
504 | {
505 |   "RouteID_<hash>": {
506 |     "<stop id 1>": {
507 |       "<stop id 1>": 0,
508 |       "<stop id 2>": <Number>,
509 |       "<stop id 3>": <Number>,
510 |       "..."
511 |     },
512 |     "<stop id 2>": {
513 |       "<stop id 1>": <Number>,
514 |       "<stop id 2>": 0,
515 |       "<stop id 3>": <Number>,
516 |       "..."
517 |     }
518 |   }
519 | }
520 | ```
521 | 
522 | 
523 | Example
524 | 
525 | ```json
526 | {
527 |   "RouteID_1a4903de-1a85-4bca-921a-f746c68fbf7a": {
528 |     "AD": {
529 |       "AD": 0,
530 |       "AR": 225,
531 |       "AX": 478.1
532 |     },
533 |     "AR": {
534 |       "AD": 251.9,
535 |       "AR": 0,
536 |       "AX": 424.6
537 |     }
538 |   },
539 |   "RouteID_1a4e2edf-3fde-409f-8bf6-f01ff98d5afa": {
540 |     "AC": {
541 |       "AC": 0,
542 |       "AH": 419.3
543 |     },
544 |     "AH": {
545 |       "AC": 443.2,
546 |       "AH": 0
547 |     }
548 |   }
549 | }
550 | ```
551 | 
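
These three files mirror the `model_build_inputs`, minus the fields that are only known after a route has been executed. As a rough illustration of how they might feed a `model-apply` step, the sketch below (hypothetical; not one of the provided templates) greedily orders each route's stops by travel time:

```python
import json

with open('data/model_apply_inputs/new_route_data.json') as in_file:
    new_routes = json.load(in_file)
with open('data/model_apply_inputs/new_travel_times.json') as in_file:
    new_travel_times = json.load(in_file)

def nearest_neighbor_sequence(stop_ids, times):
    """Greedy ordering: repeatedly visit the closest unvisited stop."""
    current = stop_ids[0]  # a real solver would start at the depot/station
    sequence = [current]
    unvisited = set(stop_ids) - {current}
    while unvisited:
        current = min(unvisited, key=lambda stop: times[current][stop])
        sequence.append(current)
        unvisited.remove(current)
    # Map each stop id to its 0-based position in the sequence
    return {stop: i for i, stop in enumerate(sequence)}

proposed = {
    route_id: {'proposed': nearest_neighbor_sequence(list(route['stops']), new_travel_times[route_id])}
    for route_id, route in new_routes.items()
}

with open('data/model_apply_outputs/proposed_sequences.json', 'w') as out_file:
    json.dump(proposed, out_file)
```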
552 | 
553 | ### `model_apply_outputs`:
554 | 1. `proposed_sequences.json`
555 | 
556 | Data format:
557 | ```json
558 | {
559 |   "RouteID_<hash>": {
560 |     "proposed": {
561 |       "<stop id 1>": 0,
562 |       "<stop id 2>": 1,
563 |       "<stop id 3>": 2,
564 |       "<stop id 4>": 3,
565 |       "..."
566 |     },
567 |     "..."
568 |   }
569 | }
570 | ```
571 | 
572 | 
573 | Example
574 | 
575 | ```json
576 | {
577 |   "RouteID_1a4903de-1a85-4bca-921a-f746c68fbf7a": {
578 |     "proposed": {
579 |       "RY": 0,
580 |       "QH": 1,
581 |       "PY": 2,
582 |       "NS": 3
583 |     }
584 |   },
585 |   "RouteID_1a4e2edf-3fde-409f-8bf6-f01ff98d5afa": {
586 |     "proposed": {
587 |       "SF": 0,
588 |       "PT": 1,
589 |       "LG": 2,
590 |       "GU": 3
591 |     }
592 |   }
593 | }
594 | ```
595 | 
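
Note that each `proposed` map must cover exactly the stops of the corresponding route in `new_route_data.json`, using each position `0..n-1` once. A sanity check along these lines (a sketch only, not the official validator) can catch malformed output before scoring:

```python
import json

def is_valid_sequence(proposed, stop_ids):
    # A valid proposal orders exactly the route's stops,
    # using each position 0..n-1 exactly once
    return (set(proposed) == set(stop_ids)
            and sorted(proposed.values()) == list(range(len(stop_ids))))

with open('data/model_apply_inputs/new_route_data.json') as in_file:
    new_routes = json.load(in_file)
with open('data/model_apply_outputs/proposed_sequences.json') as in_file:
    sequences = json.load(in_file)

for route_id, route in new_routes.items():
    proposed = sequences.get(route_id, {}).get('proposed', {})
    if not is_valid_sequence(proposed, route['stops']):
        print('Malformed sequence for {}'.format(route_id))
```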
596 | 
597 | ### `model_score_inputs`:
598 | 1. `new_actual_sequences.json`
599 | 
600 | Data format:
601 | ```json
602 | {
603 |   "RouteID_<hash>": {
604 |     "actual": {
605 |       "<stop id>": <Number>,
606 |       "..."
607 |     }
608 |   },
609 |   "..."
610 | }
611 | ```
612 | 
613 | 
614 | Example 615 | 616 | ```json 617 | { 618 | "RouteID_1a4903de-1a85-4bca-921a-f746c68fbf7a": { 619 | "actual": { 620 | "AD": 44, 621 | "AR": 26, 622 | "AX": 4, 623 | "BA": 85 624 | } 625 | }, 626 | "RouteID_1a4e2edf-3fde-409f-8bf6-f01ff98d5afa": { 627 | "actual": { 628 | "AC": 62, 629 | "AH": 132 630 | } 631 | } 632 | } 633 | ``` 634 |
635 | 
636 | 
637 | 2. `new_invalid_sequence_scores.json`
638 | 
639 | Data format:
640 | ```json
641 | {
642 |   "RouteID_<hash>": <Number>,
643 |   "..."
644 | }
645 | ```
646 | 
647 | 
648 | Example 649 | 650 | ```json 651 | { 652 | "RouteID_1a4903de-1a85-4bca-921a-f746c68fbf7a": 0.9012432765379605, 653 | "RouteID_1a4e2edf-3fde-409f-8bf6-f01ff98d5afa": 0.7468055008554786 654 | } 655 | ``` 656 |
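
Together, these two files give the scorer its ground truth: the `actual` stop orderings to compare against, plus a precomputed per-route fallback score, presumably applied when a proposed sequence is invalid. The sketch below illustrates that selection logic only; the official implementation lives in `scoring/score.py`, and `compute_score` here is a hypothetical stand-in:

```python
import json

with open('data/model_score_inputs/new_actual_sequences.json') as in_file:
    actual_sequences = json.load(in_file)
with open('data/model_score_inputs/new_invalid_sequence_scores.json') as in_file:
    invalid_scores = json.load(in_file)

def route_score(route_id, proposed, compute_score):
    """Score one route, falling back to its invalid-sequence score."""
    actual = actual_sequences[route_id]['actual']
    if set(proposed) != set(actual):
        # Proposed sequence does not cover the actual stops: use the fallback
        return invalid_scores[route_id]
    return compute_score(actual, proposed)
```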
657 | 
658 | ### `model_score_timings`:
659 | 1. `model_build_time.json`
660 | 
661 | Data format:
662 | ```json
663 | {
664 |   "time": <Number>,
665 |   "status": "<String>"
666 | }
667 | ```
668 | 
669 | 
670 | Example 671 | 672 | ```json 673 | { 674 | "time": 14030, 675 | "status": "success" 676 | } 677 | ``` 678 |
679 | 
680 | 
681 | 2. `model_apply_time.json`
682 | 
683 | Data format:
684 | ```json
685 | {
686 |   "time": <Number>,
687 |   "status": "<String>"
688 | }
689 | ```
690 | 
691 | 
692 | Example 693 | 694 | ```json 695 | { 696 | "time": 3920, 697 | "status": "success" 698 | } 699 | ``` 700 |
701 | 
702 | ### `model_score_outputs`:
703 | 1. `scores.json`
704 | 
705 | Data format:
706 | ```json
707 | {
708 |   "submission_score": <Number>,
709 |   "route_scores": {
710 |     "RouteID_<hash>": <Number>,
711 |     "RouteID_<hash>": <Number>,
712 |     "..."
713 |   },
714 |   "route_feasibility": {
715 |     "RouteID_<hash>": <Boolean>,
716 |     "RouteID_<hash>": <Boolean>,
717 |     "..."
718 |   },
719 |   "model_apply_time": <Number>,
720 |   "model_build_time": <Number>
721 | }
722 | ```
723 | 
724 | 
725 | Example 726 | 727 | ```json 728 | { 729 | "submission_score": 0.6086179706085669, 730 | "route_scores": { 731 | "RouteID_1a4903de-1a85-4bca-921a-f746c68fbf7a": 0.9012432765379605, 732 | "RouteID_1a4e2edf-3fde-409f-8bf6-f01ff98d5afa": 0.31599266467917336 733 | }, 734 | "route_feasibility": { 735 | "RouteID_1a4903de-1a85-4bca-921a-f746c68fbf7a": false, 736 | "RouteID_1a4e2edf-3fde-409f-8bf6-f01ff98d5afa": true 737 | }, 738 | "model_apply_time": 3920, 739 | "model_build_time": 14030 740 | } 741 | ``` 742 |
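
Once a scoring run completes, `scores.json` is easy to inspect programmatically, for example to flag infeasible routes or to rank routes by score. A small sketch (the path assumes the `data/model_score_outputs/` layout used by the app templates):

```python
import json

with open('data/model_score_outputs/scores.json') as in_file:
    scores = json.load(in_file)

print('Submission score: {}'.format(scores['submission_score']))

# Routes the scorer flagged as infeasible
infeasible = [r for r, ok in scores['route_feasibility'].items() if not ok]
print('{} infeasible route(s)'.format(len(infeasible)))

# Per-route scores, sorted for quick inspection
for route_id, score in sorted(scores['route_scores'].items(), key=lambda kv: kv[1]):
    print('{}: {:.4f}'.format(route_id, score))
```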
743 | -------------------------------------------------------------------------------- /templates/rc_base/.dockerignore: -------------------------------------------------------------------------------- 1 | # Git 2 | .git 3 | .gitignore 4 | 5 | # Markdown files 6 | *.md 7 | -------------------------------------------------------------------------------- /templates/rc_base/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax = docker/dockerfile:1.2 2 | ARG UBUNTU_RELEASE=20.04 3 | ARG SOURCE_DIR=/home/app/ 4 | 5 | FROM ubuntu:$UBUNTU_RELEASE 6 | ARG SOURCE_DIR 7 | ENV SOURCE_DIR $SOURCE_DIR 8 | ENV PATH $PATH:$SOURCE_DIR 9 | RUN mkdir -p $SOURCE_DIR 10 | WORKDIR $SOURCE_DIR 11 | RUN groupadd --gid 1000 app \ 12 | && useradd --uid 1000 --gid app --shell /bin/bash --create-home app \ 13 | # install pkgs 14 | && apt-get update \ 15 | && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ 16 | # you might need build-essential 17 | build-essential \ 18 | python3 \ 19 | # other pkgs... 20 | && rm -rf /var/lib/apt/lists/* 21 | COPY --chown=app:app ./*.sh ./ 22 | COPY --chown=app:app ./src/ ./src/ 23 | USER app 24 | CMD ["/bin/bash"] 25 | -------------------------------------------------------------------------------- /templates/rc_base/model_apply.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | exec "./src/model_apply.sh" 5 | -------------------------------------------------------------------------------- /templates/rc_base/model_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | exec "./src/model_build.sh" 5 | -------------------------------------------------------------------------------- /templates/rc_base/snapshots/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except these files 4 | !.gitignore 5 | !README.md 6 | -------------------------------------------------------------------------------- /templates/rc_base/snapshots/README.md: -------------------------------------------------------------------------------- 1 | # What is this directory for? 2 | 3 | The current directory serves as a folder to help participants organize and save their Docker images to be submitted and optionally to be validated by the `production-test`. All files under this directory will be ignored by the Git repository, except this README file (see [.gitignore](.gitignore)). 
4 | 
--------------------------------------------------------------------------------
/templates/rc_base/src/model_apply.py:
--------------------------------------------------------------------------------
1 | from os import path
2 | import sys, json, time
3 | 
4 | # Get Directory
5 | BASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))
6 | 
7 | # Read input data: Model Build output
8 | model_path=path.join(BASE_DIR, 'data/model_build_outputs/model.json')
9 | with open(model_path, newline='') as in_file:
10 |     model_build_out = json.load(in_file)
11 | 
12 | # Read input data: Prediction Routes (Model Apply input)
13 | prediction_routes_path=path.join(BASE_DIR, 'data/model_apply_inputs/new_route_data.json')
14 | with open(prediction_routes_path, newline='') as in_file:
15 |     prediction_routes = json.load(in_file)
16 | 
17 | def sort_by_key(stops, sort_by):
18 |     # Serialize keys as id into each dictionary value and make the dict a list
19 |     stops_list=[{**value, **{'id':key}} for key, value in stops.items()]
20 |     # Sort the stops list by the key specified when calling the sort_by_key func
21 |     ordered_stop_list=sorted(stops_list, key=lambda x: x[sort_by])
22 |     # Keep only a sorted list of ids
23 |     ordered_stop_list_ids=[i['id'] for i in ordered_stop_list]
24 |     # Serialize back to dictionary format with the output order as the values
25 |     return {i:ordered_stop_list_ids.index(i) for i in ordered_stop_list_ids}
26 | 
27 | def propose_all_routes(prediction_routes, sort_by):
28 |     # Apply sort_by_key to each route's set of stops
29 |     return {key:{'proposed':sort_by_key(stops=value['stops'], sort_by=sort_by)} for key, value in prediction_routes.items()}
30 | 
31 | sort_by=model_build_out.get("sort_by")
32 | output=propose_all_routes(prediction_routes=prediction_routes, sort_by=sort_by)
33 | 
34 | # Write output data
35 | output_path=path.join(BASE_DIR, 'data/model_apply_outputs/proposed_sequences.json')
36 | with open(output_path, 'w') as out_file:
37 |     json.dump(output, out_file)
38 | 
--------------------------------------------------------------------------------
/templates/rc_base/src/model_apply.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | readonly BASE_DIR=$(dirname $0)
3 | readonly OUTPUTS_DIR="$(dirname ${BASE_DIR})/data/model_apply_outputs"
4 | 
5 | echo "Reading Input Data"
6 | sleep 1
7 | echo "Solving Dark Matter Waveforms"
8 | sleep 1
9 | echo "Quantum Computer is Overheating"
10 | sleep 1
11 | echo "Trying Alternate Measurement Cycles"
12 | sleep 1
13 | echo "Found a Great Solution!"
14 | sleep 1
15 | echo "Checking Validity"
16 | sleep 1
17 | echo "The Answer is 42!"
18 | sleep 1
19 | 
20 | # Remove any old solution if it exists
21 | rm -rf ${OUTPUTS_DIR}/proposed_sequences.json 2> /dev/null
22 | 
23 | echo "Executing a python script from the Shell Script to actually solve the problem"
24 | python3 src/model_apply.py \
25 |   && echo "Success: The '${OUTPUTS_DIR}/proposed_sequences.json' file has been saved" \
26 |   || echo "Failure: Something did not quite work correctly when executing the Python script!"
27 | if [ ! -f "${OUTPUTS_DIR}/proposed_sequences.json" ]; then
28 |   exit 1
29 | fi
30 | echo "Done!"
31 | -------------------------------------------------------------------------------- /templates/rc_base/src/model_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | readonly BASE_DIR=$(dirname $0) 3 | readonly OUT_FILE="$(dirname ${BASE_DIR})/data/model_build_outputs/model.json" 4 | 5 | echo "Initializing Quark Reducer" 6 | sleep 1 7 | echo "Placing Nano Tubes In Gravitational Wavepool" 8 | sleep 1 9 | echo "Measuring Particle Deviations" 10 | sleep 1 11 | echo "Programming Artificial Noggins" 12 | sleep 1 13 | echo "Beaming in Complex Materials" 14 | sleep 1 15 | echo "Solving Model" 16 | sleep 1 17 | echo "Saving Solved Model State" 18 | sleep 1 19 | 20 | echo '{ 21 | "Model": "Hello from the model_build.py script!", 22 | "sort_by": "lat" 23 | }' > ${OUT_FILE} 24 | echo "Success: The '${OUT_FILE}' file has been saved" 25 | -------------------------------------------------------------------------------- /templates/rc_python/.dockerignore: -------------------------------------------------------------------------------- 1 | # Git 2 | .git 3 | .gitignore 4 | 5 | # Markdown files 6 | *.md 7 | -------------------------------------------------------------------------------- /templates/rc_python/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax = docker/dockerfile:1.2 2 | ARG UBUNTU_RELEASE=20.04 3 | ARG SOURCE_DIR=/home/app/ 4 | 5 | FROM ubuntu:$UBUNTU_RELEASE 6 | ARG SOURCE_DIR 7 | ENV SOURCE_DIR $SOURCE_DIR 8 | ENV PATH $PATH:$SOURCE_DIR 9 | RUN mkdir -p $SOURCE_DIR 10 | WORKDIR $SOURCE_DIR 11 | RUN groupadd --gid 1000 app \ 12 | && useradd --uid 1000 --gid app --shell /bin/bash --create-home app \ 13 | # install pkgs 14 | && apt-get update \ 15 | && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ 16 | # you might need build-essential 17 | build-essential \ 18 | python3 \ 19 | python3-pip \ 20 | python3-dev \ 21 | # other pkgs... 
22 | && rm -rf /var/lib/apt/lists/* 23 | # make some useful symlinks 24 | RUN cd /usr/local/bin \ 25 | && ln -s /usr/bin/python3 python \ 26 | && ln -s /usr/bin/python3-config python-config 27 | COPY --chown=app:app ./requirements.txt ./requirements.txt 28 | RUN pip3 install --upgrade pip && pip3 install -r requirements.txt 29 | COPY --chown=app:app ./*.sh ./ 30 | COPY --chown=app:app ./src/ ./src/ 31 | USER app 32 | CMD ["/bin/bash"] 33 | -------------------------------------------------------------------------------- /templates/rc_python/model_apply.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | export PYTHONUNBUFFERED=1 5 | exec python src/model_apply.py 6 | -------------------------------------------------------------------------------- /templates/rc_python/model_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | export PYTHONUNBUFFERED=1 5 | exec python src/model_build.py 6 | -------------------------------------------------------------------------------- /templates/rc_python/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIT-CAVE/rc-cli/fdafa50e8b8a18d4cbff112ec432011952b92c7d/templates/rc_python/requirements.txt -------------------------------------------------------------------------------- /templates/rc_python/snapshots/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except these files 4 | !.gitignore 5 | !README.md 6 | -------------------------------------------------------------------------------- /templates/rc_python/snapshots/README.md: -------------------------------------------------------------------------------- 1 | # What is this directory for? 2 | 3 | The current directory serves as a folder to help participants organize and save their Docker images to be submitted and optionally to be validated by the `production-test`. All files under this directory will be ignored by the Git repository, except this README file (see [.gitignore](.gitignore)). 
4 | -------------------------------------------------------------------------------- /templates/rc_python/src/model_apply.py: -------------------------------------------------------------------------------- 1 | from os import path 2 | import sys, json, time 3 | 4 | # Get Directory 5 | BASE_DIR = path.dirname(path.dirname(path.abspath(__file__))) 6 | 7 | # Read input data 8 | print('Reading Input Data') 9 | # Model Build output 10 | model_path=path.join(BASE_DIR, 'data/model_build_outputs/model.json') 11 | with open(model_path, newline='') as in_file: 12 | model_build_out = json.load(in_file) 13 | # Prediction Routes (Model Apply input) 14 | prediction_routes_path = path.join(BASE_DIR, 'data/model_apply_inputs/new_route_data.json') 15 | with open(prediction_routes_path, newline='') as in_file: 16 | prediction_routes = json.load(in_file) 17 | 18 | def sort_by_key(stops, sort_by): 19 | """ 20 | Takes in the `prediction_routes[route_id]['stops']` dictionary 21 | Returns a dictionary of the stops with their sorted order always placing the depot first 22 | 23 | EG: 24 | 25 | Input: 26 | ``` 27 | stops={ 28 | "Depot": { 29 | "lat": 42.139891, 30 | "lng": -71.494346, 31 | "type": "depot", 32 | "zone_id": null 33 | }, 34 | "StopID_001": { 35 | "lat": 43.139891, 36 | "lng": -71.494346, 37 | "type": "delivery", 38 | "zone_id": "A-2.2A" 39 | }, 40 | "StopID_002": { 41 | "lat": 42.139891, 42 | "lng": -71.494346, 43 | "type": "delivery", 44 | "zone_id": "P-13.1B" 45 | } 46 | } 47 | 48 | print (sort_by_key(stops, 'lat')) 49 | ``` 50 | 51 | Output: 52 | ``` 53 | { 54 | "Depot":1, 55 | "StopID_001":3, 56 | "StopID_002":2 57 | } 58 | ``` 59 | 60 | """ 61 | # Serialize keys as id into each dictionary value and make the dict a list 62 | stops_list=[{**value, **{'id':key}} for key, value in stops.items()] 63 | 64 | # Sort the stops list by the key specified when calling the sort_by_key func 65 | ordered_stop_list=sorted(stops_list, key=lambda x: x[sort_by]) 66 | 67 | # Keep only sorted list of ids 68 | ordered_stop_list_ids=[i['id'] for i in ordered_stop_list] 69 | 70 | # Serialize back to dictionary format with output order as the values 71 | return {i:ordered_stop_list_ids.index(i) for i in ordered_stop_list_ids} 72 | 73 | def propose_all_routes(prediction_routes, sort_by): 74 | """ 75 | Applies `sort_by_key` to each route's set of stops and returns them in a dictionary under `output[route_id]['proposed']` 76 | 77 | EG: 78 | 79 | Input: 80 | ``` 81 | prediction_routes = { 82 | "RouteID_001": { 83 | ... 84 | "stops": { 85 | "Depot": { 86 | "lat": 42.139891, 87 | "lng": -71.494346, 88 | "type": "depot", 89 | "zone_id": null 90 | }, 91 | ... 92 | } 93 | }, 94 | ... 95 | } 96 | 97 | print(propose_all_routes(prediction_routes, 'lat')) 98 | ``` 99 | 100 | Output: 101 | ``` 102 | { 103 | "RouteID_001": { 104 | "proposed": { 105 | "Depot": 0, 106 | "StopID_001": 1, 107 | "StopID_002": 2 108 | } 109 | }, 110 | ... 
111 | } 112 | ``` 113 | """ 114 | return {key:{'proposed':sort_by_key(stops=value['stops'], sort_by=sort_by)} for key, value in prediction_routes.items()} 115 | 116 | # Apply faux algorithms to pass time 117 | time.sleep(1) 118 | print('Solving Dark Matter Waveforms') 119 | time.sleep(1) 120 | print('Quantum Computer is Overheating') 121 | time.sleep(1) 122 | print('Trying Alternate Measurement Cycles') 123 | time.sleep(1) 124 | print('Found a Great Solution!') 125 | time.sleep(1) 126 | print('Checking Validity') 127 | time.sleep(1) 128 | print('The Answer is 42!') 129 | time.sleep(1) 130 | 131 | 132 | print('\nApplying answer with real model...') 133 | sort_by=model_build_out.get("sort_by") 134 | print('Sorting data by the key: {}'.format(sort_by)) 135 | output=propose_all_routes(prediction_routes=prediction_routes, sort_by=sort_by) 136 | print('Data sorted!') 137 | 138 | # Write output data 139 | output_path=path.join(BASE_DIR, 'data/model_apply_outputs/proposed_sequences.json') 140 | with open(output_path, 'w') as out_file: 141 | json.dump(output, out_file) 142 | print("Success: The '{}' file has been saved".format(output_path)) 143 | 144 | print('Done!') 145 | -------------------------------------------------------------------------------- /templates/rc_python/src/model_build.py: -------------------------------------------------------------------------------- 1 | from os import path 2 | import sys, json, time 3 | 4 | # Get Directory 5 | BASE_DIR = path.dirname(path.dirname(path.abspath(__file__))) 6 | 7 | # Read input data 8 | print('Reading Input Data') 9 | training_routes_path=path.join(BASE_DIR, 'data/model_build_inputs/route_data.json') 10 | with open(training_routes_path, newline='') as in_file: 11 | actual_routes = json.load(in_file) 12 | 13 | 14 | # Solve for something hard 15 | print('Initializing Quark Reducer') 16 | time.sleep(1) 17 | print('Placing Nano Tubes In Gravitational Wavepool') 18 | time.sleep(1) 19 | print('Measuring Particle Deviations') 20 | time.sleep(1) 21 | print('Programming Artificial Noggins') 22 | time.sleep(1) 23 | print('Beaming in Complex Materials') 24 | time.sleep(1) 25 | print('Solving Model') 26 | time.sleep(1) 27 | print('Saving Solved Model State') 28 | output={ 29 | 'Model':'Hello from the model_build.py script!', 30 | 'sort_by':'lat' 31 | } 32 | 33 | # Write output data 34 | model_path=path.join(BASE_DIR, 'data/model_build_outputs/model.json') 35 | with open(model_path, 'w') as out_file: 36 | json.dump(output, out_file) 37 | print("Success: The '{}' file has been saved".format(model_path)) 38 | -------------------------------------------------------------------------------- /templates/rc_python_lite/.dockerignore: -------------------------------------------------------------------------------- 1 | # Git 2 | .git 3 | .gitignore 4 | 5 | # Markdown files 6 | *.md 7 | -------------------------------------------------------------------------------- /templates/rc_python_lite/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax = docker/dockerfile:1.2 2 | ARG PYTHON_VERSION=3.9.1 3 | ARG SOURCE_DIR=/home/app/ 4 | 5 | FROM python:${PYTHON_VERSION}-alpine 6 | RUN apk update && apk --no-cache add \ 7 | # adding deps required by some popular Python packages 8 | g++ 9 | COPY ./requirements.txt ./ 10 | # install Python dependencies to the local user directory 11 | RUN pip install --user --requirement ./requirements.txt 12 | 13 | ARG SOURCE_DIR 14 | ENV SOURCE_DIR $SOURCE_DIR 15 | ENV PATH $PATH:$SOURCE_DIR 
16 | RUN addgroup -g 1000 app && adduser -u 1000 -G app -s /bin/sh -D app 17 | RUN mkdir -p $SOURCE_DIR 18 | WORKDIR $SOURCE_DIR 19 | COPY --chown=app:app ./*.sh ./ 20 | COPY --chown=app:app ./src/ ./src/ 21 | USER app 22 | CMD ["/bin/sh"] 23 | -------------------------------------------------------------------------------- /templates/rc_python_lite/model_apply.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | export PYTHONUNBUFFERED=1 5 | exec python src/model_apply.py 6 | -------------------------------------------------------------------------------- /templates/rc_python_lite/model_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | export PYTHONUNBUFFERED=1 5 | exec python src/model_build.py 6 | -------------------------------------------------------------------------------- /templates/rc_python_lite/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MIT-CAVE/rc-cli/fdafa50e8b8a18d4cbff112ec432011952b92c7d/templates/rc_python_lite/requirements.txt -------------------------------------------------------------------------------- /templates/rc_python_lite/snapshots/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except these files 4 | !.gitignore 5 | !README.md 6 | -------------------------------------------------------------------------------- /templates/rc_python_lite/snapshots/README.md: -------------------------------------------------------------------------------- 1 | # What is this directory for? 2 | 3 | The current directory serves as a folder to help participants organize and save their Docker images to be submitted and optionally to be validated by the `production-test`. All files under this directory will be ignored by the Git repository, except this README file (see [.gitignore](.gitignore)). 
4 | -------------------------------------------------------------------------------- /templates/rc_python_lite/src/model_apply.py: -------------------------------------------------------------------------------- 1 | from os import path 2 | import sys, json, time 3 | 4 | # Get Directory 5 | BASE_DIR = path.dirname(path.dirname(path.abspath(__file__))) 6 | 7 | # Read input data 8 | print('Reading Input Data') 9 | # Model Build output 10 | model_path=path.join(BASE_DIR, 'data/model_build_outputs/model.json') 11 | with open(model_path, newline='') as in_file: 12 | model_build_out = json.load(in_file) 13 | # Prediction Routes (Model Apply input) 14 | prediction_routes_path = path.join(BASE_DIR, 'data/model_apply_inputs/new_route_data.json') 15 | with open(prediction_routes_path, newline='') as in_file: 16 | prediction_routes = json.load(in_file) 17 | 18 | def sort_by_key(stops, sort_by): 19 | """ 20 | Takes in the `prediction_routes[route_id]['stops']` dictionary 21 | Returns a dictionary of the stops with their sorted order always placing the depot first 22 | 23 | EG: 24 | 25 | Input: 26 | ``` 27 | stops={ 28 | "Depot": { 29 | "lat": 42.139891, 30 | "lng": -71.494346, 31 | "type": "depot", 32 | "zone_id": null 33 | }, 34 | "StopID_001": { 35 | "lat": 43.139891, 36 | "lng": -71.494346, 37 | "type": "delivery", 38 | "zone_id": "A-2.2A" 39 | }, 40 | "StopID_002": { 41 | "lat": 42.139891, 42 | "lng": -71.494346, 43 | "type": "delivery", 44 | "zone_id": "P-13.1B" 45 | } 46 | } 47 | 48 | print (sort_by_key(stops, 'lat')) 49 | ``` 50 | 51 | Output: 52 | ``` 53 | { 54 | "Depot":1, 55 | "StopID_001":3, 56 | "StopID_002":2 57 | } 58 | ``` 59 | 60 | """ 61 | # Serialize keys as id into each dictionary value and make the dict a list 62 | stops_list=[{**value, **{'id':key}} for key, value in stops.items()] 63 | 64 | # Sort the stops list by the key specified when calling the sort_by_key func 65 | ordered_stop_list=sorted(stops_list, key=lambda x: x[sort_by]) 66 | 67 | # Keep only sorted list of ids 68 | ordered_stop_list_ids=[i['id'] for i in ordered_stop_list] 69 | 70 | # Serialize back to dictionary format with output order as the values 71 | return {i:ordered_stop_list_ids.index(i) for i in ordered_stop_list_ids} 72 | 73 | def propose_all_routes(prediction_routes, sort_by): 74 | """ 75 | Applies `sort_by_key` to each route's set of stops and returns them in a dictionary under `output[route_id]['proposed']` 76 | 77 | EG: 78 | 79 | Input: 80 | ``` 81 | prediction_routes = { 82 | "RouteID_001": { 83 | ... 84 | "stops": { 85 | "Depot": { 86 | "lat": 42.139891, 87 | "lng": -71.494346, 88 | "type": "depot", 89 | "zone_id": null 90 | }, 91 | ... 92 | } 93 | }, 94 | ... 95 | } 96 | 97 | print(propose_all_routes(prediction_routes, 'lat')) 98 | ``` 99 | 100 | Output: 101 | ``` 102 | { 103 | "RouteID_001": { 104 | "proposed": { 105 | "Depot": 0, 106 | "StopID_001": 1, 107 | "StopID_002": 2 108 | } 109 | }, 110 | ... 
111 | } 112 | ``` 113 | """ 114 | return {key:{'proposed':sort_by_key(stops=value['stops'], sort_by=sort_by)} for key, value in prediction_routes.items()} 115 | 116 | # Apply faux algorithms to pass time 117 | time.sleep(1) 118 | print('Solving Dark Matter Waveforms') 119 | time.sleep(1) 120 | print('Quantum Computer is Overheating') 121 | time.sleep(1) 122 | print('Trying Alternate Measurement Cycles') 123 | time.sleep(1) 124 | print('Found a Great Solution!') 125 | time.sleep(1) 126 | print('Checking Validity') 127 | time.sleep(1) 128 | print('The Answer is 42!') 129 | time.sleep(1) 130 | 131 | 132 | print('\nApplying answer with real model...') 133 | sort_by=model_build_out.get("sort_by") 134 | print('Sorting data by the key: {}'.format(sort_by)) 135 | output=propose_all_routes(prediction_routes=prediction_routes, sort_by=sort_by) 136 | print('Data sorted!') 137 | 138 | # Write output data 139 | output_path=path.join(BASE_DIR, 'data/model_apply_outputs/proposed_sequences.json') 140 | with open(output_path, 'w') as out_file: 141 | json.dump(output, out_file) 142 | print("Success: The '{}' file has been saved".format(output_path)) 143 | 144 | print('Done!') 145 | -------------------------------------------------------------------------------- /templates/rc_python_lite/src/model_build.py: -------------------------------------------------------------------------------- 1 | from os import path 2 | import sys, json, time 3 | 4 | # Get Directory 5 | BASE_DIR = path.dirname(path.dirname(path.abspath(__file__))) 6 | 7 | # Read input data 8 | print('Reading Input Data') 9 | training_routes_path=path.join(BASE_DIR, 'data/model_build_inputs/route_data.json') 10 | with open(training_routes_path, newline='') as in_file: 11 | actual_routes = json.load(in_file) 12 | 13 | 14 | # Solve for something hard 15 | print('Initializing Quark Reducer') 16 | time.sleep(1) 17 | print('Placing Nano Tubes In Gravitational Wavepool') 18 | time.sleep(1) 19 | print('Measuring Particle Deviations') 20 | time.sleep(1) 21 | print('Programming Artificial Noggins') 22 | time.sleep(1) 23 | print('Beaming in Complex Materials') 24 | time.sleep(1) 25 | print('Solving Model') 26 | time.sleep(1) 27 | print('Saving Solved Model State') 28 | output={ 29 | 'Model':'Hello from the model_build.py script!', 30 | 'sort_by':'lat' 31 | } 32 | 33 | # Write output data 34 | model_path=path.join(BASE_DIR, 'data/model_build_outputs/model.json') 35 | with open(model_path, 'w') as out_file: 36 | json.dump(output, out_file) 37 | print("Success: The '{}' file has been saved".format(model_path)) 38 | -------------------------------------------------------------------------------- /templates/rc_r/.dockerignore: -------------------------------------------------------------------------------- 1 | # Git 2 | .git 3 | .gitignore 4 | 5 | # Markdown files 6 | *.md 7 | -------------------------------------------------------------------------------- /templates/rc_r/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax = docker/dockerfile:1.2 2 | ARG R_BASE_VERSION=4.0.4 3 | ARG SOURCE_DIR=/home/app/ 4 | 5 | # install pkgs 6 | # RUN apt update \ 7 | # && DEBIAN_FRONTEND=noninteractive apt install -y --no-install-recommends \ 8 | # # Packages here \ 9 | # && rm -rf /var/lib/apt/lists/* 10 | RUN Rscript -e "install.packages(\"versions\")" 11 | COPY ./requirements.txt ./ 12 | # install R dependencies from requirements.txt file 13 | RUN while IFS=" " read -r package version; do \ 14 | Rscript -e 
"versions::install.versions(c('$package'), c('$version'))"; \ 15 | done < "requirements.txt" 16 | 17 | ARG SOURCE_DIR 18 | ENV SOURCE_DIR $SOURCE_DIR 19 | ENV PATH $PATH:$SOURCE_DIR 20 | RUN mkdir -p $SOURCE_DIR 21 | WORKDIR $SOURCE_DIR 22 | RUN deluser --quiet --remove-all-files docker \ 23 | && groupadd --gid 1000 app \ 24 | && useradd --uid 1000 --gid app --shell /bin/bash --create-home app 25 | COPY --chown=app:app ./*.sh ./ 26 | COPY --chown=app:app ./src/ ./src/ 27 | USER app 28 | CMD ["/bin/bash"] 29 | -------------------------------------------------------------------------------- /templates/rc_r/model_apply.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | exec Rscript ./src/model_apply.r 5 | -------------------------------------------------------------------------------- /templates/rc_r/model_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | exec Rscript ./src/model_build.r 5 | -------------------------------------------------------------------------------- /templates/rc_r/requirements.txt: -------------------------------------------------------------------------------- 1 | curl 4.3 2 | jsonlite 1.7.2 3 | -------------------------------------------------------------------------------- /templates/rc_r/snapshots/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except these files 4 | !.gitignore 5 | !README.md 6 | -------------------------------------------------------------------------------- /templates/rc_r/snapshots/README.md: -------------------------------------------------------------------------------- 1 | # What is this directory for? 2 | 3 | The current directory serves as a folder to help participants organize and save their Docker images to be submitted and optionally to be validated by the `production-test`. All files under this directory will be ignored by the Git repository, except this README file (see [.gitignore](.gitignore)). 
4 | -------------------------------------------------------------------------------- /templates/rc_r/src/model_apply.r: -------------------------------------------------------------------------------- 1 | # r model apply example 2 | print("Reading Input Data") 3 | Sys.sleep(1) 4 | print("Solving Dark Matter Waveforms") 5 | Sys.sleep(1) 6 | print("Quantum Computer is Overheating") 7 | Sys.sleep(1) 8 | print("Trying Alternate Measurement Cycles") 9 | Sys.sleep(1) 10 | print("Found a Great Solution!") 11 | Sys.sleep(1) 12 | print("Checking Validity") 13 | Sys.sleep(1) 14 | print("The Answer is 42!") 15 | Sys.sleep(1) 16 | 17 | # Copy in example output as the output for this algorithm 18 | if (file.exists("data/model_apply_outputs/proposed_sequences.json")) { 19 | file.remove("data/model_apply_outputs/proposed_sequences.json") 20 | } 21 | cat("{}", file = "data/model_apply_outputs/proposed_sequences.json") 22 | 23 | print(paste( 24 | "Success: The '", 25 | getwd(), 26 | "/data/model_apply_outputs/proposed_sequences.json' file has been saved.", 27 | sep = "" 28 | )) 29 | print("Done!") 30 | -------------------------------------------------------------------------------- /templates/rc_r/src/model_build.r: -------------------------------------------------------------------------------- 1 | # import base package for Sys.sleep 2 | # install.packages("base") 3 | 4 | print("Initializing Quark Reducer") 5 | Sys.sleep(1) 6 | print("Placing Nano Tubes In Gravitational Wavepool") 7 | Sys.sleep(1) 8 | print("Measuring Particle Deviations") 9 | Sys.sleep(1) 10 | print("Programming Artificial Noggins") 11 | Sys.sleep(1) 12 | print("Beaming in Complex Materials") 13 | Sys.sleep(1) 14 | print("Solving Model") 15 | Sys.sleep(1) 16 | print("Saving Solved Model State") 17 | Sys.sleep(1) 18 | 19 | # write an output file 20 | cat('{ 21 | "Model": "Hello from the model_build.r script!", 22 | "sort_by": "lat" 23 | }', file = "data/model_build_outputs/model.json") 24 | 25 | print(paste( 26 | "Success: The '", 27 | getwd(), 28 | "/data/model_build_outputs/model.json' file has been saved.", 29 | sep = "" 30 | )) 31 | --------------------------------------------------------------------------------