├── .ci ├── py310_latest.yaml ├── py311_latest.yaml ├── py312_latest.yaml ├── py313_dev.yaml ├── py313_latest.yaml ├── py38_latest.yaml └── py39_latest.yaml ├── .gitattributes ├── .github └── workflows │ ├── build_docs.yml │ ├── testing.yml │ └── upload_package.yml ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── README.md ├── access ├── __init__.py ├── access.py ├── datasets.py ├── fca.py ├── helpers.py ├── raam.py ├── tests │ ├── data │ │ └── model_output.json │ ├── test_access.py │ ├── test_datasets.py │ ├── test_euclidean.py │ ├── test_floating_catchment_area.py │ ├── test_helpers.py │ ├── test_hospital_example.py │ ├── test_misc.py │ ├── test_raam.py │ ├── test_weighted_catchment.py │ ├── test_weights.py │ └── util.py └── weights.py ├── codecov.yml ├── docs ├── Makefile ├── _static │ ├── images │ │ ├── access_cook_hospitals.png │ │ ├── access_cook_hospitals_2.png │ │ ├── compare_access.png │ │ ├── csds.png │ │ ├── euclidean_distance.png │ │ ├── fig1.png │ │ ├── full_us.jpg │ │ ├── googlemaps.png │ │ ├── graphhopper.png │ │ ├── gravity_model.png │ │ ├── osrm.png │ │ ├── otp.png │ │ ├── pandana.png │ │ ├── pgrouting.png │ │ ├── pysal_favicon.ico │ │ ├── r.png │ │ ├── screenshot_cost_website.png │ │ ├── thumbnail_workflow.png │ │ └── valhalla.png │ ├── pysal-styles.css │ └── references.bib ├── _templates │ └── globaltoc.html ├── access_class.rst ├── access_functions.rst ├── api.rst ├── conf.py ├── index.rst ├── installation.rst ├── make.bat ├── references.rst ├── resources.rst └── tutorials.rst ├── environment.yml ├── notebooks ├── Generating and Plotting a Variety of Access Scores.ipynb ├── How to Read, Filter, and Convert Shapefiles to .geojson.ipynb ├── How to Subset the Travel Cost Matricies.ipynb ├── How to Use access to Compute Access Scores to Resources Given XY Coordinates Joined to Census Tracts.ipynb ├── How to Use access to Compute Access to Locations Given X Y Coordinates.ipynb └── r.awk ├── pyproject.toml └── tools └── gitcount.ipynb /.ci/py310_latest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: test 3 | channels: 4 | - conda-forge 5 | dependencies: 6 | - python=3.10 7 | - geopandas 8 | - numpy 9 | - pandas 10 | - scipy 11 | # testing, etc 12 | - pytest 13 | - pytest-cov 14 | - pytest-xdist 15 | -------------------------------------------------------------------------------- /.ci/py311_latest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: test 3 | channels: 4 | - conda-forge 5 | dependencies: 6 | - python=3.11 7 | - geopandas 8 | - numpy 9 | - pandas 10 | - scipy 11 | # testing, etc 12 | - pytest 13 | - pytest-cov 14 | - pytest-xdist 15 | -------------------------------------------------------------------------------- /.ci/py312_latest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: test 3 | channels: 4 | - conda-forge 5 | dependencies: 6 | - python=3.12 7 | - geopandas 8 | - numpy 9 | - pandas 10 | - scipy 11 | # testing, etc 12 | - pytest 13 | - pytest-cov 14 | - pytest-xdist 15 | -------------------------------------------------------------------------------- /.ci/py313_dev.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: test 3 | channels: 4 | - conda-forge 5 | dependencies: 6 | - python=3.13 7 | - git 8 | - pip 9 | # testing 10 | - pytest 11 | - pytest-cov 12 | - pytest-xdist 13 | - pip: 14 | # dev versions of packages 15 | - --pre \ 
16 | --index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ 17 | --extra-index-url https://pypi.org/simple 18 | - numpy 19 | - pandas 20 | - pyogrio 21 | - scipy 22 | - shapely 23 | - git+https://github.com/geopandas/geopandas.git 24 | -------------------------------------------------------------------------------- /.ci/py313_latest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: test 3 | channels: 4 | - conda-forge 5 | dependencies: 6 | - python=3.13 7 | - geopandas 8 | - numpy 9 | - pandas 10 | - scipy 11 | # testing, etc 12 | - pytest 13 | - pytest-cov 14 | - pytest-xdist 15 | # docs 16 | - nbsphinx 17 | - numpydoc 18 | - quilt3 19 | - sphinx 20 | - sphinxcontrib-napoleon 21 | - sphinx-gallery 22 | - sphinxcontrib-bibtex 23 | - sphinx_bootstrap_theme 24 | -------------------------------------------------------------------------------- /.ci/py38_latest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: test 3 | channels: 4 | - conda-forge 5 | dependencies: 6 | - python=3.8 7 | - geopandas 8 | - numpy 9 | - pandas 10 | - scipy 11 | # testing, etc 12 | - pytest 13 | - pytest-cov 14 | - pytest-xdist 15 | -------------------------------------------------------------------------------- /.ci/py39_latest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: test 3 | channels: 4 | - conda-forge 5 | dependencies: 6 | - python=3.9 7 | - geopandas 8 | - numpy 9 | - pandas 10 | - scipy 11 | # testing, etc 12 | - pytest 13 | - pytest-cov 14 | - pytest-xdist 15 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | access/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.github/workflows/build_docs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Build Docs 3 | 4 | on: 5 | push: 6 | tags: 7 | - 'v*' 8 | workflow_dispatch: 9 | inputs: 10 | version: 11 | description: Manual Doc Build 12 | default: run-doc-build 13 | required: false 14 | 15 | jobs: 16 | docs: 17 | name: CI (${{ matrix.os }}-${{ matrix.environment-file }}) 18 | runs-on: ${{ matrix.os }} 19 | continue-on-error: ${{ matrix.experimental }} 20 | timeout-minutes: 90 21 | strategy: 22 | matrix: 23 | os: ['ubuntu-latest'] 24 | environment-file: [py313_latest] 25 | experimental: [false] 26 | steps: 27 | - uses: actions/checkout@v2 28 | - uses: conda-incubator/setup-miniconda@v2 29 | with: 30 | miniconda-version: 'latest' 31 | auto-update-conda: true 32 | auto-activate-base: false 33 | environment-file: .ci/${{ matrix.environment-file }}.yaml 34 | activate-environment: test 35 | - shell: bash -l {0} 36 | run: conda info --all 37 | - shell: bash -l {0} 38 | run: conda list 39 | - shell: bash -l {0} 40 | run: conda config --show-sources 41 | - shell: bash -l {0} 42 | run: conda config --show 43 | - shell: bash -l {0} 44 | run: pip install -e .
--no-deps --force-reinstall 45 | - shell: bash -l {0} 46 | run: cd docs; make html 47 | - name: Commit documentation changes 48 | run: | 49 | git clone https://github.com/ammaraskar/sphinx-action-test.git --branch gh-pages --single-branch gh-pages 50 | cp -r docs/_build/html/* gh-pages/ 51 | cd gh-pages 52 | git config --local user.email "action@github.com" 53 | git config --local user.name "GitHub Action" 54 | git add . 55 | git commit -m "Update documentation" -a || true 56 | # The above command will fail if no changes were present, so we ignore 57 | # the return code. 58 | - name: Push changes 59 | uses: ad-m/github-push-action@master 60 | with: 61 | branch: gh-pages 62 | directory: gh-pages 63 | github_token: ${{ secrets.GITHUB_TOKEN }} 64 | force: true 65 | -------------------------------------------------------------------------------- /.github/workflows/testing.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Continuous Integration 3 | 4 | on: 5 | push: 6 | branches: [main] 7 | pull_request: 8 | branches: 9 | - "*" 10 | schedule: 11 | - cron: "59 23 * * *" 12 | workflow_dispatch: 13 | inputs: 14 | version: 15 | description: Manual CI Run 16 | default: test 17 | required: false 18 | 19 | jobs: 20 | tests: 21 | name: CI (${{ matrix.os }}-${{ matrix.environment-file }}) 22 | runs-on: ${{ matrix.os }} 23 | timeout-minutes: 90 24 | defaults: 25 | run: 26 | shell: bash -l {0} 27 | strategy: 28 | matrix: 29 | os: [ubuntu-latest] 30 | environment-file: [ 31 | py38_latest, 32 | py39_latest, 33 | py310_latest, 34 | py311_latest, 35 | py312_latest, 36 | py313_latest, 37 | py313_dev, 38 | ] 39 | experimental: [false] 40 | include: 41 | - environment-file: py313_latest 42 | os: macos-latest 43 | - environment-file: py313_latest 44 | os: windows-latest 45 | fail-fast: false 46 | 47 | steps: 48 | - name: checkout repo 49 | uses: actions/checkout@v4 50 | with: 51 | fetch-depth: 0 # Fetch all history for all branches and tags. 52 | 53 | - name: setup micromamba 54 | uses: mamba-org/setup-micromamba@v2 55 | with: 56 | environment-file: .ci/${{ matrix.environment-file }}.yaml 57 | micromamba-version: "latest" 58 | 59 | - name: install package 60 | run: "pip install -e . --no-deps" 61 | 62 | - name: run tests 63 | run: | 64 | pytest \ 65 | access/ \ 66 | -vvv \ 67 | -r a \ 68 | --numprocesses logical \ 69 | --color yes \ 70 | --cov access \ 71 | --cov-append \ 72 | --cov-report term-missing \ 73 | --cov-report xml . 74 | 75 | - name: codecov 76 | uses: codecov/codecov-action@v1 77 | with: 78 | token: ${{ secrets.CODECOV_TOKEN }} 79 | file: ./coverage.xml 80 | name: access-codecov 81 | -------------------------------------------------------------------------------- /.github/workflows/upload_package.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Release Package 3 | 4 | on: 5 | push: 6 | # Sequence of patterns matched against refs/tags 7 | tags: 8 | - 'v*' # Push events to matching v*, i.e. 
v1.0, v20.15.10 9 | 10 | jobs: 11 | build: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v1 15 | - name: Set up Python 16 | uses: actions/setup-python@v1 17 | with: 18 | python-version: '3.x' 19 | - name: Install dependencies 20 | run: | 21 | python -m pip install --upgrade pip 22 | pip install setuptools wheel twine jupyter urllib3 pandas pyyaml 23 | python setup.py sdist bdist_wheel 24 | - name: Publish distribution 📦 to PyPI 25 | uses: pypa/gh-action-pypi-publish@master 26 | with: 27 | password: ${{ secrets.PYPI_PASSWORD }} 28 | - name: Run Changelog 29 | run: | 30 | jupyter nbconvert --to notebook --execute --inplace --ExecutePreprocessor.timeout=-1 --ExecutePreprocessor.kernel_name=python3 tools/gitcount.ipynb 31 | - name: Cat Changelog 32 | uses: pCYSl5EDgo/cat@master 33 | id: changetxt 34 | with: 35 | path: ./tools/changelog.md 36 | env: 37 | TEXT: ${{ steps.changetxt.outputs.text }} 38 | - name: Create Release 39 | id: create_release 40 | uses: actions/create-release@v1 41 | env: 42 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token 43 | with: 44 | tag_name: ${{ github.ref }} 45 | release_name: Release ${{ github.ref }} 46 | body: ${{ steps.changetxt.outputs.text }} 47 | draft: false 48 | prerelease: false 49 | - name: Get Asset name 50 | run: | 51 | export PKG=$(ls dist/) 52 | set -- $PKG 53 | echo "name=$1" >> $GITHUB_ENV 54 | - name: Upload Release Asset 55 | id: upload-release-asset 56 | uses: actions/upload-release-asset@v1 57 | env: 58 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 59 | with: 60 | upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps 61 | asset_path: dist/${{ env.name }} 62 | asset_name: ${{ env.name }} 63 | asset_content_type: application/zip 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | *.pyc 3 | .rope* 4 | .idea/ 5 | notebooks/.ipynb_checkpoints/ 6 | .DS_Store 7 | .ipynb_checkpoints/ 8 | *.bak 9 | .eggs/ 10 | *.egg-info/ 11 | 12 | # Packages 13 | *.egg 14 | *.egg-info 15 | dist 16 | build 17 | eggs 18 | parts 19 | bin 20 | var 21 | sdist 22 | develop-eggs 23 | .installed.cfg 24 | lib 25 | lib64 26 | __pycache__ 27 | examples/il_med/il_times.csv 28 | docs/_static/images/publishing_agreement.pdf 29 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/psf/black 3 | rev: 22.3.0 4 | hooks: 5 | - id: black 6 | language_version: python3 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright 2018 pysal-access developers 4 | 5 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 8 | 9 | 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 10 | 11 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Spatial Access 2 | 3 | ![tag](https://img.shields.io/github/v/release/pysal/access?include_prereleases&sort=semver) 4 | [![Documentation](https://img.shields.io/static/v1.svg?label=docs&message=current&color=9cf)](http://pysal.org/access/) 5 | [![Continuous Integration](https://github.com/pysal/access/actions/workflows/testing.yml/badge.svg)](https://github.com/pysal/access/actions/workflows/testing.yml) 6 | 7 | This package provides classical and novel measures of spatial accessibility to services. 8 | 9 | For full documentation, see [here](https://pysal.org/access/). 10 | -------------------------------------------------------------------------------- /access/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | :mod:`access` --- Accessibility Metrics 3 | ================================================= 4 | """ 5 | 6 | import contextlib 7 | from importlib.metadata import PackageNotFoundError, version 8 | 9 | from .access import Access 10 | from .datasets import Datasets 11 | 12 | with contextlib.suppress(PackageNotFoundError): 13 | __version__ = version("access") -------------------------------------------------------------------------------- /access/datasets.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pandas as pd 4 | 5 | 6 | class Datasets(object): 7 | @staticmethod 8 | def load_data(key): 9 | """ 10 | Load one of the available example datasets, returned as a pandas or geopandas dataframe.
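
Examples
--------
A quick illustration -- the keys are those listed by ``available_datasets``, and fetching the files requires network access:

>>> chi_docs = Datasets.load_data("chi_doc")       # a pandas.DataFrame
>>> chi_geom = Datasets.load_data("chi_pop_geom")  # a geopandas.GeoDataFrame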
11 | """ 12 | 13 | _datasets = { 14 | "chi_times": "chicago_metro_times.csv.bz2", 15 | "chi_doc": "chicago_metro_docs_dentists.csv", 16 | "chi_pop": "chicago_metro_pop.csv", 17 | "chi_doc_geom": "chicago_metro_docs_dentists.geojson", 18 | "chi_pop_geom": "chicago_metro_pop.geojson", 19 | "chi_euclidean": "chicago_metro_euclidean_costs.csv.bz2", 20 | "chi_euclidean_neighbors": "chicago_metro_euclidean_cost_neighbors.csv.bz2", 21 | "cook_county_hospitals": "cook_county_hospitals.csv", 22 | "cook_county_hospitals_geom": "hospitals_cookcty.geojson", 23 | "cook_county_tracts": "cook_county_tracts.geojson", 24 | } 25 | 26 | url = f"https://d2r7gabxtstf5s.cloudfront.net/ex_datasets/{_datasets[key]}" 27 | 28 | if ".geojson" in url: 29 | import geopandas as gpd 30 | 31 | return gpd.read_file(url) 32 | 33 | return pd.read_csv(url) 34 | 35 | @staticmethod 36 | def available_datasets(): 37 | desc = """ 38 | chi_times: Cost matrix with travel times from each Chicago Census Tract to all others.\n 39 | chi_doc: Doctor and dentist counts for each Chicago Census Tract.\n 40 | chi_pop: Population counts for each Chicago Census Tract.\n 41 | chi_doc_geom: Doctor and dentist counts for each Chicago Census Tract along with geometric representations for Census Tracts.\n 42 | chi_pop_geom: Population counts for each Chicago Census Tract along with geometric representations for Census Tracts.\n 43 | chi_euclidean: Euclidean distance cost matrix with distances from each demand Chicago Census Tract to all others.\n 44 | chi_euclidean_neighbors: Euclidean distance cost matrix with distances from each supply Census Tract to all others.\n 45 | cook_county_hospitals: Contains data for each hospital location in Cook County including X Y coordinates.\n 46 | cook_county_hospitals_geom: Contains data for each hospital location in Cook County including X Y coordinates, and geometric points for each hospital.\n 47 | cook_county_tracts: Geometric representation of each Census Tract in Cook County. 48 | """ 49 | print(desc) 50 | -------------------------------------------------------------------------------- /access/fca.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | import numpy as np 3 | import pandas as pd 4 | 5 | from .weights import step_fn 6 | 7 | 8 | def weighted_catchment( 9 | loc_df, 10 | cost_df, 11 | max_cost=None, 12 | cost_source="origin", 13 | cost_dest="dest", 14 | cost_cost="cost", 15 | loc_index="geoid", 16 | loc_value=None, 17 | weight_fn=None, 18 | three_stage_weight=None, 19 | ): 20 | """ 21 | Calculation of the floating catchment (buffered) accessibility 22 | sum, from DataFrames with computed distances. 23 | This catchment may be either a simple buffer -- with cost below 24 | a single threshold -- or an additional weight may be applied 25 | as a function of the access cost. 26 | 27 | Parameters 28 | ---------- 29 | 30 | loc_df : `pandas.DataFrame `_ 31 | should contain at _least_ a list of the locations (`df_dest`) at which facilities are located. 32 | loc_index : {bool, str} 33 | is the the name of the df column that holds the facility locations. 34 | If it is a bool, then the it the location is already on the index. 35 | loc_value : str 36 | If this value is `None`, a count will be used in place of a weight. 37 | Use this, for instance, to count restaurants, instead of total doctors in a practice. 38 | cost_df : `pandas.DataFrame `_ 39 | This dataframe contains the precomputed costs from an origin/index location to destinations. 
40 | cost_source : str 41 | The name of the column of the index locations -- this is what will be grouped. 42 | cost_dest : str 43 | The name of the column of the destination locations. 44 | This is what will be _in_ each group. 45 | cost_cost : str 46 | This is the name of the cost column. 47 | weight_fn : function 48 | This function will weight the value of resources/facilities, 49 | as a function of the raw cost. 50 | max_cost : float 51 | This is the maximum cost to consider in the weighted sum; 52 | note that it applies _along with_ the weight function. 53 | 54 | Returns 55 | ------- 56 | resources : pandas.Series 57 | A -- potentially weighted -- sum of resources, facilities, or consumers. 58 | """ 59 | # merge the loc dataframe and cost dataframe together 60 | if loc_index is True: 61 | temp = pd.merge(cost_df, loc_df, left_on=cost_source, right_index=True) 62 | else: 63 | temp = pd.merge(cost_df, loc_df, left_on=cost_source, right_on=loc_index) 64 | 65 | # constrain by max cost 66 | if max_cost is not None: 67 | temp = temp[temp[cost_cost] < max_cost].copy() 68 | 69 | # apply a weight function if inputted -- either enhanced two stage or three stage 70 | if weight_fn: 71 | if three_stage_weight is not None: 72 | new_loc_value_column = temp[loc_value] * temp.W3 * temp.G 73 | temp = temp.drop([loc_value], axis=1) 74 | temp[loc_value] = new_loc_value_column 75 | else: 76 | temp[loc_value] *= temp[cost_cost].apply(weight_fn) 77 | 78 | return temp.groupby([cost_dest])[loc_value].sum() 79 | 80 | 81 | def fca_ratio( 82 | demand_df, 83 | supply_df, 84 | demand_cost_df, 85 | supply_cost_df, 86 | max_cost, 87 | demand_index="geoid", 88 | demand_name="demand", 89 | supply_index="geoid", 90 | supply_name="supply", 91 | demand_cost_origin="origin", 92 | demand_cost_dest="dest", 93 | demand_cost_name="cost", 94 | supply_cost_origin="origin", 95 | supply_cost_dest="dest", 96 | supply_cost_name="cost", 97 | weight_fn=None, 98 | normalize=False, 99 | noise="quiet", 100 | ): 101 | """Calculation of the floating catchment accessibility 102 | ratio, from DataFrames with precomputed distances. 103 | This is accomplished through two calls of the :meth:`Access.access.weighted_catchment` method. 104 | 105 | Parameters 106 | ---------- 107 | 108 | demand_df : `pandas.DataFrame `_ 109 | The origins dataframe, containing a location index and a total demand. 110 | supply_df : `pandas.DataFrame `_ 111 | The destinations dataframe, containing a location index and level of supply. 112 | demand_cost_df : `pandas.DataFrame `_ 113 | This dataframe contains a link between neighboring demand locations, and a cost between them. 114 | supply_cost_df : `pandas.DataFrame `_ 115 | This dataframe contains a link between neighboring supply locations, and a cost between them. 116 | max_cost : float 117 | This is the maximum cost to consider in the weighted sum; 118 | note that it applies *along with* the weight function. 119 | demand_index : str 120 | is the name of the column that holds the IDs. 121 | demand_name : str 122 | is the name of the column of `demand_df` that holds the aggregate demand at a location. 123 | supply_index : str 124 | is the name of the column that holds the IDs. 125 | supply_name : str 126 | is the name of the column of `supply_df` that holds the aggregate supply at a location. 127 | demand_cost_origin : str 128 | The column name of the index locations -- this is what will be grouped. 129 | demand_cost_dest : str 130 | The column name of the destination locations.
131 | demand_cost_name : str 132 | The column name of the travel cost. 133 | supply_cost_origin : str 134 | The column name of the index locations -- this is what will be grouped. 135 | supply_cost_dest : str 136 | The column name of the destination locations. 137 | supply_cost_name : str 138 | The column name of the travel cost. 139 | weight_fn : function 140 | This function will weight the value of resources/facilities, 141 | as a function of the raw cost. 142 | normalize : bool 143 | True to normalize the FCA series, by default False. 144 | noise : str 145 | Default 'quiet', otherwise gives messages that indicate potential issues. 146 | 147 | Returns 148 | ------- 149 | access : pandas.Series 150 | A -- potentially-weighted -- access ratio. 151 | """ 152 | 153 | # if there is a discrepancy between the demand and supply cost dataframe locations, print it 154 | if ( 155 | len( 156 | set(demand_df.index.tolist()) 157 | - set(supply_cost_df[supply_cost_dest].unique()) 158 | ) 159 | != 0 160 | ): 161 | warnings.warn("some tracts may be unaccounted for in supply_cost", stacklevel=1) 162 | 163 | # get a series of the total demand within the buffer zone 164 | total_demand_series = weighted_catchment( 165 | demand_df, 166 | demand_cost_df, 167 | max_cost, 168 | cost_source=demand_cost_dest, 169 | cost_dest=demand_cost_origin, 170 | cost_cost=demand_cost_name, 171 | loc_index=demand_index, 172 | loc_value=demand_name, 173 | weight_fn=weight_fn, 174 | ) 175 | # get a series of the total supply within the buffer zone 176 | total_supply_series = weighted_catchment( 177 | supply_df, 178 | supply_cost_df, 179 | max_cost, 180 | cost_source=supply_cost_dest, 181 | cost_dest=supply_cost_origin, 182 | cost_cost=supply_cost_name, 183 | loc_index=supply_index, 184 | loc_value=supply_name, 185 | weight_fn=weight_fn, 186 | ) 187 | 188 | # join the aggregate demand and the aggregate supply into one dataframe 189 | temp = ( 190 | total_supply_series.to_frame(name="supply") 191 | .join(total_demand_series.to_frame(name="demand"), how="right") 192 | .fillna(0) 193 | ) 194 | 195 | # calculate the floating catchment area, or supply divided by demand 196 | temp["FCA"] = temp["supply"] / temp["demand"] 197 | base_FCA_series = temp["FCA"] 198 | 199 | if noise != "quiet": 200 | # depending on the version history of the census tract data you use, this will print out the tracts that have undefined FCA values 201 | print(base_FCA_series[pd.isna(base_FCA_series)]) 202 | 203 | return base_FCA_series 204 | 205 | 206 | def two_stage_fca( 207 | demand_df, 208 | supply_df, 209 | cost_df, 210 | max_cost=None, 211 | demand_index="geoid", 212 | demand_name="demand", 213 | supply_index="geoid", 214 | supply_name="supply", 215 | cost_origin="origin", 216 | cost_dest="dest", 217 | cost_name="cost", 218 | weight_fn=None, 219 | normalize=False, 220 | ): 221 | """ 222 | Calculation of the two-stage floating catchment accessibility 223 | ratio, from DataFrames with precomputed distances. 224 | This is accomplished through a single call of the `access.weighted_catchment` method, 225 | to retrieve the patients using each provider. 226 | The ratio of providers per patient is then calculated at each care destination, 227 | and that ratio is weighted and summed at each corresponding demand site.
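In symbols -- a sketch of the standard two-stage formulation, where :math:`S_j` is the supply at :math:`j`, :math:`D_k` the demand at :math:`k`, :math:`c_{kj}` the travel cost, and :math:`W` the distance weight --

.. math:: R_j = \frac{S_j}{\sum_k D_k W(c_{kj})}, \qquad A_i = \sum_j R_j W(c_{ij}),

with both sums restricted to pairs whose cost falls below `max_cost`.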
228 | This is based on the original paper by Luo and Wang :cite:`2002_luo_spatial_accessibility_chicago`, 229 | as extended by Luo and Qi :cite:`2009_luo_qi_E2SFCA` 230 | and McGrail and Humphreys :cite:`2009_mcgrail_improved_2SFCA`. 231 | 232 | Parameters 233 | ---------- 234 | 235 | demand_df : `pandas.DataFrame `_ 236 | The origins dataframe, containing a location index and a total demand. 237 | demand_index : str 238 | is the name of the column of `demand_df` that holds the origin ID. 239 | demand_name : str 240 | is the name of the column of `demand_df` that holds the aggregate demand at a location. 241 | supply_df : `pandas.DataFrame `_ 242 | The destinations dataframe, containing a location index and level of supply. 243 | supply_index : str 244 | is the name of the column of `supply_df` that holds the origin ID. 245 | supply_name : str 246 | is the name of the column of `supply_df` that holds the aggregate supply at a location. 247 | cost_df : `pandas.DataFrame `_ 248 | This dataframe contains the travel costs between demand and supply locations. 249 | cost_origin : str 250 | The column name of the locations of users or consumers. 251 | cost_dest : str 252 | The column name of the supply or resource locations. 253 | cost_name : str 254 | The column name of the travel cost between origins and destinations. 255 | weight_fn : function 256 | This function will weight the value of resources/facilities, 257 | as a function of the raw cost. 258 | max_cost : float 259 | This is the maximum cost to consider in the weighted sum; 260 | note that it applies _along with_ the weight function. 261 | normalize : bool 262 | True to normalize the FCA series, by default False. 263 | Returns 264 | ------- 265 | access : pandas.Series 266 | A -- potentially-weighted -- two-stage access ratio. 267 | """ 268 | # get a series of total demand then calculate the supply to total demand ratio for each location 269 | total_demand_series = weighted_catchment( 270 | demand_df, 271 | cost_df, 272 | max_cost, 273 | cost_source=cost_origin, 274 | cost_dest=cost_dest, 275 | cost_cost=cost_name, 276 | loc_index=demand_index, 277 | loc_value=demand_name, 278 | weight_fn=weight_fn, 279 | ) 280 | 281 | # create a temporary dataframe, temp, that holds the supply and aggregate demand at each location 282 | total_demand_series.name += "_W" 283 | temp = supply_df.join(total_demand_series, how="right") 284 | 285 | # there may be NA values due to a shorter supply dataframe than the demand dataframe. 286 | # in this case, replace any potential NA values (which correspond to supply locations with no supply) with 0.
287 | temp[supply_name] = temp[supply_name].fillna(0) 288 | 289 | # calculate the fractional ratio of supply to aggregate demand at each location, or Rl 290 | temp["Rl"] = temp[supply_name] / temp[demand_name + "_W"] 291 | 292 | # separate the fractional ratio of supply to aggregate demand at each location, or Rl, into a new dataframe 293 | supply_to_total_demand_frame = pd.DataFrame(data={"Rl": temp["Rl"]}) 294 | supply_to_total_demand_frame.index.name = "geoid" 295 | 296 | # sum, into a series, the supply to total demand ratios for each location 297 | two_stage_fca_series = weighted_catchment( 298 | supply_to_total_demand_frame, 299 | cost_df, 300 | max_cost, 301 | cost_source=cost_dest, 302 | cost_dest=cost_origin, 303 | cost_cost=cost_name, 304 | loc_index="geoid", 305 | loc_value="Rl", 306 | weight_fn=weight_fn, 307 | ) 308 | 309 | return two_stage_fca_series 310 | 311 | 312 | def three_stage_fca( 313 | demand_df, 314 | supply_df, 315 | cost_df, 316 | max_cost, 317 | demand_index="geoid", 318 | demand_name="demand", 319 | supply_index="geoid", 320 | supply_name="supply", 321 | cost_origin="origin", 322 | cost_dest="dest", 323 | cost_name="cost", 324 | weight_fn=None, 325 | normalize=False, 326 | ): 327 | """Calculation of the three-stage floating catchment accessibility 328 | ratio, from DataFrames with precomputed distances. 329 | This is accomplished through a single call of the :meth:`access.access.weighted_catchment` method, 330 | to retrieve the patients using each provider. 331 | The ratio of providers per patient is then calculated at each care destination, 332 | and that ratio is weighted and summed at each corresponding demand site. 333 | The only difference with respect to the 2SFCA method is that, 334 | in addition to a distance-dependent weight (`weight_fn`), 335 | a preference weight *G* is calculated, by normalizing the distance 336 | weights across the destinations available to each origin. 337 | See the original paper by Wan, Zou, and Sternberg. :cite:`2012_wan_3SFCA` 338 | 339 | Parameters 340 | ---------- 341 | 342 | demand_df : `pandas.DataFrame `_ 343 | The origins dataframe, containing a location index and a total demand. 344 | demand_index : str 345 | is the name of the column of `demand_df` that holds the origin ID. 346 | demand_name : str 347 | is the name of the column of `demand_df` that holds the aggregate demand at a location. 348 | supply_df : `pandas.DataFrame `_ 349 | The destinations dataframe, containing a location index and level of supply. 350 | supply_name : str 351 | is the name of the column of `supply_df` that holds the aggregate supply at a location. 352 | cost_df : `pandas.DataFrame `_ 353 | This dataframe contains the travel costs between demand and supply locations. 354 | cost_origin : str 355 | The column name of the locations of users or consumers. 356 | cost_dest : str 357 | The column name of the supply or resource locations. 358 | cost_name : str 359 | The column name of the travel cost between origins and destinations. 360 | weight_fn : function 361 | This function will weight the value of resources/facilities, 362 | as a function of the raw cost. 363 | max_cost : float 364 | This is the maximum cost to consider in the weighted sum; 365 | note that it applies *along with* the weight function. 366 | normalize : bool 367 | True to normalize the FCA series, by default False. 368 | 369 | 370 | Returns 371 | ------- 372 | access : pandas.Series 373 | A -- potentially-weighted -- three-stage access ratio.
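
Notes
-----
As implemented here, the preference weight is the distance weight normalized over the destinations available to each origin,

.. math:: G_{ij} = \frac{W(c_{ij})}{\sum_{j'} W(c_{ij'})},

and it multiplies the distance weight in both catchment stages.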
374 | """ 375 | 376 | # create preference weight 'G', which is the weight 377 | cost_df["W3"] = cost_df[cost_name].apply(weight_fn) 378 | W3sum_frame = ( 379 | cost_df[[cost_origin, "W3"]] 380 | .groupby(cost_origin) 381 | .sum() 382 | .rename(columns={"W3": "W3sum"}) 383 | .reset_index() 384 | ) 385 | cost_df = pd.merge(cost_df, W3sum_frame) 386 | cost_df["G"] = cost_df.W3 / cost_df.W3sum 387 | 388 | # get a series of total demand then calculate the supply to total demand ratio for each location 389 | total_demand_series = weighted_catchment( 390 | demand_df, 391 | cost_df, 392 | max_cost, 393 | cost_source=cost_origin, 394 | cost_dest=cost_dest, 395 | cost_cost=cost_name, 396 | loc_index=demand_index, 397 | loc_value=demand_name, 398 | weight_fn=weight_fn, 399 | three_stage_weight=True, 400 | ) 401 | 402 | # create a temporary dataframe, temp, that holds the supply and aggregate demand at each location 403 | total_demand_series.name += "_W" 404 | temp = supply_df.join(total_demand_series, how="right") 405 | 406 | # there may be NA values due to a shorter supply dataframe than the demand dataframe. 407 | # in this case, replace any potential NA values(which correspond to supply locations with no supply) with 0. 408 | temp[supply_name].fillna(0, inplace=True) 409 | 410 | # calculate the fractional ratio of supply to aggregate demand at each location, or Rl 411 | temp["Rl"] = temp[supply_name] / temp[demand_name + "_W"] 412 | 413 | # separate the fractional ratio of supply to aggregate demand at each location, or Rl, into a new dataframe 414 | supply_to_total_demand_frame = pd.DataFrame(data={"Rl": temp["Rl"]}) 415 | supply_to_total_demand_frame.index.name = "geoid" 416 | 417 | # sum, into a series, the supply to total demand ratios for each location 418 | three_stage_fca_series = weighted_catchment( 419 | supply_to_total_demand_frame, 420 | cost_df.sort_index(), 421 | max_cost, 422 | cost_source=cost_dest, 423 | cost_dest=cost_origin, 424 | cost_cost=cost_name, 425 | loc_index="geoid", 426 | loc_value="Rl", 427 | weight_fn=weight_fn, 428 | three_stage_weight=True, 429 | ) 430 | 431 | # remove the preference weight G from the original costs dataframe 432 | cost_df.drop(columns=["G", "W3", "W3sum"], inplace=True) 433 | 434 | return three_stage_fca_series 435 | -------------------------------------------------------------------------------- /access/helpers.py: -------------------------------------------------------------------------------- 1 | def sanitize_supply_cost(a, cost, name): 2 | 3 | if cost is None: 4 | 5 | cost = a.default_cost 6 | if len(a.cost_names) > 1: 7 | a.log.info("Using default cost, {}, for {}.".format(cost, name)) 8 | 9 | if cost not in a.cost_names: 10 | 11 | raise ValueError("{} not an available cost.".format(cost)) 12 | 13 | return cost 14 | 15 | 16 | def sanitize_demand_cost(a, cost, name): 17 | 18 | if cost is None: 19 | 20 | cost = a.neighbor_default_cost 21 | if len(a.cost_names) > 1: 22 | a.log.info("Using default neighbor cost, {}, for {}.".format(cost, name)) 23 | 24 | if cost not in a.neighbor_cost_names: 25 | 26 | raise ValueError("{} not an available neighbor cost.".format(cost)) 27 | 28 | return cost 29 | 30 | 31 | def sanitize_supplies(a, supply_values): 32 | 33 | if type(supply_values) is str: 34 | supply_values = [supply_values] 35 | elif supply_values is None: 36 | supply_values = a.supply_types 37 | elif type(supply_values) is not list: 38 | raise ValueError( 39 | "supply_values should be a list or string (or -- default -- None)" 40 | ) 41 | 42 | return 
supply_values 43 | 44 | 45 | def normalized_access(a, columns): 46 | 47 | mean_access_values = ( 48 | a.access_df[columns].multiply(a.access_df[a.demand_value], axis=0).sum() 49 | / a.access_df[a.demand_value].sum() 50 | ) 51 | 52 | return a.access_df[columns].divide(mean_access_values) 53 | -------------------------------------------------------------------------------- /access/raam.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | 5 | def iterate_raam( 6 | demand, 7 | supply, 8 | travel, 9 | max_cycles=151, 10 | initial_step=0.2, 11 | min_step=0.005, 12 | half_life=50, 13 | limit_initial=20, 14 | verbose=False, 15 | ): 16 | 17 | norig, ndest = travel.shape 18 | assignment = np.zeros((norig, ndest)) 19 | assignment[range(norig), travel.argmin(axis=1)] = demand 20 | 21 | for i in range(max_cycles): 22 | 23 | demand_at_supply = assignment.sum(axis=0) 24 | congestion_cost = demand_at_supply / supply 25 | total_cost = congestion_cost + travel 26 | 27 | max_locations = np.ma.masked_array(total_cost, assignment == 0).argmax(axis=1) 28 | min_locations = total_cost.argmin(axis=1) 29 | 30 | slmin = supply[min_locations] 31 | slmax = supply[max_locations] 32 | 33 | trlmin = travel[range(norig), min_locations] 34 | trlmax = travel[range(norig), max_locations] 35 | 36 | drlmin = assignment[range(norig), min_locations] 37 | drlmax = assignment[range(norig), max_locations] 38 | 39 | dr = drlmin + drlmax 40 | 41 | drotherlmin = demand_at_supply[min_locations] - drlmin 42 | drotherlmax = demand_at_supply[max_locations] - drlmax 43 | 44 | drlmin_new = ((slmin * slmax) / (slmin + slmax)) * ( 45 | (trlmax - trlmin) + (dr + drotherlmax) / slmax - drotherlmin / slmin 46 | ) 47 | 48 | delta = drlmin_new - drlmin 49 | 50 | delta = np.minimum(delta, drlmax) 51 | delta = np.where(max_locations == min_locations, 0, delta) 52 | 53 | if type(initial_step) is float: 54 | step_size = initial_step * 0.5 ** (i / half_life) 55 | if step_size < min_step: 56 | step_size = min_step 57 | 58 | delta = np.minimum(delta, step_size * demand).astype(int) 59 | 60 | else: 61 | 62 | step_size = int(np.round(initial_step * 0.5 ** (i / half_life))) 63 | if step_size < min_step: 64 | step_size = min_step 65 | 66 | delta = np.minimum(delta, step_size).astype(int) 67 | 68 | ## We don't want "attractive locations" getting mobbed. 69 | ## This will only happen in the first 10-20 cycles. 70 | ## So only do these (somewhat costly checks) then. 
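## Concretely: the proposed shifts are first tallied per destination, and
## any destination whose inflow would exceed its available supply in a
## single step has those inflows scaled down proportionally.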
71 | if i < limit_initial: 72 | 73 | delta_mat = np.zeros(travel.shape) 74 | delta_mat[range(norig), min_locations] += delta 75 | 76 | naive_assignment = delta_mat.sum(axis=0) / (supply) # * rho) 77 | scale_factor = np.maximum(naive_assignment, 1) 78 | 79 | delta_mat = (delta_mat / scale_factor).round().astype(int) 80 | 81 | delta = delta_mat.sum(axis=1) 82 | 83 | assignment[range(norig), min_locations] += delta 84 | assignment[range(norig), max_locations] -= delta 85 | 86 | assert (assignment.sum(axis=1) == demand).all() 87 | 88 | if not (i % 25): 89 | raam_cost = (total_cost * assignment).sum(axis=1) / assignment.sum(axis=1) 90 | 91 | if verbose: 92 | print( 93 | "{:d} {:.2f} {:d} {:.3f}".format( 94 | i, raam_cost.mean(), delta.sum(), step_size 95 | ), 96 | end=" || ", 97 | ) 98 | 99 | raam_cost = (total_cost * assignment).sum(axis=1) / assignment.sum(axis=1) 100 | 101 | return raam_cost 102 | 103 | 104 | def raam( 105 | demand_df, 106 | supply_df, 107 | cost_df, 108 | demand_index=True, 109 | demand_name="demand", 110 | supply_index=True, 111 | supply_name="supply", 112 | cost_origin="origin", 113 | cost_dest="dest", 114 | cost_name="cost", 115 | tau=60, 116 | rho=None, 117 | max_cycles=150, 118 | initial_step=0.2, 119 | min_step=0.005, 120 | half_life=50, 121 | verbose=False, 122 | ): 123 | """Calculate the rational agent access model's total cost -- 124 | a weighted travel and congestion cost. 125 | The balance of the two costs is expressed by the 126 | :math:`\\tau` parameter, which corresponds to the travel time 127 | required to accept congestion equal to 100% of the mean demand to supply ratio 128 | in the study area. 129 | 130 | Parameters 131 | ---------- 132 | 133 | demand_df : `pandas.DataFrame `_ 134 | The origins dataframe, containing a location index and a total demand. 135 | demand_index : {bool, str} 136 | is the name of the column of `demand_df` that holds the origin ID; True if it is already the index. 137 | demand_name : str 138 | is the name of the column of `demand_df` that holds the aggregate demand at a location. 139 | supply_index : {bool, str} 140 | is the name of the column of `supply_df` that holds the origin ID; True if it is already the index. 141 | supply_df : `pandas.DataFrame `_ 142 | The destinations dataframe, containing a location index and level of supply. 143 | cost_df : `pandas.DataFrame `_ 144 | This dataframe contains the travel costs between demand and supply locations. 145 | cost_origin : str 146 | The column name of the locations of users or consumers. 147 | cost_dest : str 148 | The column name of the supply or resource locations. 149 | cost_name : str 150 | The column name of the travel cost between origins and destinations. 151 | supply_name : str 152 | is the name of the column of `supply_df` that holds the aggregate supply at a location. 153 | tau : float 154 | The travel time required to accept congestion equal to 100% of the mean demand to supply ratio. 155 | rho : float 156 | The reference demand to supply ratio; if None, the mean ratio of the study area is used. 157 | max_cycles : int 158 | Max number of cycles. 159 | initial_step : {float, int} 160 | The initial size of the demand shifts, as a share of demand (float) or a count of people (int). 161 | 162 | Returns 163 | ------- 164 | access : pandas.Series 165 | 166 | A -- potentially-weighted -- Rational Agent Access Model cost.
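
Notes
-----
In the implementation below, each origin-destination pair is scored by a congestion term plus a scaled travel term,

.. math:: c_{ij} = \frac{D_j}{\rho S_j} + \frac{t_{ij}}{\tau},

where :math:`D_j` is the demand currently assigned to destination :math:`j`; the reported cost is each origin's assignment-weighted average of this total cost.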
167 | """ 168 | 169 | if demand_index is not True: 170 | demand_df = demand_df.set_index(demand_index) 171 | if supply_index is not True: 172 | supply_df = supply_df.set_index(supply_index) 173 | 174 | demand_df = demand_df[demand_df[demand_name] > 0].copy() 175 | supply_df = supply_df[supply_df[supply_name] > 0].copy() 176 | 177 | demand_locations = list(set(cost_df[cost_origin]) & set(demand_df.index)) 178 | supply_locations = list(set(cost_df[cost_dest]) & set(supply_df.index)) 179 | 180 | cost_pivot = cost_df.pivot(index=cost_origin, columns=cost_dest, values=cost_name) 181 | try: 182 | travel_np = cost_pivot.loc[demand_locations, supply_locations].to_numpy().copy() 183 | except: 184 | travel_np = cost_pivot.loc[demand_locations, supply_locations].values.copy() 185 | 186 | travel_np = travel_np / tau 187 | travel_np = np.ma.masked_array(travel_np, np.isnan(travel_np)) 188 | 189 | # If it is not specified, rho is the average demand to supply ratio. 190 | if rho is None: 191 | rho = demand_df[demand_name].sum() / supply_df[supply_name].sum() 192 | 193 | try: 194 | supply_np = supply_df.loc[supply_locations, supply_name].to_numpy().copy() 195 | except: 196 | supply_np = supply_df.loc[supply_locations, supply_name].values.copy() 197 | 198 | supply_np = supply_np * rho 199 | 200 | # Change this -- should be 201 | try: 202 | demand_np = demand_df.loc[demand_locations, demand_name].to_numpy().copy() 203 | except: 204 | demand_np = demand_df.loc[demand_locations, demand_name].values.copy() 205 | 206 | raam_cost = iterate_raam( 207 | demand_np, 208 | supply_np, 209 | travel_np, 210 | verbose=verbose, 211 | max_cycles=max_cycles, 212 | initial_step=initial_step, 213 | min_step=min_step, 214 | half_life=half_life, 215 | ) 216 | 217 | rs = pd.Series(name="RAAM", index=demand_locations, data=raam_cost) 218 | 219 | return rs 220 | -------------------------------------------------------------------------------- /access/tests/data/model_output.json: -------------------------------------------------------------------------------- 1 | {'data':'from a fit model that you want to serialize in order to test. 
Prefer plaintext to binary.'} 2 | -------------------------------------------------------------------------------- /access/tests/test_access.py: -------------------------------------------------------------------------------- 1 | from access import Access 2 | from access.access import weights 3 | 4 | import math 5 | import unittest 6 | 7 | import numpy as np 8 | import pandas as pd 9 | import geopandas as gpd 10 | 11 | import util as tu 12 | 13 | 14 | class TestAccess(unittest.TestCase): 15 | def setUp(self): 16 | n = 5 17 | self.supply_grid = tu.create_nxn_grid(n) 18 | self.demand_grid = self.supply_grid.sample(1) 19 | self.cost_matrix = tu.create_cost_matrix(self.supply_grid, "euclidean") 20 | 21 | def test_access_initialize_without_demand_index_col_raises_value_error(self): 22 | with self.assertRaises(ValueError): 23 | bad_index_name = "Not a col in demand df" 24 | 25 | Access( 26 | demand_df=self.demand_grid, 27 | demand_index=bad_index_name, 28 | demand_value="value", 29 | supply_df=self.supply_grid, 30 | supply_index="id", 31 | supply_value="value", 32 | ) 33 | 34 | def test_access_initialize_without_supply_index_col_raises_value_error(self): 35 | with self.assertRaises(ValueError): 36 | bad_index_name = "Not a col in supply df" 37 | 38 | Access( 39 | demand_df=self.demand_grid, 40 | demand_index="id", 41 | demand_value="value", 42 | supply_df=self.supply_grid, 43 | supply_index=bad_index_name, 44 | supply_value="value", 45 | ) 46 | 47 | def test_access_initialize_without_demand_value_col_raises_value_error(self): 48 | with self.assertRaises(ValueError): 49 | bad_value_name = "Not a col in demand df" 50 | 51 | Access( 52 | demand_df=self.demand_grid, 53 | demand_index="id", 54 | demand_value=bad_value_name, 55 | supply_df=self.supply_grid, 56 | supply_index="id", 57 | supply_value="value", 58 | ) 59 | 60 | def test_access_initialize_without_supply_value_col_raises_value_error(self): 61 | with self.assertRaises(ValueError): 62 | bad_value_name = "Not a col in supply df" 63 | 64 | Access( 65 | demand_df=self.demand_grid, 66 | demand_index="id", 67 | demand_value="value", 68 | supply_df=self.supply_grid, 69 | supply_index="id", 70 | supply_value=bad_value_name, 71 | ) 72 | 73 | def test_access_initialize_without_supply_value_col_in_list_raises_value_error( 74 | self, 75 | ): 76 | with self.assertRaises(ValueError): 77 | bad_value_name = ["Not a col in supply df"] 78 | 79 | Access( 80 | demand_df=self.demand_grid, 81 | demand_index="id", 82 | demand_value="value", 83 | supply_df=self.supply_grid, 84 | supply_index="id", 85 | supply_value=bad_value_name, 86 | ) 87 | 88 | def test_access_initialize_with_supply_value_col_in_list(self): 89 | value_in_list = ["value"] 90 | 91 | self.model = Access( 92 | demand_df=self.demand_grid, 93 | demand_index="id", 94 | demand_value="value", 95 | supply_df=self.supply_grid, 96 | supply_index="id", 97 | supply_value=value_in_list, 98 | ) 99 | 100 | actual = self.model.supply_types 101 | 102 | self.assertEqual(actual, ["value"]) 103 | 104 | def test_access_initialize_with_supply_value_col_in_dict_raises_value_error(self): 105 | with self.assertRaises(ValueError): 106 | value_in_dict = {"value": ""} 107 | 108 | self.model = Access( 109 | demand_df=self.demand_grid, 110 | demand_index="id", 111 | demand_value="value", 112 | supply_df=self.supply_grid, 113 | supply_index="id", 114 | supply_value=value_in_dict, 115 | ) 116 | 117 | def test_access_initialize_without_valid_cost_origin_raises_value_error(self): 118 | with self.assertRaises(ValueError): 119 | 
bad_cost_origin = "Not a valid cost origin column" 120 | 121 | Access( 122 | demand_df=self.demand_grid, 123 | demand_index="id", 124 | demand_value="value", 125 | supply_df=self.supply_grid, 126 | supply_index="id", 127 | supply_value="value", 128 | cost_df=self.cost_matrix, 129 | cost_origin=bad_cost_origin, 130 | cost_dest="dest", 131 | cost_name="cost", 132 | ) 133 | 134 | def test_access_initialize_without_valid_cost_dest_raises_value_error(self): 135 | with self.assertRaises(ValueError): 136 | bad_cost_dest = "Not a valid cost dest column" 137 | 138 | Access( 139 | demand_df=self.demand_grid, 140 | demand_index="id", 141 | demand_value="value", 142 | supply_df=self.supply_grid, 143 | supply_index="id", 144 | supply_value="value", 145 | cost_df=self.cost_matrix, 146 | cost_origin="origin", 147 | cost_dest=bad_cost_dest, 148 | cost_name="cost", 149 | ) 150 | 151 | def test_access_initialize_without_valid_cost_name_raises_value_error(self): 152 | with self.assertRaises(ValueError): 153 | bad_cost_name = "Not a valid cost name column" 154 | 155 | Access( 156 | demand_df=self.demand_grid, 157 | demand_index="id", 158 | demand_value="value", 159 | supply_df=self.supply_grid, 160 | supply_index="id", 161 | supply_value="value", 162 | cost_df=self.cost_matrix, 163 | cost_origin="origin", 164 | cost_dest="dest", 165 | cost_name=bad_cost_name, 166 | ) 167 | 168 | def test_access_initialize_without_valid_cost_name_in_list_raises_value_error(self): 169 | with self.assertRaises(ValueError): 170 | bad_cost_name = ["Not a valid cost name column"] 171 | 172 | Access( 173 | demand_df=self.demand_grid, 174 | demand_index="id", 175 | demand_value="value", 176 | supply_df=self.supply_grid, 177 | supply_index="id", 178 | supply_value="value", 179 | cost_df=self.cost_matrix, 180 | cost_origin="origin", 181 | cost_dest="dest", 182 | cost_name=bad_cost_name, 183 | ) 184 | 185 | def test_access_initialize_with_valid_cost_name_in_list(self): 186 | cost_name_list = ["cost"] 187 | 188 | self.model = Access( 189 | demand_df=self.demand_grid, 190 | demand_index="id", 191 | demand_value="value", 192 | supply_df=self.supply_grid, 193 | supply_index="id", 194 | supply_value="value", 195 | cost_df=self.cost_matrix, 196 | cost_origin="origin", 197 | cost_dest="dest", 198 | cost_name=cost_name_list, 199 | ) 200 | 201 | actual = self.model.cost_names 202 | 203 | self.assertEqual(actual, ["cost"]) 204 | 205 | def test_access_initialize_with_valid_cost_name_in_dict_raises_value_error(self): 206 | with self.assertRaises(ValueError): 207 | cost_name_dict = {"cost": ""} 208 | 209 | self.model = Access( 210 | demand_df=self.demand_grid, 211 | demand_index="id", 212 | demand_value="value", 213 | supply_df=self.supply_grid, 214 | supply_index="id", 215 | supply_value="value", 216 | cost_df=self.cost_matrix, 217 | cost_origin="origin", 218 | cost_dest="dest", 219 | cost_name=cost_name_dict, 220 | ) 221 | 222 | def test_access_initialize_without_valid_neighbor_cost_origin_raises_value_error( 223 | self, 224 | ): 225 | with self.assertRaises(ValueError): 226 | bad_cost_origin = "Not a valid cost origin column" 227 | 228 | Access( 229 | demand_df=self.demand_grid, 230 | demand_index="id", 231 | demand_value="value", 232 | supply_df=self.supply_grid, 233 | supply_index="id", 234 | supply_value="value", 235 | neighbor_cost_df=self.cost_matrix, 236 | neighbor_cost_origin=bad_cost_origin, 237 | neighbor_cost_dest="dest", 238 | neighbor_cost_name="cost", 239 | ) 240 | 241 | def 
test_access_initialize_without_valid_neighbor_cost_dest_raises_value_error( 242 | self, 243 | ): 244 | with self.assertRaises(ValueError): 245 | bad_cost_dest = "Not a valid cost dest column" 246 | 247 | Access( 248 | demand_df=self.demand_grid, 249 | demand_index="id", 250 | demand_value="value", 251 | supply_df=self.supply_grid, 252 | supply_index="id", 253 | supply_value="value", 254 | neighbor_cost_df=self.cost_matrix, 255 | neighbor_cost_origin="origin", 256 | neighbor_cost_dest=bad_cost_dest, 257 | neighbor_cost_name="cost", 258 | ) 259 | 260 | def test_access_initialize_without_valid_neighbor_cost_name_raises_value_error( 261 | self, 262 | ): 263 | with self.assertRaises(ValueError): 264 | bad_cost_name = "Not a valid cost name column" 265 | 266 | Access( 267 | demand_df=self.demand_grid, 268 | demand_index="id", 269 | demand_value="value", 270 | supply_df=self.supply_grid, 271 | supply_index="id", 272 | supply_value="value", 273 | neighbor_cost_df=self.cost_matrix, 274 | neighbor_cost_origin="origin", 275 | neighbor_cost_dest="dest", 276 | neighbor_cost_name=bad_cost_name, 277 | ) 278 | 279 | def test_access_initialize_without_valid_neighbor_cost_name_in_list_raises_value_error( 280 | self, 281 | ): 282 | with self.assertRaises(ValueError): 283 | bad_cost_name = ["Not a valid cost name column"] 284 | 285 | Access( 286 | demand_df=self.demand_grid, 287 | demand_index="id", 288 | demand_value="value", 289 | supply_df=self.supply_grid, 290 | supply_index="id", 291 | supply_value="value", 292 | neighbor_cost_df=self.cost_matrix, 293 | neighbor_cost_origin="origin", 294 | neighbor_cost_dest="dest", 295 | neighbor_cost_name=bad_cost_name, 296 | ) 297 | 298 | def test_access_initialize_with_valid_neighbor_cost_name_in_list(self): 299 | cost_name_list = ["cost"] 300 | 301 | self.model = Access( 302 | demand_df=self.demand_grid, 303 | demand_index="id", 304 | demand_value="value", 305 | supply_df=self.supply_grid, 306 | supply_index="id", 307 | supply_value="value", 308 | neighbor_cost_df=self.cost_matrix, 309 | neighbor_cost_origin="origin", 310 | neighbor_cost_dest="dest", 311 | neighbor_cost_name=cost_name_list, 312 | ) 313 | 314 | actual = self.model.neighbor_cost_names 315 | 316 | self.assertEqual(actual, ["cost"]) 317 | 318 | def test_access_initialize_with_valid_neighbor_cost_name_in_dict_raises_value_error( 319 | self, 320 | ): 321 | with self.assertRaises(ValueError): 322 | cost_name_dict = {"cost": ""} 323 | 324 | self.model = Access( 325 | demand_df=self.demand_grid, 326 | demand_index="id", 327 | demand_value="value", 328 | supply_df=self.supply_grid, 329 | supply_index="id", 330 | supply_value="value", 331 | neighbor_cost_df=self.cost_matrix, 332 | neighbor_cost_origin="origin", 333 | neighbor_cost_dest="dest", 334 | neighbor_cost_name=cost_name_dict, 335 | ) 336 | -------------------------------------------------------------------------------- /access/tests/test_datasets.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import pandas as pd 4 | import geopandas as gpd 5 | from access import Datasets 6 | 7 | 8 | class TestDatasets(unittest.TestCase): 9 | def test_load_geopandas_dataset(self): 10 | result = Datasets.load_data("chi_doc_geom") 11 | assert isinstance(result, gpd.GeoDataFrame) 12 | 13 | def test_load_pandas_dataset(self): 14 | result = Datasets.load_data("chi_times") 15 | assert isinstance(result, pd.DataFrame) 16 | 17 | def test_prints_available_datasets(self): 18 | Datasets.available_datasets() 19 | 
-------------------------------------------------------------------------------- /access/tests/test_euclidean.py: -------------------------------------------------------------------------------- 1 | from access import Access 2 | from access.access import weights 3 | 4 | import math 5 | import unittest 6 | 7 | import numpy as np 8 | import pandas as pd 9 | import geopandas as gpd 10 | import util as tu 11 | 12 | 13 | class TestEuclidean(unittest.TestCase): 14 | def setUp(self): 15 | demand_data = pd.DataFrame({"id": [0], "x": [0], "y": [0], "value": [1]}) 16 | demand_grid = gpd.GeoDataFrame( 17 | demand_data, geometry=gpd.points_from_xy(demand_data.x, demand_data.y) 18 | ) 19 | demand_grid["geometry"] = demand_grid.buffer(0.5) 20 | 21 | supply_data = pd.DataFrame({"id": [1], "x": [0], "y": [1], "value": [1]}) 22 | supply_grid = gpd.GeoDataFrame( 23 | supply_data, geometry=gpd.points_from_xy(supply_data.x, supply_data.y) 24 | ) 25 | supply_grid["geometry"] = supply_grid.buffer(0.5) 26 | 27 | cost_matrix = pd.DataFrame({"origin": [0], "dest": [1], "cost": [1]}) 28 | 29 | self.model = Access( 30 | demand_df=demand_grid, 31 | demand_index="id", 32 | demand_value="value", 33 | supply_df=supply_grid, 34 | supply_index="id", 35 | supply_value="value", 36 | cost_df=cost_matrix, 37 | cost_origin="origin", 38 | cost_dest="dest", 39 | cost_name="cost", 40 | neighbor_cost_df=cost_matrix, 41 | neighbor_cost_origin="origin", 42 | neighbor_cost_dest="dest", 43 | neighbor_cost_name="cost", 44 | ) 45 | 46 | def test_euclidean_point_to_point(self): 47 | self.model.create_euclidean_distance( 48 | name="euclidian", threshold=2, centroid_o=True, centroid_d=True 49 | ) 50 | actual = self.model.cost_df["euclidian"][0] 51 | 52 | self.assertAlmostEqual(actual, 1) 53 | 54 | def test_euclidean_point_to_poly(self): 55 | self.model.create_euclidean_distance( 56 | name="euclidian", threshold=2, centroid_o=True, centroid_d=False 57 | ) 58 | actual = self.model.cost_df["euclidian"][0] 59 | 60 | self.assertAlmostEqual(actual, 0.5) 61 | 62 | def test_euclidean_poly_to_poly(self): 63 | self.model.create_euclidean_distance( 64 | name="euclidian", threshold=2, centroid_o=False, centroid_d=False 65 | ) 66 | actual = self.model.cost_df["euclidian"][0] 67 | 68 | self.assertAlmostEqual(actual, 0) 69 | 70 | def test_euclidean_without_geopandas_demand_dataframe_raises_TypeError(self): 71 | with self.assertRaises(TypeError): 72 | self.model.demand_df = self.model.demand_df[["x", "y", "value"]] 73 | self.model.create_euclidean_distance() 74 | 75 | def test_euclidean_without_geopandas_supply_dataframe_raises_TypeError(self): 76 | with self.assertRaises(TypeError): 77 | self.model.supply_df = self.model.supply_df[["x", "y", "value"]] 78 | self.model.create_euclidean_distance() 79 | 80 | def test_euclidean_sets_euclidean_as_default_if_no_default_exists(self): 81 | delattr(self.model, "_default_cost") 82 | self.model.create_euclidean_distance() 83 | 84 | actual = hasattr(self.model, "_default_cost") 85 | 86 | self.assertEqual(actual, True) 87 | 88 | 89 | class TestEuclideanNeighbors(unittest.TestCase): 90 | def setUp(self): 91 | demand_data = pd.DataFrame( 92 | {"id": [0, 1], "x": [0, 0], "y": [0, 1], "value": [1, 1]} 93 | ) 94 | demand_grid = gpd.GeoDataFrame( 95 | demand_data, geometry=gpd.points_from_xy(demand_data.x, demand_data.y) 96 | ) 97 | demand_grid["geometry"] = demand_grid.buffer(0.5) 98 | 99 | supply_data = pd.DataFrame({"id": [1], "x": [0], "y": [1], "value": [1]}) 100 | supply_grid = gpd.GeoDataFrame( 101 | supply_data, 
geometry=gpd.points_from_xy(supply_data.x, supply_data.y) 102 | ) 103 | 104 | cost_matrix = pd.DataFrame( 105 | {"origin": [0, 0, 1, 1], "dest": [1, 0, 0, 1], "cost": [1, 0, 1, 0]} 106 | ) 107 | 108 | self.model = Access( 109 | demand_df=demand_grid, 110 | demand_index="id", 111 | demand_value="value", 112 | supply_df=supply_grid, 113 | supply_index="id", 114 | supply_value="value", 115 | cost_df=cost_matrix, 116 | cost_origin="origin", 117 | cost_dest="dest", 118 | cost_name="cost", 119 | neighbor_cost_df=cost_matrix, 120 | neighbor_cost_origin="origin", 121 | neighbor_cost_dest="dest", 122 | neighbor_cost_name="cost", 123 | ) 124 | 125 | def test_euclidean_neighbors_centroids(self): 126 | self.model.create_euclidean_distance_neighbors( 127 | name="euclidian", threshold=2, centroid=True 128 | ) 129 | actual1 = self.model.neighbor_cost_df["euclidian"][0] 130 | actual2 = self.model.neighbor_cost_df["euclidian"][2] 131 | self.assertAlmostEqual(actual1, 1) 132 | self.assertAlmostEqual(actual2, 1) 133 | 134 | def test_euclidean_neighbors_poly(self): 135 | self.model.create_euclidean_distance_neighbors( 136 | name="euclidian", threshold=2, centroid=False 137 | ) 138 | actual1 = self.model.neighbor_cost_df["euclidian"][0] 139 | actual2 = self.model.neighbor_cost_df["euclidian"][2] 140 | self.assertAlmostEqual(actual1, 0) 141 | self.assertAlmostEqual(actual2, 0) 142 | 143 | def test_euclidean_neighbors_without_geopandas_demand_dataframe_raises_TypeError( 144 | self, 145 | ): 146 | with self.assertRaises(TypeError): 147 | self.model.demand_df = self.model.demand_df[["x", "y", "value"]] 148 | self.model.create_euclidean_distance_neighbors() 149 | 150 | def test_euclidean_neighbors_sets_euclidean_as_default_if_no_default_exists(self): 151 | delattr(self.model, "_neighbor_default_cost") 152 | self.model.create_euclidean_distance_neighbors() 153 | 154 | actual = hasattr(self.model, "_neighbor_default_cost") 155 | 156 | self.assertEqual(actual, True) 157 | -------------------------------------------------------------------------------- /access/tests/test_floating_catchment_area.py: -------------------------------------------------------------------------------- 1 | from access import Access 2 | from access.access import weights 3 | 4 | import math 5 | import unittest 6 | 7 | import numpy as np 8 | import pandas as pd 9 | import geopandas as gpd 10 | import util as tu 11 | 12 | 13 | class TestFloatingCatchmentArea(unittest.TestCase): 14 | def setUp(self): 15 | n = 5 16 | supply_grid = tu.create_nxn_grid(n) 17 | demand_grid = supply_grid.sample(5) 18 | cost_matrix = tu.create_cost_matrix(supply_grid, "euclidean") 19 | 20 | self.model = Access( 21 | demand_df=demand_grid, 22 | demand_index="id", 23 | demand_value="value", 24 | supply_df=supply_grid, 25 | supply_index="id", 26 | supply_value="value", 27 | cost_df=cost_matrix, 28 | cost_origin="origin", 29 | cost_dest="dest", 30 | cost_name="cost", 31 | neighbor_cost_df=cost_matrix, 32 | neighbor_cost_origin="origin", 33 | neighbor_cost_dest="dest", 34 | neighbor_cost_name="cost", 35 | ) 36 | 37 | def test_floating_catchment_area_ratio_large_catchment(self): 38 | result = self.model.fca_ratio() 39 | actual = self.model.access_df.iloc[0]["fca_value"] 40 | 41 | total_demand = self.model.access_df["value"].sum() 42 | total_supply = self.model.supply_df["value"].sum() 43 | expected = total_supply / total_demand 44 | 45 | self.assertEqual(actual, expected) 46 | 47 | def test_floating_catchment_area_ratio_small_catchment(self): 48 | small_catchment = 0.9 49 | 
result = self.model.fca_ratio(max_cost=small_catchment) 50 | actual = self.model.access_df.iloc[0]["fca_value"] 51 | 52 | self.assertTrue((1 == self.model.access_df["fca_value"]).all()) 53 | 54 | def test_floating_catchment_area_ratio_large_catchment_normalized(self): 55 | result = self.model.fca_ratio(normalize=True) 56 | actual = self.model.access_df.iloc[0]["fca_value"] 57 | 58 | self.assertEqual(actual, 5) 59 | 60 | def test_floating_catchment_area_ratio_warns_if_demand_supply_locs_differ_and_noise( 61 | self, 62 | ): 63 | new_dem_row = pd.DataFrame( 64 | [[1, 1, 1, None], [1, 1, 1, None]], 65 | columns=["x", "y", "value", "geometry"], 66 | index=[28, 29], 67 | ) 68 | self.model.demand_df = pd.concat([self.model.demand_df, new_dem_row]) 69 | self.model.demand_df.index.name = "id" 70 | result = self.model.fca_ratio(noise=True) 71 | 72 | def test_floating_catchment_area_ratio_overwrites_column(self): 73 | small_catchment = 0.9 74 | result = self.model.fca_ratio(max_cost=small_catchment) 75 | small_catchment = 0.8 76 | result = self.model.fca_ratio(max_cost=small_catchment) 77 | 78 | actual = self.model.access_df.iloc[0]["fca_value"] 79 | 80 | self.assertEqual(actual, 1) 81 | 82 | def test_floating_catchment_area_ratio_zero_catchment(self): 83 | zero_catchment = 0 84 | result = self.model.fca_ratio(max_cost=zero_catchment) 85 | actual = math.isnan(self.model.access_df.iloc[0]["fca_value"]) 86 | 87 | self.assertEqual(actual, True) 88 | 89 | def test_two_stage_floating_catchment_area_large_catchment(self): 90 | result = self.model.two_stage_fca() 91 | actual = self.model.access_df.iloc[0]["2sfca_value"] 92 | 93 | self.assertEqual(actual, 5) 94 | 95 | def test_two_stage_floating_catchment_area_small_catchment(self): 96 | small_catchment = 0.9 97 | result = self.model.two_stage_fca(max_cost=small_catchment) 98 | actual = self.model.access_df.iloc[0]["2sfca_value"] 99 | 100 | self.assertEqual(actual, 1) 101 | 102 | def test_two_stage_floating_catchment_area_zero_catchment(self): 103 | zero_catchment = 0 104 | result = self.model.two_stage_fca(max_cost=zero_catchment) 105 | actual = math.isnan(self.model.access_df.iloc[0]["2sfca_value"]) 106 | 107 | self.assertEqual(actual, True) 108 | 109 | def test_two_stage_floating_catchment_area_warning_default_cost_if_more_than_one( 110 | self, 111 | ): 112 | 113 | cost_list = ["cost", "other_cost"] 114 | self.model.cost_names = cost_list 115 | 116 | self.model.two_stage_fca() 117 | actual = self.model.default_cost 118 | 119 | self.assertEqual(actual, "cost") 120 | 121 | def test_two_stage_floating_catchment_area_unavailable_cost_name_raises_ValueError( 122 | self, 123 | ): 124 | with self.assertRaises(ValueError): 125 | bad_cost_name = "euclidean" 126 | self.model.two_stage_fca(cost=bad_cost_name) 127 | 128 | def test_two_stage_floating_catchment_area_large_catchment_supply_value_explicit( 129 | self, 130 | ): 131 | result = self.model.two_stage_fca(supply_values="value") 132 | actual = self.model.access_df.iloc[0]["2sfca_value"] 133 | 134 | self.assertEqual(actual, 5) 135 | 136 | def test_two_stage_floating_catchment_area_run_again_and_test_overwrite(self): 137 | result = self.model.two_stage_fca() 138 | result = self.model.two_stage_fca() 139 | actual = self.model.access_df.iloc[0]["2sfca_value"] 140 | 141 | self.assertEqual(actual, 5) 142 | 143 | def test_two_stage_floating_catchment_area_large_catchment_normalize(self): 144 | result = self.model.two_stage_fca(normalize=True) 145 | 146 | actual = self.model.access_df.iloc[0]["2sfca_value"] 147 | 148 | 
self.assertEqual(actual, 5) 149 | 150 | def test_three_stage_floating_catchment_area_large_catchment(self): 151 | wfn = weights.step_fn({10: 25}) 152 | result = self.model.three_stage_fca(weight_fn=wfn) 153 | actual = self.model.access_df.iloc[0]["3sfca_value"] 154 | 155 | self.assertEqual(actual, 5) 156 | 157 | def test_three_stage_floating_catchment_area_large_catchment_run_again_and_test_overwrite( 158 | self, 159 | ): 160 | wfn = weights.step_fn({10: 25}) 161 | result = self.model.three_stage_fca(weight_fn=wfn) 162 | result = self.model.three_stage_fca(weight_fn=wfn) 163 | actual = self.model.access_df.iloc[0]["3sfca_value"] 164 | 165 | self.assertEqual(actual, 5) 166 | 167 | def test_three_stage_floating_catchment_area_large_catchment_normalize(self): 168 | wfn = weights.step_fn({10: 25}) 169 | result = self.model.three_stage_fca(weight_fn=wfn, normalize=True) 170 | actual = self.model.access_df.iloc[0]["3sfca_value"] 171 | 172 | self.assertEqual(actual, 5) 173 | 174 | def test_three_stage_floating_catchment_area_small_catchment(self): 175 | small_catchment = 0.9 176 | wfn = weights.step_fn({10: 25}) 177 | result = self.model.three_stage_fca(max_cost=small_catchment, weight_fn=wfn) 178 | actual = self.model.access_df.iloc[0]["3sfca_value"] 179 | 180 | self.assertEqual(actual, 1) 181 | 182 | def test_three_stage_floating_catchment_area_zero_catchment(self): 183 | zero_catchment = 0 184 | result = self.model.three_stage_fca(max_cost=zero_catchment) 185 | actual = math.isnan(self.model.access_df.iloc[0]["3sfca_value"]) 186 | 187 | self.assertEqual(actual, True) 188 | 189 | def test_enhanced_two_stage_floating_catchment_area_large_catchment(self): 190 | result = self.model.enhanced_two_stage_fca() 191 | actual = self.model.access_df.iloc[0]["e2sfca_value"] 192 | 193 | self.assertEqual(actual, 5) 194 | 195 | def test_enhanced_two_stage_floating_catchment_area_small_catchment(self): 196 | small_catchment = 0.9 197 | result = self.model.enhanced_two_stage_fca(max_cost=small_catchment) 198 | actual = self.model.access_df.iloc[0]["e2sfca_value"] 199 | 200 | self.assertEqual(actual, 1) 201 | 202 | def test_enhanced_two_stage_floating_catchment_area_zero_catchment(self): 203 | zero_catchment = 0 204 | result = self.model.enhanced_two_stage_fca(max_cost=zero_catchment) 205 | actual = math.isnan(self.model.access_df.iloc[0]["e2sfca_value"]) 206 | 207 | self.assertEqual(actual, True) 208 | -------------------------------------------------------------------------------- /access/tests/test_helpers.py: -------------------------------------------------------------------------------- 1 | from access import Access 2 | from access.access import weights, helpers 3 | 4 | import math 5 | import unittest 6 | 7 | import numpy as np 8 | import pandas as pd 9 | import geopandas as gpd 10 | import util as tu 11 | 12 | 13 | class TestHelpers(unittest.TestCase): 14 | def setUp(self): 15 | n = 5 16 | supply_grid = tu.create_nxn_grid(n) 17 | demand_grid = supply_grid.sample(1) 18 | cost_matrix = tu.create_cost_matrix(supply_grid, "euclidean") 19 | 20 | self.model = Access( 21 | demand_df=demand_grid, 22 | demand_index="id", 23 | demand_value="value", 24 | supply_df=supply_grid, 25 | supply_index="id", 26 | supply_value="value", 27 | cost_df=cost_matrix, 28 | cost_origin="origin", 29 | cost_dest="dest", 30 | cost_name="cost", 31 | neighbor_cost_df=cost_matrix, 32 | neighbor_cost_origin="origin", 33 | neighbor_cost_dest="dest", 34 | neighbor_cost_name="cost", 35 | ) 36 | 37 | def 
test_sanitize_supply_cost_set_cost_as_default(self): 38 | self.model.cost_names.append("other_cost") 39 | helpers.sanitize_supply_cost(self.model, None, "value") 40 | actual = self.model.default_cost 41 | 42 | self.assertEqual(actual, "cost") 43 | 44 | def test_sanitize_supply_cost_raise_ValueError_if_cost_not_found(self): 45 | with self.assertRaises(ValueError): 46 | helpers.sanitize_supply_cost(self.model, "some_cost", "value") 47 | 48 | def test_sanitize_demand_cost_set_cost_as_default(self): 49 | self.model.cost_names.append("other_cost") 50 | helpers.sanitize_demand_cost(self.model, None, "value") 51 | actual = self.model.default_cost 52 | 53 | self.assertEqual(actual, "cost") 54 | 55 | def test_sanitize_demand_cost_raise_ValueError_if_cost_not_found(self): 56 | with self.assertRaises(ValueError): 57 | helpers.sanitize_demand_cost(self.model, "some_cost", "value") 58 | 59 | def test_sanitize_supplies_provide_value_as_string(self): 60 | actual = helpers.sanitize_supplies(self.model, "some_value") 61 | 62 | self.assertEqual(actual, ["some_value"]) 63 | 64 | def test_sanitize_supplies_raise_ValueError_if_input_other_than_str_or_list(self): 65 | with self.assertRaises(ValueError): 66 | result = helpers.sanitize_supplies(self.model, 5) 67 | -------------------------------------------------------------------------------- /access/tests/test_hospital_example.py: -------------------------------------------------------------------------------- 1 | from access import Access 2 | from access.access import weights 3 | 4 | import math 5 | import unittest 6 | 7 | import numpy as np 8 | import pandas as pd 9 | import geopandas as gpd 10 | from access import Access, weights 11 | import util as tu 12 | 13 | 14 | def simple_2sfca(OD, supply, demand, locs, max_travel=61, three_stage=False): 15 | """ 16 | Base Python implementation / sanity check of 2SFCA results. 17 | 18 | Assumes gravity weights with power of -1, i.e. w(c) = 1/c: R_j = S_j / sum_i D_i * w(c_ij), then A_i = sum_j R_j * w(c_ij), over pairs with c_ij < max_travel. 19 | 20 | Params: 21 | OD (pd.DataFrame): origin/destination matrix with origin columns and destinations on index. 22 | supply (Dict[int,int]): amount of supply at each location 23 | demand (Dict[int,int]): amount of demand at each location 24 | locs (List[int]): list of locations. 25 | max_travel (float): catchment cutoff. three_stage (bool): if True, add Huff-style selection weights (3SFCA). 26 | Returns: 27 | Dict[int, float]: access per location 28 | 29 | """ 30 | 31 | GOD = OD 32 | if three_stage: 33 | 34 | W = 1 / OD 35 | 36 | WS = W.sum(axis=0) # Sum over destinations / within columns. 37 | G = W.divide(WS, axis=1) # Divide columns by their sums. 38 | 39 | GOD = OD / G 40 | 41 | D = { 42 | hosp: sum( 43 | demand[res] / GOD[res][hosp] for res in locs if OD[res][hosp] < max_travel 44 | ) 45 | for hosp in locs 46 | } 47 | 48 | R = {l: supply[l] / D[l] for l in locs} 49 | 50 | A = { 51 | res: sum( 52 | R[hosp] / GOD[res][hosp] for hosp in locs if OD[res][hosp] < max_travel 53 | ) 54 | for res in locs 55 | } 56 | 57 | return A 58 | 59 | 60 | class TestHospitalExample(unittest.TestCase): 61 | def setUp(self): 62 | 63 | tracts = pd.DataFrame( 64 | [ 65 | {"geoid": 1, "pop": 100, "doc": 15}, 66 | {"geoid": 2, "pop": 50, "doc": 20}, 67 | {"geoid": 3, "pop": 10, "doc": 100}, 68 | ] 69 | ) 70 | 71 | self.costs = [] 72 | 73 | # Scenario 0 is gridlock 74 | # Gridlock. 
Travel is congested in both directions. 75 | self.costs.append( 76 | pd.DataFrame( 77 | [ 78 | # Self 79 | {"origin": 1, "dest": 1, "cost": 1}, 80 | {"origin": 2, "dest": 2, "cost": 1}, 81 | {"origin": 3, "dest": 3, "cost": 1}, 82 | # Inbound 83 | {"origin": 1, "dest": 3, "cost": 40}, 84 | {"origin": 2, "dest": 3, "cost": 40}, 85 | # Outbound 86 | {"origin": 3, "dest": 1, "cost": 40}, 87 | {"origin": 3, "dest": 2, "cost": 40}, 88 | # Cross-city 89 | {"origin": 1, "dest": 2, "cost": 80}, 90 | {"origin": 2, "dest": 1, "cost": 80}, 91 | ] 92 | ) 93 | ) 94 | 95 | # Scenario 1 is faster travel to the city 96 | # Commuter toll lane into the city (similar dynamics to PM peak) 97 | self.costs.append( 98 | pd.DataFrame( 99 | [ 100 | # Self 101 | {"origin": 1, "dest": 1, "cost": 1}, 102 | {"origin": 2, "dest": 2, "cost": 1}, 103 | {"origin": 3, "dest": 3, "cost": 1}, 104 | # Inbound -- faster 105 | {"origin": 1, "dest": 3, "cost": 20}, 106 | {"origin": 2, "dest": 3, "cost": 20}, 107 | # Outbound 108 | {"origin": 3, "dest": 1, "cost": 40}, 109 | {"origin": 3, "dest": 2, "cost": 40}, 110 | # Cross-city -- should also be 40 + 20, but leaving... 111 | {"origin": 1, "dest": 2, "cost": 80}, 112 | {"origin": 2, "dest": 1, "cost": 80}, 113 | ] 114 | ) 115 | ) 116 | 117 | # Scenario 2 is faster travel out of the city 118 | # Commuter toll lane out of the city (similar dynamics to AM peak) 119 | self.costs.append( 120 | pd.DataFrame( 121 | [ 122 | # Self 123 | {"origin": 1, "dest": 1, "cost": 1}, 124 | {"origin": 2, "dest": 2, "cost": 1}, 125 | {"origin": 3, "dest": 3, "cost": 1}, 126 | # Inbound 127 | {"origin": 1, "dest": 3, "cost": 40}, 128 | {"origin": 2, "dest": 3, "cost": 40}, 129 | # Outbound - faster 130 | {"origin": 3, "dest": 1, "cost": 20}, 131 | {"origin": 3, "dest": 2, "cost": 20}, 132 | # Cross-city - should also be 40 + 20. 133 | {"origin": 1, "dest": 2, "cost": 80}, 134 | {"origin": 2, "dest": 1, "cost": 80}, 135 | ] 136 | ) 137 | ) 138 | 139 | # Scenario 3 is symmetric, but faster travel. 140 | # Travel improves in both directions at once (no directional toll lane). 141 | self.costs.append( 142 | pd.DataFrame( 143 | [ 144 | # Self 145 | {"origin": 1, "dest": 1, "cost": 1}, 146 | {"origin": 2, "dest": 2, "cost": 1}, 147 | {"origin": 3, "dest": 3, "cost": 1}, 148 | # Inbound 149 | {"origin": 1, "dest": 3, "cost": 20}, 150 | {"origin": 2, "dest": 3, "cost": 20}, 151 | # Outbound - faster 152 | {"origin": 3, "dest": 1, "cost": 20}, 153 | {"origin": 3, "dest": 2, "cost": 20}, 154 | # Cross-city -- twice each half... 155 | {"origin": 1, "dest": 2, "cost": 40}, 156 | {"origin": 2, "dest": 1, "cost": 40}, 157 | ] 158 | ) 159 | ) 160 | 161 | # Input parameters fixed across scenarios. 162 | # Neighbor cost not used; suppressed to avoid confusion. 163 | params = dict( 164 | demand_df=tracts, 165 | demand_index="geoid", 166 | demand_value="pop", 167 | supply_df=tracts, 168 | supply_index="geoid", 169 | supply_value="doc", 170 | cost_origin="origin", 171 | cost_dest="dest", 172 | cost_name="cost", 173 | ) 174 | 175 | # Dictionaries for simple version. 176 | locs = [1, 2, 3] 177 | pops = tracts.set_index("geoid")["pop"].to_dict() 178 | docs = tracts.set_index("geoid")["doc"].to_dict() 179 | 180 | # Instantiate the objects and run access. 
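# A sketch of what the loop below cross-checks, under the assumptions baked into simple_2sfca above (gravity weight w(c) = 1/c, catchment cutoff 61): two_stage_fca should reproduce R_j = S_j / sum_i D_i * w(c_ij) followed by A_i = sum_j R_j * w(c_ij), and three_stage_fca the same with w further scaled by the Huff-style, column-normalized selection weight G.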
181 | self.val_2sfca = {} 182 | self.val_3sfca = {} 183 | self.reference_2sfca = {} 184 | self.reference_3sfca = {} 185 | 186 | for n, costs in enumerate(self.costs): 187 | 188 | a = Access(**params, cost_df=costs) 189 | a.two_stage_fca( 190 | name=f"2sfca_s{n}", weight_fn=weights.gravity(1, -1), max_cost=61 191 | ) 192 | a.three_stage_fca( 193 | name=f"3sfca_s{n}", weight_fn=weights.gravity(1, -1), max_cost=61 194 | ) 195 | 196 | self.val_2sfca[n] = a.access_df[f"2sfca_s{n}_doc"].to_dict() 197 | self.val_3sfca[n] = a.access_df[f"3sfca_s{n}_doc"].to_dict() 198 | 199 | OD = costs.pivot_table(index="origin", columns="dest", values="cost").T 200 | 201 | self.reference_2sfca[n] = simple_2sfca(OD, docs, pops, locs) 202 | self.reference_3sfca[n] = simple_2sfca( 203 | OD, docs, pops, locs, three_stage=True 204 | ) 205 | 206 | def test_simple_2sfca_scenario_0(self): 207 | 208 | self.assertAlmostEqual(self.val_2sfca[0][1], self.reference_2sfca[0][1]) 209 | self.assertAlmostEqual(self.val_2sfca[0][2], self.reference_2sfca[0][2]) 210 | self.assertAlmostEqual(self.val_2sfca[0][3], self.reference_2sfca[0][3]) 211 | 212 | def test_simple_2sfca_scenario_1(self): 213 | 214 | self.assertAlmostEqual(self.val_2sfca[1][1], self.reference_2sfca[1][1]) 215 | self.assertAlmostEqual(self.val_2sfca[1][2], self.reference_2sfca[1][2]) 216 | self.assertAlmostEqual(self.val_2sfca[1][3], self.reference_2sfca[1][3]) 217 | 218 | def test_simple_2sfca_scenario_2(self): 219 | 220 | self.assertAlmostEqual(self.val_2sfca[2][1], self.reference_2sfca[2][1]) 221 | self.assertAlmostEqual(self.val_2sfca[2][2], self.reference_2sfca[2][2]) 222 | self.assertAlmostEqual(self.val_2sfca[2][3], self.reference_2sfca[2][3]) 223 | 224 | def test_simple_2sfca_scenario_3(self): 225 | 226 | self.assertAlmostEqual(self.val_2sfca[3][1], self.reference_2sfca[3][1]) 227 | self.assertAlmostEqual(self.val_2sfca[3][2], self.reference_2sfca[3][2]) 228 | self.assertAlmostEqual(self.val_2sfca[3][3], self.reference_2sfca[3][3]) 229 | 230 | def test_scenario_0_v_1(self): 231 | 232 | # access at 1 should increase. Supply at 3 is more pertinent / lower cost since people can get there faster. 233 | self.assertTrue(self.val_2sfca[1][1] > self.val_2sfca[0][1]) 234 | 235 | # access at 2 should increase. Same reasoning as above. 236 | self.assertTrue(self.val_2sfca[1][2] > self.val_2sfca[0][2]) 237 | 238 | # access at 3 should decrease. More patients from 1 and 2 means greater demands on 3's doctors. 239 | self.assertTrue(self.val_2sfca[1][3] < self.val_2sfca[0][3]) 240 | 241 | def test_scenario_0_v_2(self): 242 | 243 | # access at 1 should decrease. There is more demand coming from 3 since people can come from there faster 244 | self.assertTrue(self.val_2sfca[2][1] < self.val_2sfca[0][1]) 245 | 246 | # access at 2 should decrease. There is more demand coming from 3 since people can come from there faster 247 | self.assertTrue(self.val_2sfca[2][2] < self.val_2sfca[0][2]) 248 | 249 | # access at 3 should increase. There is more supply available from 1,2 since people can get there faster 250 | self.assertTrue(self.val_2sfca[2][3] > self.val_2sfca[0][3]) 251 | 252 | def test_scenario_0_v_3(self): 253 | 254 | # access at 1 should increase. It is easier to use the place with more docs. 255 | self.assertTrue(self.val_2sfca[3][1] > self.val_2sfca[0][1]) 256 | 257 | # access at 2 should increase. Same reasoning as for 1. 258 | self.assertTrue(self.val_2sfca[3][2] > self.val_2sfca[0][2]) 259 | 260 | # access at 3 should decrease. 
Same but in reverse -- suburbanites are using "urban" supply. 261 | self.assertTrue(self.val_2sfca[3][3] < self.val_2sfca[0][3]) 262 | 263 | def test_simple_3sfca_scenario_0(self): 264 | 265 | self.assertAlmostEqual(self.val_3sfca[0][1], self.reference_3sfca[0][1]) 266 | self.assertAlmostEqual(self.val_3sfca[0][2], self.reference_3sfca[0][2]) 267 | self.assertAlmostEqual(self.val_3sfca[0][3], self.reference_3sfca[0][3]) 268 | 269 | def test_simple_3sfca_scenario_1(self): 270 | 271 | self.assertAlmostEqual(self.val_3sfca[1][1], self.reference_3sfca[1][1]) 272 | self.assertAlmostEqual(self.val_3sfca[1][2], self.reference_3sfca[1][2]) 273 | self.assertAlmostEqual(self.val_3sfca[1][3], self.reference_3sfca[1][3]) 274 | 275 | def test_simple_3sfca_scenario_2(self): 276 | 277 | self.assertAlmostEqual(self.val_3sfca[2][1], self.reference_3sfca[2][1]) 278 | self.assertAlmostEqual(self.val_3sfca[2][2], self.reference_3sfca[2][2]) 279 | self.assertAlmostEqual(self.val_3sfca[2][3], self.reference_3sfca[2][3]) 280 | 281 | def test_simple_3sfca_scenario_3(self): 282 | 283 | self.assertAlmostEqual(self.val_3sfca[3][1], self.reference_3sfca[3][1]) 284 | self.assertAlmostEqual(self.val_3sfca[3][2], self.reference_3sfca[3][2]) 285 | self.assertAlmostEqual(self.val_3sfca[3][3], self.reference_3sfca[3][3]) 286 | -------------------------------------------------------------------------------- /access/tests/test_misc.py: -------------------------------------------------------------------------------- 1 | from access import Access 2 | from access.access import weights 3 | 4 | import math 5 | import unittest 6 | 7 | import numpy as np 8 | import pandas as pd 9 | import geopandas as gpd 10 | import util as tu 11 | 12 | 13 | class TestMisc(unittest.TestCase): 14 | def setUp(self): 15 | n = 5 16 | supply_grid = tu.create_nxn_grid(n) 17 | demand_grid = supply_grid.sample(1) 18 | cost_matrix = tu.create_cost_matrix(supply_grid, "euclidean") 19 | 20 | self.model = Access( 21 | demand_df=demand_grid, 22 | demand_index="id", 23 | demand_value="value", 24 | supply_df=supply_grid, 25 | supply_index="id", 26 | supply_value="value", 27 | cost_df=cost_matrix, 28 | cost_origin="origin", 29 | cost_dest="dest", 30 | cost_name="cost", 31 | neighbor_cost_df=cost_matrix, 32 | neighbor_cost_origin="origin", 33 | neighbor_cost_dest="dest", 34 | neighbor_cost_name="cost", 35 | ) 36 | 37 | def test_score_half_weight_halves_original_value(self): 38 | self.model.raam() 39 | self.model.score(col_dict={"raam_value": 0.5}) 40 | expected = self.model.access_df["raam_value"].iloc[0] / 2 41 | actual = self.model.access_df["score"].iloc[0] 42 | 43 | self.assertEqual(actual, expected) 44 | 45 | def test_score_run_again_and_test_overwrite(self): 46 | self.model.raam() 47 | self.model.score(col_dict={"raam_value": 0.5}) 48 | 49 | self.model.score(col_dict={"raam_value": 0.25}) 50 | expected = self.model.access_df["raam_value"].iloc[0] / 4 51 | actual = self.model.access_df["score"].iloc[0] 52 | 53 | self.assertEqual(actual, expected) 54 | 55 | def test_score_invalid_access_value_raises_value_error(self): 56 | with self.assertRaises(ValueError): 57 | bad_access_value = "Not in access df" 58 | self.model.score(col_dict={bad_access_value: 0.5}) 59 | 60 | def test_set_cost_recognizes_column_newly_added(self): 61 | self.model.cost_names.append("new_cost") 62 | 63 | self.model.default_cost = "new_cost" 64 | actual = self.model.default_cost 65 | 66 | self.assertEqual(actual, "new_cost") 67 | 68 | def
test_set_cost_unavailable_cost_measure_raises_value_error(self): 69 | with self.assertRaises(ValueError): 70 | bad_cost_name = "Not an available cost name" 71 | self.model.default_cost = bad_cost_name 72 | 73 | def test_set_cost_neighbors(self): 74 | self.model.neighbor_cost_names.append("new_cost") 75 | 76 | self.model.neighbor_default_cost = "new_cost" 77 | actual = self.model.neighbor_default_cost 78 | 79 | self.assertEqual(actual, "new_cost") 80 | 81 | def test_set_cost_neighbors_unavailable_cost_measure_raises_value_error(self): 82 | with self.assertRaises(ValueError): 83 | bad_cost_name = "Not an available cost name" 84 | self.model.neighbor_default_cost = bad_cost_name 85 | 86 | def test_user_cost_adds_new_column_to_cost_df(self): 87 | new_cost = self.model.cost_df.copy() 88 | new_cost["new_cost"] = 0 89 | 90 | self.model.append_user_cost( 91 | new_cost_df=new_cost, name="new_cost", origin="origin", destination="dest" 92 | ) 93 | 94 | actual = "new_cost" in self.model.cost_df.columns 95 | 96 | self.assertEqual(actual, True) 97 | 98 | def test_user_cost_adds_new_column_to_cost_names(self): 99 | new_cost = self.model.cost_df.copy() 100 | new_cost["new_cost"] = 0 101 | 102 | self.model.append_user_cost( 103 | new_cost_df=new_cost, name="new_cost", origin="origin", destination="dest" 104 | ) 105 | 106 | actual = "new_cost" in self.model.cost_names 107 | 108 | self.assertEqual(actual, True) 109 | 110 | def test_user_cost_neighbors_adds_new_column_to_neighbor_cost_df(self): 111 | new_cost = self.model.neighbor_cost_df.copy() 112 | new_cost["new_cost"] = 0 113 | 114 | self.model.append_user_cost_neighbors( 115 | new_cost_df=new_cost, name="new_cost", origin="origin", destination="dest" 116 | ) 117 | 118 | actual = "new_cost" in self.model.neighbor_cost_df.columns 119 | 120 | self.assertEqual(actual, True) 121 | 122 | def test_user_cost_neighbors_adds_new_column_to_neighbor_cost_names(self): 123 | new_cost = self.model.neighbor_cost_df.copy() 124 | new_cost["new_cost"] = 0 125 | 126 | self.model.append_user_cost_neighbors( 127 | new_cost_df=new_cost, name="new_cost", origin="origin", destination="dest" 128 | ) 129 | 130 | actual = "new_cost" in self.model.neighbor_cost_names 131 | 132 | self.assertEqual(actual, True) 133 | 134 | def test_norm_access_df(self): 135 | self.model.raam() 136 | self.model.fca_ratio() 137 | 138 | normalized_df = self.model.norm_access_df 139 | 140 | actual1 = normalized_df["fca_value"].iloc[0] 141 | 142 | self.assertEqual(actual1, 1) 143 | 144 | actual2 = normalized_df["raam_value"].iloc[0] 145 | 146 | self.assertEqual(actual2, 1) 147 | -------------------------------------------------------------------------------- /access/tests/test_raam.py: -------------------------------------------------------------------------------- 1 | from access import Access 2 | from access.access import weights 3 | 4 | import math 5 | import unittest 6 | 7 | import numpy as np 8 | import pandas as pd 9 | import geopandas as gpd 10 | import util as tu 11 | 12 | 13 | class TestRAAM(unittest.TestCase): 14 | def setUp(self): 15 | n = 5 16 | supply_grid = tu.create_nxn_grid(n) 17 | demand_grid = supply_grid.sample(1) 18 | cost_matrix = tu.create_cost_matrix(supply_grid, "euclidean") 19 | 20 | self.model = Access( 21 | demand_df=demand_grid, 22 | demand_index="id", 23 | demand_value="value", 24 | supply_df=supply_grid, 25 | supply_index="id", 26 | supply_value="value", 27 | cost_df=cost_matrix, 28 | cost_origin="origin", 29 | cost_dest="dest", 30 | cost_name="cost", 31 | neighbor_cost_df=cost_matrix, 32 |
neighbor_cost_origin="origin", 33 | neighbor_cost_dest="dest", 34 | neighbor_cost_name="cost", 35 | ) 36 | 37 | def test_raam_single_demand_location_equals_sum_of_supply(self): 38 | self.model.raam() 39 | 40 | expected = self.model.supply_df.value.sum() 41 | actual = self.model.access_df["raam_value"].iloc[0] 42 | 43 | self.assertEqual(expected, actual) 44 | 45 | def test_raam_single_demand_location_equals_sum_of_supply_initial_step_int(self): 46 | self.model.raam(initial_step=1) 47 | 48 | expected = self.model.supply_df.value.sum() 49 | actual = self.model.access_df["raam_value"].iloc[0] 50 | 51 | self.assertEqual(expected, actual) 52 | 53 | def test_raam_single_demand_location_equals_sum_of_supply_min_step(self): 54 | self.model.raam(min_step=1, verbose=True) 55 | 56 | expected = self.model.supply_df.value.sum() 57 | actual = self.model.access_df["raam_value"].iloc[0] 58 | 59 | self.assertEqual(expected, actual) 60 | 61 | def test_raam_run_again_and_test_overwrite(self): 62 | self.model.raam() 63 | self.model.raam() 64 | 65 | expected = self.model.supply_df.value.sum() 66 | actual = self.model.access_df["raam_value"].iloc[0] 67 | 68 | self.assertEqual(expected, actual) 69 | 70 | def test_raam_single_demand_location_equals_sum_of_supply_normalize(self): 71 | self.model.raam(normalize=True) 72 | 73 | actual = self.model.access_df["raam_value"].iloc[0] 74 | 75 | self.assertEqual(actual, 25) 76 | -------------------------------------------------------------------------------- /access/tests/test_weighted_catchment.py: -------------------------------------------------------------------------------- 1 | from access import Access 2 | from access.access import weights 3 | 4 | import math 5 | import unittest 6 | 7 | import numpy as np 8 | import pandas as pd 9 | import geopandas as gpd 10 | import util as tu 11 | 12 | 13 | class TestWeightedCatchment(unittest.TestCase): 14 | def setUp(self): 15 | n = 5 16 | supply_grid = tu.create_nxn_grid(n) 17 | demand_grid = supply_grid.sample(1) 18 | cost_matrix = tu.create_cost_matrix(supply_grid, "euclidean") 19 | 20 | self.model = Access( 21 | demand_df=demand_grid, 22 | demand_index="id", 23 | demand_value="value", 24 | supply_df=supply_grid, 25 | supply_index="id", 26 | supply_value="value", 27 | cost_df=cost_matrix, 28 | cost_origin="origin", 29 | cost_dest="dest", 30 | cost_name="cost", 31 | ) 32 | 33 | def test_weighted_catchment_small_catchment_weight_1(self): 34 | catchment = 0.5 35 | weight = 1 36 | result = self.model.weighted_catchment( 37 | name="test", weight_fn=weights.step_fn({catchment: weight}) 38 | ) 39 | actual = result.iloc[0]["test_value"] 40 | self.assertEqual(actual, 1) 41 | 42 | def test_weighted_catchment_small_catchment_weight_x(self): 43 | catchment = 0.5 44 | weight = 0.5 45 | result = self.model.weighted_catchment( 46 | name="test", weight_fn=weights.step_fn({catchment: weight}) 47 | ) 48 | actual = result.iloc[0]["test_value"] 49 | self.assertEqual(actual, 0.5) 50 | 51 | def test_weighted_catchment_large_catchment_weight_1(self): 52 | catchment = 10 53 | weight = 1 54 | result = self.model.weighted_catchment( 55 | name="test", weight_fn=weights.step_fn({catchment: weight}) 56 | ) 57 | actual = result.iloc[0]["test_value"] 58 | self.assertEqual(actual, 25) 59 | 60 | def test_weighted_catchment_run_again_and_test_overwrite(self): 61 | catchment = 0.5 62 | weight = 1 63 | result = self.model.weighted_catchment( 64 | name="test", weight_fn=weights.step_fn({catchment: weight}) 65 | ) 66 | result = self.model.weighted_catchment( 67 | 
name="test", weight_fn=weights.step_fn({catchment: weight}) 68 | ) 69 | actual = result.iloc[0]["test_value"] 70 | self.assertEqual(actual, 1) 71 | 72 | def test_weighted_catchment_large_catchment_weight_1_normalized(self): 73 | catchment = 10 74 | weight = 1 75 | result = self.model.weighted_catchment( 76 | name="test", weight_fn=weights.step_fn({catchment: weight}), normalize=True 77 | ) 78 | actual = result.iloc[0]["test_value"] 79 | self.assertEqual(actual, 1) 80 | 81 | def test_weighted_catchment_with_gravity_weights(self): 82 | n = 5 83 | supply_grid = tu.create_nxn_grid(n) 84 | demand_grid = supply_grid 85 | cost_matrix = tu.create_cost_matrix(supply_grid, "euclidean") 86 | 87 | self.model = Access( 88 | demand_df=demand_grid, 89 | demand_index="id", 90 | demand_value="value", 91 | supply_df=supply_grid, 92 | supply_index="id", 93 | supply_value="value", 94 | cost_df=cost_matrix, 95 | cost_origin="origin", 96 | cost_dest="dest", 97 | cost_name="cost", 98 | ) 99 | 100 | gravity = weights.gravity(scale=60, alpha=1) 101 | self.model.weighted_catchment(name="gravity", weight_fn=gravity) 102 | 103 | ids = [1, 5, 13, 19, 24] 104 | expected_vals = [ 105 | 1.322340210, 106 | 1.322340210, 107 | 0.780985109, 108 | 0.925540119, 109 | 1.133733026, 110 | ] 111 | 112 | for id, expected in zip(ids, expected_vals): 113 | actual = self.model.access_df.gravity_value.loc[id] 114 | 115 | self.assertAlmostEqual(actual, expected) 116 | 117 | 118 | if __name__ == "__main__": 119 | unittest.main() 120 | -------------------------------------------------------------------------------- /access/tests/test_weights.py: -------------------------------------------------------------------------------- 1 | from access.access import weights 2 | 3 | import math 4 | import unittest 5 | from random import randint 6 | 7 | import numpy as np 8 | import pandas as pd 9 | import util as tu 10 | 11 | 12 | class TestWeights(unittest.TestCase): 13 | def setUp(self): 14 | self.r_int = randint(2, 101) 15 | self.series = pd.Series(range(1, self.r_int)) 16 | 17 | def apply_weight_fn(self, weight_fn): 18 | weight_vs = self.series.apply(weight_fn) 19 | w_applied = self.series * weight_vs 20 | 21 | return w_applied 22 | 23 | def test_step_fn_all_weight_zero_equals_zero_sum(self): 24 | weight_fn = weights.step_fn({self.r_int: 0}) 25 | w_applied = self.apply_weight_fn(weight_fn) 26 | 27 | expected = w_applied.sum() 28 | self.assertEqual(expected, 0) 29 | 30 | def test_step_fn_all_weight_one_equals_self(self): 31 | weight_fn = weights.step_fn({self.r_int: 1}) 32 | w_applied = self.apply_weight_fn(weight_fn) 33 | 34 | expected = w_applied.sum() 35 | actual = self.series.sum() 36 | 37 | self.assertEqual(expected, actual) 38 | 39 | def test_step_fn_all_weight_half_equals_half(self): 40 | weight_fn = weights.step_fn({self.r_int: 0.5}) 41 | w_applied = self.apply_weight_fn(weight_fn) 42 | 43 | expected = w_applied.sum() 44 | actual = self.series.sum() / 2 45 | 46 | self.assertEqual(expected, actual) 47 | 48 | def test_step_fn_all_weight_two_equals_twice(self): 49 | weight_fn = weights.step_fn({self.r_int: 2}) 50 | w_applied = self.apply_weight_fn(weight_fn) 51 | 52 | expected = w_applied.sum() 53 | actual = self.series.sum() * 2 54 | 55 | self.assertEqual(expected, actual) 56 | 57 | def test_step_fn_negative_weight_raises_error(self): 58 | with self.assertRaises(ValueError): 59 | weight_fn = weights.step_fn({self.r_int: -1}) 60 | 61 | def test_step_fn_non_dict_input_raises_error(self): 62 | with self.assertRaises(TypeError): 63 | 
weights.step_fn(1) 64 | 65 | with self.assertRaises(TypeError): 66 | weights.step_fn("a") 67 | 68 | with self.assertRaises(TypeError): 69 | weights.step_fn([]) 70 | 71 | with self.assertRaises(TypeError): 72 | weights.step_fn(1.0) 73 | 74 | def test_gaussian_width_zero_raises_error(self): 75 | with self.assertRaises(ValueError): 76 | weight_fn = weights.gaussian(0) 77 | w_applied = self.apply_weight_fn(weight_fn) 78 | 79 | def test_gaussian_weight_sigma_one(self): 80 | weight_fn = weights.gaussian(1) 81 | w_applied = self.apply_weight_fn(weight_fn) 82 | 83 | actual = w_applied.loc[0] 84 | 85 | 86 | self.assertAlmostEqual(actual, 0.6065306597) 87 | 88 | def test_gaussian_weight_sigma_varied(self): 89 | sigma_vals = [-50, -2, 2, 50] 90 | expected_vals = [ 91 | 0.99980001, 92 | 0.88249690, 93 | 0.88249690, 94 | 0.99980001, 95 | ] 96 | for sigma, expected in zip(sigma_vals, expected_vals): 97 | weight_fn = weights.gaussian(sigma) 98 | w_applied = self.apply_weight_fn(weight_fn) 99 | 100 | actual = w_applied.loc[0] 101 | 102 | self.assertAlmostEqual(actual, expected) 103 | 104 | def test_gravity_with_zero_alpha(self): 105 | rand_int = randint(1, 100) 106 | weight_fn = weights.gravity(scale=rand_int, alpha=0) 107 | w_applied = self.apply_weight_fn(weight_fn) 108 | 109 | actual = w_applied.loc[0] 110 | 111 | self.assertEqual(actual, 1) 112 | -------------------------------------------------------------------------------- /access/tests/util.py: -------------------------------------------------------------------------------- 1 | import math 2 | import random 3 | 4 | import numpy as np 5 | import pandas as pd 6 | import geopandas as gpd 7 | 8 | 9 | def create_nxn_grid(n, buffer=0, random_values=False, seed=44): 10 | """ 11 | Helper function to create an n x n grid of points in a GeoDataFrame. 12 | 13 | Parameters 14 | ---------- 15 | n: integer determining the size of the resulting grid 16 | buffer: create points with a buffer of the given radius; random_values and seed optionally randomize the 'value' column with a fixed seed 17 | 18 | Returns 19 | ------- 20 | grid: nxn size grid in a GeoDataFrame with columns 'id', 'x', 'y', 'value' 21 | """ 22 | rows = [] 23 | id = 0 24 | value = 1 25 | 26 | random.seed(seed) 27 | 28 | for x in range(n): 29 | for y in range(n): 30 | if random_values: 31 | value = random.randint(1, 200) 32 | id += 1 33 | rows.append({"id": id, "x": x, "y": y, "value": value}) 34 | 35 | data = pd.DataFrame(rows, columns=["id", "x", "y", "value"]) 36 | grid = gpd.GeoDataFrame(data, geometry=gpd.points_from_xy(data.x, data.y)) 37 | 38 | if buffer: 39 | grid = grid.buffer(buffer) 40 | 41 | return grid 42 | 43 | 44 | def create_cost_matrix(grid, dist_func): 45 | """ 46 | Helper function to create a play cost matrix for an nxn grid. 47 | 48 | Parameters 49 | ---------- 50 | grid: an nxn grid in a DataFrame or GeoDataFrame 51 | dist_func: distance function, either euclidean or manhattan 52 | 53 | Returns 54 | ------- 55 | cost_matrix: a cost_matrix of size n**4 with distance between each point 56 | to every other point in the play grid. 
Has columns 'origin', 57 | 'dest', and 'cost' 58 | """ 59 | rows = [] 60 | 61 | funcs = { 62 | "manhattan": lambda i, j: abs(i.x - j.x) + abs(i.y - j.y), 63 | "euclidean": lambda i, j: math.sqrt((i.x - j.x) ** 2 + (i.y - j.y) ** 2), 64 | } 65 | dist_func = funcs[dist_func] 66 | 67 | for x in grid.iterrows(): 68 | x = x[1] 69 | for y in grid.iterrows(): 70 | y = y[1] 71 | dist = dist_func(x, y) 72 | rows.append([x.id, y.id, dist]) 73 | 74 | cost_matrix = pd.DataFrame(rows, columns=["origin", "dest", "cost"]) 75 | 76 | return cost_matrix 77 | -------------------------------------------------------------------------------- /access/weights.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def step_fn(step_dict): 5 | """ 6 | Create a step function from a dictionary. 7 | 8 | Parameters 9 | ---------- 10 | step_dict : dict 11 | Dictionary of cut-offs and weight values. 12 | 13 | Returns 14 | ------- 15 | 16 | weight_function : function 17 | Function returning weight, for input distance or time, *x*. 18 | Values beyond the largest threshold will return 0. 19 | 20 | Examples 21 | -------- 22 | 23 | Import the weights: 24 | 25 | >>> from access import weights 26 | 27 | Create a step function with thresholds at 20, 40, and 60. 28 | Travel costs are in minutes here, but the code cannot tell if you mix units! 29 | 30 | >>> fn = weights.step_fn({20 : 1, 40 : 0.68, 60 : 0.22}) 31 | 32 | >>> {v : fn(v) for v in range(0, 71, 10)} 33 | {0: 1, 10: 1, 20: 1, 30: 0.68, 40: 0.68, 50: 0.22, 60: 0.22, 70: 0} 34 | """ 35 | 36 | if type(step_dict) != dict: 37 | raise TypeError("step_dict must be of type dict.") 38 | 39 | for v in step_dict.values(): 40 | if v < 0: 41 | raise ValueError("All weights must be non-negative.") 42 | 43 | def helper(key_to_test): 44 | 45 | for k, v in sorted(step_dict.items()): 46 | if key_to_test <= k: 47 | return v 48 | 49 | return 0 50 | 51 | return helper 52 | 53 | 54 | def gaussian(sigma): 55 | """ 56 | Create a gaussian weight function, for a specified width, :math:`\sigma`. 57 | The mean / location parameter is assumed to be 0. 58 | Note that the standard normalization of the Gaussian, :math:`1 / \sqrt{2\pi\sigma^2}`, 59 | is *not* applied, so :math:`f(0) = 1` regardless of the value of :math:`\sigma`. 60 | Of course, this is irrelevant if the access values are ultimately normalized. 61 | 62 | Parameters 63 | ---------- 64 | sigma : float 65 | This is the classical width parameter of the Gaussian / Normal distribution. 66 | 67 | Returns 68 | ------- 69 | 70 | weight_function : function 71 | Function returning weight, for input distance or time, *x*. 72 | 73 | Examples 74 | -------- 75 | 76 | Import the weights: 77 | 78 | >>> from access import weights 79 | 80 | Create a gaussian weight function with a width of 20. 81 | Travel costs are in minutes here, but the code cannot tell if you mix units!
82 | 83 | >>> fn = weights.gaussian(sigma = 20) 84 | 85 | >>> {v : fn(v) for v in range(0, 61, 20)} 86 | {0: 1.0, 20: 0.6065306597126334, 40: 0.1353352832366127, 60: 0.011108996538242306} 87 | 88 | Compare this to a simpler formulation: 89 | 90 | >>> import numpy as np 91 | >>> {x : np.exp(-x**2/2) for x in range(4)} 92 | {0: 1.0, 1: 0.6065306597126334, 2: 0.1353352832366127, 3: 0.011108996538242306} 93 | """ 94 | 95 | if sigma == 0: 96 | raise ValueError("Sigma must be non-zero.") 97 | 98 | return lambda x: np.exp(-x * x / (2 * sigma**2)) # / np.sqrt(2*np.pi*sigma**2) 99 | 100 | 101 | def gravity(scale, alpha, min_dist=0): 102 | """ 103 | Create a gravity function from scale (:math:`s`) and power (:math:`\\alpha`) parameters 104 | as well as an optional minimum distance :math:`x_\\text{min}`. 105 | The function is of the form :math:`f(x) = (\\text{max}(x, x_\\text{min})/s)^\\alpha`. 106 | Note that there is no overall normalization. 107 | 108 | Parameters 109 | ---------- 110 | scale : float 111 | Scaling value, normalizing the function input. 112 | alpha : float 113 | Power to which the normalized inputs are raised. 114 | Note that it is not implicitly negative (i.e., :math:`x^\\alpha` instead of :math:`1/x^\\alpha`). 115 | min_dist : float 116 | A 'standard' issue with the gravity model is the infinite potential at 0 distance or time. 117 | This can be rectified crudely by specifying a minimum distance, 118 | and raising any input below that minimum to the minimum itself. 119 | The default threshold is 0. 120 | 121 | Returns 122 | ------- 123 | 124 | weight_function : function 125 | Function returning weight, for input distance or time, *x*. 126 | 127 | Examples 128 | -------- 129 | 130 | Import the weights: 131 | 132 | >>> from access import weights 133 | 134 | Create a gravity function with scale = 20 and alpha = -2. 135 | Travel costs are in minutes here, but the code cannot tell if you mix units! 136 | 137 | >>> fn = weights.gravity(scale = 20, alpha = -2, min_dist = 1) 138 | 139 | >>> {t : round(fn(t), 2) for t in [0, 1, 2, 20, 40, 60]} 140 | {0: 400.0, 1: 400.0, 2: 100.0, 20: 1.0, 40: 0.25, 60: 0.11} 141 | """ 142 | 143 | return lambda x: np.power(max(x, min_dist) / scale, alpha) 144 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | notify: 3 | after_n_builds: 6 4 | coverage: 5 | range: 50..95 6 | round: nearest 7 | precision: 1 8 | status: 9 | project: 10 | default: 11 | threshold: 2% 12 | patch: 13 | default: 14 | threshold: 2% 15 | target: 80% 16 | ignore: 17 | - "tests/*" 18 | comment: 19 | layout: "reach, diff, files" 20 | behavior: once 21 | after_n_builds: 6 22 | require_changes: true 23 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = access 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | 22 | clean: 23 | rm -rf $(BUILDDIR)/* 24 | rm -rf auto_examples/ 25 | -------------------------------------------------------------------------------- /docs/_static/images/access_cook_hospitals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/access_cook_hospitals.png -------------------------------------------------------------------------------- /docs/_static/images/access_cook_hospitals_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/access_cook_hospitals_2.png -------------------------------------------------------------------------------- /docs/_static/images/compare_access.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/compare_access.png -------------------------------------------------------------------------------- /docs/_static/images/csds.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/csds.png -------------------------------------------------------------------------------- /docs/_static/images/euclidean_distance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/euclidean_distance.png -------------------------------------------------------------------------------- /docs/_static/images/fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/fig1.png -------------------------------------------------------------------------------- /docs/_static/images/full_us.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/full_us.jpg -------------------------------------------------------------------------------- /docs/_static/images/googlemaps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/googlemaps.png -------------------------------------------------------------------------------- /docs/_static/images/graphhopper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/graphhopper.png -------------------------------------------------------------------------------- /docs/_static/images/gravity_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/gravity_model.png -------------------------------------------------------------------------------- /docs/_static/images/osrm.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/osrm.png -------------------------------------------------------------------------------- /docs/_static/images/otp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/otp.png -------------------------------------------------------------------------------- /docs/_static/images/pandana.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/pandana.png -------------------------------------------------------------------------------- /docs/_static/images/pgrouting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/pgrouting.png -------------------------------------------------------------------------------- /docs/_static/images/pysal_favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/pysal_favicon.ico -------------------------------------------------------------------------------- /docs/_static/images/r.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/r.png -------------------------------------------------------------------------------- /docs/_static/images/screenshot_cost_website.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/screenshot_cost_website.png -------------------------------------------------------------------------------- /docs/_static/images/thumbnail_workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/thumbnail_workflow.png -------------------------------------------------------------------------------- /docs/_static/images/valhalla.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_static/images/valhalla.png -------------------------------------------------------------------------------- /docs/_static/pysal-styles.css: -------------------------------------------------------------------------------- 1 | /* Make thumbnails with equal heights */ 2 | @media only screen and (min-width : 481px) { 3 | .row.equal-height { 4 | display: flex; 5 | flex-wrap: wrap; 6 | } 7 | .row.equal-height > [class*='col-'] { 8 | display: flex; 9 | flex-direction: column; 10 | } 11 | .row.equal-height.row:after, 12 | .row.equal-height.row:before { 13 | display: flex; 14 | } 15 | 16 | .row.equal-height > [class*='col-'] > .thumbnail, 17 | .row.equal-height > [class*='col-'] > .thumbnail > .caption { 18 | display: flex; 19 | flex: 1 0 auto; 20 | flex-direction: column; 21 | } 22 | 
.row.equal-height > [class*='col-'] > .thumbnail > .caption > .flex-text { 23 | flex-grow: 1; 24 | } 25 | .row.equal-height > [class*='col-'] > .thumbnail > img { 26 | width: 100%; 27 | height: 200px; /* force image's height */ 28 | 29 | /* force image fit inside its "box" */ 30 | -webkit-object-fit: cover; 31 | -moz-object-fit: cover; 32 | -ms-object-fit: cover; 33 | -o-object-fit: cover; 34 | object-fit: cover; 35 | } 36 | } 37 | 38 | .row.extra-bottom-padding{ 39 | margin-bottom: 20px; 40 | } 41 | 42 | 43 | .topnavicons { 44 | margin-left: 10% !important; 45 | } 46 | 47 | .topnavicons li { 48 | margin-left: 0px !important; 49 | min-width: 100px; 50 | text-align: center; 51 | } 52 | 53 | .topnavicons .thumbnail { 54 | margin-right: 10px; 55 | border: none; 56 | box-shadow: none; 57 | text-align: center; 58 | font-size: 85%; 59 | font-weight: bold; 60 | line-height: 10px; 61 | height: 100px; 62 | } 63 | 64 | .topnavicons .thumbnail img { 65 | display: block; 66 | margin-left: auto; 67 | margin-right: auto; 68 | } 69 | 70 | 71 | /* Table with a scrollbar */ 72 | .bodycontainer { max-height: 600px; width: 100%; margin: 0; overflow-y: auto; } 73 | .table-scrollable { margin: 0; padding: 0; } 74 | 75 | .label { 76 | color: #222222; 77 | font-size: 100%; 78 | } 79 | 80 | 81 | .tg {border-collapse:collapse;border-spacing:0;} 82 | .tg td{font-family:Arial, sans-serif;font-size:14px;padding:5px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:black;} 83 | .tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:5px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:black;} 84 | .tg .tg-1wig{font-weight:bold;text-align:center;vertical-align:center} 85 | .tg .tg-0lax{text-align:center;vertical-align:center} 86 | .tg {margin:auto;width:90%!important;margin-top:30px;margin-bottom:30px} 87 | .td {margin:auto;width:50%!important;margin-top:10px;margin-bottom:10px;font-family:Arial, sans-serif} 88 | .table_center {display:block;margin:auto;width:100%;text-align:center;} 89 | 90 | .h2 { font-size: 28px; } 91 | 92 | 93 | 94 | -------------------------------------------------------------------------------- /docs/_static/references.bib: -------------------------------------------------------------------------------- 1 | @book{1960_isard_reganalysis, 2 | author = {Walter Isard}, 3 | title = {Methods of Regional Analysis: Introduction to Regional Science.}, 4 | publisher = {MIT Press}, 5 | year = 1960, 6 | address = {Cambridge, MA}, 7 | } 8 | 9 | @article{2019_saxon_snow_raam, 10 | author = {James Saxon and Daniel Snow}, 11 | title = {A Rational Agent Model for the Spatial Accessibility of Primary Health Care}, 12 | journal = {Annals of the American Association of Geographers}, 13 | volume = {0}, 14 | number = {0}, 15 | pages = {1-18}, 16 | year = {2019}, 17 | publisher = {Taylor \& Francis}, 18 | doi = {10.1080/24694452.2019.1629870} 19 | } 20 | 21 | @article{1963_huff_shopping_trade_areas, 22 | author = {David L. 
Huff}, 23 | journal = {Land Economics}, 24 | number = {1}, 25 | pages = {81--90}, 26 | publisher = {[Board of Regents of the University of Wisconsin System, University of Wisconsin Press]}, 27 | title = {A Probabilistic Analysis of Shopping Center Trade Areas}, 28 | volume = {39}, 29 | year = {1963}, 30 | doi = {10.2307/3144521} 31 | } 32 | 33 | @article{1974_aday_andersen_access_to_care, 34 | author = {Lu Ann Aday and Ronald Andersen}, 35 | title = {A Framework for the Study of Access to Medical Care.}, 36 | journal = {Health Services Research}, 37 | volume = {9}, 38 | number = {3}, 39 | year = {1974}, 40 | pages = {208-220}, 41 | url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1071804/} 42 | } 43 | 44 | @article{1976_weibull_axiomatic_accessibility, 45 | author = {J\"orgen W. Weibull}, 46 | title = "An axiomatic approach to the measurement of accessibility", 47 | journal = "Regional Science and Urban Economics", 48 | volume = "6", 49 | number = "4", 50 | pages = "357 - 379", 51 | year = "1976", 52 | issn = "0166-0462", 53 | doi = "10.1016/0166-0462(76)90031-4" 54 | } 55 | 56 | @article{1982_joseph_potential_physical_accessibility_rural, 57 | author = "Alun E. Joseph and Peter R. Bantock", 58 | title = "Measuring potential physical accessibility to general practitioners in rural areas: A method and case study", 59 | journal = "Social Science \& Medicine", 60 | volume = "16", 61 | number = "1", 62 | pages = "85 - 90", 63 | year = "1982", 64 | issn = "0277-9536", 65 | doi = "10.1016/0277-9536(82)90428-2", 66 | } 67 | 68 | @article{1988_thouez_geographic_accessibility, 69 | author = {Thouez, Jean-Pierre M. and Bodson, Paul and Joseph, Alun E.}, 70 | title = {Some Methods for Measuring the Geographic Accessibility of Medical Services in Rural Regions}, 71 | year = {1988}, 72 | journal = {Medical Care}, 73 | month = {January}, 74 | volume = {26}, 75 | number = {1}, 76 | pages = {34--44}, 77 | url = {http://www.jstor.org/stable/3765239}, 78 | } 79 | 80 | @article{2000_radke_mu_spatial_decomposition, 81 | author = {John Radke and Lan Mu}, 82 | title = {Spatial Decompositions, Modeling and Mapping Services Regions to Predict Access to Social Programs}, 83 | month = {December}, 84 | year = {2000}, 85 | journal = {Geographic Information Science}, 86 | pages = {105--112}, 87 | volume = {6}, 88 | number = {2}, 89 | doi = {10.1080/10824000009480538}, 90 | } 91 | 92 | @article{2002_luo_spatial_accessibility_chicago, 93 | author = {Wei Luo and Fahui Wang}, 94 | title ={Measures of Spatial Accessibility to Health Care in a GIS Environment: Synthesis and a Case Study in the Chicago Region}, 95 | journal = {Environment and Planning B: Planning and Design}, 96 | volume = {30}, 97 | number = {6}, 98 | pages = {865-884}, 99 | year = {2003}, 100 | doi = {10.1068/b29120} 101 | } 102 | 103 | 104 | @article{2004_guagliardo_spatial_accessibility, 105 | author="Guagliardo, Mark F.", 106 | title="Spatial accessibility of primary care: concepts, methods and challenges", 107 | journal="International Journal of Health Geographics", 108 | year="2004", 109 | month="Feb", 110 | day="26", 111 | volume="3", 112 | number="1", 113 | pages="3", 114 | doi="10.1186/1476-072X-3-3", 115 | } 116 | 117 | @article{2004_luo_gis_floating_catchment, 118 | author = "Wei Luo", 119 | title = "Using a GIS-based floating catchment method to assess areas with shortage of physicians", 120 | journal = "Health \& Place", 121 | volume = "10", 122 | number = "1", 123 | pages = "1 - 11", 124 | year = "2004", 125 | doi = 
"10.1016/S1353-8292(02)00067-9", 126 | } 127 | 128 | @article{2004_wang_luo_HPSAs, 129 | title = "Assessing spatial and nonspatial factors for healthcare access: towards an integrated approach to defining health professional shortage areas", 130 | journal = "Health \& Place", 131 | volume = "11", 132 | number = "2", 133 | pages = "131 - 146", 134 | year = "2005", 135 | note = "Special section: Geographies of Intellectual Disability", 136 | doi = "10.1016/j.healthplace.2004.02.003", 137 | author = "Fahui Wang and Wei Luo", 138 | } 139 | 140 | @chapter{2005_tiwari_spatial_filters_population, 141 | author="Tiwari, Chetan and Rushton, Gerard", 142 | title="Using Spatially Adaptive Filters to Map Late Stage Colorectal Cancer Incidence in Iowa", 143 | booktitle="Developments in Spatial Data Handling", 144 | year="2005", 145 | publisher="Springer Berlin Heidelberg", 146 | address="Berlin, Heidelberg", 147 | pages="665--676", 148 | doi = {10.1007/3-540-26772-7\_50} 149 | } 150 | 151 | 152 | @Article{2006_yang_goerge_gis_spatial_accessibility, 153 | author="Yang, Duck-Hye and Goerge, Robert and Mullner, Ross", 154 | title="Comparing GIS-Based Methods of Measuring Spatial Accessibility to Health Services", 155 | journal="Journal of Medical Systems", 156 | year="2006", 157 | month="Feb", 158 | day="1", 159 | volume="30", 160 | number="1", 161 | pages="23--32", 162 | doi="10.1007/s10916-006-7400-5", 163 | } 164 | 165 | @article{2007_rickets_medically_underserved, 166 | author = {Thomas C. Ricketts and Laurie J. Goldsmith and George Mark Holmes and Randy Randolph and Richard Lee Donald H. Taylor, Jr. and Jan Ostermann}, 167 | title = {Designating Places and Populations as Medically Underserved: A Proposal for a New Approach}, 168 | journal = {Journal of Health Care for the Poor and Underserved}, 169 | volume = {18}, 170 | number = {3}, 171 | month = {August}, 172 | year = {2007}, 173 | pages = {567-589}, 174 | doi = {10.1353/hpu.2007.0065}, 175 | } 176 | 177 | 178 | @article{2009_luo_qi_E2SFCA, 179 | author = "Wei Luo and Yi Qi", 180 | title = "An enhanced two-step floating catchment area (E2SFCA) method for measuring spatial accessibility to primary care physicians", 181 | journal = "Health \& Place", 182 | volume = "15", 183 | number = "4", 184 | pages = "1100 - 1107", 185 | year = "2009", 186 | doi = "10.1016/j.healthplace.2009.06.002", 187 | } 188 | 189 | 190 | @article{2009_mcgrail_improved_2SFCA, 191 | author = "Matthew R. McGrail and John S. Humphreys", 192 | title = "Measuring spatial accessibility to primary care in rural areas: Improving the effectiveness of the two-step floating catchment area method", 193 | journal = "Applied Geography", 194 | volume = "29", 195 | number = "4", 196 | pages = "533 - 541", 197 | year = "2009", 198 | issn = "0143-6228", 199 | doi = "10.1016/j.apgeog.2008.12.003", 200 | } 201 | 202 | 203 | @article{2012_mcgrail_improvements_2SFCA, 204 | author="Matthew R. 
McGrail", 205 | title="Spatial accessibility of primary health care utilising the two step floating catchment area method: an assessment of recent improvements", 206 | journal="International Journal of Health Geographics", 207 | year="2012", 208 | month="Nov", 209 | day="16", 210 | volume="11", 211 | number="1", 212 | pages="50", 213 | doi="10.1186/1476-072X-11-50", 214 | } 215 | 216 | 217 | @article{2012_wan_3SFCA, 218 | author = {Neng Wan and Bin Zou and Troy Sternberg}, 219 | title = {A three-step floating catchment area method for analyzing spatial access to health services}, 220 | journal = {International Journal of Geographical Information Science}, 221 | volume = {26}, 222 | number = {6}, 223 | pages = {1073-1089}, 224 | year = {2012}, 225 | publisher = {Taylor \& Francis}, 226 | doi = {10.1080/13658816.2011.624987}, 227 | } 228 | 229 | 230 | @article{2015_polo_location_allocation, 231 | author = {Polo, Gina AND Acosta, C. Mera AND Ferreira, Fernando AND Dias, Ricardo Augusto}, 232 | journal = {PLOS ONE}, 233 | publisher = {Public Library of Science}, 234 | title = {Location-Allocation and Accessibility Models for Improving the Spatial Planning of Public Health Services}, 235 | year = {2015}, 236 | month = {03}, 237 | volume = {10}, 238 | pages = {1-14}, 239 | number = {3}, 240 | doi = {10.1371/journal.pone.0119190} 241 | } 242 | 243 | 244 | @article{2017_apparicio_distance_types, 245 | author="Apparicio, Philippe and Gelb, J{\'e}r{\'e}my and Dub{\'e}, Anne-Sophie and Kingham, Simon and Gauvin, Lise and Robitaille, {\'E}ric", 246 | title="The approaches to measuring the potential spatial access to urban health services revisited: distance types and aggregation-error issues", 247 | journal="International Journal of Health Geographics", 248 | year="2017", 249 | month="Aug", 250 | day="23", 251 | volume="16", 252 | number="1", 253 | pages="32", 254 | doi="10.1186/s12942-017-0105-9", 255 | } 256 | 257 | @article{2015_li_opt_framework, 258 | author="Li, Zihao and Serban, Nicoleta and Swann, Julie L.", 259 | title="An optimization framework for measuring spatial access over healthcare networks", 260 | journal="BMC Health Services Research", 261 | year="2015", 262 | month="Jul", 263 | day="17", 264 | volume="15", 265 | number="1", 266 | pages="273", 267 | doi="10.1186/s12913-015-0919-8", 268 | } 269 | 270 | @incollection{2015_murray_health_facilities, 271 | author="Alan T. Murray and Tony H. 
Grubesic", 272 | title="Locational Planning of Health Care Facilities", 273 | booktitle="Spatial Analysis in Health Geography", 274 | year="2015", 275 | editor="Pavlos Kanaroglou and Eric Delmelle and Antonio P\'aez", 276 | publisher="Ashgate Publishing Limited", 277 | address="Surrey, UK", 278 | pages="243-259", 279 | } 280 | 281 | 282 | @article{2013_mao_busses_at_10mph, 283 | title = "Measuring spatial accessibility to healthcare for populations with multiple transportation modes", 284 | journal = "Health \& Place", 285 | volume = "24", 286 | pages = "115 - 122", 287 | year = "2013", 288 | issn = "1353-8292", 289 | author = "Liang Mao and Dawn Nekorchuk", 290 | doi = "10.1016/j.healthplace.2013.08.008", 291 | } 292 | 293 | @article{2017_serban_primary_care_disparities, 294 | author = {Gentili, Monica and Harati, Pravara and Serban, Nicoleta and O'Connor, Jean and Swann, Julie}, 295 | title = {Quantifying Disparities in Accessibility and Availability of Pediatric Primary Care across Multiple States with Implications for Targeted Interventions}, 296 | journal = {Health Services Research}, 297 | volume = {53}, 298 | number = {3}, 299 | pages = {1458-1477}, 300 | year = {2017}, 301 | doi = {10.1111/1475-6773.12722}, 302 | } 303 | 304 | @article{2003_goodman_pcsa_definition, 305 | author = {Goodman, David C. and Mick, Stephen S. and Bott, David and Stukel, Therese and Chang, Chiang-hua and Marth, Nancy and Poage, Jim and Carretta, Henry J.}, 306 | title = {Primary Care Service Areas: A New Tool for the Evaluation of Primary Care Services}, 307 | journal = {Health Services Research}, 308 | volume = {38}, 309 | number = {1p1}, 310 | pages = {287-309}, 311 | year = {2003}, 312 | keywords = {Primary health care, small-area analysis, health services accessibility, Medicare, Medicaid}, 313 | doi = {10.1111/1475-6773.00116}, 314 | } 315 | 316 | @article{1997_grumbach_supply_access, 317 | author = {Grumbach, Kevin and Vranizan, Karen and Bindman, Andrew B.}, 318 | title = {Physician Supply And Access To Care In Urban Communities}, 319 | journal = {Health Affairs}, 320 | volume = {16}, 321 | number = {1}, 322 | pages = {71-86}, 323 | year = {1997}, 324 | doi = {10.1377/hlthaff.16.1.71} 325 | } 326 | 327 | @article{2015_mcgrail_distance_tolerance, 328 | title = {Accessing doctors at times of need: measuring the distance tolerance of rural residents for health-related travel}, 329 | author = {Matthew R. McGrail and John S. 
Humphreys and Bernadette Ward}, 330 | journal = {BMC Health Services Research}, 331 | address = {London}, 332 | year = {2015}, 333 | volume = {15}, 334 | number = {212}, 335 | doi = {10.1186/s12913-015-0880-6} 336 | } 337 | 338 | @article{2018_tao_yao_shenzhen_2SFCA_multiple_modes, 339 | author="Tao, Zhuolin and Yao, Zaoxing and Kong, Hui and Duan, Fei and Li, Guicai", 340 | title="Spatial accessibility to healthcare services in Shenzhen, China: improving the multi-modal two-step floating catchment area method by estimating travel time via online map APIs", 341 | journal="BMC Health Services Research", 342 | year="2018", 343 | month="May", 344 | day="09", 345 | volume="18", 346 | number="1", 347 | pages="345", 348 | doi="10.1186/s12913-018-3132-8", 349 | } 350 | 351 | @article{2015_langford_2sfca_multiple_modes, 352 | title = "Multi-modal two-step floating catchment area analysis of primary health care accessibility", 353 | journal = "Health \& Place", 354 | volume = "38", 355 | pages = "70 - 81", 356 | year = "2016", 357 | issn = "1353-8292", 358 | doi = "10.1016/j.healthplace.2015.11.007", 359 | author = "Mitchel Langford and Gary Higgs and Richard Fry", 360 | } 361 | 362 | 363 | @article{2018_lin_wan_multi_modal, 364 | author="Lin, Yan and Wan, Neng and Sheets, Sagert and Gong, Xi and Davies, Angela", 365 | title="A multi-modal relative spatial access assessment approach to measure spatial accessibility to primary care providers", 366 | journal="International Journal of Health Geographics", 367 | year="2018", 368 | month="Aug", 369 | day="23", 370 | volume="17", 371 | number="1", 372 | pages="33", 373 | doi="10.1186/s12942-018-0153-9", 374 | } 375 | 376 | @article{2018_wang_2sfca_crowdedness, 377 | author = {Fahui Wang}, 378 | title = {Inverted Two-Step Floating Catchment Area Method for Measuring Facility Crowdedness}, 379 | journal = {The Professional Geographer}, 380 | volume = {70}, 381 | number = {2}, 382 | pages = {251-260}, 383 | year = {2018}, 384 | publisher = {Routledge}, 385 | doi = {10.1080/00330124.2017.1365308}, 386 | } 387 | 388 | 389 | @article{2015_mcgrail_2sfca_australia_national, 390 | author = {Matthew R. McGrail and John Stirling Humphreys}, 391 | title = {Spatial access disparities to primary health care in rural and remote Australia}, 392 | year = {2015}, 393 | journal = {Geospatial Health}, 394 | volume = {10}, 395 | number = {2}, 396 | pages = {138-143}, 397 | doi = {10.4081/gh.2015.358} 398 | } 399 | 400 | @article{150130_fransen_cb2scfa, 401 | author = "Koos Fransen and Tijs Neutens and Philippe De Maeyer and Greet Deruyter", 402 | title = "A commuter-based two-step floating catchment area method for measuring spatial accessibility of daycare centers", 403 | journal = "Health \& Place", 404 | volume = "32", 405 | pages = "65 - 73", 406 | year = "2015", 407 | issn = "1353-8292", 408 | doi = "10.1016/j.healthplace.2015.01.002", 409 | } 410 | 411 | 412 | @article{2010_kringos_primary_care_meta, 413 | author="Kringos, Dionne S. 
and Boerma, Wienke GW and Hutchinson, Allen and van der Zee, Jouke and Groenewegen, Peter P.", 414 | title="The breadth of primary care: a systematic literature review of its core dimensions", 415 | journal="BMC Health Services Research", 416 | year="2010", 417 | month="Mar", 418 | day="13", 419 | volume="10", 420 | number="1", 421 | pages="65", 422 | doi="10.1186/1472-6963-10-65", 423 | } 424 | 425 | @article{1983_andersen_mccutcheon_aday_access_dimensions, 426 | year = {1983}, 427 | title = {Exploring dimensions of access to medical care}, 428 | author = {Ronald M. Andersen and Allan McCutcheon and Lu Ann Aday and Grace Y. Chiu and Ralph Bell}, 429 | pages = {49-74}, 430 | journal = {Health Services Research}, 431 | volume = {18}, 432 | number = {1}, 433 | url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1068709/} 434 | } 435 | 436 | @article{2013_levesque_accessibility_5A_plus_patient_abilities, 437 | author="Levesque, Jean-Frederic and Harris, Mark F. and Russell, Grant", 438 | title="Patient-centred access to health care: conceptualising access at the interface of health systems and populations", 439 | journal="International Journal for Equity in Health", 440 | year="2013", 441 | month="Mar", 442 | day="11", 443 | volume="12", 444 | number="1", 445 | pages="18", 446 | doi="10.1186/1475-9276-12-18", 447 | } 448 | 449 | @article{1994_khan_bhardwaj_spatial_access_conceptual_2_axis, 450 | author = {Abdullah A. Khan and Surinder M. Bhardwaj}, 451 | title = {Access to Health Care: A Conceptual Framework and its Relevance to Health Care Planning}, 452 | journal = {Evaluation \& the Health Professions}, 453 | volume = {17}, 454 | number = {1}, 455 | pages = {60-76}, 456 | year = {1994}, 457 | doi = {10.1177/016327879401700104}, 458 | } 459 | 460 | 461 | 462 | @article{1981_penchansky_thomas_concept_of_access, 463 | doi = {10.2307/3764310}, 464 | author = {Roy Penchansky and J. William Thomas}, 465 | journal = {Medical Care}, 466 | number = {2}, 467 | pages = {127--140}, 468 | publisher = {Lippincott Williams \& Wilkins}, 469 | title = {The Concept of Access: Definition and Relationship to Consumer Satisfaction}, 470 | volume = {19}, 471 | year = {1981} 472 | } 473 | 474 | @article{2017_jia_empirical_distances_huff_hsa, 475 | author = {Peng Jia and Fahui Wang and Imam M. Xierali}, 476 | title = {Using a Huff-Based Model to Delineate Hospital Service Areas}, 477 | journal = {The Professional Geographer}, 478 | volume = {69}, 479 | number = {4}, 480 | pages = {522-530}, 481 | year = {2017}, 482 | publisher = {Routledge}, 483 | doi = {10.1080/00330124.2016.1266950}, 484 | } 485 | 486 | 487 | @article{2012_wan_spar, 488 | author = "Neng Wan and F. Benjamin Zhan and Bin Zou and Edwin Chow", 489 | title = "A relative spatial access assessment approach for analyzing potential spatial access to colorectal cancer services in Texas", 490 | journal = "Applied Geography", 491 | volume = "32", 492 | number = "2", 493 | pages = "291 - 299", 494 | year = "2012", 495 | issn = "0143-6228", 496 | doi = "10.1016/j.apgeog.2011.05.001", 497 | } 498 | 499 | 500 | @article{2016_donohoe_spar, 501 | author="Donohoe, Joseph and Marshall, Vincent and Tan, Xi and Camacho, Fabian T. 
and Anderson, Roger and Balkrishnan, Rajesh", 502 | title="Evaluating and comparing methods for measuring spatial access to mammography centers in Appalachia", 503 | journal="Health Services and Outcomes Research Methodology", 504 | year="2016", 505 | month="Jun", 506 | day="01", 507 | volume="16", 508 | number="1", 509 | pages="22--40", 510 | doi="10.1007/s10742-016-0143-y", 511 | } 512 | 513 | 514 | @article{2013_salonen_travel_times_car_transport, 515 | author = "Maria Salonen and Tuuli Toivonen", 516 | title = "Modelling travel time in urban networks: comparable measures for private car and public transport", 517 | journal = "Journal of Transport Geography", 518 | volume = "31", 519 | pages = "143 - 153", 520 | year = "2013", 521 | issn = "0966-6923", 522 | doi = "10.1016/j.jtrangeo.2013.06.011", 523 | } 524 | 525 | @article{1972_donabedian_organizing_evaluating_delivery_personal_health, 526 | doi = {10.2307/3349436}, 527 | author = {Avedis Donabedian}, 528 | journal = {The Milbank Memorial Fund Quarterly}, 529 | number = {4}, 530 | pages = {103--154}, 531 | publisher = {[Milbank Memorial Fund, Wiley]}, 532 | title = {Models for Organizing the Delivery of Personal Health Services and Criteria for Evaluating Them}, 533 | volume = {50}, 534 | year = {1972} 535 | } 536 | 537 | 538 | 539 | @misc{osm, 540 | author = {{OpenStreetMap contributors}}, 541 | title = {{North America data retrieved from http://download.geofabrik.de/}}, 542 | year = {2018}, 543 | month = {June}, 544 | day = {1}, 545 | url = {https://www.openstreetmap.org}, 546 | } 547 | 548 | @techreport{2014_graham_lodes_acs_comparison, 549 | author={Matthew R. Graham and Mark J. Kutzbach and Brian McKenzie}, 550 | title={{Design Comparison of LODES and ACS Commuting Data Products}}, 551 | year=2014, 552 | month={October}, 553 | institution={Center for Economic Studies, U.S. Census Bureau}, 554 | type={Working Papers}, 555 | number={14-38}, 556 | url = {https://ideas.repec.org/p/cen/wpaper/14-38.html} 557 | } 558 | 559 | @article{2002_hart_rural, 560 | year = {2002}, 561 | author = {Hart, L. Gary and Salsberg, Edward and Phillips, Debra M. and Lishner, Denise M.}, 562 | title = {Rural Health Care Providers in the United States}, 563 | journal = {The Journal of Rural Health}, 564 | volume = {18}, 565 | number = {5}, 566 | pages = {211-231}, 567 | doi = {10.1111/j.1748-0361.2002.tb00932.x}, 568 | } 569 | 570 | @article{2014_weinhold_understanding_shortages, 571 | title = {Understanding shortages of sufficient health care in rural areas}, 572 | volume = {118}, 573 | doi = {10.1016/j.healthpol.2014.07.018}, 574 | language = {en}, 575 | number = {2}, 576 | urldate = {2018-03-08}, 577 | journal = {Health Policy}, 578 | author = {Weinhold, Ines and Gurtner, Sebastian}, 579 | month = {November}, 580 | year = {2014}, 581 | pages = {201--214}, 582 | } 583 | 584 | @misc{ipums, 585 | title={Integrated Public Use Microdata Series}, 586 | year={2017}, 587 | doi={10.18128/D010.V7.0}, 588 | author={Steven Ruggles and Katie Genadek and Ronald Goeken and Josiah Grover and Matthew Sobek}, 589 | publisher={University of Minnesota}, 590 | volume = {7.0} 591 | } 592 | 593 | 594 | @article{1970_horton_reynolds_action_spaces, 595 | author = {Frank E. Horton and David R. 
Reynolds}, 596 | title = {Action Space Formation: A Behavioral Approach to Predicting Urban Travel Behavior}, 597 | journal = {Highway Research Record}, 598 | volume = {322}, 599 | pages = {136-148}, 600 | year = {1970}, 601 | url = {https://trid.trb.org/view/134288} 602 | } 603 | 604 | @phdthesis{1976_lenntorp_prisms, 605 | author = {Bo Lenntorp}, 606 | title = {Paths in space-time environments: a time-geographic study of movement possibilities of individuals}, 607 | school = {Royal University of Lund, Dept. of Geography}, 608 | address = {Lund}, 609 | year = {1976} 610 | } 611 | 612 | 613 | @article{1970_hagerstrand_people_in_regional_science, 614 | author = {Torsten H\"agerstrand}, 615 | title = {What about people in regional science?}, 616 | journal = {Papers in Regional Science}, 617 | volume = {24}, 618 | number = {1}, 619 | pages = {7-24}, 620 | year = {1970}, 621 | doi = {10.1111/j.1435-5597.1970.tb01464.x} 622 | } 623 | 624 | @article{1999_dijst_action_spaces_planning, 625 | author="Dijst, Martin", 626 | title="Action space as planning concept in spatial planning", 627 | journal="Netherlands Journal of Housing and the Built Environment", 628 | year="1999", 629 | month="June", 630 | day="1", 631 | volume="14", 632 | number="2", 633 | pages="163--182", 634 | issn="1573-7772", 635 | doi="10.1007/BF02496820", 636 | } 637 | 638 | @article{2015_patterson_path_areas_activity_spaces, 639 | author = {Zachary Patterson and Steven Farber}, 640 | title = {Potential Path Areas and Activity Spaces in Application: A Review}, 641 | journal = {Transport Reviews}, 642 | volume = {35}, 643 | number = {6}, 644 | pages = {679-700}, 645 | year = {2015}, 646 | publisher = {Routledge}, 647 | doi = {10.1080/01441647.2015.1042944} 648 | } 649 | 650 | @article{2017_mcgrail_attractiveness_of_rural_communities, 651 | author = {Matthew McGrail and Peter M Wingrove and Stephen M Petterson and John Humphreys and Deborah Russell and Andrew W Bazemore}, 652 | title = {Measuring the attractiveness of rural communities in accounting for differences of rural primary care workforce supply}, 653 | journal = {Rural and Remote Health}, 654 | month = {April}, 655 | year = {2017}, 656 | volume = {17}, 657 | number = {3925}, 658 | doi = {10.22605/RRH3925}, 659 | } 660 | 661 | @article{1994_starfield_essential_primary_care, 662 | author = "Barbara Starfield", 663 | title = "Is primary care essential?", 664 | journal = "The Lancet", 665 | volume = "344", 666 | number = "8930", 667 | pages = "1129 - 1133", 668 | year = "1994", 669 | note = "Originally published as Volume 2, Issue 8930", 670 | doi = "10.1016/S0140-6736(94)90634-3", 671 | } 672 | 673 | @article{1999_shi_income_inequality_primary_care, 674 | author = {Leiyu Shi and Barbara Starfield and Bruce Kennedy and Ichiro Kawachi}, 675 | title = {Income inequality, primary care, and health indicators}, 676 | journal = {Journal of Family Practice}, 677 | volume = {48}, 678 | number = {4}, 679 | pages = {275-284}, 680 | url = {https://www.ncbi.nlm.nih.gov/pubmed/10229252}, 681 | year = {1999}, 682 | month = {April} 683 | } 684 | 685 | @article{2002_shi_pc_self-rated_social_disparities, 686 | author = {Shi, Leiyu and Starfield, Barbara and Politzer, Robert and Regan, Jerri}, 687 | title = {Primary Care, Self-rated Health, and Reductions in Social Disparities in Health}, 688 | journal = {Health Services Research}, 689 | volume = {37}, 690 | number = {3}, 691 | pages = {529-550}, 692 | keywords = {Primary-care experience, income inequality, self-rated health}, 693 | doi = 
{10.1111/1475-6773.t01-1-00036}, 694 | year = {2002} 695 | } 696 | 697 | @article{2005_shi_pc_inequality_mortality_urban_rural, 698 | author = "Leiyu Shi and James Macinko and Barbara Starfield and Robert Politzer and John Wulu and Jiahong Xu", 699 | title = "Primary care, social inequalities and all-cause, heart disease and cancer mortality in US counties: a comparison between urban and non-urban areas", 700 | journal = "Public Health", 701 | volume = "119", 702 | number = "8", 703 | pages = "699 - 710", 704 | year = "2005", 705 | issn = "0033-3506", 706 | doi = "10.1016/j.puhe.2004.12.007", 707 | } 708 | 709 | @article{2005_shi_pc_social_ineuqality_mortality, 710 | author = {Shi, Leiyu and Macinko, James and Starfield, Barbara and Politzer, Robert and Wulu, John and Xu, Jiahong}, 711 | title = {Primary Care, Social Inequalities, and All-Cause, Heart Disease, and Cancer Mortality in US Counties, 1990}, 712 | journal = {American Journal of Public Health}, 713 | volume = {95}, 714 | number = {4}, 715 | pages = {674-680}, 716 | year = {2005}, 717 | doi = {10.2105/AJPH.2003.031716}, 718 | } 719 | 720 | @article{2005_starfield_contributions_of_primary_care, 721 | author = {Barbara Starfield and Leiyu Shi and James Macinko}, 722 | title = {Contribution of Primary Care to Health Systems and Health}, 723 | journal = {The Milbank Quarterly}, 724 | volume = {83}, 725 | number = {3}, 726 | year = {2005}, 727 | pages = {457-502}, 728 | keywords = {Primary care, health outcomes, population health}, 729 | doi = {10.1111/j.1468-0009.2005.00409.x}, 730 | } 731 | 732 | @article{2012_shi_pc_focused_review, 733 | author = {Leiyu Shi}, 734 | title = {The Impact of Primary Care: A Focused Review}, 735 | journal = {Scientifica}, 736 | volume = {2012}, 737 | year = {2012}, 738 | doi = {10.6064/2012/432892}, 739 | } 740 | 741 | @article{2013_kringos_primary_care_europe, 742 | author = {Kringos, Dionne S. and Boerma, Wienke and van der Zee, Jouke and Groenewegen, Peter}, 743 | title = {Europe’s Strong Primary Care Systems Are Linked To Better Population Health But Also To Higher Health Spending}, 744 | journal = {Health Affairs}, 745 | volume = {32}, 746 | number = {4}, 747 | pages = {686-694}, 748 | year = {2013}, 749 | doi = {10.1377/hlthaff.2012.1242}, 750 | } 751 | 752 | 753 | @article{2013_yasaitis_supply_demand_response, 754 | author = {Laura C. Yasaitis and Julie P. W. Bynum and Jonathan S. Skinner}, 755 | journal = {Medical Care}, 756 | pages = {524-531}, 757 | title = {Association Between Physician Supply, Local Practice Norms, and Outpatient Visit Rates}, 758 | volume = {51}, 759 | number = {6}, 760 | year = {2013}, 761 | doi = {10.1097/MLR.0b013e3182928f67} 762 | } 763 | -------------------------------------------------------------------------------- /docs/_templates/globaltoc.html: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pysal/access/f1038c460909f015b6e4daf9eeea3976be061092/docs/_templates/globaltoc.html -------------------------------------------------------------------------------- /docs/access_class.rst: -------------------------------------------------------------------------------- 1 | .. _access_class: 2 | 3 | Access Class API 4 | ---------------- 5 | 6 | .. autoclass:: access.Access 7 | :members: weighted_catchment, fca_ratio, two_stage_fca, enhanced_two_stage_fca, three_stage_fca, raam, score, create_euclidean_distance, create_euclidean_distance_neighbors, append_user_cost, append_user_cost_neighbors 8 | 9 | .. 
automethod:: __init__ 10 | 11 | 12 | -------------------------------------------------------------------------------- /docs/access_functions.rst: -------------------------------------------------------------------------------- 1 | .. _afunctions: 2 | 3 | .. currentmodule:: access 4 | 5 | Listing of Internal Access Functions 6 | ==================================== 7 | 8 | .. autosummary:: 9 | :toctree: generated/ 10 | 11 | raam.raam 12 | fca.weighted_catchment 13 | fca.fca_ratio 14 | fca.two_stage_fca 15 | fca.three_stage_fca 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | .. _api_ref: 2 | 3 | API reference 4 | ============= 5 | 6 | If you're just getting started, have a look at :class:`access.Access` or :ref:`access_class` (or the :ref:`tutorials`!) 7 | to see the basic structure of the package and its applications. 8 | 9 | .. currentmodule:: access 10 | 11 | Accessibility Class 12 | ---------------------- 13 | For the full definitions and examples of each method, see the individual functions. 14 | 15 | .. autosummary:: 16 | :toctree: generated/ 17 | 18 | Access 19 | Access.weighted_catchment 20 | Access.fca_ratio 21 | Access.two_stage_fca 22 | Access.enhanced_two_stage_fca 23 | Access.three_stage_fca 24 | Access.raam 25 | Access.score 26 | Access.create_euclidean_distance 27 | Access.create_euclidean_distance_neighbors 28 | Access.append_user_cost 29 | Access.append_user_cost_neighbors 30 | 31 | 32 | Helper Functions 33 | ---------------- 34 | 35 | .. autosummary:: 36 | :toctree: generated/ 37 | 38 | weights.step_fn 39 | weights.gravity 40 | weights.gaussian 41 | 42 | Internal Access Functions 43 | ------------------------- 44 | 45 | The ``Access`` class uses lower-level functions for its internal calculations. 46 | In most cases, we do not expect users to call these directly. 47 | However, users seeking to understand these calculations and their inputs 48 | can still consult the :ref:`afunctions`. -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # access documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Jun 6 15:54:22 2018. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # 19 | import sys, os 20 | import sphinx_bootstrap_theme 21 | 22 | 23 | sys.path.insert(0, os.path.abspath("../../")) 24 | 25 | # import your package to obtain the version info to display on the docs website 26 | import access 27 | 28 | 29 | # -- General configuration ------------------------------------------------ 30 | 31 | # If your documentation needs a minimal Sphinx version, state it here. 32 | # 33 | # needs_sphinx = '2.4.3' 34 | # Add any Sphinx extension module names here, as strings. 
They can be 35 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 36 | # ones. 37 | extensions = [ #'sphinx_gallery.gen_gallery', 38 | "sphinx.ext.autodoc", 39 | "sphinx.ext.autosummary", 40 | "sphinx.ext.viewcode", 41 | "sphinxcontrib.bibtex", 42 | "sphinx.ext.mathjax", 43 | "sphinx.ext.doctest", 44 | "sphinx.ext.intersphinx", 45 | "numpydoc", 46 | "matplotlib.sphinxext.plot_directive", 47 | ] 48 | 49 | # Bib Variables 50 | bibtex_default_style = "plain" 51 | bibtex_reference_style = "author_year" 52 | 53 | bibtex_bibfiles = ["_static/references.bib"] 54 | 55 | 56 | # Add any paths that contain templates here, relative to this directory. 57 | templates_path = ["_templates"] 58 | 59 | # The suffix(es) of source filenames. 60 | # You can specify multiple suffix as a list of string: 61 | # 62 | # source_suffix = ['.rst', '.md'] 63 | source_suffix = ".rst" 64 | 65 | # The master toctree document. 66 | master_doc = "index" 67 | 68 | # General information about the project. 69 | project = "access" # string of your project name, for example, 'giddy' 70 | copyright = "2019, pysal access developers" 71 | author = "pysal access developers" 72 | 73 | # The version info for the project you're documenting, acts as replacement for 74 | # |version| and |release|, also used in various other places throughout the 75 | # built documents. 76 | # 77 | # The full version. 78 | version = access.__version__ # should replace it with your access 79 | release = access.__version__ # should replace it with your access 80 | 81 | # The language for content autogenerated by Sphinx. Refer to documentation 82 | # for a list of supported languages. 83 | # 84 | # This is also used if you do content translation via gettext catalogs. 85 | # Usually you set "language" from the command line for these cases. 86 | language = None 87 | 88 | # List of patterns, relative to source directory, that match files and 89 | # directories to ignore when looking for source files. 90 | # This patterns also effect to html_static_path and html_extra_path 91 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "tests/*"] 92 | 93 | # The name of the Pygments (syntax highlighting) style to use. 94 | pygments_style = "sphinx" 95 | 96 | # If true, `todo` and `todoList` produce output, else they produce nothing. 97 | todo_include_todos = False 98 | 99 | # -- Options for HTML output ---------------------------------------------- 100 | 101 | # The theme to use for HTML and HTML Help pages. See the documentation for 102 | # a list of builtin themes. 103 | # 104 | # html_theme = 'alabaster' 105 | html_theme = "bootstrap" 106 | html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() 107 | html_title = "%s v%s Manual" % (project, version) 108 | 109 | # (Optional) Logo of your package. Should be small enough to fit the navbar (ideally 24x24). 110 | # Path should be relative to the ``_static`` files directory. 111 | # html_logo = "_static/images/package_logo.jpg" 112 | 113 | # (Optional) PySAL favicon 114 | html_favicon = "_static/images/pysal_favicon.ico" 115 | 116 | 117 | # Theme options are theme-specific and customize the look and feel of a theme 118 | # further. For a list of options available for each theme, see the 119 | # documentation. 120 | # 121 | html_theme_options = { 122 | # Navigation bar title. (Default: ``project`` value) 123 | "navbar_title": "access", # string of your project name, for example, 'giddy' 124 | # Render the next and previous page links in navbar. 
(Default: true) 125 | "navbar_sidebarrel": False, 126 | # Render the current page's TOC in the navbar. (Default: true) 127 | #'navbar_pagenav': True, 128 | "navbar_pagenav": False, 129 | # No sidebar 130 | "nosidebar": True, 131 | # Tab name for the current page's TOC. (Default: "Page") 132 | #'navbar_pagenav_name': "Page", 133 | # Global TOC depth for "site" navbar tab. (Default: 1) 134 | # Switching to -1 shows all levels. 135 | "globaltoc_depth": -1, 136 | # Include hidden TOCs in Site navbar? 137 | # 138 | # Note: If this is "false", you cannot have mixed ``:hidden:`` and 139 | # non-hidden ``toctree`` directives in the same page, or else the build 140 | # will break. 141 | # 142 | # Values: "true" (default) or "false" 143 | "globaltoc_includehidden": "true", 144 | # HTML navbar class (Default: "navbar") to attach to <div>
element. 145 | # For black navbar, do "navbar navbar-inverse" 146 | #'navbar_class': "navbar navbar-inverse", 147 | # Fix navigation bar to top of page? 148 | # Values: "true" (default) or "false" 149 | "navbar_fixed_top": "true", 150 | # Location of link to source. 151 | # Options are "nav" (default), "footer" or anything else to exclude. 152 | "source_link_position": "footer", 153 | # Bootswatch (http://bootswatch.com/) theme. 154 | # 155 | # Options are nothing (default) or the name of a valid theme 156 | # such as "amelia" or "cosmo", "yeti", "flatly". 157 | "bootswatch_theme": "yeti", 158 | # Choose Bootstrap version. 159 | # Values: "3" (default) or "2" (in quotes) 160 | "bootstrap_version": "3", 161 | # Navigation bar menu 162 | "navbar_links": [ 163 | ("Installation", "installation"), 164 | ("API", "api"), 165 | ("Tutorials", "tutorials"), 166 | ("Travel Times", "resources"), 167 | ("References", "references"), 168 | ], 169 | } 170 | 171 | # Add any paths that contain custom static files (such as style sheets) here, 172 | # relative to this directory. They are copied after the builtin static files, 173 | # so a file named "default.css" will overwrite the builtin "default.css". 174 | html_static_path = ["_static"] 175 | 176 | # Custom sidebar templates, maps document names to template names. 177 | # html_sidebars = {} 178 | # html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']} 179 | 180 | # -- Options for HTMLHelp output ------------------------------------------ 181 | 182 | # Output file base name for HTML help builder. 183 | htmlhelp_basename = "access" + "doc" 184 | 185 | 186 | # -- Options for LaTeX output --------------------------------------------- 187 | 188 | latex_elements = { 189 | # The paper size ('letterpaper' or 'a4paper'). 190 | # 191 | # 'papersize': 'letterpaper', 192 | # The font size ('10pt', '11pt' or '12pt'). 193 | # 194 | # 'pointsize': '10pt', 195 | # Additional stuff for the LaTeX preamble. 196 | # 197 | # 'preamble': '', 198 | # Latex figure (float) alignment 199 | # 200 | # 'figure_align': 'htbp', 201 | } 202 | 203 | # Grouping the document tree into LaTeX files. List of tuples 204 | # (source start file, target name, title, 205 | # author, documentclass [howto, manual, or own class]). 206 | latex_documents = [ 207 | (master_doc, "access.tex", "access Documentation", "pysal developers", "manual"), 208 | ] 209 | 210 | 211 | # -- Options for manual page output --------------------------------------- 212 | 213 | # One entry per manual page. List of tuples 214 | # (source start file, name, description, authors, manual section). 215 | man_pages = [(master_doc, "access", "access Documentation", [author], 1)] 216 | 217 | 218 | # -- Options for Texinfo output ------------------------------------------- 219 | 220 | # Grouping the document tree into Texinfo files. 
List of tuples 221 | # (source start file, target name, title, author, 222 | # dir menu entry, description, category) 223 | texinfo_documents = [ 224 | ( 225 | master_doc, 226 | "access", 227 | "access Documentation", 228 | author, 229 | "access", 230 | "One line description of project.", 231 | "Miscellaneous", 232 | ), 233 | ] 234 | 235 | 236 | # ----------------------------------------------------------------------------- 237 | # Autosummary 238 | # ----------------------------------------------------------------------------- 239 | 240 | # Generate the API documentation when building 241 | autosummary_generate = True 242 | numpydoc_show_class_members = True 243 | class_members_toctree = True 244 | numpydoc_show_inherited_class_members = True 245 | numpydoc_use_plots = True 246 | 247 | # display the source code for Plot directive 248 | plot_include_source = True 249 | 250 | 251 | def setup(app): 252 | app.add_css_file("pysal-styles.css") 253 | 254 | 255 | # Example configuration for intersphinx: refer to the Python standard library. 256 | intersphinx_mapping = { 257 | "https://docs.python.org/3.6/": None, 258 | "geopandas": ("https://geopandas.readthedocs.io/en/latest/", None), 259 | "libpysal": ("https://pysal.org/libpysal/", None), 260 | "matplotlib": ("https://matplotlib.org/", None), 261 | "numpy": ("https://numpy.org/doc/stable/", None), 262 | "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), 263 | "python": ("https://docs.python.org/3.9/", None), 264 | "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None), 265 | } 266 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. documentation master file 2 | 3 | ======================== 4 | Spatial Access for PySAL 5 | ======================== 6 | 7 | | 8 | 9 | Whether you work with data in health, retail, employment or other domains, spatial accessibility measures help identify potential spatial mismatches between the supply and demand of services. They indicate how close demand locations are to supply locations. 10 | 11 | 12 | .. image:: _static/images/compare_access.png 13 | :target: https://nbviewer.jupyter.org/github/pysal/access/blob/master/notebooks/Generating%20and%20Plotting%20a%20Variety%20of%20Access%20Scores.ipynb#Ready-to-roll... 14 | :width: 250px 15 | 16 | .. image:: _static/images/euclidean_distance.png 17 | :target: https://nbviewer.jupyter.org/github/pysal/access/blob/master/notebooks/Generating%20and%20Plotting%20a%20Variety%20of%20Access%20Scores.ipynb#Add-an-Additional-Distance-Measure---Euclidean-Distance 18 | :width: 250px 19 | 20 | .. 
image:: _static/images/gravity_model.png 21 | :target: https://nbviewer.jupyter.org/github/pysal/access/blob/master/notebooks/How%20to%20Use%20access%20to%20Compute%20Access%20Scores%20to%20Resources%20Given%20XY%20Coordinates%20Joined%20to%20Census%20Tracts.ipynb#Gravity-Model 22 | :width: 250px 23 | 24 | 25 | 26 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 27 | Motivation 28 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 29 | 30 | We built this package for several reasons: 31 | 32 | - to make the new spatial access metric (RAAM) available, 33 | - to allow for easy comparison between RAAM and classic spatial access models, 34 | - to support spatial access research at scale by making pre-computed travel time matrices available and sharing code for computing new matrices at scale, and 35 | - to allow users who prefer a point-and-click interface to obtain spatial access results for their data using our web app (for the US). 36 | 37 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 38 | Methods 39 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 40 | 41 | This PySAL package implements our new measure that simultaneously accounts for travel time and congestion at the destination: 42 | 43 | - `Rational Agent Access Model (RAAM) `_ (Saxon and Snow 2019, :cite:`2019_saxon_snow_raam`). 44 | 45 | Here is an example of the results of the RAAM model from this article. It shows how spatially accessible each Census tract is to primary care, compared to the national average. Darker blue areas have better spatial access (below-average travel costs) while darker red areas have worse spatial access (above-average travel costs). 46 | 47 | .. image:: _static/images/full_us.jpg 48 | :width: 100% 49 | 50 | In addition, the package calculates five classic spatial access models within the same access class as RAAM for easy comparison between models. The methods implement the original published versions but also allow for additional customization (e.g., in the choice of distance weights). 51 | 52 | - `Floating Catchment Areas `_ (FCA): For each provider, this is the ratio of providers to clients within a given travel time to the provider (Huff 1963, :cite:`1963_huff_shopping_trade_areas`, Joseph and Bantock 1982, :cite:`1982_joseph_potential_physical_accessibility_rural` and Luo 2004, :cite:`2004_luo_gis_floating_catchment`). 53 | 54 | - `Two-Step FCAs `_ (2SFCA): Calculated in two steps for a given travel time to the provider: 1) for each provider, the provider-to-client ratio is generated, 2) for each point of origin, these ratios are then summed (Luo and Wang, 2002, :cite:`2002_luo_spatial_accessibility_chicago` and Wang and Luo 2005, :cite:`2004_wang_luo_HPSAs`). 55 | 56 | - `Enhanced 2SFCA `_ (E2SFCA): 2SFCA but with less weight to providers that are still within the travel threshold but at larger distances from the point of origin (Luo and Qi 2009, :cite:`2009_luo_qi_E2SFCA`). 57 | 58 | - `Three-Step FCA `_ (3SFCA): Adds a distance-based allocation function to E2SFCA (Wan, Zou, and Sternberg, 2012, :cite:`2012_wan_3SFCA`). 59 | 60 | - `Access Score `_: This is a weighted sum of access components like distance to provider and relative importance of provider type (Isard 1960, :cite:`1960_isard_reganalysis`). 61 | 62 | These classic models were also recently implemented in the Python package `aceso `_. 63 | 64 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 65 | Architecture 66 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 67 | 68 | Figure 1 shows the workflow that our PySAL package uses to calculate these models, including data inputs, creation of the cost (travel time) matrix, and data output: 69 | 70 | .. 
image:: _static/images/fig1.png 71 | :width: 100% 72 | 73 | As shown, all measures depend on travel times or distances between origins and destinations. This is the most computationally expensive part of calculating spatial access measures. The `Cost Matrix `_ section outlines how these travel times can be computed or how you can access our pre-computed matrices for the US. 74 | 75 | Figure 2 summarizes the same workflow from data input to data output; again, the most time-consuming and computationally intensive step is the calculation of the travel time (cost) matrix. 76 | 77 | .. image:: _static/images/thumbnail_workflow.png 78 | :width: 100% 79 | 80 | | 81 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 82 | Authors 83 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 84 | 85 | - Spatial access package: `James Saxon, PhD `_ 86 | - Research Assistants: `Yair Atlas `_ (`CDAC Internship `_), `Bryan Wang `_ and `Vidal Anguiano Jr., MSCAPP `_ 87 | - Pre-Computed Cost Matrices: `Dan Snow, MPP `_ 88 | - Concept and Documentation: `Julia Koschinsky, PhD `_, `Karina Acosta Ordonez `_, and `James Saxon, PhD `_ 89 | | 90 | 91 | Citation: `Saxon, James, Julia Koschinsky, Karina Acosta, Vidal Anguiano, Luc Anselin, and Sergio Rey. (2020). An Open Software Environment to Make Spatial Access Metrics More Accessible. University of Chicago: Center for Spatial Data Science. Preprint doi:10.13140/RG.2.2.12396.28807 `_ 92 | 93 | .. toctree:: 94 | :hidden: 95 | :maxdepth: 3 96 | :caption: Contents: 97 | 98 | Installation 99 | API 100 | Tutorials 101 | Internal Access Functions 102 | Travel Times 103 | References 104 | 105 | 106 | 107 | .. _PySAL: https://github.com/pysal/pysal -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | .. Installation 2 | 3 | Installation 4 | ============ 5 | 6 | Easy! You can get the package from either pip or conda-forge: 7 | 8 | .. code-block:: bash 9 | 10 | pip install access 11 | | 12 | 13 | .. code-block:: bash 14 | 15 | conda install -c conda-forge access 16 | | 17 | 18 | The only dependencies are pandas and numpy. 19 | Geopandas will allow convenience functions for Euclidean distances, but is not required for the core methods. 20 | 21 | Note that the library is Python 3 only. 22 | 23 | You can also clone the master branch from GitHub: 24 | 25 | .. code-block:: bash 26 | 27 | git clone git@github.com:pysal/access.git 28 | cd access/ 29 | pip install . 30 | | 31 | 32 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=python -msphinx 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=access 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The Sphinx module was not found. Make sure you have Sphinx installed, 20 | echo.then set the SPHINXBUILD environment variable to point to the full 21 | echo.path of the 'sphinx-build' executable. Alternatively you may add the 22 | echo.Sphinx directory to PATH. 23 | echo. 
24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/references.rst: -------------------------------------------------------------------------------- 1 | .. reference for the docs 2 | 3 | References 4 | ========== 5 | 6 | .. bibliography:: _static/references.bib 7 | :cited: 8 | -------------------------------------------------------------------------------- /docs/resources.rst: -------------------------------------------------------------------------------- 1 | .. resources 2 | 3 | ==================================== 4 | Resources for Computing Travel Cost 5 | ==================================== 6 | 7 | | 8 | 9 | The spatial access measures depend on travel times or distances between origins and destinations. If you only need distances between origins and destinations, the package will calculate Euclidean distances for your projected data. If you need travel times for a specific travel mode, you need to generate these so-called travel time (or travel cost) matrices from other sources. 10 | 11 | Explore and Download Pre-Computed Travel Times 12 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 13 | | 14 | Since computing travel times is computationally expensive and non-trivial at scale, we've pre-computed times between common Census geographies for several travel modes. These times cover the entire United States and the most recent Census years (2020+). They are available via `OpenTimes <https://opentimes.org/>`_, a dedicated website created by Dan Snow (UChicago MPP'19). For more information on how these times are calculated, visit the `OpenTimes GitHub <https://github.com/dfsnow/opentimes>`_. 15 | 16 | Compute Your Own Travel Times 17 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 18 | | 19 | If you need to compute customized cost matrices, there are several options. This table lists some of them: 20 | 21 | .. raw:: html 22 | 
   <table class="tg">
     <tr>
       <th class="tg-1wig">Name</th>
       <th class="tg-1wig">Installation</th>
       <th class="tg-1wig">Notes</th>
     </tr>
     <tr>
       <td class="tg-0lax">pgRouting</td>
       <td class="tg-0lax">docker</td>
       <td class="tg-0lax">Good for driving, open-source and free, PostgreSQL/postgis and OpenStreetMap (OSM)</td>
     </tr>
     <tr>
       <td class="tg-0lax">OSRM</td>
       <td class="tg-0lax">install / R / docker</td>
       <td class="tg-0lax">Best for driving, OSM, open-source and free, customized travel parameters, C++</td>
     </tr>
     <tr>
       <td class="tg-0lax">Open Trip Planner</td>
       <td class="tg-0lax">docker routing / resources / DockerHub</td>
       <td class="tg-0lax">Best for transit, open-source and free, customized travel parameters, Java</td>
     </tr>
     <tr>
       <td class="tg-0lax">Valhalla</td>
       <td class="tg-0lax">install</td>
       <td class="tg-0lax">Multi-modal, OSM, open-source, for fee at scale, Python</td>
     </tr>
     <tr>
       <td class="tg-0lax">Pandana</td>
       <td class="tg-0lax">install</td>
       <td class="tg-0lax">Good for driving and walking, OSM, open-source and free, part of UrbanSim, Python</td>
     </tr>
     <tr>
       <td class="tg-0lax">Graphhopper</td>
       <td class="tg-0lax">install</td>
       <td class="tg-0lax">Multi-modal, OSM, open-source, for fee at scale, Python</td>
     </tr>
     <tr>
       <td class="tg-0lax">Spatial Access Package</td>
       <td class="tg-0lax">install / notebooks</td>
       <td class="tg-0lax">Best for walking, OSM, scales well, open-source and free, includes spatial access metrics, Python</td>
     </tr>
     <tr>
       <td class="tg-0lax">e.g. dodgr, R5 and gtfs-router</td>
       <td class="tg-0lax">dodgr, R5, gtfs-router</td>
       <td class="tg-0lax">Selected R packages</td>
     </tr>
     <tr>
       <td class="tg-0lax">Google Maps</td>
       <td class="tg-0lax">install</td>
       <td class="tg-0lax">Accurate multi-modal, customized travel parameters, commercial, expensive at scale</td>
     </tr>
   </table>
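
Whatever tool you use, the package consumes the result as a long-format cost table (one row per origin-destination pair). The following is a minimal sketch, not the package's canonical example: the file names and the ``geoid``/``pop``/``docs`` columns are placeholders, while the keyword arguments follow the :ref:`api_ref` documentation.

.. code-block:: python

    import pandas as pd
    from access import Access

    # Long-format travel costs, e.g. produced by one of the tools above
    # (placeholder file and column names).
    costs = pd.read_csv("cook_county_cost_matrix.csv")  # origin, destination, minutes

    # Hypothetical demand (population) and supply (provider) tables, indexed
    # by the same geographic identifiers used in the cost matrix.
    demand = pd.read_csv("tract_population.csv")   # geoid, pop
    supply = pd.read_csv("provider_counts.csv")    # geoid, docs

    A = Access(
        demand_df=demand, demand_index="geoid", demand_value="pop",
        supply_df=supply, supply_index="geoid", supply_value="docs",
        cost_df=costs, cost_origin="origin", cost_dest="destination",
        cost_name="minutes",
    )

    # A two-stage floating catchment area score with a 30-minute catchment.
    A.two_stage_fca(name="2sfca", cost="minutes", max_cost=30)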
96 | | 97 | 98 | 99 | -------------------------------------------------------------------------------- /docs/tutorials.rst: -------------------------------------------------------------------------------- 1 | .. _tutorials: 2 | 3 | ========= 4 | Tutorials 5 | ========= 6 | - `PySAL Package Documentation: Explanation and Comparison of Spatial Access Methods (PDF) `_ 7 | - How to Get Results for Your Own Data with the LiveApp `(PDF) `_ and `(R code) `_ 8 | - `Generating and Contrasting Access Measures (Notebook) `_ 9 | - `Package Overview (15-minute conference presentation J. Koschinsky) `_ 10 | 11 | 12 | Data Preparation 13 | ---------------- 14 | - `How to Subset Travel Cost Matrices (Notebook) `_ 15 | - `How to Read, Filter, and Convert Shapefiles to .geojson (Notebook) `_ 16 | 17 | Implementing access 18 | ------------------- 19 | - `How to Use access to Compute Access Scores to Resources Given XY Coordinates (Notebook) `_ 20 | - `How to Use access to Compute Access Scores to Resources Given XY Coordinates Snapped to Census Tract Centroids (Notebook) `_ 21 | - `Building Gravity Models, Producing Composite Access Scores, and Producing Euclidean Distance Cost Matrices (Notebook) `_ -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: access 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python>=3.7 6 | - geopandas 7 | - numpy>=1.3 8 | - pandas>=0.23.4 9 | - scipy>=0.11 10 | -------------------------------------------------------------------------------- /notebooks/How to Subset the Travel Cost Matricies.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "\n", 11 | "import requests\n", 12 | "import pandas as pd\n", 13 | "import dask.dataframe as dd" 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "# How to Subset the Pre-Computed Travel Cost Matrices\n", 21 | "To calculate spatial access measures, data on travel times or distances between origins and destinations must be used. If you only need distances between origins and destinations, the `access` package will calculate Euclidean distances for your projected data. If you need travel times for a specific travel mode (walking, public transit, or driving), you need to generate these so-called travel time (or travel cost) matrices from other sources. We provide pre-calculated travel cost matrices between blocks and tracts [here](https://access.readthedocs.io/en/latest/resources.html) for the 20 largest cities in the US and for the entire country.\n", 22 | "\n", 23 | "Each dataset is a point-to-point distance matrix generated by the Center for Spatial Data Science. The matrices come as bzipped CSVs. Cities are listed according to their containing county code. The origins for each distance matrix are the population-weighted centroids that lie within each county, and the destinations are any population-weighted centroids that lie within 100 km of the buffered county.\n", 24 | "\n", 25 | "In this notebook, we show how to download the national travel cost matrix and subset it for your area of interest. In the example below, we will subset the matrix to include only the tracts in Cook County, IL. 
By the end of this notebook, you should be able to:\n", 26 | "- [Download the travel matrix](#Downloading-the-National-Travel-Matrix)\n", 27 | "- [Define your area of interest using the Census GEOID](#Defining-Area-of-Interest-with-Census-GEOID)\n", 28 | "- [Use dask to subset the travel cost matrix to your area of interest](#Use-Dask-to-Subset-the-Travel-Cost-Matrix)\n", 29 | "- [Use the terminal and basic bash commands to subset the travel cost matrix to your area of interest](#Use-Bash-Commands-to-Subset-the-Travel-Cost-Matrix)\n" 30 | ] 31 | }, 32 | { 33 | "cell_type": "markdown", 34 | "metadata": {}, 35 | "source": [ 36 | "## Downloading the National Travel Matrix\n", 37 | "You can find the national driving travel cost matrix on the [`access` Read the Docs page](https://access.readthedocs.io/en/latest/resources.html). You can download the dataset either by clicking the link on that page, or as shown in the code which follows.\n", 38 | "\n" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "! wget https://uchicago.box.com/shared/static/prapz7ac7vwuz44nnab3dhe10vbg55cz.bz2 -O ./national_cost_matrix.csv.bz2" 48 | ] 49 | }, 50 | { 51 | "cell_type": "markdown", 52 | "metadata": {}, 53 | "source": [ 54 | "You should now see the file in the current directory.\n", 55 | "\n" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": null, 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "os.listdir(\"./\")" 65 | ] 66 | }, 67 | { 68 | "cell_type": "markdown", 69 | "metadata": {}, 70 | "source": [ 71 | "## Defining Area of Interest with Census GEOID\n", 72 | "You can find more information on the Census GEOID [here](https://www.census.gov/programs-surveys/geography/guidance/geo-identifiers.html). For our example, we will filter the dataset for Cook County, IL. The State ID for Illinois is `17` and the County ID for Cook County is `031`. Together, we want to filter our dataset for rows that have an origin and destination that start with `17031`. " 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "metadata": {}, 78 | "source": [ 79 | "## Use Dask to Subset the Travel Cost Matrix\n", 80 | "Since the uncompressed csv is too large to fit in memory on computers with less than 8GB of RAM, we cannot load the data in its entirety into memory using `pandas`. Even if you can load it into memory, using `pandas` may take too long. Instead, we will show you how to use [`dask`](https://docs.dask.org/en/latest/delayed.html) to subset the travel cost matrix. First, we must uncompress the file and extract the csv. Note: you must install the command line package `bzip2` if it's not already installed." 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": null, 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "! bzip2 -dk national_cost_matrix.csv.bz2" 90 | ] 91 | }, 92 | { 93 | "cell_type": "markdown", 94 | "metadata": {}, 95 | "source": [ 96 | "Note that this is a big file, uncompressed -- more than 4GB. You may want to delete it when you're done.\n", 97 | "\n", 98 | "Next, we'll read in the csv dataset using dask dataframe's `.read_csv()` method. Note: nothing has happened yet by running this command. Dask uses lazy evaluation, so nothing will compute until you use the `.compute()` method.\n",
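"\n",
"(If you would rather stay in `pandas`, a chunked read is a rough equivalent of the dask approach below -- a minimal sketch, assuming the same `origin`/`destination` column layout:)\n",
"\n",
"```python\n",
"import pandas as pd\n",
"\n",
"pieces = []\n",
"# Stream the big csv in 1-million-row chunks so it never sits in memory all at once.\n",
"for chunk in pd.read_csv(\"./national_cost_matrix.csv\", chunksize=1_000_000, dtype=str):\n",
"    keep = chunk[\"origin\"].str.startswith(\"17031\") & chunk[\"destination\"].str.startswith(\"17031\")\n",
"    pieces.append(chunk[keep])\n",
"\n",
"cook_cost_matrix = pd.concat(pieces)\n",
"```"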
99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": null, 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "national_cost_matrix = dd.read_csv(\"./national_cost_matrix.csv\")\n", 108 | "\n", 109 | "# Filter out faulty rows which exist in the data - will be cleaned and replaced soon.\n", 110 | "national_cost_matrix = national_cost_matrix[national_cost_matrix[\"origin\"] != \"origin\"]" 111 | ] 112 | }, 113 | { 114 | "cell_type": "markdown", 115 | "metadata": {}, 116 | "source": [ 117 | "We will now convert the origin and destination columns to type `str` and use the newly converted string column to filter for `17031`." 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": null, 123 | "metadata": {}, 124 | "outputs": [], 125 | "source": [ 126 | "national_cost_matrix[\"origin\"] = national_cost_matrix.origin.astype(str)\n", 127 | "national_cost_matrix[\"destination\"] = national_cost_matrix.destination.astype(str)\n", 128 | "\n", 129 | "cook_county_fips = \"17031\"\n", 130 | "\n", 131 | "cook_cost_matrix = national_cost_matrix[\n", 132 | " national_cost_matrix[\"origin\"].str.startswith(cook_county_fips)\n", 133 | " & national_cost_matrix[\"destination\"].str.startswith(cook_county_fips)\n", 134 | "]" 135 | ] 136 | }, 137 | { 138 | "cell_type": "markdown", 139 | "metadata": {}, 140 | "source": [ 141 | "With our delayed transformations setup, now we can execute our data transformations and have `dask` complete those transformations in parallel with the `.compute()` method. **Warning: you might need at least 8GB of memory to successfully execute this process. If you have 8GB of memory and it fails, try closing unused programs and try again.**" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "cook_cost_matrix = cook_cost_matrix.compute()" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": null, 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [ 159 | "cook_cost_matrix.head()" 160 | ] 161 | }, 162 | { 163 | "cell_type": "markdown", 164 | "metadata": {}, 165 | "source": [ 166 | "Make sure to save a copy of the subsetted data!" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "metadata": {}, 173 | "outputs": [], 174 | "source": [ 175 | "cook_cost_matrix.to_csv(\"cook_county_cost_matrix.csv\", index=False)" 176 | ] 177 | }, 178 | { 179 | "cell_type": "markdown", 180 | "metadata": {}, 181 | "source": [ 182 | "## Use Bash Commands to Subset the Travel Cost Matrix\n", 183 | "You can also use a bash script to subset the national Travel Cost matrix provided you have two files, each containing the origin and destination geoids you're interested in capturing.\n", 184 | "\n", 185 | "Here's an example where we use two files containing a small list of origin and destination locations, split into separate `origin.csv` and `destinations.csv` files." 
186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "execution_count": 2, 191 | "metadata": {}, 192 | "outputs": [], 193 | "source": [ 194 | "orig_dest = pd.DataFrame(\n", 195 | " {\n", 196 | " \"origin\": [\"17031010100\", \"17031010101\", \"17031010102\"],\n", 197 | " \"destination\": [\"17031820605\", \"17031292400\", \"17031827100\"],\n", 198 | " }\n", 199 | ")" 200 | ] 201 | }, 202 | { 203 | "cell_type": "code", 204 | "execution_count": 3, 205 | "metadata": {}, 206 | "outputs": [], 207 | "source": [ 208 | "orig_dest[\"origin\"].to_csv(\"./origins.csv\", index=False, header=False)" 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": 4, 214 | "metadata": {}, 215 | "outputs": [ 216 | { 217 | "data": { 218 | "text/html": [ 219 | "
\n", 220 | "\n", 233 | "\n", 234 | " \n", 235 | " \n", 236 | " \n", 237 | " \n", 238 | " \n", 239 | " \n", 240 | " \n", 241 | " \n", 242 | " \n", 243 | " \n", 244 | " \n", 245 | " \n", 246 | " \n", 247 | " \n", 248 | " \n", 249 | " \n", 250 | " \n", 251 | " \n", 252 | " \n", 253 | " \n", 254 | "
0
017031010100
117031010101
217031010102
\n", 255 | "
" 256 | ], 257 | "text/plain": [ 258 | " 0\n", 259 | "0 17031010100\n", 260 | "1 17031010101\n", 261 | "2 17031010102" 262 | ] 263 | }, 264 | "execution_count": 4, 265 | "metadata": {}, 266 | "output_type": "execute_result" 267 | } 268 | ], 269 | "source": [ 270 | "pd.read_csv(\"./origins.csv\", header=None).head()" 271 | ] 272 | }, 273 | { 274 | "cell_type": "code", 275 | "execution_count": 5, 276 | "metadata": {}, 277 | "outputs": [], 278 | "source": [ 279 | "orig_dest[\"destination\"].to_csv(\"./destinations.csv\", index=False, header=False)" 280 | ] 281 | }, 282 | { 283 | "cell_type": "code", 284 | "execution_count": 6, 285 | "metadata": {}, 286 | "outputs": [ 287 | { 288 | "data": { 289 | "text/html": [ 290 | "
\n", 291 | "\n", 304 | "\n", 305 | " \n", 306 | " \n", 307 | " \n", 308 | " \n", 309 | " \n", 310 | " \n", 311 | " \n", 312 | " \n", 313 | " \n", 314 | " \n", 315 | " \n", 316 | " \n", 317 | " \n", 318 | " \n", 319 | " \n", 320 | " \n", 321 | " \n", 322 | " \n", 323 | " \n", 324 | " \n", 325 | "
0
017031820605
117031292400
217031827100
\n", 326 | "
" 327 | ], 328 | "text/plain": [ 329 | " 0\n", 330 | "0 17031820605\n", 331 | "1 17031292400\n", 332 | "2 17031827100" 333 | ] 334 | }, 335 | "execution_count": 6, 336 | "metadata": {}, 337 | "output_type": "execute_result" 338 | } 339 | ], 340 | "source": [ 341 | "pd.read_csv(\"./destinations.csv\", header=None).head()" 342 | ] 343 | }, 344 | { 345 | "cell_type": "markdown", 346 | "metadata": {}, 347 | "source": [ 348 | "In addition to the two files above, you'll also need the `r.awk` script (included in this directory) and the unzipped national Travel Cost matrix.\n", 349 | "\n", 350 | "Below is the simple `r.awk` script. What this does, line by line, is:\n", 351 | "\n", 352 | "0. Keep track of the file number, by checking if it's the first line of a file.\n", 353 | "1. For file number 1, add the first field (we've done `-F','`, so this is comma separated) to a list of origins.\n", 354 | "2. For file number 2, add the first field to a list of destinations.\n", 355 | "3. For other files (i.e. number 3), check to see if field one is an origin and field two is a destination. If so, it will print." 356 | ] 357 | }, 358 | { 359 | "cell_type": "code", 360 | "execution_count": 7, 361 | "metadata": {}, 362 | "outputs": [ 363 | { 364 | "name": "stdout", 365 | "output_type": "stream", 366 | "text": [ 367 | "\r\n", 368 | "FNR == 1 { file+=1 }\r\n", 369 | "\r\n", 370 | "file == 1 { ORIGINS[$1]=1 ; next } \r\n", 371 | "\r\n", 372 | "file == 2 { DESTINATIONS[$1]=1 ; next } \r\n", 373 | "\r\n", 374 | "($1 in ORIGINS) && ($2 in DESTINATIONS)\r\n", 375 | "\r\n" 376 | ] 377 | } 378 | ], 379 | "source": [ 380 | "!cat r.awk" 381 | ] 382 | }, 383 | { 384 | "cell_type": "markdown", 385 | "metadata": {}, 386 | "source": [ 387 | "The next cell shows, how to use the script to create your desired subset. The command follows the structure:\n", 388 | "\n", 389 | "`awk -F',' -f ./r.awk [origins_file] [destinations_file] [travel_cost_matrix] > [subset_output]`" 390 | ] 391 | }, 392 | { 393 | "cell_type": "code", 394 | "execution_count": 8, 395 | "metadata": {}, 396 | "outputs": [], 397 | "source": [ 398 | "!awk -F',' -f ./r.awk ./origins.csv ./destinations.csv ./national_cost_matrix.csv > subset_cost_matrix.csv" 399 | ] 400 | }, 401 | { 402 | "cell_type": "markdown", 403 | "metadata": {}, 404 | "source": [ 405 | "Now we can read the newly created travel matrix subset, making sure to include column names for the dataset." 406 | ] 407 | }, 408 | { 409 | "cell_type": "code", 410 | "execution_count": 9, 411 | "metadata": { 412 | "scrolled": true 413 | }, 414 | "outputs": [], 415 | "source": [ 416 | "subset_matrix = pd.read_csv(\n", 417 | " \"./subset_cost_matrix.csv\", names=[\"origin\", \"destination\", \"minutes\"]\n", 418 | ")" 419 | ] 420 | }, 421 | { 422 | "cell_type": "code", 423 | "execution_count": 10, 424 | "metadata": {}, 425 | "outputs": [ 426 | { 427 | "data": { 428 | "text/html": [ 429 | "
\n", 430 | "\n", 443 | "\n", 444 | " \n", 445 | " \n", 446 | " \n", 447 | " \n", 448 | " \n", 449 | " \n", 450 | " \n", 451 | " \n", 452 | " \n", 453 | " \n", 454 | " \n", 455 | " \n", 456 | " \n", 457 | " \n", 458 | " \n", 459 | " \n", 460 | " \n", 461 | " \n", 462 | " \n", 463 | " \n", 464 | " \n", 465 | " \n", 466 | " \n", 467 | " \n", 468 | " \n", 469 | " \n", 470 | " \n", 471 | " \n", 472 | "
origindestinationminutes
0170310101001703129240046.88
1170310101001703182060559.28
2170310101001703182710062.75
\n", 473 | "
" 474 | ], 475 | "text/plain": [ 476 | " origin destination minutes\n", 477 | "0 17031010100 17031292400 46.88\n", 478 | "1 17031010100 17031820605 59.28\n", 479 | "2 17031010100 17031827100 62.75" 480 | ] 481 | }, 482 | "execution_count": 10, 483 | "metadata": {}, 484 | "output_type": "execute_result" 485 | } 486 | ], 487 | "source": [ 488 | "subset_matrix" 489 | ] 490 | }, 491 | { 492 | "cell_type": "code", 493 | "execution_count": null, 494 | "metadata": {}, 495 | "outputs": [], 496 | "source": [] 497 | } 498 | ], 499 | "metadata": { 500 | "kernelspec": { 501 | "display_name": "Python 3", 502 | "language": "python", 503 | "name": "python3" 504 | }, 505 | "language_info": { 506 | "codemirror_mode": { 507 | "name": "ipython", 508 | "version": 3 509 | }, 510 | "file_extension": ".py", 511 | "mimetype": "text/x-python", 512 | "name": "python", 513 | "nbconvert_exporter": "python", 514 | "pygments_lexer": "ipython3", 515 | "version": "3.7.3" 516 | } 517 | }, 518 | "nbformat": 4, 519 | "nbformat_minor": 2 520 | } 521 | -------------------------------------------------------------------------------- /notebooks/r.awk: -------------------------------------------------------------------------------- 1 | 2 | FNR == 1 { file+=1 } 3 | 4 | file == 1 { ORIGINS[$1]=1 ; next } 5 | 6 | file == 2 { DESTINATIONS[$1]=1 ; next } 7 | 8 | ($1 in ORIGINS) && ($2 in DESTINATIONS) 9 | 10 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0", "setuptools_scm[toml]>=6.2"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [tool.setuptools_scm] 6 | 7 | [project] 8 | name = "access" 9 | dynamic = ["version"] 10 | authors = [ 11 | { name = "James Saxon" }, 12 | ] 13 | maintainers = [ 14 | { name = "Julia Koschinsky", email = "jkoschinsky@uchicago.edu" }, 15 | { name = "James D. Gaboardi", email = "jgaboardi@gmail.com" }, 16 | { name = "PySAL Developers" }, 17 | { name = "access contributors" }, 18 | ] 19 | 20 | license = { text = "BSD 3-Clause" } 21 | description = "Calculate spatial accessibility metrics." 
22 | keywords = [ 23 | "spatial", "statistics", "access" 24 | ] 25 | readme = "README.md" 26 | classifiers = [ 27 | "Development Status :: 5 - Production/Stable", 28 | "Intended Audience :: Science/Research", 29 | "Intended Audience :: Developers", 30 | "Intended Audience :: Education", 31 | "License :: OSI Approved :: BSD License", 32 | "Operating System :: OS Independent", 33 | "Programming Language :: Python :: 3", 34 | "Topic :: Scientific/Engineering :: GIS", 35 | ] 36 | requires-python = ">=3.8" 37 | dependencies = [ 38 | "geopandas>=1.0.1", 39 | "numpy>=1.3", 40 | "pandas>=0.23.4", 41 | "scipy>=1.14.1", 42 | ] 43 | 44 | [project.urls] 45 | Home = "https://pysal.org/access/" 46 | Repository = "https://github.com/pysal/access" 47 | 48 | [project.optional-dependencies] 49 | tests = [ 50 | "codecov", 51 | "coverage", 52 | "pre-commit", 53 | "pytest", 54 | "pytest-cov", 55 | "pytest-xdist", 56 | ] 57 | docs = [ 58 | "nbsphinx", 59 | "numpydoc", 60 | "quilt3", 61 | "sphinx", 62 | "sphinxcontrib-napoleon", 63 | "sphinx-gallery", 64 | "sphinxcontrib-bibtex", 65 | "sphinx_bootstrap_theme", 66 | ] 67 | notebooks = [ 68 | "dask", 69 | "matplotlib", 70 | "requests", 71 | ] 72 | all = ["access[tests,docs,notebooks]"] 73 | 74 | 75 | [tool.setuptools.packages.find] 76 | include = ["access", "access.*"] 77 | 78 | 79 | [tool.coverage.run] 80 | source = ["./access"] 81 | 82 | [tool.coverage.report] 83 | exclude_lines = [ 84 | "if self.debug:", 85 | "pragma: no cover", 86 | "raise NotImplementedError", 87 | "except ModuleNotFoundError:", 88 | "except ImportError", 89 | ] 90 | ignore_errors = true 91 | omit = ["access/tests/*"] 92 | -------------------------------------------------------------------------------- /tools/gitcount.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## PySAL Change Log Statistics\n", 8 | "\n", 9 | "This notebook generates the summary statistics for a package. \n", 10 | "\n", 11 | "It assumes you are running it from the `tools` directory at the top level of the package.\n", 12 | "\n" 13 | ] 14 | }, 15 | { 16 | "cell_type": "markdown", 17 | "metadata": {}, 18 | "source": [ 19 | "## Change the values only in the next cell" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "metadata": { 26 | "ExecuteTime": { 27 | "end_time": "2022-05-17T23:16:37.950061Z", 28 | "start_time": "2022-05-17T23:16:37.945680Z" 29 | } 30 | }, 31 | "outputs": [], 32 | "source": [ 33 | "package_name = \"access\"\n", 34 | "release_date = \"2022-05-17\"\n", 35 | "start_date = \"2021-01-31\"" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "metadata": {}, 41 | "source": [ 42 | "This notebook will generate a file in the current directory with the name \"changelog_VERSION.md\". You can edit this file and prepend it to the front of the CHANGELOG file for the package release.\n",
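"\n", "For example -- a minimal sketch (the file names here are assumptions; substitute the version string the notebook actually generates and your changelog's real location) of prepending the new notes:\n", "\n", "```python\n", "from pathlib import Path\n", "\n", "new_notes = Path(\"changelog_X.Y.Z.md\").read_text()  # file written by this notebook\n", "changelog = Path(\"../CHANGELOG.md\")  # assumed location of the package changelog\n", "changelog.write_text(new_notes + \"\\n\" + changelog.read_text())\n", "```"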
43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "metadata": { 49 | "ExecuteTime": { 50 | "end_time": "2022-05-17T23:16:45.956074Z", 51 | "start_time": "2022-05-17T23:16:39.013043Z" 52 | } 53 | }, 54 | "outputs": [], 55 | "source": [ 56 | "from __future__ import print_function\n", 57 | "import os\n", 58 | "import json\n", 59 | "import re\n", 60 | "import sys\n", 61 | "import pandas\n", 62 | "\n", 63 | "from datetime import datetime, timedelta\n", 64 | "from time import sleep\n", 65 | "from subprocess import check_output\n", 66 | "\n", 67 | "try:\n", 68 | " from urllib import urlopen\n", 69 | "except ImportError:\n", 70 | " from urllib.request import urlopen\n", 71 | "\n", 72 | "import ssl\n", 73 | "import yaml\n", 74 | "\n", 75 | "context = ssl._create_unverified_context()" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "metadata": { 82 | "ExecuteTime": { 83 | "end_time": "2022-05-17T23:16:47.446444Z", 84 | "start_time": "2022-05-17T23:16:47.442015Z" 85 | } 86 | }, 87 | "outputs": [], 88 | "source": [ 89 | "CWD = os.path.abspath(os.path.curdir)" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": { 96 | "ExecuteTime": { 97 | "end_time": "2022-05-17T23:16:47.874890Z", 98 | "start_time": "2022-05-17T23:16:47.861946Z" 99 | } 100 | }, 101 | "outputs": [], 102 | "source": [ 103 | "CWD" 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "metadata": { 110 | "ExecuteTime": { 111 | "end_time": "2022-05-17T23:16:49.205118Z", 112 | "start_time": "2022-05-17T23:16:49.197387Z" 113 | } 114 | }, 115 | "outputs": [], 116 | "source": [ 117 | "since_date = '--since=\"{start}\"'.format(start=start_date)\n", 118 | "since_date\n", 119 | "since = datetime.strptime(start_date + \" 0:0:0\", \"%Y-%m-%d %H:%M:%S\")\n", 120 | "since" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": null, 126 | "metadata": { 127 | "ExecuteTime": { 128 | "end_time": "2022-05-17T23:16:50.157499Z", 129 | "start_time": "2022-05-17T23:16:50.151692Z" 130 | } 131 | }, 132 | "outputs": [], 133 | "source": [ 134 | "# get __version__\n", 135 | "%run ../access/_version.py" 136 | ] 137 | }, 138 | { 139 | "cell_type": "code", 140 | "execution_count": null, 141 | "metadata": {}, 142 | "outputs": [], 143 | "source": [ 144 | "__version__ = get_versions()[\"version\"]" 145 | ] 146 | }, 147 | { 148 | "cell_type": "markdown", 149 | "metadata": {}, 150 | "source": [ 151 | "## Total commits by subpackage" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": null, 157 | "metadata": { 158 | "ExecuteTime": { 159 | "end_time": "2022-05-17T23:16:52.267977Z", 160 | "start_time": "2022-05-17T23:16:52.216778Z" 161 | } 162 | }, 163 | "outputs": [], 164 | "source": [ 165 | "cmd = [\"git\", \"log\", \"--oneline\", since_date]\n", 166 | "ncommits = len(check_output(cmd).splitlines())" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "metadata": { 173 | "ExecuteTime": { 174 | "end_time": "2022-05-17T23:16:52.723059Z", 175 | "start_time": "2022-05-17T23:16:52.715679Z" 176 | } 177 | }, 178 | "outputs": [], 179 | "source": [ 180 | "ncommits" 181 | ] 182 | }, 183 | { 184 | "cell_type": "markdown", 185 | "metadata": {}, 186 | "source": [ 187 | "## List Contributors" 188 | ] 189 | }, 190 | { 191 | "cell_type": "markdown", 192 | "metadata": {}, 193 | "source": [ 194 | "Some of our contributors have many aliases for the same identity. 
So, we've added a mapping to make sure that individuals are listed once (and only once). " 195 | ] 196 | }, 197 | { 198 | "cell_type": "code", 199 | "execution_count": null, 200 | "metadata": { 201 | "ExecuteTime": { 202 | "end_time": "2022-05-17T23:16:55.078324Z", 203 | "start_time": "2022-05-17T23:16:55.069649Z" 204 | } 205 | }, 206 | "outputs": [], 207 | "source": [ 208 | "identities = {\n", 209 | " \"Levi John Wolf\": (\"ljwolf\", \"Levi John Wolf\"),\n", 210 | " \"Serge Rey\": (\"Serge Rey\", \"Sergio Rey\", \"sjsrey\", \"serge\"),\n", 211 | " \"Wei Kang\": (\"Wei Kang\", \"weikang9009\"),\n", 212 | " \"Dani Arribas-Bel\": (\"Dani Arribas-Bel\", \"darribas\"),\n", 213 | "}\n", 214 | "\n", 215 | "\n", 216 | "def regularize_identity(string):\n", 217 | " string = string.decode()\n", 218 | " for name, aliases in identities.items():\n", 219 | " for alias in aliases:\n", 220 | " if alias in string:\n", 221 | " string = string.replace(alias, name)\n", 222 | " if len(string.split(\" \")) > 1:\n", 223 | " string = string.title()\n", 224 | " return string.lstrip(\"* \")" 225 | ] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "execution_count": null, 230 | "metadata": { 231 | "ExecuteTime": { 232 | "end_time": "2022-05-17T23:16:55.809853Z", 233 | "start_time": "2022-05-17T23:16:55.805239Z" 234 | } 235 | }, 236 | "outputs": [], 237 | "source": [ 238 | "author_cmd = [\"git\", \"log\", \"--format=* %aN\", since_date]" 239 | ] 240 | }, 241 | { 242 | "cell_type": "code", 243 | "execution_count": null, 244 | "metadata": { 245 | "ExecuteTime": { 246 | "end_time": "2022-05-17T23:16:56.218962Z", 247 | "start_time": "2022-05-17T23:16:56.214396Z" 248 | } 249 | }, 250 | "outputs": [], 251 | "source": [ 252 | "from collections import Counter" 253 | ] 254 | }, 255 | { 256 | "cell_type": "code", 257 | "execution_count": null, 258 | "metadata": { 259 | "ExecuteTime": { 260 | "end_time": "2022-05-17T23:16:56.981077Z", 261 | "start_time": "2022-05-17T23:16:56.928758Z" 262 | } 263 | }, 264 | "outputs": [], 265 | "source": [ 266 | "ncommits = len(check_output(cmd).splitlines())\n", 267 | "all_authors = check_output(author_cmd).splitlines()\n", 268 | "counter = Counter([regularize_identity(author) for author in all_authors])\n", 269 | "# global_counter += counter\n", 270 | "# counters.update({'.'.join((package,subpackage)): counter})\n", 271 | "unique_authors = sorted(set(all_authors))" 272 | ] 273 | }, 274 | { 275 | "cell_type": "code", 276 | "execution_count": null, 277 | "metadata": { 278 | "ExecuteTime": { 279 | "end_time": "2022-05-17T23:16:57.345278Z", 280 | "start_time": "2022-05-17T23:16:57.341476Z" 281 | } 282 | }, 283 | "outputs": [], 284 | "source": [ 285 | "unique_authors = counter.keys()" 286 | ] 287 | }, 288 | { 289 | "cell_type": "code", 290 | "execution_count": null, 291 | "metadata": { 292 | "ExecuteTime": { 293 | "end_time": "2022-05-17T23:16:57.774890Z", 294 | "start_time": "2022-05-17T23:16:57.768056Z" 295 | } 296 | }, 297 | "outputs": [], 298 | "source": [ 299 | "unique_authors" 300 | ] 301 | }, 302 | { 303 | "cell_type": "markdown", 304 | "metadata": {}, 305 | "source": [ 306 | "## Disaggregate by PR, Issue" 307 | ] 308 | }, 309 | { 310 | "cell_type": "code", 311 | "execution_count": null, 312 | "metadata": { 313 | "ExecuteTime": { 314 | "end_time": "2022-05-17T23:16:58.518333Z", 315 | "start_time": "2022-05-17T23:16:58.512723Z" 316 | } 317 | }, 318 | "outputs": [], 319 | "source": [ 320 | "from datetime import datetime, timedelta\n", 321 | "\n", 322 | "ISO8601 = \"%Y-%m-%dT%H:%M:%SZ\"\n", 
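"# Note: GitHub returns pagination info in a Link header of the form\n", "#   <https://api.github.com/...&page=2>; rel=\"next\", <https://...>; rel=\"last\"\n", "# The regexes below pull the URLs (element_pat) and rel names (rel_pat) out of it.\n",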
323 | "PER_PAGE = 100\n", 324 | "element_pat = re.compile(r\"<(.+?)>\")\n", 325 | "rel_pat = re.compile(r'rel=[\\'\"](\\w+)[\\'\"]')" 326 | ] 327 | }, 328 | { 329 | "cell_type": "code", 330 | "execution_count": null, 331 | "metadata": { 332 | "ExecuteTime": { 333 | "end_time": "2022-05-17T23:17:02.752602Z", 334 | "start_time": "2022-05-17T23:17:02.734645Z" 335 | } 336 | }, 337 | "outputs": [], 338 | "source": [ 339 | "def parse_link_header(headers):\n", 340 | " link_s = headers.get(\"link\", \"\")\n", 341 | " urls = element_pat.findall(link_s)\n", 342 | " rels = rel_pat.findall(link_s)\n", 343 | " d = {}\n", 344 | " for rel, url in zip(rels, urls):\n", 345 | " d[rel] = url\n", 346 | " return d\n", 347 | "\n", 348 | "\n", 349 | "def get_paged_request(url):\n", 350 | " \"\"\"get a full list, handling APIv3's paging\"\"\"\n", 351 | " results = []\n", 352 | " while url:\n", 353 | " # print(\"fetching %s\" % url, file=sys.stderr)\n", 354 | " f = urlopen(url)\n", 355 | " results.extend(json.load(f))\n", 356 | " links = parse_link_header(f.headers)\n", 357 | " url = links.get(\"next\")\n", 358 | " return results\n", 359 | "\n", 360 | "\n", 361 | "def get_issues(project=\"pysal/pysal\", state=\"closed\", pulls=False):\n", 362 | " \"\"\"Get a list of the issues from the Github API.\"\"\"\n", 363 | " which = \"pulls\" if pulls else \"issues\"\n", 364 | " url = \"https://api.github.com/repos/%s/%s?state=%s&per_page=%i\" % (\n", 365 | " project,\n", 366 | " which,\n", 367 | " state,\n", 368 | " PER_PAGE,\n", 369 | " )\n", 370 | " return get_paged_request(url)\n", 371 | "\n", 372 | "\n", 373 | "def _parse_datetime(s):\n", 374 | " \"\"\"Parse dates in the format returned by the Github API.\"\"\"\n", 375 | " if s:\n", 376 | " return datetime.strptime(s, ISO8601)\n", 377 | " else:\n", 378 | " return datetime.fromtimestamp(0)\n", 379 | "\n", 380 | "\n", 381 | "def issues2dict(issues):\n", 382 | " \"\"\"Convert a list of issues to a dict, keyed by issue number.\"\"\"\n", 383 | " idict = {}\n", 384 | " for i in issues:\n", 385 | " idict[i[\"number\"]] = i\n", 386 | " return idict\n", 387 | "\n", 388 | "\n", 389 | "def is_pull_request(issue):\n", 390 | " \"\"\"Return True if the given issue is a pull request.\"\"\"\n", 391 | " return \"pull_request_url\" in issue\n", 392 | "\n", 393 | "\n", 394 | "def issues_closed_since(period=timedelta(days=365), project=\"pysal/pysal\", pulls=False):\n", 395 | " \"\"\"Get all issues closed since a particular point in time. period\n", 396 | " can either be a datetime object, or a timedelta object. 
In the\n", 397 | " latter case, it is used as a time before the present.\"\"\"\n", 398 | "\n", 399 | " which = \"pulls\" if pulls else \"issues\"\n", 400 | "\n", 401 | " if isinstance(period, timedelta):\n", 402 | " period = datetime.now() - period\n", 403 | " url = (\n", 404 | " \"https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i\"\n", 405 | " % (project, which, period.strftime(ISO8601), PER_PAGE)\n", 406 | " )\n", 407 | " allclosed = get_paged_request(url)\n", 408 | " # allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period)\n", 409 | " filtered = [i for i in allclosed if _parse_datetime(i[\"closed_at\"]) > period]\n", 410 | "\n", 411 | " # exclude rejected PRs\n", 412 | " if pulls:\n", 413 | " filtered = [pr for pr in filtered if pr[\"merged_at\"]]\n", 414 | "\n", 415 | " return filtered\n", 416 | "\n", 417 | "\n", 418 | "def sorted_by_field(issues, field=\"closed_at\", reverse=False):\n", 419 | " \"\"\"Return a list of issues sorted by closing date.\"\"\"\n", 420 | " return sorted(issues, key=lambda i: i[field], reverse=reverse)\n", 421 | "\n", 422 | "\n", 423 | "def report(issues, show_urls=False):\n", 424 | " \"\"\"Summary report about a list of issues, printing number and title.\"\"\"\n", 425 | " # titles are unicode strings in Python 3, so we can print them directly\n", 426 | " if show_urls:\n", 427 | " for i in issues:\n", 428 | " role = \"ghpull\" if \"merged_at\" in i else \"ghissue\"\n", 429 | " print(\"* :%s:`%d`: %s\" % (role, i[\"number\"], i[\"title\"]))\n", 430 | " else:\n", 431 | " for i in issues:\n", 432 | " print(\"* %d: %s\" % (i[\"number\"], i[\"title\"]))" 433 | ] 434 | }, 435 | { 436 | "cell_type": "code", 437 | "execution_count": null, 438 | "metadata": { 439 | "ExecuteTime": { 440 | "end_time": "2022-05-17T23:17:04.371021Z", 441 | "start_time": "2022-05-17T23:17:03.295584Z" 442 | } 443 | }, 444 | "outputs": [], 445 | "source": [ 446 | "all_issues = {}\n", 447 | "all_pulls = {}\n", 448 | "total_commits = 0\n", 449 | "# prj='pysal/libpysal'\n", 450 | "prj = \"pysal/{package}\".format(package=package_name)\n", 451 | "issues = issues_closed_since(since, project=prj, pulls=False)\n", 452 | "pulls = issues_closed_since(since, project=prj, pulls=True)\n", 453 | "issues = sorted_by_field(issues, reverse=True)\n", 454 | "pulls = sorted_by_field(pulls, reverse=True)\n", 455 | "n_issues, n_pulls = map(len, (issues, pulls))\n", 456 | "n_total = n_issues + n_pulls" 457 | ] 458 | }, 459 | { 460 | "cell_type": "code", 461 | "execution_count": null, 462 | "metadata": { 463 | "ExecuteTime": { 464 | "end_time": "2022-05-17T23:17:06.696427Z", 465 | "start_time": "2022-05-17T23:17:06.690112Z" 466 | } 467 | }, 468 | "outputs": [], 469 | "source": [ 470 | "issue_listing = []\n", 471 | "for issue in issues:\n", 472 | " entry = \"{title} (#{number})\".format(title=issue[\"title\"], number=issue[\"number\"])\n", 473 | " issue_listing.append(entry)" 474 | ] 475 | }, 476 | { 477 | "cell_type": "code", 478 | "execution_count": null, 479 | "metadata": { 480 | "ExecuteTime": { 481 | "end_time": "2022-05-17T23:17:07.146998Z", 482 | "start_time": "2022-05-17T23:17:07.141778Z" 483 | } 484 | }, 485 | "outputs": [], 486 | "source": [ 487 | "pull_listing = []\n", 488 | "for pull in pulls:\n", 489 | " entry = \"{title} (#{number})\".format(title=pull[\"title\"], number=pull[\"number\"])\n", 490 | " pull_listing.append(entry)" 491 | ] 492 | }, 493 | { 494 | "cell_type": "code", 495 | "execution_count":
null, 496 | "metadata": { 497 | "ExecuteTime": { 498 | "end_time": "2022-05-17T23:17:07.731052Z", 499 | "start_time": "2022-05-17T23:17:07.724113Z" 500 | } 501 | }, 502 | "outputs": [], 503 | "source": [ 504 | "pull_listing" 505 | ] 506 | }, 507 | { 508 | "cell_type": "code", 509 | "execution_count": null, 510 | "metadata": { 511 | "ExecuteTime": { 512 | "end_time": "2022-05-17T23:17:10.222064Z", 513 | "start_time": "2022-05-17T23:17:10.217762Z" 514 | } 515 | }, 516 | "outputs": [], 517 | "source": [ 518 | "message = \"We closed a total of {total} issues (enhancements and bug fixes) through {pr} pull requests\".format(\n", 519 | " total=n_total, pr=n_pulls\n", 520 | ")" 521 | ] 522 | }, 523 | { 524 | "cell_type": "code", 525 | "execution_count": null, 526 | "metadata": { 527 | "ExecuteTime": { 528 | "end_time": "2022-05-17T23:17:10.807853Z", 529 | "start_time": "2022-05-17T23:17:10.803257Z" 530 | } 531 | }, 532 | "outputs": [], 533 | "source": [ 534 | "message = \"{msg}, since our last release on {previous}.\".format(\n", 535 | " msg=message, previous=str(start_date)\n", 536 | ")" 537 | ] 538 | }, 539 | { 540 | "cell_type": "code", 541 | "execution_count": null, 542 | "metadata": { 543 | "ExecuteTime": { 544 | "end_time": "2022-05-17T23:17:11.930347Z", 545 | "start_time": "2022-05-17T23:17:11.923823Z" 546 | } 547 | }, 548 | "outputs": [], 549 | "source": [ 550 | "message" 551 | ] 552 | }, 553 | { 554 | "cell_type": "code", 555 | "execution_count": null, 556 | "metadata": { 557 | "ExecuteTime": { 558 | "end_time": "2022-05-17T23:17:13.525550Z", 559 | "start_time": "2022-05-17T23:17:13.522153Z" 560 | } 561 | }, 562 | "outputs": [], 563 | "source": [ 564 | "message += \"\\n\\n## Issues Closed\\n\"" 565 | ] 566 | }, 567 | { 568 | "cell_type": "code", 569 | "execution_count": null, 570 | "metadata": { 571 | "ExecuteTime": { 572 | "end_time": "2022-05-17T23:17:14.255527Z", 573 | "start_time": "2022-05-17T23:17:14.250218Z" 574 | } 575 | }, 576 | "outputs": [], 577 | "source": [ 578 | "print(message)" 579 | ] 580 | }, 581 | { 582 | "cell_type": "code", 583 | "execution_count": null, 584 | "metadata": { 585 | "ExecuteTime": { 586 | "end_time": "2022-05-17T23:17:14.967383Z", 587 | "start_time": "2022-05-17T23:17:14.963548Z" 588 | } 589 | }, 590 | "outputs": [], 591 | "source": [ 592 | "issues = \"\\n\".join([\" - \" + issue for issue in issue_listing])\n", 593 | "message += issues\n", 594 | "message += \"\\n\\n## Pull Requests\\n\"\n", 595 | "pulls = \"\\n\".join([\" - \" + pull for pull in pull_listing])\n", 596 | "message += pulls" 597 | ] 598 | }, 599 | { 600 | "cell_type": "code", 601 | "execution_count": null, 602 | "metadata": { 603 | "ExecuteTime": { 604 | "end_time": "2022-05-17T23:17:15.591401Z", 605 | "start_time": "2022-05-17T23:17:15.587892Z" 606 | } 607 | }, 608 | "outputs": [], 609 | "source": [ 610 | "print(message)" 611 | ] 612 | }, 613 | { 614 | "cell_type": "code", 615 | "execution_count": null, 616 | "metadata": { 617 | "ExecuteTime": { 618 | "end_time": "2022-05-17T23:17:16.408755Z", 619 | "start_time": "2022-05-17T23:17:16.403978Z" 620 | } 621 | }, 622 | "outputs": [], 623 | "source": [ 624 | "people = \"\\n\".join([\" - \" + person for person in unique_authors])" 625 | ] 626 | }, 627 | { 628 | "cell_type": "code", 629 | "execution_count": null, 630 | "metadata": { 631 | "ExecuteTime": { 632 | "end_time": "2022-05-17T23:17:17.109747Z", 633 | "start_time": "2022-05-17T23:17:17.104026Z" 634 | } 635 | }, 636 | "outputs": [], 637 | "source": [ 638 | "print(people)" 639 | ] 640 | }, 
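{ "cell_type": "markdown", "metadata": {}, "source": [ "As a quick sanity check -- a minimal usage sketch of the helpers defined above (assuming the `since` and `prj` variables from the earlier cells):\n", "\n", "```python\n", "merged_prs = issues_closed_since(since, project=prj, pulls=True)\n", "report(sorted_by_field(merged_prs), show_urls=True)\n", "```" ] },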
641 | { 642 | "cell_type": "code", 643 | "execution_count": null, 644 | "metadata": { 645 | "ExecuteTime": { 646 | "end_time": "2022-05-17T23:17:18.339052Z", 647 | "start_time": "2022-05-17T23:17:18.333927Z" 648 | } 649 | }, 650 | "outputs": [], 651 | "source": [ 652 | "message += (\n", 653 | " \"\\n\\nThe following individuals contributed to this release:\\n\\n{people}\".format(\n", 654 | " people=people\n", 655 | " )\n", 656 | ")" 657 | ] 658 | }, 659 | { 660 | "cell_type": "code", 661 | "execution_count": null, 662 | "metadata": { 663 | "ExecuteTime": { 664 | "end_time": "2022-05-17T23:17:18.955964Z", 665 | "start_time": "2022-05-17T23:17:18.951062Z" 666 | } 667 | }, 668 | "outputs": [], 669 | "source": [ 670 | "print(message)" 671 | ] 672 | }, 673 | { 674 | "cell_type": "code", 675 | "execution_count": null, 676 | "metadata": { 677 | "ExecuteTime": { 678 | "end_time": "2022-05-17T23:17:19.780714Z", 679 | "start_time": "2022-05-17T23:17:19.776314Z" 680 | } 681 | }, 682 | "outputs": [], 683 | "source": [ 684 | "head = \"# Changes\\n\\nVersion {version} ({release_date})\\n\\n\".format(\n", 685 | " version=__version__, release_date=release_date\n", 686 | ")" 687 | ] 688 | }, 689 | { 690 | "cell_type": "code", 691 | "execution_count": null, 692 | "metadata": { 693 | "ExecuteTime": { 694 | "end_time": "2022-05-17T23:17:20.392969Z", 695 | "start_time": "2022-05-17T23:17:20.387513Z" 696 | } 697 | }, 698 | "outputs": [], 699 | "source": [ 700 | "print(head + message)" 701 | ] 702 | }, 703 | { 704 | "cell_type": "code", 705 | "execution_count": null, 706 | "metadata": { 707 | "ExecuteTime": { 708 | "end_time": "2022-05-17T23:17:21.419457Z", 709 | "start_time": "2022-05-17T23:17:21.414924Z" 710 | } 711 | }, 712 | "outputs": [], 713 | "source": [ 714 | "outfile = \"changelog_{version}.md\".format(version=__version__)\n", 715 | "with open(outfile, \"w\") as of:\n", 716 | " of.write(head + message)" 717 | ] 718 | } 719 | ], 720 | "metadata": { 721 | "kernelspec": { 722 | "display_name": "Python [conda env:py39_libpysal]", 723 | "language": "python", 724 | "name": "conda-env-py39_libpysal-py" 725 | }, 726 | "language_info": { 727 | "codemirror_mode": { 728 | "name": "ipython", 729 | "version": 3 730 | }, 731 | "file_extension": ".py", 732 | "mimetype": "text/x-python", 733 | "name": "python", 734 | "nbconvert_exporter": "python", 735 | "pygments_lexer": "ipython3", 736 | "version": "3.9.9" 737 | } 738 | }, 739 | "nbformat": 4, 740 | "nbformat_minor": 2 741 | } 742 | --------------------------------------------------------------------------------