"
13 |
14 |
15 | def plot_spatial_weights(
16 | w,
17 | gdf,
18 | indexed_on=None,
19 | ax=None,
20 | figsize=(10, 10),
21 | node_kws=None,
22 | edge_kws=None,
23 | nonplanar_edge_kws=None,
24 | ):
25 | """
26 | Plot spatial weights network.
27 | NOTE: Additionally plots `w.non_planar_joins` if
28 | `libpysal.weights.util.nonplanar_neighbors()` was applied.
29 |
30 | Parameters
31 | ----------
32 |     w : libpysal.W object
33 |         The spatial weights object whose network structure is plotted.
34 | gdf : geopandas dataframe
35 | The original shapes whose topological relations are
36 | modelled in W.
37 | indexed_on : str, optional
38 | Column of gdf which the weights object uses as an index.
39 | Default =None, so the geodataframe's index is used.
40 |     ax : matplotlib axis, optional
41 |         Axis on which to plot the weights.
42 |         Default =None, so a new figure and axis are created.
43 | figsize : tuple, optional
44 |         Width, height of the figure. Default =(10,10).
45 | node_kws : keyword argument dictionary, optional
46 | Dictionary of keyword arguments to send to pyplot.scatter,
47 | which provide fine-grained control over the aesthetics
48 | of the nodes in the plot. Default =None.
49 | edge_kws : keyword argument dictionary, optional
50 | Dictionary of keyword arguments to send to pyplot.plot,
51 | which provide fine-grained control over the aesthetics
52 | of the edges in the plot. Default =None.
53 | nonplanar_edge_kws : keyword argument dictionary, optional
54 | Dictionary of keyword arguments to send to pyplot.plot,
55 | which provide fine-grained control over the aesthetics
56 | of the edges from `weights.non_planar_joins` in the plot.
57 | Default =None.
58 |
59 | Returns
60 | -------
61 |     fig : matplotlib Figure instance
62 |         Figure of the spatial weights network.
63 | ax : matplotlib Axes instance
64 | Axes in which the figure is plotted.
65 |
66 | Examples
67 | --------
68 | Imports
69 |
70 | >>> from libpysal.weights.contiguity import Queen
71 | >>> import geopandas as gpd
72 | >>> import libpysal
73 | >>> from libpysal import examples
74 | >>> import matplotlib.pyplot as plt
75 | >>> from splot.libpysal import plot_spatial_weights
76 |
77 | Data preparation and statistical analysis
78 |
79 | >>> gdf = gpd.read_file(examples.get_path('map_RS_BR.shp'))
80 | >>> weights = Queen.from_dataframe(gdf)
81 | >>> wnp = libpysal.weights.util.nonplanar_neighbors(weights, gdf)
82 |
83 | Plot weights
84 |
85 |     >>> fig, ax = plot_spatial_weights(weights, gdf)
86 | >>> plt.show()
87 |
88 | Plot corrected weights
89 |
90 |     >>> fig, ax = plot_spatial_weights(wnp, gdf)
91 | >>> plt.show()
92 |
93 | """
94 | if ax is None:
95 | fig = plt.figure(figsize=figsize)
96 | ax = fig.add_subplot(111)
97 | else:
98 | fig = ax.get_figure()
99 |
100 | # default for node_kws
101 | if node_kws is None:
102 | node_kws = dict(markersize=10, facecolor="#4d4d4d", edgecolor="#4d4d4d")
103 |
104 | # default for edge_kws
105 | if edge_kws is None:
106 | edge_kws = dict(colors="#4393c3")
107 |
108 | # default for nonplanar_edge_kws
109 | if nonplanar_edge_kws is None:
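        # inherit the (possibly user-supplied) edge style, set a thin
        # default linewidth, and recolor non-planar joins red so they
        # stand out against the regular edges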
110 | edge_kws.setdefault("lw", 0.7)
111 | nonplanar_edge_kws = edge_kws.copy()
112 | nonplanar_edge_kws["colors"] = "#d6604d"
113 |
114 | node_has_nonplanar_join = []
115 | if hasattr(w, "non_planar_joins"):
116 | # This attribute is present when an instance is created by the user
117 | # calling `weights.util.nonplanar_neighbors`. If so, treat those
118 | # edges differently by default.
119 | node_has_nonplanar_join = w.non_planar_joins.keys()
120 |
121 | centroids_shp = gdf.centroid.values
122 |
123 | segments = []
124 | non_planar_segments = []
125 |
126 | if indexed_on is not None:
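        # map each value of the indexing column to its positional row in
        # gdf, so ids used by the weights object can be matched to centroids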
127 | dict_index = dict(zip(gdf[indexed_on].values, range(len(gdf))))
128 | for idx in w.id_order:
129 | if idx in w.islands:
130 | continue
131 | # Find the centroid of the polygon we're looking at now
132 | origin = np.array(centroids_shp[dict_index[idx]].coords)[0]
133 | for jdx in w.neighbors[idx]:
134 | dest = np.array(centroids_shp[dict_index[jdx]].coords)[0]
135 | if (idx in node_has_nonplanar_join) and (
136 | jdx in w.non_planar_joins[idx]
137 | ):
138 | # This is a non-planar edge
139 | non_planar_segments.append([origin, dest])
140 | else:
141 | segments.append([origin, dest])
142 | else:
143 | for idx in w.id_order:
144 | if idx in w.islands:
145 | continue
146 |
147 | # Find the centroid of the polygon we're looking at now
148 | origin = np.array(centroids_shp[idx].coords)[0]
149 | for j in w.neighbors[idx]:
150 | jdx = w.id2i[j]
151 | dest = np.array(centroids_shp[jdx].coords)[0]
152 | if (idx in node_has_nonplanar_join) and (
153 | jdx in w.non_planar_joins[idx]
154 | ):
155 | # This is a non-planar edge
156 | non_planar_segments.append([origin, dest])
157 | else:
158 | segments.append([origin, dest])
159 |
160 | # Plot the polygons from the geodataframe as a base layer
161 | gdf.plot(ax=ax, color="#bababa", edgecolor="w")
162 |
163 | # plot polygon centroids
164 | gdf.centroid.plot(ax=ax, **node_kws)
165 |
166 | # plot weight edges
167 | non_planar_segs_plot = LineCollection(
168 | np.array(non_planar_segments), **nonplanar_edge_kws
169 | )
170 | segs_plot = LineCollection(np.array(segments), **edge_kws)
171 | ax.add_collection(segs_plot)
172 | ax.add_collection(non_planar_segs_plot)
173 |
174 | ax.set_axis_off()
175 | ax.set_aspect("equal")
176 | return fig, ax
177 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | **`splot` is in the process of being archived. Its functionality is being integrated into associated PySAL projects.**
2 |
3 | # splot
4 |
5 | [](https://github.com/pysal/splot/actions/workflows/unittests.yml)
6 | [](https://codecov.io/gh/pysal/splot)
7 | [](https://splot.readthedocs.io/en/latest/?badge=latest)
8 | [](https://badge.fury.io/py/splot)
9 | [](https://doi.org/10.21105/joss.01882)
10 | [](https://doi.org/10.5281/zenodo.3258810)
11 |
12 | **Visual analytics for spatial analysis with PySAL.**
13 |
14 | 
15 |
16 | ## What is splot?
17 |
18 | `splot` connects spatial analysis done in [`PySAL`](https://github.com/pysal) to different popular visualization toolkits like [`matplotlib`](https://matplotlib.org).
19 | The `splot` package allows you to create both static plots ready for publication and interactive visualizations for quick iteration and spatial data exploration. The primary goal of `splot` is to enable you to visualize popular `PySAL` objects and to give you different views on your spatial analysis workflow.
20 |
21 | If you are new to `splot` and `PySAL`, the best way to get started is our [documentation](https://splot.readthedocs.io/en/latest/) and the short introduction [video](https://youtu.be/kriQOJMycIQ?t=2403) of the package from the SciPy 2018 conference!
22 |
23 | ## Installing splot
24 |
25 | ### Installing dependencies
26 |
27 | `splot` is compatible with `Python` 3.8+ and depends on `geopandas` 0.9.0 or later and `matplotlib` 3.3.3 or later.
28 |
29 | `splot` also uses:
30 |
31 | * `numpy`
32 | * `seaborn`
33 | * `mapclassify`
34 | * `ipywidgets`
35 |
36 | Depending on your spatial analysis workflow and the `PySAL` objects you would like to visualize, `splot` relies on:
37 |
38 | * PySAL 2.0
39 |
40 | or separate packages found in the `PySAL` stack:
41 |
42 | * esda
43 | * libpysal
44 | * spreg
45 | * giddy
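
These stack packages can also be installed individually from PyPI, e.g. to pull in only what your workflow needs:

    pip install esda libpysal giddy spreg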
46 |
47 | ### Installing splot
48 |
49 | There are two ways of accessing `splot`. First, `splot` is installed with the [PySAL 2.0](https://pysal.readthedocs.io/en/latest/installation.html) metapackage through:
50 |
51 | $ pip install -U pysal
52 |
53 | or
54 |
55 | $ conda install -c conda-forge pysal
56 |
57 | Second, `splot` can be installed as a separate package. If you are using Anaconda, install `splot` via the `conda` utility:
58 |
59 | conda install -c conda-forge splot
60 |
61 | Otherwise you can install `splot` from `PyPI` with pip:
62 |
63 | pip install splot
64 |
65 | ## Usage
66 |
67 | Usage examples for different spatial statistical workflows are provided as [notebooks](https://github.com/pysal/splot/tree/main/notebooks):
68 |
69 | * [for creating value-by-alpha maps](https://github.com/pysal/splot/blob/main/notebooks/mapping_vba.ipynb)
70 | * [for assessing the relationship between neighboring polygons](https://github.com/pysal/splot/blob/main/notebooks/libpysal_non_planar_joins_viz.ipynb)
71 | * [for the visualization of space-time autocorrelation](https://github.com/pysal/splot/blob/main/notebooks/giddy_space_time.ipynb), also documented in [giddy](https://github.com/pysal/giddy/blob/main/notebooks/directional.ipynb)
72 | * for visualizing spatial autocorrelation of [univariate](https://github.com/pysal/splot/blob/main/notebooks/esda_morans_viz.ipynb) or [multivariate](https://github.com/pysal/splot/blob/main/notebooks/esda_moran_matrix_viz.ipynb) variable analysis
73 |
74 | You can also check our [documentation](https://splot.readthedocs.io/en/latest/) for examples on how to use each function. A detailed report about the development, structure and usage of `splot` can be found [here](https://gist.github.com/slumnitz/a86ef4a5b48b1b5fac41e91cfd05fff2). More tutorials for the whole `PySAL` ecosystem can be found in our [notebooks book](http://pysal.org/notebooks/intro.html) project.
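
For a first impression of the API, here is a minimal example (mirroring the `plot_spatial_weights` docstring, using the `map_RS_BR` dataset shipped with `libpysal.examples`) that computes Queen contiguity weights and plots the resulting spatial weights network:

```python
import geopandas as gpd
import matplotlib.pyplot as plt
from libpysal import examples
from libpysal.weights.contiguity import Queen

from splot.libpysal import plot_spatial_weights

# read an example dataset shipped with libpysal
gdf = gpd.read_file(examples.get_path("map_RS_BR.shp"))

# build Queen contiguity weights and plot the network they define
weights = Queen.from_dataframe(gdf)
fig, ax = plot_spatial_weights(weights, gdf)
plt.show()
```

If `libpysal.weights.util.nonplanar_neighbors` has been applied to the weights, the corrected joins are drawn in a contrasting color by default (see the non-planar joins notebook above).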
75 |
76 | ## Contributing to splot
77 |
78 | `splot` is an open source project within the Python Spatial Analysis Library that is supported by a community of geographers, visualization lovers, map fans, users, and data scientists. As a community, we work together to create `splot` as our own spatial visualization toolkit, and we will gratefully and humbly accept any contributions and ideas you bring to this project.
79 |
80 | Feel free to check out our discussion spaces, add ideas and contributions:
81 |
82 | * [Idea collection](https://github.com/pysal/splot/issues/10) on which PySAL objects to support and what new visualizations could look like
83 | * [Discussion](https://github.com/pysal/splot/issues/9) about the splot API
84 | * Ideas on how to integrate [other popular visualization toolkits](https://github.com/pysal/splot/issues/22) like `Bokeh` or `Altair`
85 |
86 | If you have never contributed before, or you are just discovering what `PySAL` and `splot` have to offer, reading through `"""docstrings"""` and correcting our documentation can be a great way to start. Check for spelling and grammar mistakes, or use [pep8](https://pypi.org/project/pep8/) and [pyflakes](https://pypi.org/project/pyflakes/) to clean our `.py` files. This will let you get used to working with [git](https://try.github.io) and familiarize yourself with the `splot` and `PySAL` code base.
87 |
88 | If you have already used `PySAL` and `splot` and you are missing object-specific views for your analysis, feel free to add to our code base or discuss your ideas. Please make sure you include unit tests, documentation, and examples, or create an issue so someone else can work on it together with you. The common `splot` API design discussed [here](https://github.com/pysal/splot/issues/9) can help you to decide how to best integrate your visualization prototype into `splot`.
89 |
90 | Beyond working on documentation and prototyping new visualizations, you can always write a bug report or feature request on [Github issues](https://github.com/pysal/splot/issues). Whether large or small, any contribution makes a big difference and we hope you enjoy being part of our community as much as we do! The only thing we ask is that you abide by the principles of openness, respect, and consideration of others as described in the [PySAL Code of Conduct](https://github.com/pysal/code_of_conduct/blob/master/README.md).
91 |
92 | ## Road-map
93 |
94 | We are planning to extend `splot`'s visualization toolkit in the future. Functionality we plan to implement includes:
95 |
96 | * visualizations for [density methods](https://github.com/pysal/splot/issues/32) (mapping density estimations)
97 | * [cross-hatching fill styles](https://github.com/pysal/splot/issues/35) for maps (to allow choropleth visualizations without class intervals)
98 | * [legendgrams](https://github.com/pysal/splot/issues/34) (map legends that visualize the distribution of observations by color in a given map)
99 |
100 | If you are interested in working on one of these or any other methods, check out the linked issues or get in touch!
101 |
102 | ## Community support
103 |
104 | * [PySAL 2.0](http://pysal.org)
105 | * [Gitter chat splot](https://gitter.im/pysal/splot?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
106 |
--------------------------------------------------------------------------------
/doc/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # splot documentation build configuration file, created by
4 | # sphinx-quickstart on Wed Jun 6 15:54:22 2018.
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
15 | import os
16 |
17 | # If extensions (or modules to document with autodoc) are in another directory,
18 | # add these directories to sys.path here. If the directory is relative to the
19 | # documentation root, use os.path.abspath to make it absolute, like shown here.
20 | #
21 | import sys
22 |
23 | import sphinx_bootstrap_theme
24 |
25 | sys.path.insert(0, os.path.abspath("../../"))
26 |
27 | # import your package to obtain the version info to display on the docs website
28 | import splot  # noqa: E402
29 |
30 | # -- General configuration ------------------------------------------------
31 |
32 | # If your documentation needs a minimal Sphinx version, state it here.
33 | #
34 | # needs_sphinx = '1.0'
35 | # Add any Sphinx extension module names here, as strings. They can be
36 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
37 | # ones.
38 | extensions = [ # 'sphinx_gallery.gen_gallery',
39 | "sphinx.ext.autodoc",
40 | "sphinx.ext.autosummary",
41 | "sphinx.ext.viewcode",
42 | "sphinxcontrib.bibtex",
43 | "sphinx.ext.mathjax",
44 | "sphinx.ext.doctest",
45 | "sphinx.ext.intersphinx",
46 | "numpydoc",
47 | "matplotlib.sphinxext.plot_directive",
48 | ]
49 |
50 | bibtex_bibfiles = ["_static/references.bib"]
51 |
52 | # Add any paths that contain templates here, relative to this directory.
53 | templates_path = ["_templates"]
54 |
55 | # The suffix(es) of source filenames.
56 | # You can specify multiple suffix as a list of string:
57 | #
58 | # source_suffix = ['.rst', '.md']
59 | source_suffix = ".rst"
60 |
61 | # The master toctree document.
62 | master_doc = "index"
63 |
64 | # General information about the project.
65 | project = "splot"
66 | copyright = "2018, pysal developers"
67 | author = "pysal developers"
68 |
69 | # The version info for the project you're documenting, acts as replacement for
70 | # |version| and |release|, also used in various other places throughout the
71 | # built documents.
72 | #
73 | # The full version.
74 | version = splot.__version__
75 | release = splot.__version__
76 |
77 | # The language for content autogenerated by Sphinx. Refer to documentation
78 | # for a list of supported languages.
79 | #
80 | # This is also used if you do content translation via gettext catalogs.
81 | # Usually you set "language" from the command line for these cases.
82 | language = None
83 |
84 | # List of patterns, relative to source directory, that match files and
85 | # directories to ignore when looking for source files.
86 | # This patterns also effect to html_static_path and html_extra_path
87 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "tests/*"]
88 |
89 | # The name of the Pygments (syntax highlighting) style to use.
90 | pygments_style = "sphinx"
91 |
92 | # If true, `todo` and `todoList` produce output, else they produce nothing.
93 | todo_include_todos = False
94 |
95 | # -- Options for HTML output ----------------------------------------------
96 |
97 | # The theme to use for HTML and HTML Help pages. See the documentation for
98 | # a list of builtin themes.
99 | #
100 | # html_theme = 'alabaster'
101 | html_theme = "bootstrap"
102 | html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
103 | html_title = "%s v%s Manual" % (project, version)
104 |
105 | # (Optional) Logo of your package.
106 | # Should be small enough to fit the navbar (ideally 24x24).
107 | # Path should be relative to the ``_static`` files directory.
108 | # html_logo = "_static/images/package_logo.jpg"
109 |
110 | # (Optional) PySAL favicon
111 | html_favicon = "_static/images/pysal_favicon.ico"
112 |
113 |
114 | # Theme options are theme-specific and customize the look and feel of a theme
115 | # further. For a list of options available for each theme, see the
116 | # documentation.
117 | #
118 | html_theme_options = {
119 | # Navigation bar title. (Default: ``project`` value)
120 | "navbar_title": "splot", # string of your project name, for example, 'giddy'
121 | # Render the next and previous page links in navbar. (Default: true)
122 | "navbar_sidebarrel": False,
123 | # Render the current pages TOC in the navbar. (Default: true)
124 | # 'navbar_pagenav': True,
125 | # 'navbar_pagenav': False,
126 | # No sidebar
127 | "nosidebar": True,
128 | # Tab name for the current pages TOC. (Default: "Page")
129 | # 'navbar_pagenav_name': "Page",
130 | # Global TOC depth for "site" navbar tab. (Default: 1)
131 | # Switching to -1 shows all levels.
132 | "globaltoc_depth": 2,
133 | # Include hidden TOCs in Site navbar?
134 | #
135 | # Note: If this is "false", you cannot have mixed ``:hidden:`` and
136 | # non-hidden ``toctree`` directives in the same page, or else the build
137 | # will break.
138 | #
139 | # Values: "true" (default) or "false"
140 | "globaltoc_includehidden": "true",
141 | # HTML navbar class (Default: "navbar") to attach to element.
142 | # For black navbar, do "navbar navbar-inverse"
143 | # 'navbar_class': "navbar navbar-inverse",
144 | # Fix navigation bar to top of page?
145 | # Values: "true" (default) or "false"
146 | "navbar_fixed_top": "true",
147 | # Location of link to source.
148 | # Options are "nav" (default), "footer" or anything else to exclude.
149 | "source_link_position": "footer",
150 | # Bootswatch (http://bootswatch.com/) theme.
151 | #
152 | # Options are nothing (default) or the name of a valid theme
153 | # such as "amelia" or "cosmo", "yeti", "flatly".
154 | "bootswatch_theme": "yeti",
155 | # Choose Bootstrap version.
156 | # Values: "3" (default) or "2" (in quotes)
157 | "bootstrap_version": "3",
158 | # Navigation bar menu
159 | "navbar_links": [
160 | ("Installation", "installation"),
161 | ("API", "api"),
162 | ("References", "references"),
163 | ],
164 | }
165 |
166 | # Add any paths that contain custom static files (such as style sheets) here,
167 | # relative to this directory. They are copied after the builtin static files,
168 | # so a file named "default.css" will overwrite the builtin "default.css".
169 | html_static_path = ["_static"]
170 |
171 | # Custom sidebar templates, maps document names to template names.
172 | # html_sidebars = {}
173 | # html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
174 |
175 | # -- Options for HTMLHelp output ------------------------------------------
176 |
177 | # Output file base name for HTML help builder.
178 | htmlhelp_basename = "splot" + "doc"
179 |
180 |
181 | # -- Options for LaTeX output ---------------------------------------------
182 |
183 | latex_elements = {
184 | # The paper size ('letterpaper' or 'a4paper').
185 | #
186 | # 'papersize': 'letterpaper',
187 | # The font size ('10pt', '11pt' or '12pt').
188 | #
189 | # 'pointsize': '10pt',
190 | # Additional stuff for the LaTeX preamble.
191 | #
192 | # 'preamble': '',
193 | # Latex figure (float) alignment
194 | #
195 | # 'figure_align': 'htbp',
196 | }
197 |
198 | # Grouping the document tree into LaTeX files. List of tuples
199 | # (source start file, target name, title,
200 | # author, documentclass [howto, manual, or own class]).
201 | latex_documents = [
202 | (master_doc, "splot.tex", "splot Documentation", "pysal developers", "manual"),
203 | ]
204 |
205 |
206 | # -- Options for manual page output ---------------------------------------
207 |
208 | # One entry per manual page. List of tuples
209 | # (source start file, name, description, authors, manual section).
210 | man_pages = [(master_doc, "splot", "splot Documentation", [author], 1)]
211 |
212 |
213 | # -- Options for Texinfo output -------------------------------------------
214 |
215 | # Grouping the document tree into Texinfo files. List of tuples
216 | # (source start file, target name, title, author,
217 | # dir menu entry, description, category)
218 | texinfo_documents = [
219 | (
220 | master_doc,
221 | "splot",
222 | "splot Documentation",
223 | author,
224 | "splot",
225 |         "Visual analytics for spatial analysis with PySAL.",
226 | "Miscellaneous",
227 | ),
228 | ]
229 |
230 |
231 | # -----------------------------------------------------------------------------
232 | # Autosummary
233 | # -----------------------------------------------------------------------------
234 |
235 | # Generate the API documentation when building
236 | autosummary_generate = True
237 | numpydoc_show_class_members = True
238 | class_members_toctree = True
239 | numpydoc_show_inherited_class_members = True
240 | numpydoc_use_plots = True
241 |
242 | # display the source code for Plot directive
243 | plot_include_source = True
244 |
245 |
246 | def setup(app):
247 | app.add_css_file("pysal-styles.css")
248 |
249 |
250 | # Example configuration for intersphinx: refer to the Python standard library.
251 | intersphinx_mapping = {"https://docs.python.org/3.6/": None}
252 |
--------------------------------------------------------------------------------
/paper/paper.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: '`splot` - visual analytics for spatial statistics'
3 | tags:
4 | - Python
5 | - visualization
6 | - spatial analysis
7 | - spatial statistics
8 | authors:
9 | - name: Stefanie Lumnitz
10 | orcid: 0000-0002-7007-5812
11 | affiliation: "1, 2" # (Multiple affiliations must be quoted)
12 |   - name: Dani Arribas-Bel
13 | orcid: 0000-0002-6274-1619
14 | affiliation: 3
15 | - name: Renan X. Cortes
16 | orcid: 0000-0002-1889-5282
17 | affiliation: 2
18 | - name: James D. Gaboardi
19 | orcid: 0000-0002-4776-6826
20 | affiliation: 4
21 | - name: Verena Griess
22 | orcid: 0000-0002-3856-3736
23 | affiliation: 1
24 | - name: Wei Kang
25 | orcid: 0000-0002-1073-7781
26 | affiliation: 2
27 | - name: Taylor M. Oshan
28 | orcid: 0000-0002-0537-2941
29 | affiliation: 7
30 | - name: Levi Wolf
31 | orcid: 0000-0003-0274-599X
32 | affiliation: "5,6"
33 | - name: Sergio Rey
34 | orcid: 0000-0001-5857-9762
35 | affiliation: 2
36 | affiliations:
37 | - name: Department of Forest Resource Management, University of British Columbia
38 | index: 1
39 | - name: Center for Geospatial Sciences, University of California Riverside
40 | index: 2
41 | - name: Geographic Data Science Lab, Department of Geography & Planning, University of Liverpool
42 | index: 3
43 | - name: Department of Geography, Pennsylvania State University
44 | index: 4
45 | - name: School of Geographical Sciences, University of Bristol
46 | index: 5
47 | - name: Alan Turing Institute
48 | index: 6
49 | - name: Department of Geographical Sciences, University of Maryland, College Park
50 | index: 7
51 | date: 25 October 2019
52 | bibliography: paper.bib
53 | ---
54 |
55 | # Summary
56 |
57 | Geography is an intensely visual domain. Its longstanding dependence on visualization and cartography shows as much, with John Snow's cholera map serving as one of the first instances of geovisual analytics in science [@johnson2007ghost;@arribas-bel2017looking], and the perennial presence of maps as statistical displays in seminal works on visualization [@tufte2001visual]. As such, the existence and continued focus on maps in geographical analysis demands serious, dedicated attention in scientific computing. However, existing methods in Python, specifically for *statistical* visualization of spatial data, are lacking. General-purpose mapping provided by `geopandas` is not fine-tuned enough for statistical analysis [@kelsey_jordahl_2019_3333010]. The more analytically-oriented views offered by `geoplot`, while useful, are limited in their statistical applications [@aleksey_bilogur_2019_3475569]. Thus, the need remains for a strong, analytically-oriented toolbox for visual geographical analysis.
58 |
59 | This need is heightened by the fact that the collection and generation of geographical data is becoming more pervasive [@goodchild2007citizen;@arribas-bel2014accidental]. With the proliferation of high-accuracy GPS data, many datasets are now *becoming* spatial datasets; their analysis and visualization increasingly requires explicitly spatial methods that account for the various special structures in geographical data [@anselin1988spatial]. Geographical questions about dependence, endogeneity, heterogeneity, and non-stationarity require special statistical tools to diagnose, and spatial analytic software to visualize [@anselin2014modern]. Further, with the increasing importance of code and computation in geographical curricula [@rey2009show;@rey2018code;@ucgis2019geographic], it has become critical for both pedagogical and research reasons to support geographical analyses with robust visualization tools. To date there are few toolkits for geovisualization developed in the scientific Python stack to fill this need and none for visualization of the process and outcome of spatial analytics. It is this niche that `splot` is designed to fill.
60 |
61 | Implemented in Python, `splot` extends both *spatial analytical methods* like that found in the Python Spatial Analysis Library (`PySAL`) and *general purpose visualization* functionality provided by popular packages such as `matplotlib`, in order to simplify visualizing spatial analysis workflows and results. The `splot` package was developed in parallel to the ecosystem of tools to store, manage, and analyze spatial data, which evolved in ways that gave more relevance to integrated command-line oriented environments such as `Jupyter`; and less to disconnected, one-purpose point-and-click tools such as traditional desktop GIS packages. In this context, visual analytics done with `splot` allows for more general scientific workflows via the integration of spatial analytics with the rest of the Python data science ecosystem.
62 |
63 | As a visual steering tool, `splot` facilitates analyses and interpretation of results, and streamlines the process of model and method selection for many spatial applications. Our high-level API allows quick access to visualizing popular `PySAL` objects generated through spatial statistical analysis. The `PySAL` ecosystem can here be understood as a library integrating many spatial analytical packages (called *sub-modules*) under one umbrella. These sub-modules range in purpose from exploratory data analysis to explanatory statistical models of spatial relationships. As a stand-alone package within the ecosystem, `splot` implements a multitude of views for different spatial analysis workflows to give users the opportunity to assess a problem from different perspectives.
64 |
65 | Building on top of our users' feedback, `splot`'s functionality can be accessed in two main ways. First, basic `splot` visualization is exposed as `.plot` methods on objects found in various packages in the `PySAL` ecosystem. Integrating simple `splot` visualizations in other `PySAL` packages ensures that users have the quickest possible access to visualizations. This is especially useful for an instantaneous sanity check to determine if the spatial analysis done in `PySAL` is correct, or if there are any errors present in the data used.
66 |
67 | Second, all visualizations can be found and called using a `splot.'PySAL_submodule'` namespace, depending on the previously analysed object that needs to be visualized (e.g. `splot.giddy`). Directly calling visualizations through `splot` has the advantage of extending users' spatial analysis workflows with more general cartographic and visual methods in `splot.mapping`. One example of this is a Value-by-Alpha [@roth2010vba] (VBA) map, a multivariate choropleth mapping method useful for visualizing geographic data with uncertainty or for visually comparing characteristics of populations of varying sizes. A conventional workflow could look like this: after cleaning and preparing data, a `PySAL` Local Moran object is created that estimates whether crimes tend to cluster around one another or disperse far from one another. In order to assess whether the occurrences of crime in the neighborhoods of Columbus, Ohio, USA, are clustered (or, *spatially autocorrelated*), Local Indicators of Spatial Autocorrelation (LISA) hot and cold spots, Moran's I scatterplots, and a choropleth map can quickly be created to provide visual analysis (see fig. 1).
68 |
69 | ```python
70 | from splot.esda import plot_local_autocorrelation
71 | plot_local_autocorrelation(moran_loc, gdf, 'Crime')
72 | plt.show()
73 | ```
74 |
75 |
76 | 
77 |
78 | The user can now further visually assess whether there is dependency between high crime rates (fig. 2, rgb variable) and high income in this neighborhood (fig. 2, alpha variable). Darker shades of the colormap correspond to higher crime and income values, displayed through a static Value-by-Alpha Choropleth using `splot.mapping.vba_choropleth`.
79 |
80 |
81 | ```python
82 | fig = plt.figure(figsize=(10,10))
83 | ax = fig.add_subplot(111)
84 | vba_choropleth(x, y, gdf,
85 |                alpha_mapclassify=dict(classifier='quantiles', k=5),
86 |                rgb_mapclassify=dict(classifier='quantiles', k=5),
87 | cmap='Blues',
88 | legend=True, divergent=True, ax=ax)
89 | plt.show()
90 | ```
91 |
92 |
93 | 
94 |
95 | Ultimately, the `splot` package is designed to facilitate the creation of both static plots ready for publication, and interactive visualizations for quick iteration and spatial data exploration. Although most of `splot` is currently implemented with a `matplotlib` backend, `splot` is framework independent. In that sense, `splot` offers a "grammar" of views that are important and useful in spatial analyses and geographic data science. The `splot` package is not restricted or limited to the current `matplotlib` implementation and can be advanced by integrating emerging or succeeding interactive visualization toolkits, such as `altair` or `bokeh`.
96 |
97 | In conclusion, `splot` tightly connects visual analytics with statistical analysis and facilitates the integration of spatial analytics into more general Python workflows through its compatibility with integrated code-based environments like Jupyter. From spatial autocorrelation analysis to Value-by-Alpha choropleths, `splot` is designed as a grammar of views that can be applied to a multitude of spatial analysis workflows. As `splot` developers, we strive to expand `splot`'s grammar of views through new functionality (e.g. in flow mapping methods), as well as to provide different backend implementations, including interactive backends such as `bokeh`, in the future.
98 |
99 | # Acknowledgements
100 |
101 | We acknowledge contributions from, and thank, all our users for reporting bugs, raising issues, and suggesting changes to `splot`'s API. Thank you, Joris Van den Bossche and the `geopandas` team, for timing releases in accordance with `splot` developments. Thank you, Rebecca Bilbro and Benjamin Bengfort, for sharing your insights into how to structure and build APIs for visualizations. Thank you, Ralf Gommers, for guidance on how to design library code for easy maintainability.
102 |
103 | # References
104 |
--------------------------------------------------------------------------------
/splot/tests/test_viz_esda_mpl.py:
--------------------------------------------------------------------------------
1 | import geopandas as gpd
2 | import libpysal as lp
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import pytest
6 | from esda.moran import Moran, Moran_BV, Moran_BV_matrix, Moran_Local, Moran_Local_BV
7 | from libpysal import examples
8 | from libpysal.weights.contiguity import Queen
9 |
10 | from splot._viz_esda_mpl import (
11 | _moran_bv_scatterplot,
12 | _moran_global_scatterplot,
13 | _moran_loc_bv_scatterplot,
14 | _moran_loc_scatterplot,
15 | )
16 | from splot.esda import (
17 | lisa_cluster,
18 | moran_facet,
19 | moran_scatterplot,
20 | plot_local_autocorrelation,
21 | plot_moran,
22 | plot_moran_bv,
23 | plot_moran_bv_simulation,
24 | plot_moran_simulation,
25 | )
26 |
27 |
28 | def _test_data():
29 | guerry = examples.load_example("Guerry")
30 | link_to_data = guerry.get_path("guerry.shp")
31 | gdf = gpd.read_file(link_to_data)
32 | return gdf
33 |
34 |
35 | def _test_data_columbus():
36 | columbus = examples.load_example("Columbus")
37 | link_to_data = columbus.get_path("columbus.shp")
38 | df = gpd.read_file(link_to_data)
39 | return df
40 |
41 |
42 | def _test_LineString():
43 | link_to_data = examples.get_path("streets.shp")
44 | gdf = gpd.read_file(link_to_data)
45 | return gdf
46 |
47 |
48 | def test_moran_scatterplot():
49 | gdf = _test_data()
50 | x = gdf["Suicids"].values
51 | y = gdf["Donatns"].values
52 | w = Queen.from_dataframe(gdf)
53 | w.transform = "r"
54 |
55 | # Calculate `esda.moran` Objects
56 | moran = Moran(y, w)
57 | moran_bv = Moran_BV(y, x, w)
58 | moran_loc = Moran_Local(y, w)
59 | moran_loc_bv = Moran_Local_BV(y, x, w)
60 |
61 | # try with p value so points are colored or warnings apply
62 | with pytest.warns(UserWarning, match="`p` is only used for plotting"):
63 | fig, _ = moran_scatterplot(moran, p=0.05, aspect_equal=False)
64 | plt.close(fig)
65 |
66 | fig, _ = moran_scatterplot(moran_loc, p=0.05)
67 | plt.close(fig)
68 |
69 | with pytest.warns(UserWarning, match="`p` is only used for plotting"):
70 | fig, _ = moran_scatterplot(moran_bv, p=0.05)
71 | plt.close(fig)
72 |
73 | fig, _ = moran_scatterplot(moran_loc_bv, p=0.05)
74 | plt.close(fig)
75 |
76 |
77 | def test_moran_global_scatterplot():
78 | # Load data and apply statistical analysis
79 | gdf = _test_data()
80 | y = gdf["Donatns"].values
81 | w = Queen.from_dataframe(gdf)
82 | w.transform = "r"
83 |     # Calc Global Moran
85 | moran = Moran(y, w)
86 | # plot
87 | fig, _ = _moran_global_scatterplot(moran)
88 | plt.close(fig)
89 | # customize
90 | fig, _ = _moran_global_scatterplot(
91 | moran, zstandard=False, aspect_equal=False, fitline_kwds=dict(color="#4393c3")
92 | )
93 | plt.close(fig)
94 |
95 |
96 | def test_plot_moran_simulation():
97 | # Load data and apply statistical analysis
98 | gdf = _test_data()
99 | y = gdf["Donatns"].values
100 | w = Queen.from_dataframe(gdf)
101 | w.transform = "r"
102 |     # Calc Global Moran
104 | moran = Moran(y, w)
105 | # plot
106 | fig, _ = plot_moran_simulation(moran)
107 | plt.close(fig)
108 | # customize
109 | fig, _ = plot_moran_simulation(moran, fitline_kwds=dict(color="#4393c3"))
110 | plt.close(fig)
111 |
112 |
113 | def test_plot_moran():
114 | # Load data and apply statistical analysis
115 | gdf = _test_data()
116 | y = gdf["Donatns"].values
117 | w = Queen.from_dataframe(gdf)
118 | w.transform = "r"
119 |     # Calc Global Moran
121 | moran = Moran(y, w)
122 | # plot
123 | fig, _ = plot_moran(moran)
124 | plt.close(fig)
125 | # customize
126 | fig, _ = plot_moran(
127 | moran, zstandard=False, aspect_equal=False, fitline_kwds=dict(color="#4393c3")
128 | )
129 | plt.close(fig)
130 |
131 |
132 | def test_moran_bv_scatterplot():
133 | gdf = _test_data()
134 | x = gdf["Suicids"].values
135 | y = gdf["Donatns"].values
136 | w = Queen.from_dataframe(gdf)
137 | w.transform = "r"
138 | # Calculate Bivariate Moran
139 | moran_bv = Moran_BV(x, y, w)
140 | # plot
141 | fig, _ = _moran_bv_scatterplot(moran_bv)
142 | plt.close(fig)
143 | # customize plot
144 | fig, _ = _moran_bv_scatterplot(
145 | moran_bv, aspect_equal=False, fitline_kwds=dict(color="#4393c3")
146 | )
147 | plt.close(fig)
148 |
149 |
150 | def test_plot_moran_bv_simulation():
151 | # Load data and calculate weights
152 | gdf = _test_data()
153 | x = gdf["Suicids"].values
154 | y = gdf["Donatns"].values
155 | w = Queen.from_dataframe(gdf)
156 | w.transform = "r"
157 | # Calculate Bivariate Moran
158 | moran_bv = Moran_BV(x, y, w)
159 | # plot
160 | fig, _ = plot_moran_bv_simulation(moran_bv)
161 | plt.close(fig)
162 | # customize plot
163 | fig, _ = plot_moran_bv_simulation(
164 | moran_bv, aspect_equal=False, fitline_kwds=dict(color="#4393c3")
165 | )
166 | plt.close(fig)
167 |
168 |
169 | def test_plot_moran_bv():
170 | # Load data and calculate weights
171 | gdf = _test_data()
172 | x = gdf["Suicids"].values
173 | y = gdf["Donatns"].values
174 | w = Queen.from_dataframe(gdf)
175 | w.transform = "r"
176 | # Calculate Bivariate Moran
177 | moran_bv = Moran_BV(x, y, w)
178 | # plot
179 | fig, _ = plot_moran_bv(moran_bv)
180 | plt.close(fig)
181 | # customize plot
182 | fig, _ = plot_moran_bv(
183 | moran_bv, aspect_equal=False, fitline_kwds=dict(color="#4393c3")
184 | )
185 | plt.close(fig)
186 |
187 |
188 | def test_moran_loc_scatterplot():
189 | df = _test_data_columbus()
190 |
191 | x = df["INC"].values
192 | y = df["HOVAL"].values
193 | w = Queen.from_dataframe(df)
194 | w.transform = "r"
195 |
196 | moran_loc = Moran_Local(y, w)
197 | moran_bv = Moran_BV(x, y, w)
198 |
199 | # try without p value
200 | fig, _ = _moran_loc_scatterplot(moran_loc)
201 | plt.close(fig)
202 |
203 | # try with p value and different figure size
204 | fig, _ = _moran_loc_scatterplot(
205 | moran_loc, p=0.05, aspect_equal=False, fitline_kwds=dict(color="#4393c3")
206 | )
207 | plt.close(fig)
208 |
209 | # try with p value and zstandard=False
210 | fig, _ = _moran_loc_scatterplot(
211 | moran_loc, p=0.05, zstandard=False, fitline_kwds=dict(color="#4393c3")
212 | )
213 | plt.close(fig)
214 |
215 | # try without p value and zstandard=False
216 | fig, _ = _moran_loc_scatterplot(
217 | moran_loc, zstandard=False, fitline_kwds=dict(color="#4393c3")
218 | )
219 | plt.close(fig)
220 |
221 | pytest.raises(ValueError, _moran_loc_scatterplot, moran_bv, p=0.5)
222 | pytest.warns(
223 | UserWarning,
224 | _moran_loc_scatterplot,
225 | moran_loc,
226 | p=0.5,
227 | scatter_kwds=dict(c="#4393c3"),
228 | )
229 |
230 |
231 | def _test_calc_moran_loc(gdf, var="HOVAL"):
232 | y = gdf[var].values
233 | w = Queen.from_dataframe(gdf)
234 | w.transform = "r"
235 |
236 | moran_loc = Moran_Local(y, w)
237 | return moran_loc
238 |
239 |
240 | def test_lisa_cluster():
241 | df = _test_data_columbus()
242 | moran_loc = _test_calc_moran_loc(df)
243 |
244 | fig, _ = lisa_cluster(moran_loc, df)
245 | plt.close(fig)
246 |
247 | # test LineStrings
248 | df_line = _test_LineString()
249 | moran_loc = _test_calc_moran_loc(df_line, var="Length")
250 |
251 | fig, _ = lisa_cluster(moran_loc, df_line)
252 | plt.close(fig)
253 |
254 |
255 | def test_plot_local_autocorrelation():
256 | df = _test_data_columbus()
257 | moran_loc = _test_calc_moran_loc(df)
258 |
259 | fig, _ = plot_local_autocorrelation(moran_loc, df, "HOVAL", p=0.05)
260 | plt.close(fig)
261 |
262 | # also test with quadrant and mask
263 | with pytest.warns(UserWarning, match="Values in `mask` are not the same dtype"):
264 | fig, _ = plot_local_autocorrelation(
265 | moran_loc,
266 | df,
267 | "HOVAL",
268 | p=0.05,
269 | region_column="POLYID",
270 | aspect_equal=False,
271 | mask=["1", "2", "3"],
272 | quadrant=1,
273 | )
274 | plt.close(fig)
275 |
276 | # also test with quadrant and mask
277 | with pytest.warns(UserWarning, match="Values in `mask` are not the same dtype"):
278 | pytest.raises(
279 | ValueError,
280 | plot_local_autocorrelation,
281 | moran_loc,
282 | df,
283 | "HOVAL",
284 | p=0.05,
285 | region_column="POLYID",
286 | mask=["100", "200", "300"],
287 | quadrant=1,
288 | )
289 |
290 |
291 | def test_moran_loc_bv_scatterplot():
292 | gdf = _test_data()
293 | x = gdf["Suicids"].values
294 | y = gdf["Donatns"].values
295 | w = Queen.from_dataframe(gdf)
296 | w.transform = "r"
297 | # Calculate Univariate and Bivariate Moran
298 | moran_loc = Moran_Local(y, w)
299 | moran_loc_bv = Moran_Local_BV(x, y, w)
300 | # try with p value so points are colored
301 | fig, _ = _moran_loc_bv_scatterplot(moran_loc_bv)
302 | plt.close(fig)
303 |
304 | # try with p value and different figure size
305 | fig, _ = _moran_loc_bv_scatterplot(moran_loc_bv, p=0.05, aspect_equal=False)
306 | plt.close(fig)
307 |
308 | pytest.raises(ValueError, _moran_loc_bv_scatterplot, moran_loc, p=0.5)
309 | pytest.warns(
310 | UserWarning,
311 | _moran_loc_bv_scatterplot,
312 | moran_loc_bv,
313 | p=0.5,
314 | scatter_kwds=dict(c="r"),
315 | )
316 |
317 |
318 | def test_moran_facet():
319 | sids2 = examples.load_example("sids2")
320 | f = lp.io.open(sids2.get_path("sids2.dbf"))
321 | varnames = ["SIDR74", "SIDR79", "NWR74", "NWR79"]
322 | vars = [np.array(f.by_col[var]) for var in varnames]
323 | w = lp.io.open(examples.get_path("sids2.gal")).read()
324 | # calculate moran matrix
325 | moran_matrix = Moran_BV_matrix(vars, w, varnames=varnames)
326 | # plot
327 | fig, axarr = moran_facet(moran_matrix)
328 | plt.close(fig)
329 | # customize
330 | fig, axarr = moran_facet(
331 | moran_matrix, scatter_glob_kwds=dict(color="r"), fitline_bv_kwds=dict(color="y")
332 | )
333 | plt.close(fig)
334 |
--------------------------------------------------------------------------------
/paper/paper.bib:
--------------------------------------------------------------------------------
1 | @book{johnson2007ghost,
2 | address = {London},
3 | edition = {Reprint edition},
4 | title = {The {{Ghost Map}}: {{The Story}} of {{London}}'s {{Most Terrifying Epidemic}}--and {{How It Changed Science}}, {{Cities}}, and the {{Modern World}}},
5 | isbn = {978-1-59448-269-4},
6 | shorttitle = {The {{Ghost Map}}},
7 | abstract = {A National Bestseller, a New York Times Notable Book, and an Entertainment Weekly Best Book of the Year It's the summer of 1854, and London is just emerging as one of the first modern cities in the world. But lacking the infrastructure-garbage removal, clean water, sewers-necessary to support its rapidly expanding population, the city has become the perfect breeding ground for a terrifying disease no one knows how to cure. As the cholera outbreak takes hold, a physician and a local curate are spurred to action-and ultimately solve the most pressing medical riddle of their time. In a triumph of multidisciplinary thinking, Johnson illuminates the intertwined histories of the spread of disease, the rise of cities, and the nature of scientific inquiry, offering both a riveting history and a powerful explanation of how it has shaped the world we live in.},
8 | language = {English},
9 | publisher = {{Riverhead Books}},
10 | author = {Johnson, Steven},
11 | month = oct,
12 | year = {2007},
13 | doi = {10.1080/01944360802146329}
14 | }
15 |
16 | @incollection{arribas-bel2017looking,
17 | address = {Cham},
18 | series = {Advances in {{Spatial Science}}},
19 | title = {Looking at {{John Snow}}'s {{Cholera Map}} from the {{Twenty First Century}}: {{A Practical Primer}} on {{Reproducibility}} and {{Open Science}}},
20 | isbn = {978-3-319-50590-9},
21 | shorttitle = {Looking at {{John Snow}}'s {{Cholera Map}} from the {{Twenty First Century}}},
22 | abstract = {This chapter (This manuscript is a chapter version of the original document, which is a reproducible online notebook. The entire, version-controlled project can be found online at: https://bitbucket.org/darribas/reproducible\_john\_snow.) presents an entirely reproducible spatial analysis of the classic John Snow's map of the 1854 cholera epidemic in London. The analysis draws on many of the techniques most commonly used by regional scientists, such as choropleth mapping, spatial autocorrelation, and point pattern analysis. In doing so, the chapter presents a practical roadmap for performing a completely open and reproducible analysis in regional science. In particular, we deal with the automation of (1) synchronizing code and text, (2) presenting results in figures and tables, and (3) generating reference lists. In addition, we discuss the significant added value of version control systems and their role in enhancing transparency through public, open repositories. With this chapter, we aim to practically illustrate a set of principles and techniques that facilitate transparency and reproducibility in empirical research, both keys to the health and credibility of regional science in the next 50 years to come.},
23 | language = {en},
24 | booktitle = {Regional {{Research Frontiers}} - {{Vol}}. 2: {{Methodological Advances}}, {{Regional Systems Modeling}} and {{Open Sciences}}},
25 | publisher = {{Springer International Publishing}},
26 | author = {{Arribas-Bel}, Daniel and {de Graaff}, Thomas and Rey, Sergio J.},
27 | editor = {Jackson, Randall and Schaeffer, Peter},
28 | year = {2017},
29 | keywords = {Regional Science,Spatial Autocorrelation,Spatial Outlier,Spatial Weight Matrix,Street Segment},
30 |   pages = {283--306},
31 | doi = {10.1007/978-3-319-50590-9_17}
32 | }
33 |
34 | @book{tufte2001visual,
35 | title = {The Visual Display of Quantitative Information},
36 | publisher = {{Graphics Press Cheshire, CT, USA}},
37 | author = {Tufte, E. R.},
38 | year = {2001}
39 | }
40 |
41 | @article{goodchild2007citizen,
42 |   title = {Citizens as Sensors: The World of Volunteered Geography},
43 | volume = {69},
44 | journal = {GeoJournal},
45 | author = {Goodchild, Michael F},
46 | year = {2007},
47 | keywords = {volunteer geographic information},
48 | pages = {211--221}
49 | }
50 |
51 | @article{arribas-bel2014accidental,
52 | title = {Accidental, Open and Everywhere: {{Emerging}} Data Sources for the Understanding of Cities},
53 | volume = {49},
54 | issn = {01436228},
55 | shorttitle = {Accidental, Open and Everywhere},
56 | abstract = {In this paper, I review the recent emergence of three groups of data sources and assess some of the opportunities and challenges they pose for the understanding of cities, particularly in the context of the Regional Science and urban research agenda. These are data collected from mobile sensors carried by individuals, data derived from businesses moving their activity online and government data released in an open format. Although very different from each other, they are all becoming available as a side-effect since they were created with different purposes but their degree of popularity, pervasiveness and ease of access is turning them into interesting alternatives for researchers. Existing projects and initiatives that conform to each class are featured as illustrative examples of these new potential sources of knowledge. \'O 2013 Elsevier Ltd. All rights reserved.},
57 | language = {en},
58 | journal = {Applied Geography},
59 | doi = {10.1016/j.apgeog.2013.09.012},
60 | author = {{Arribas-Bel}, Daniel},
61 | month = may,
62 | year = {2014},
63 |   pages = {45--53}
65 | }
66 |
67 | @article{anselin1988spatial,
68 | title = {Do Spatial Effects Really Matter in Regression Analysis?},
69 | volume = {65},
70 |   journal = {Papers in Regional Science},
71 | author = {Anselin, L. and Griffith, Daniel A},
72 | year = {1988},
73 | keywords = {Spatial dependence},
74 | pages = {11--34},
75 | doi = {10.1111/j.1435-5597.1988.tb01155.x}
76 | }
77 |
78 | @book{anselin2014modern,
79 | address = {Chicago, IL},
80 | title = {Modern {{Spatial Econometrics}} in {{Practice}}, a {{Guide}} to {{GeoDa}}, {{GeoDaSpace}}, and {{PySAL}}},
81 | publisher = {{GeoDa Press}},
82 | author = {Anselin, Luc and Rey, Sergio J.},
83 | year = {2014}
84 | }
85 |
86 | @article{rey2009show,
87 | title = {Show Me the Code: Spatial Analysis and Open Source},
88 | volume = {11},
89 | issn = {1435-5930, 1435-5949},
90 | shorttitle = {Show Me the Code},
91 | language = {en},
92 | number = {2},
93 | journal = {Journal of Geographical Systems},
94 | doi = {10.1007/s10109-009-0086-8},
95 | author = {Rey, Sergio J.},
96 | month = jun,
97 | year = {2009},
98 |   pages = {191--207}
100 | }
101 |
102 | @incollection{rey2018code,
103 | address = {Cham},
104 | series = {Advances in {{Geographic Information Science}}},
105 | title = {Code as {{Text}}: {{Open Source Lessons}} for {{Geospatial Research}} and {{Education}}},
106 | isbn = {978-3-319-59511-5},
107 | shorttitle = {Code as {{Text}}},
108 | abstract = {This chapter examines the potential opportunities that open source offers for research and education in spatial analysis. Drawing on lessons learned in the development of PySAL: Python Library for Spatial Analysis, it touches on the opportunities and challenges related to the adoption of open source practices and culture. While open source has had major impacts on pedagogy and research in spatial analysis, these are somewhat under-appreciated and at times seen as separate spheres. A central argument is that a mind shift is required that comes to see code not just as a tool for doing research, but rather to view code as text in the sense it becomes an object of research. The chapter reconsiders open source spatial analysis teaching and research from this lens of code as text.},
109 | language = {en},
110 | booktitle = {{{GeoComputational Analysis}} and {{Modeling}} of {{Regional Systems}}},
111 | publisher = {{Springer International Publishing}},
112 | author = {Rey, Sergio J.},
113 | editor = {Thill, Jean-Claude and Dragicevic, Suzana},
114 | year = {2018},
115 |   pages = {7--21},
116 | doi = {10.1007/978-3-319-59511-5_2}
117 | }
118 |
119 | @techreport{ucgis2019geographic,
120 | title = {Geographic {{Information Science}} and {{Technology Body}} of {{Knowledge}}},
121 | author = {{University Consortium of Geographic Information Science}},
122 |   year = {2019}
124 | }
125 |
126 | @misc{kelsey_jordahl_2019_3333010,
127 | author = {Kelsey Jordahl and
128 | Joris Van den Bossche and
129 | Jacob Wasserman and
130 | James McBride and
131 | Jeffrey Gerard and
132 | Jeff Tratner and
133 | Matthew Perry and
134 | Carson Farmer and
135 | Sean Gillies and
136 | Micah Cochran and
137 | Matt Bartos and
138 | Martin Fleischmann and
139 | Lucas Culbertson and
140 | Nick Eubank and
141 | maxalbert and
142 | Aleksey Bilogur and
143 | Geir Arne Hjelle and
144 | Dani Arribas-Bel and
145 | Christopher Ren and
146 | Sergio Rey and
147 | Martin Journois and
148 | Levi John Wolf and
149 | Nick Grue and
150 | Joshua Wilson and
151 | \"Omer \"Ozak and
152 | Yuichi Notoya},
153 | title = {geopandas/geopandas: v0.5.1},
154 | month = jul,
155 | year = 2019,
156 | doi = {10.5281/zenodo.3333010},
157 | url = {https://doi.org/10.5281/zenodo.3333010}
158 | }
159 |
160 | @misc{aleksey_bilogur_2019_3475569,
161 | author = {Aleksey Bilogur and
162 | Aneesh Karve and
163 | Luis Marsano and
164 | Martin Fleischmann},
165 | title = {ResidentMario/geoplot 0.3.3},
166 | month = oct,
167 | year = 2019,
168 | doi = {10.5281/zenodo.3475569},
169 | url = {https://doi.org/10.5281/zenodo.3475569}
170 | }
171 |
172 | @article{roth2010vba,
173 | author = {Robert E Roth and Andrew W Woodruff and Zachary F Johnson},
174 | title = {Value-by-alpha maps: {An} alternative to the cartogram},
175 | journal = {The Cartographic Journal},
176 | year = {2010},
177 | volume = 47,
178 | issue = 2,
179 | pages = {130--140},
180 | doi = {10.1179/000870409X12488753453372}
181 | }
182 |
--------------------------------------------------------------------------------
/splot/_viz_utils.py:
--------------------------------------------------------------------------------
1 | import mapclassify as classify
2 | import matplotlib
3 | import matplotlib as mpl
4 | import numpy as np
5 | from packaging.version import Version
6 |
7 | # isolate MPL version - GH#162
8 | MPL_36 = Version(matplotlib.__version__) >= Version("3.6")
9 | if MPL_36:
10 | from matplotlib import colormaps as cm
11 | else:
12 | import matplotlib.cm as cm
13 | import matplotlib.pyplot as plt
14 |
15 |
16 | """
17 | Utility functions for lightweight visualizations in splot
18 | """
19 |
20 | __author__ = "Stefanie Lumnitz"
21 |
22 |
23 | def moran_hot_cold_spots(moran_loc, p=0.05):
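    # moran_loc.q holds each observation's Moran scatterplot quadrant
    # (1 = HH, 2 = LH, 3 = LL, 4 = HL) and moran_loc.p_sim its pseudo
    # p-value; significant observations keep their quadrant code,
    # non-significant observations are coded 0 ('ns')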
24 | sig = 1 * (moran_loc.p_sim < p)
25 | HH = 1 * (sig * moran_loc.q == 1)
26 | LL = 3 * (sig * moran_loc.q == 3)
27 | LH = 2 * (sig * moran_loc.q == 2)
28 | HL = 4 * (sig * moran_loc.q == 4)
29 | cluster = HH + LL + LH + HL
30 | return cluster
31 |
32 |
33 | def mask_local_auto(moran_loc, p=0.5):
34 | """
35 | Create Mask for coloration and labeling of local spatial autocorrelation
36 |
37 | Parameters
38 | ----------
39 |     moran_loc : esda.moran.Moran_Local instance
40 |         Values of Moran's I Local Autocorrelation Statistic.
41 | p : float
42 | The p-value threshold for significance. Points will
43 | be colored by significance.
44 |
45 | Returns
46 | -------
47 | cluster_labels : list of str
48 | List of labels - ['ns', 'HH', 'LH', 'LL', 'HL']
49 | colors5 : list of str
50 | List of colours - ['#d7191c', '#fdae61', '#abd9e9',
51 | '#2c7bb6', 'lightgrey']
52 |     colors : array of str
53 |         Array containing coloration for each input value/shape.
54 |     labels : list of str
55 |         List of labels for each attribute value/polygon.
56 | """
57 | # create a mask for local spatial autocorrelation
58 | cluster = moran_hot_cold_spots(moran_loc, p)
59 |
60 | cluster_labels = ["ns", "HH", "LH", "LL", "HL"]
61 | labels = [cluster_labels[i] for i in cluster]
62 |
63 | colors5 = {0: "lightgrey", 1: "#d7191c", 2: "#abd9e9", 3: "#2c7bb6", 4: "#fdae61"}
64 | colors = [colors5[i] for i in cluster] # for Bokeh
65 | # for MPL, keeps colors even if clusters are missing:
66 | x = np.array(labels)
67 | y = np.unique(x)
68 | colors5_mpl = {
69 | "HH": "#d7191c",
70 | "LH": "#abd9e9",
71 | "LL": "#2c7bb6",
72 | "HL": "#fdae61",
73 | "ns": "lightgrey",
74 | }
75 | colors5 = [colors5_mpl[i] for i in y] # for mpl
76 |
77 | # HACK need this, because MPL sorts these labels while Bokeh does not
78 | cluster_labels.sort()
79 | return cluster_labels, colors5, colors, labels
80 |
81 |
82 | _classifiers = {
83 | "box_plot": classify.BoxPlot,
84 | "equal_interval": classify.EqualInterval,
85 | "fisher_jenks": classify.FisherJenks,
86 | "headtail_breaks": classify.HeadTailBreaks,
87 | "jenks_caspall": classify.JenksCaspall,
88 | "jenks_caspall_forced": classify.JenksCaspallForced,
89 | "max_p_classifier": classify.MaxP,
90 | "maximum_breaks": classify.MaximumBreaks,
91 | "natural_breaks": classify.NaturalBreaks,
92 | "quantiles": classify.Quantiles,
93 | "percentiles": classify.Percentiles,
94 | "std_mean": classify.StdMean,
95 | "user_defined": classify.UserDefined,
96 | }
97 |
98 |
99 | def bin_values_choropleth(attribute_values, method="quantiles", k=5):
100 | """
101 | Create bins based on different classification methods.
102 | Needed for legend labels and Choropleth coloring.
103 |
104 | Parameters
105 | ----------
106 | attribute_values : array or geopandas.series instance
107 | Array containing relevant attribute values.
108 | method : str
109 | Classification method to be used. Options supported:
110 | * 'quantiles' (default)
111 |         * 'fisher_jenks'
112 |         * 'equal_interval'
113 |     k : int
114 |         Number of bins to assign values to. Default =5.
115 |
116 | Returns
117 | -------
118 | bin_values : mapclassify instance
119 | Object containing bin ids for each observation (.yb),
120 | upper bounds of each class (.bins), number of classes (.k)
121 |         and number of observations falling in each class (.counts)
122 | """
123 | if method not in ["quantiles", "fisher_jenks", "equal_interval"]:
124 | raise ValueError("Method {} not supported".format(method))
125 |
126 | bin_values = _classifiers[method](attribute_values, k)
127 | return bin_values
128 |
129 |
130 | def bin_labels_choropleth(gdf, attribute_values, method="quantiles", k=5):
131 | """
132 | Create labels for each bin in the legend
133 |
134 | Parameters
135 | ----------
136 | gdf : Geopandas dataframe
137 | Dataframe containing relevant shapes and attribute values.
138 | attribute_values : array or geopandas.series instance
139 | Array containing relevant attribute values.
140 | method : str, optional
141 | Classification method to be used. Options supported:
142 | * 'quantiles' (default)
143 | * 'fisher_jenks'
144 | * 'equal_interval'
145 | k : int, optional
146 | Number of bins to assign values to. Default k=5.
147 |
148 | Returns
149 | -------
150 | bin_labels : list of str
151 | List of labels, one for each bin. Note: per-row labels are also stored in ``gdf['labels_choro']``.
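
Examples
--------
A minimal sketch, assuming the bundled `columbus` example dataset:

>>> import geopandas as gpd
>>> from libpysal import examples
>>> from splot._viz_utils import bin_labels_choropleth
>>> gdf = gpd.read_file(examples.get_path('columbus.shp'))
>>> bin_labels = bin_labels_choropleth(gdf, gdf['HOVAL'].values, k=5)
>>> len(bin_labels)
5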
152 | """
153 | # Retrieve bin values from bin_values_choropleth()
154 | bin_values = bin_values_choropleth(attribute_values, method=method, k=k)
155 |
156 | # Extract bin ids (.yb) and upper bounds for each class (.bins)
157 | yb = bin_values.yb
158 | bins = bin_values.bins
159 |
160 | # Create short bin labels of the form "<upper bound" for the legend
161 | bin_edges = bins.tolist()
162 | bin_labels = []
163 | for i in range(k):
164 | bin_labels.append("<{:1.1f}".format(bin_edges[i]))
165 |
166 | # Attach the legend label of each observation to gdf as a new column
167 | labels = np.array([bin_labels[c] for c in yb])
168 | gdf["labels_choro"] = [str(l_) for l_ in labels]
169 | return bin_labels
170 |
171 |
172 | def add_legend(fig, labels, colors):
173 | """
174 | Add a legend to a figure given legend labels & colors.
175 |
176 | Parameters
177 | ----------
178 | fig : Bokeh Figure instance
179 | Figure instance labels should be generated for.
180 | labels : list of str
181 | Labels to use as legend entries.
182 | colors : Bokeh Palette instance
183 | Palette instance containing colours of choice.
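
Returns
-------
legend : Bokeh Legend instance
Legend instance that was added below the figure.

Examples
--------
A minimal sketch; the labels and hex colors are placeholders:

>>> from bokeh.plotting import figure
>>> from splot._viz_utils import add_legend
>>> fig = figure()
>>> legend = add_legend(fig, ['low', 'high'], ['#fee8c8', '#e34a33'])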
184 | """
185 | from bokeh.models import Legend
186 |
187 | # add labels to figure (workaround,
188 | # legend with geojsondatasource doesn't work,
189 | # see https://github.com/bokeh/bokeh/issues/5904)
190 | items = []
191 | for label, color in zip(labels, colors):
192 | patch = fig.patches(xs=[], ys=[], fill_color=color)
193 | items.append((label, [patch]))
194 |
195 | legend = Legend(
196 | items=items, location="top_left", margin=0, orientation="horizontal"
197 | )
198 | # optionally, glyph_width=10 and glyph_height=10 could be set here
199 | legend.label_text_font_size = "8pt"
200 | fig.add_layout(legend, "below")
201 | return legend
202 |
203 |
204 | def format_legend(values):
205 | """
206 | Helper returning sensible legend values (rescaled to thousands when large).
207 |
208 | Parameters
209 | ----------
210 | values : array
211 | Values plotted in legend.
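
Returns
-------
values : array
Input values, divided by 1000 if `in_thousand` is True.
in_thousand : bool
True if any input value exceeded 1000 and the values were rescaled.

Examples
--------
A minimal sketch with made-up numbers:

>>> import numpy as np
>>> from splot._viz_utils import format_legend
>>> values, in_thousand = format_legend(np.array([500, 1500]))
>>> in_thousand
True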
212 | """
213 | in_thousand = False
214 | if np.any(values > 1000):
215 | in_thousand = True
216 | values = values / 1000
217 | return values, in_thousand
218 |
219 |
220 | def calc_data_aspect(plot_height, plot_width, bounds):
221 | # Deal with data ranges in Bokeh:
222 | # make a meter in x and a meter in y the same in pixel lengths
223 | aspect_box = plot_height / plot_width # 2 / 1 = 2
224 | xmin, ymin, xmax, ymax = bounds
225 | x_range = xmax - xmin # 1 = 1 - 0
226 | y_range = ymax - ymin # 3 = 3 - 0
227 | aspect_data = y_range / x_range # 3 / 1 = 3
228 | if aspect_data > aspect_box:
229 | # we need to increase x_range,
230 | # such that aspect_data becomes equal to aspect_box
231 | halfrange = 0.5 * x_range * (aspect_data / aspect_box - 1)
232 | # 0.5 * 1 * (3 / 2 - 1) = 0.25
233 | xmin -= halfrange # 0 - 0.25 = -0.25
234 | xmax += halfrange # 1 + 0.25 = 1.25
235 | else:
236 | # we need to increase y_range
237 | halfrange = 0.5 * y_range * (aspect_box / aspect_data - 1)
238 | ymin -= halfrange
239 | ymax += halfrange
240 |
241 | # Add a bit of margin to both x and y, computed from the original
242 | # ranges so the lower-bound shift does not inflate the upper-bound shift
243 | margin = 0.03
244 | x_margin, y_margin = (xmax - xmin) / 2 * margin, (ymax - ymin) / 2 * margin
245 | xmin, xmax = xmin - x_margin, xmax + x_margin
246 | ymin, ymax = ymin - y_margin, ymax + y_margin
247 | return xmin, xmax, ymin, ymax
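# Illustrative sketch with made-up numbers: for a square 500x500 px figure
# and bounds (0, 0, 1, 3), aspect_data (3) exceeds aspect_box (1), so the
# x-range is widened from 1 to 3 before the 3% margin is applied:
# calc_data_aspect(500, 500, (0, 0, 1, 3)) -> (-1.045, 2.045, -0.045, 3.045)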
248 |
249 |
250 | # Utility functions for colormaps
251 | # Color design
252 | splot_colors = dict(moran_base="#bababa", moran_fit="#d6604d")
253 |
254 | # Utility function #1 - forces continuous diverging colormap to be centered at zero
255 | def shift_colormap( # noqa E302
256 | cmap, start=0, midpoint=0.5, stop=1.0, name="shiftedcmap"
257 | ):
258 | """
259 | Function to offset the "center" of a colormap. Useful for
260 | data with a negative minimum and positive maximum, when you
261 | want the middle of the colormap's dynamic range to be at zero.
262 |
263 | Parameters
264 | ----------
265 | cmap : str or matplotlib.cm instance
266 | colormap to be altered
267 | start : float, optional
268 | Offset from lowest point in the colormap's range.
269 | Should be between 0.0 and `midpoint`.
270 | Default =0.0 (no lower offset).
271 | midpoint : float, optional
272 | The new center of the colormap. Should be between 0.0 and
273 | 1.0. In general, this should be 1 - vmax/(vmax + abs(vmin)).
274 | For example, if your data range from -15.0 to +5.0 and
275 | you want the center of the colormap at 0.0, `midpoint`
276 | should be set to 1 - 5/(5 + 15), or 0.75.
277 | Default =0.5 (no shift).
278 | stop : float, optional
279 | Offset from highest point in the colormap's range.
280 | Should be between `midpoint` and 1.0.
281 | Default =1.0 (no upper offset).
282 | name : str, optional
283 | Name of the new colormap.
284 |
285 | Returns
286 | -------
287 | new_cmap : A new colormap that has been shifted.
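
Examples
--------
A minimal sketch, reusing the -15.0 to +5.0 data range described
above (so `midpoint` = 1 - 5/(5 + 15) = 0.75):

>>> from splot._viz_utils import shift_colormap
>>> shifted = shift_colormap('RdBu', midpoint=0.75, name='shifted_RdBu')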
288 | """
289 | if isinstance(cmap, str):
290 | cmap = cm.get_cmap(cmap)
291 |
292 | cdict = {"red": [], "green": [], "blue": [], "alpha": []}
293 |
294 | # regular index to compute the colors
295 | reg_index = np.linspace(start, stop, 257)
296 |
297 | # shifted index to match the data
298 | shift_index = np.hstack(
299 | [
300 | np.linspace(0.0, midpoint, 128, endpoint=False),
301 | np.linspace(midpoint, 1.0, 129, endpoint=True),
302 | ]
303 | )
304 |
305 | for ri, si in zip(reg_index, shift_index):
306 | r, g, b, a = cmap(ri)
307 |
308 | cdict["red"].append((si, r, r))
309 | cdict["green"].append((si, g, g))
310 | cdict["blue"].append((si, b, b))
311 | cdict["alpha"].append((si, a, a))
312 |
313 | """
314 | new_cmap = mpl.colors.LinearSegmentedColormap(name, cdict)
315 | plt.register_cmap(cmap=new_cmap)
316 | return new_cmap
317 | """
318 |
319 | new_cmap = mpl.colors.LinearSegmentedColormap(name, cdict)
320 | if MPL_36:
321 | cm.register(new_cmap)
322 | else:
323 | plt.register_cmap(cmap=new_cmap)
324 | return new_cmap
325 |
326 |
327 | # Utility #2 - truncate colormap in order to grab only positive or negative portion
328 | def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
329 | """
330 | Function to truncate a colormap by selecting a subset of
331 | the original colormap's values
332 |
333 | Parameters
334 | ----------
335 | cmap : str or matplotlib.cm instance
336 | Colormap to be altered
337 | minval : float, optional
338 | Minimum value of the original colormap to include
339 | in the truncated colormap. Default =0.0.
340 | maxval : float, optional
341 | Maximum value of the original colormap to include in the truncated colormap. Default =1.0.
342 | n : int, optional
343 | Number of intervals between the min and max values
344 | for the gradient of the truncated colormap. Default =100.
345 |
346 | Returns
347 | -------
348 | new_cmap : A new colormap that has been truncated.
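
Examples
--------
A minimal sketch, keeping only the upper half of 'RdBu':

>>> from splot._viz_utils import truncate_colormap
>>> upper_half = truncate_colormap('RdBu', minval=0.5, maxval=1.0)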
349 | """
350 |
351 | if isinstance(cmap, str):
352 | cmap = cm.get_cmap(cmap)
353 |
354 | new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
355 | "trunc({n},{a:.2f},{b:.2f})".format(n=cmap.name, a=minval, b=maxval),
356 | cmap(np.linspace(minval, maxval, n)),
357 | )
358 | return new_cmap
359 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changes
2 |
3 | # Version 1.1.5 (2022-04-13)
4 |
5 | Minor patch release.
6 |
7 | - [BUG] set viz defaults for LineStrings in lisa_cluster (#140)
8 | - Import ABC from collections.abc for Python 3.10 compatibility. (#150)
9 |
10 | The following individuals contributed to this release:
11 |
12 | - Stefanie Lumnitz
13 | - James Gaboardi
14 | - Martin Fleischmann
15 | - Karthikeyan Singaravelan
16 |
17 | # Version 1.1.4 (2021-07-27)
18 |
19 | We closed a total of 39 issues (enhancements and bug fixes) through 12 pull requests, since our last release on 2020-03-23.
20 |
21 | ## Issues Closed
22 |
23 | - Streamline & upgrade CI (#135)
24 | - update conf.py (#134)
25 | - Migrating testing & coverage services (#124)
26 | - [MAINT] rename 'master' to 'main' (#121)
27 | - ipywidgets dependency (#130)
28 | - REF: make ipywidgets optional dependency (#132)
29 | - [WIP] update testing procedure with new datasets (#133)
30 | - MatplotlibDeprecationWarning from ax.spines[label].set_smart_bounds() (#115)
31 | - [DOC] include libpysal.example api changes & reinstall splot for testing (#128)
32 | - [MAINT] remove `.set_smart_bounds()` (#125)
33 | - Gha testing (#126)
34 | - GitHub Actions for continuous integration (#111)
35 | - [MAINT] change in`pandas.isin()` affecting `plot_local_autocorrelation` (#123)
36 | - [BUG] enforce dtype in `mask` in `plot_local_autocorrelation()` (#122)
37 | - [MAINT] AttributeError: 'NoneType' object has no attribute 'startswith' in all Moran plots (#117)
38 | - [BUG] 'color' and 'c' in `test_viz_giddy_mpl.test_dynamic_lisa_vectors` (#116)
39 | - [MAINT] update links to Guerry dataset in `_test_data()` (#119)
40 | - [BUG] Build failing due to change in Seaborn (#110)
41 | - [BUG] pin seaborn to v0.10.0 for testing new functionality (#114)
42 | - Topological colouring (#94)
43 | - vba_choropleth --> ValueError: Invalid RGBA argument: (#100)
44 | - Pyviz affiliation (#75)
45 | - BUG: Bokeh needed for testing (#107)
46 | - [JOSS] add Joss badge to README.md (#106)
47 | - [JOSS] doi reference correction (#105)
48 | - Fixing BibTeX entry pages. (#104)
49 | - Release1.1.3 (#103)
50 |
51 | ## Pull Requests
52 |
53 | - Streamline & upgrade CI (#135)
54 | - REF: make ipywidgets optional dependency (#132)
55 | - [DOC] include libpysal.example api changes & reinstall splot for testing (#128)
56 | - [MAINT] remove `.set_smart_bounds()` (#125)
57 | - Gha testing (#126)
58 | - [BUG] enforce dtype in `mask` in `plot_local_autocorrelation()` (#122)
59 | - [MAINT] update links to Guerry dataset in `_test_data()` (#119)
60 | - BUG: Bokeh needed for testing (#107)
61 | - [JOSS] add Joss badge to README.md (#106)
62 | - [JOSS] doi reference correction (#105)
63 | - Fixing BibTeX entry pages. (#104)
64 | - Release1.1.3 (#103)
65 |
66 | The following individuals contributed to this release:
67 |
68 | - Stefanie Lumnitz
69 | - James Gaboardi
70 | - Martin Fleischmann
71 | - Dani Arribas-Bel
72 | - Serge Rey
73 | - Arfon Smith
74 |
75 | # Version 1.1.3 (2020-03-18)
76 |
77 | We closed a total of 15 issues (enhancements and bug fixes) through 6 pull requests, since our last release on 2020-01-18.
78 |
79 | ## Issues Closed
80 |
81 | - add permanent links to current version of no's to joss paper (#102)
82 | - [BUG] set colors as list in _plot_choropleth_fig() (#101)
83 | - Remove the links around figures in the JOSS paper (#99)
84 | - Release prep for 1.1.2 (#98)
85 | - Installation instructions; pip install fails on macOS (#88)
86 | - Usage in readme is a fragment (#90)
87 | - JOSS: missing figure captions (#92)
88 | - [DOC] update installation instruction (#96)
89 | - [DOC] add example links to README.md & figure captions in joss article (#97)
90 |
91 | ## Pull Requests
92 |
93 | - add permanent links to current version of no's to joss paper (#102)
94 | - [BUG] set colors as list in _plot_choropleth_fig() (#101)
95 | - Remove the links around figures in the JOSS paper (#99)
96 | - Release prep for 1.1.2 (#98)
97 | - [DOC] update installation instruction (#96)
98 | - [DOC] add example links to README.md & figure captions in joss article (#97)
99 |
100 | The following individuals contributed to this release:
101 |
102 | - Stefanie Lumnitz
103 | - Levi John Wolf
104 | - Leonardo Uieda
105 | - Serge Rey
106 |
107 | # Version 1.1.2 (2020-01-18)
108 |
109 | We closed a total of 33 issues (enhancements and bug fixes) through 13 pull requests, since our last release on 2019-07-13.
110 |
111 | ## Issues Closed
112 |
113 | - Installation instructions; pip install fails on macOS (#88)
114 | - Usage in readme is a fragment (#90)
115 | - JOSS: missing figure captions (#92)
116 | - [DOC] update installation instruction (#96)
117 | - [DOC] add example links to README.md & figure captions in joss article (#97)
118 | - [BUG] vba_choropleth failure (#83)
119 | - BUG: Fix breakage due to mapclassify deprecation (#95)
120 | - addressing pysal/pysal#1145 & adapting testing examples (#93)
121 | - Fix docstring for plot_spatial_weights (#89)
122 | - JOSS paper submission (#59)
123 | - Fix format for multiple citations in JOSS paper (#87)
124 | - Joss paper, finalise title (#86)
125 | - [JOSS] work on `paper.md` (#62)
126 | - [ENH] change doc badge to latest doc (#85)
127 | - [BUG] require geopandas>=0.4.0,<=0.6.0rc1 for vba_choropleth testing (#84)
128 | - `plot_moran_simulation` weird dimensions (#82)
129 | - Colors are not fixed is LISA maps (#80)
130 | - Release 1.1.1 (#79)
131 | - add ipywidgets to requirements_dev.txt (#78)
132 | - add descartes to `requirements.txt` (#77)
133 |
134 | ## Pull Requests
135 |
136 | - [DOC] update installation instruction (#96)
137 | - [DOC] add example links to README.md & figure captions in joss article (#97)
138 | - BUG: Fix breakage due to mapclassify deprecation (#95)
139 | - addressing pysal/pysal#1145 & adapting testing examples (#93)
140 | - Fix docstring for plot_spatial_weights (#89)
141 | - Fix format for multiple citations in JOSS paper (#87)
142 | - Joss paper, finalise title (#86)
143 | - [JOSS] work on `paper.md` (#62)
144 | - [ENH] change doc badge to latest doc (#85)
145 | - [BUG] require geopandas>=0.4.0,<=0.6.0rc1 for vba_choropleth testing (#84)
146 | - Release 1.1.1 (#79)
147 | - add ipywidgets to requirements_dev.txt (#78)
148 | - add descartes to `requirements.txt` (#77)
149 |
150 | The following individuals contributed to this release:
151 |
152 | - Stefanie Lumnitz
153 | - Serge Rey
154 | - James Gaboardi
155 | - Martin Fleischmann
156 | - Leonardo Uieda
157 | - Levi John Wolf
158 | - Wei Kang
159 |
160 | # Version 1.1.1 (2019-07-13)
161 |
162 | We closed a total of 8 issues (enhancements and bug fixes) through 4 pull requests, since our last release on 2019-06-27.
163 |
164 | ## Issues Closed
165 |
166 | - add ipywidgets to requirements_dev.txt (#78)
167 | - add descartes to `requirements.txt` (#77)
168 | - [ENH] read long_description from README.md (#76)
169 | - Rel1.1.0 (#74)
170 |
171 | ## Pull Requests
172 |
173 | - add ipywidgets to requirements_dev.txt (#78)
174 | - add descartes to `requirements.txt` (#77)
175 | - [ENH] read long_description from README.md (#76)
176 | - Rel1.1.0 (#74)
177 |
178 | The following individuals contributed to this release:
179 |
180 | - Stefanie Lumnitz
181 | - Levi John Wolf
182 |
183 | # Version 1.1.0 (2019-06-27)
184 |
185 | We closed a total of 54 issues (enhancements and bug fixes) through 21 pull requests, since our last release on 2018-11-13.
186 |
187 | ## Issues Closed
188 |
189 | - LISA cluster map colours mixed when cluster value not present (#72)
190 | - [ENH] select colour by presence of value in list in `mask_local_auto` (#73)
191 | - Moran Scatterplots with equal bounds on X and Y axes? (#51)
192 | - Add aspect_equal argument to Moran functionality (#70)
193 | - set up dual travis tests for pysal dependencies (pip and github) (#69)
194 | - API changes of mapclassify propagate to splot (#65)
195 | - [DOC] include rtree and descartes in `requirements_dev.txt` (#68)
196 | - Readme update (#67)
197 | - docs building using readthedocs.yml version 2 (#64)
198 | - [DOC] add test for missing code cove % (#57)
199 | - Add tests for warnings and ValueErrors (#61)
200 | - Update travis for testing (#1)
201 | - travis ci testing: migrate from 3.5 and 3.6 to 3.6 and 3.7 (#63)
202 | - create paper directory (#58)
203 | - clean and rerun notebooks (#56)
204 | - `vba_choropleth` API (#45)
205 | - allow string (default) in vba_choropleth function of tests (#52)
206 | - migrating to readthedocs II (#54)
207 | - migration to readthedocs (#53)
208 | - Make docs (#46)
209 | - Segmentation fault in running tests on TravisCI (#47)
210 | - code 139 memory segmentation fault: RESOLVED (#48)
211 | - pip install on linux fails on pyproj (#41)
212 | - update archaic Miniconda build (#44)
213 | - adjusting markdown font (#43)
214 | - add `moran_facette` functionality and merge `esda.moran` plots to `moran_scatterplot` (#27)
215 | - (ENH) speed up plot_spatial_weights for plotting spatial weights (#42)
216 | - Travis testing against esda and giddy master branch (#31)
217 | - 1.0.0 Release (#40)
218 | - merge Sprint with master branch (#39)
219 | - Change documentation style (#38)
220 | - add travis build badge to README.md (#37)
221 | - fix current documentation for sprint (#36)
222 |
223 | ## Pull Requests
224 |
225 | - [ENH] select colour by presence of value in list in `mask_local_auto` (#73)
226 | - Add aspect_equal argument to Moran functionality (#70)
227 | - set up dual travis tests for pysal dependencies (pip and github) (#69)
228 | - Readme update (#67)
229 | - docs building using readthedocs.yml version 2 (#64)
230 | - Add tests for warnings and ValueErrors (#61)
231 | - travis ci testing: migrate from 3.5 and 3.6 to 3.6 and 3.7 (#63)
232 | - create paper directory (#58)
233 | - clean and rerun notebooks (#56)
234 | - allow string (default) in vba_choropleth function of tests (#52)
235 | - migrating to readthedocs II (#54)
236 | - migration to readthedocs (#53)
237 | - Make docs (#46)
238 | - code 139 memory segmentation fault: RESOLVED (#48)
239 | - update archaic Miniconda build (#44)
240 | - adjusting markdown font (#43)
241 | - (ENH) speed up plot_spatial_weights for plotting spatial weights (#42)
242 | - 1.0.0 Release (#40)
243 | - merge Sprint with master branch (#39)
244 | - Change documentation style (#38)
245 | - fix current documentation for sprint (#36)
246 |
247 | The following individuals contributed to this release:
248 |
249 | - Stefanie Lumnitz
250 | - Wei Kang
251 | - James Gaboardi
252 | - Renanxcortes
253 | - Dani Arribas-Bel
254 |
255 | # Version 1.0.0 (2018-11-30)
256 |
257 | We closed a total of 52 issues (enhancements and bug fixes) through 23 pull requests, since our last release on 2017-05-09.
258 |
259 | ## Issues Closed
260 |
261 | - merge Sprint with master branch (#39)
262 | - Change documentation style (#38)
263 | - add travis build badge to README.md (#37)
264 | - fix current documentation for sprint (#36)
265 | - `value_by_alpha` prototype (#28)
266 | - Clean up of current code base (#30)
267 | - Value By Alpha specification (#24)
268 | - nonplanar example update (#33)
269 | - add README.md (#29)
270 | - issues in some docstrings for giddy (#26)
271 | - debug `splot` documentation (#25)
272 | - collection of cleanups for`splot.giddy` (#23)
273 | - created `esda.moran.Moran_Local_BV` visualisations (#20)
274 | - add `esda.moran.Moran_BV` visualizations to `splot.esda` (#18)
275 | - add `seaborn` and `matplotlib` to `install_requirements` in `setup.py` (#19)
276 | - prototype `moran_scatterplot()`, `plot_moran_simulation()` and `plot_moran()` for `esda` (#17)
277 | - include utility functions `shift_colormap` and `truncate_colormap` (#15)
278 | - fix setup.py so files are installed with "pip install ." (#16)
279 | - `plot_spatial_weights` including network joins for `non_planar_joins` (#14)
280 | - adapting existing `esda` functionality to `splot.esda` namespace and allow `.plot()` method (#13)
281 | - adding license (#4)
282 | - add `giddy` dynamic LISA functionality under `splot.giddy` (#11)
283 | - start sphinx html documentation (#12)
284 | - add visualization option with significance to mplot (#7)
285 | - Visualising Local Autocorrelation (#8)
286 | - Copy new changes made to viz module into split (#5)
287 | - run 2to3 for splot (#6)
288 | - Fix for Pysal#930 (#3)
289 | - Add a Gitter chat badge to README.md (#2)
290 |
291 | ## Pull Requests
292 |
293 | - merge Sprint with master branch (#39)
294 | - Change documentation style (#38)
295 | - fix current documentation for sprint (#36)
296 | - `value_by_alpha` prototype (#28)
297 | - Clean up of current code base (#30)
298 | - add README.md (#29)
299 | - debug `splot` documentation (#25)
300 | - collection of cleanups for`splot.giddy` (#23)
301 | - created `esda.moran.Moran_Local_BV` visualisations (#20)
302 | - add `esda.moran.Moran_BV` visualizations to `splot.esda` (#18)
303 | - add `seaborn` and `matplotlib` to `install_requirements` in `setup.py` (#19)
304 | - prototype `moran_scatterplot()`, `plot_moran_simulation()` and `plot_moran()` for `esda` (#17)
305 | - include utility functions `shift_colormap` and `truncate_colormap` (#15)
306 | - fix setup.py so files are installed with "pip install ." (#16)
307 | - `plot_spatial_weights` including network joins for `non_planar_joins` (#14)
308 | - adapting existing `esda` functionality to `splot.esda` namespace and allow `.plot()` method (#13)
309 | - add `giddy` dynamic LISA functionality under `splot.giddy` (#11)
310 | - start sphinx html documentation (#12)
311 | - add visualization option with significance to mplot (#7)
312 | - Visualising Local Autocorrelation (#8)
313 | - run 2to3 for splot (#6)
314 | - Fix for Pysal#930 (#3)
315 | - Add a Gitter chat badge to README.md (#2)
316 |
317 | The following individuals contributed to this release:
318 |
319 | - Stefanie Lumnitz
320 | - Dani Arribas-Bel
321 | - Levi John Wolf
322 | - Serge Rey
323 | - Thequackdaddy
324 | - Jsignell
325 | - Serge
326 |
--------------------------------------------------------------------------------
/splot/_viz_value_by_alpha_mpl.py:
--------------------------------------------------------------------------------
1 | import collections.abc
2 |
3 | import matplotlib
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | from matplotlib import colors, patches
7 | from packaging.version import Version
8 |
9 | from ._viz_utils import _classifiers, format_legend
10 |
11 | # isolate MPL version - GH#162
12 | MPL_36 = Version(matplotlib.__version__) >= Version("3.6")
13 | if MPL_36:
14 | from matplotlib import colormaps as cm
15 | else:
16 | import matplotlib.cm as cm
17 |
18 |
19 | """
20 | Creating Maps with splot
21 | * Value-by-Alpha maps
22 | * Mapclassify wrapper
23 | * Color utilities
24 |
25 | TODO:
26 | * add Choropleth functionality with one input variable
27 | * merge all alpha keywords in one keyword dictionary
28 | for vba_choropleth
29 |
30 | """
31 |
32 | __author__ = "Stefanie Lumnitz "
33 |
34 |
35 | def value_by_alpha_cmap(x, y, cmap="GnBu", revert_alpha=False, divergent=False):
36 | """
37 | Calculates Value by Alpha rgba values
38 |
39 | Parameters
40 | ----------
41 | x : array
42 | Variable determined by color
43 | y : array
44 | Variable determining alpha value
45 | cmap : str or list of str
46 | Matplotlib Colormap or list of colors used
47 | to create vba_layer
48 | revert_alpha : bool, optional
49 | If True, high y values will have a
50 | low alpha and low values will be transparent.
51 | Default =False.
52 | divergent : bool, optional
53 | Creates a divergent alpha array with high values
54 | at the extremes and low, transparent values
55 | in the middle of the input values. Default =False.
56 |
57 | Returns
58 | -------
59 | rgba : ndarray (n,4)
60 | RGBA colormap, where the rgb color represents one
61 | attribute (x) and the alpha channel the other attribute (y)
62 | cmap : str or list of str
63 | Original Matplotlib Colormap or list of colors used
64 | to create vba_layer
65 |
66 | Examples
67 | --------
68 |
69 | Imports
70 |
71 | >>> from libpysal import examples
72 | >>> import geopandas as gpd
73 | >>> import matplotlib.pyplot as plt
74 | >>> import matplotlib
75 | >>> import numpy as np
76 | >>> from splot.mapping import value_by_alpha_cmap
77 |
78 | Load Example Data
79 |
80 | >>> link_to_data = examples.get_path('columbus.shp')
81 | >>> gdf = gpd.read_file(link_to_data)
82 | >>> x = gdf['HOVAL'].values
83 | >>> y = gdf['CRIME'].values
84 |
85 | Create rgba values
86 |
87 | >>> rgba, _ = value_by_alpha_cmap(x, y)
88 |
89 | Create divergent rgba and change Colormap
90 |
91 | >>> div_rgba, _ = value_by_alpha_cmap(x, y, cmap='seismic', divergent=True)
92 |
93 | Create rgba values with reverted alpha values
94 |
95 | >>> rev_rgba, _ = value_by_alpha_cmap(x, y, cmap='RdBu', revert_alpha=True)
96 |
97 | """
98 | # option for cmap or colorlist input
99 | if isinstance(cmap, str):
100 | cmap = cm.get_cmap(cmap)
101 | elif isinstance(cmap, collections.abc.Sequence):
102 | cmap = colors.LinearSegmentedColormap.from_list("newmap", cmap)
103 |
104 | rgba = cmap((x - x.min()) / (x.max() - x.min()))
105 | if revert_alpha:
106 | rgba[:, 3] = 1 - ((y - y.min()) / (y.max() - y.min()))
107 | else:
108 | rgba[:, 3] = (y - y.min()) / (y.max() - y.min())
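# for divergent maps: fold alpha around 0.5 and rescale to [0, 1], so both
# extremes are opaque and mid-range values become transparent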
109 | if divergent is not False:
110 | a_under_0p5 = rgba[:, 3] < 0.5
111 | rgba[a_under_0p5, 3] = 1 - rgba[a_under_0p5, 3]
112 | rgba[:, 3] = (rgba[:, 3] - 0.5) * 2
113 | return rgba, cmap
114 |
115 |
116 | def vba_choropleth(
117 | x_var,
118 | y_var,
119 | gdf,
120 | cmap="GnBu",
121 | divergent=False,
122 | revert_alpha=False,
123 | alpha_mapclassify=None,
124 | rgb_mapclassify=None,
125 | ax=None,
126 | legend=False,
127 | ):
128 | """
129 | Value by Alpha Choropleth
130 |
131 | Parameters
132 | ----------
133 | x_var : string or array
134 | The name of the variable in gdf that determines the color, or
135 | an array of values determining the color.
136 | y_var : string or array
137 | The name of the variable in gdf that determines the alpha
138 | value, or an array of values determining the alpha value.
139 | gdf : geopandas dataframe instance
140 | The Dataframe containing information to plot.
141 | cmap : str or list of str
142 | Matplotlib Colormap or list of colors used
143 | to create vba_layer
144 | divergent : bool, optional
145 | Creates a divergent alpha array with high values at
146 | the extremes and low, transparent values in the
147 | middle of the input values. Default =False.
148 | revert_alpha : bool, optional
149 | If True, high y values will have a
150 | low alpha and low values will be transparent.
151 | Default = False.
152 | alpha_mapclassify : dict, optional
153 | Keywords used for binning input values and
154 | classifying alpha values with `mapclassify`.
155 | Note: valid keywords are e.g. dict(classifier='quantiles', k=5,
156 | hinge=1.5). For other options check `splot.mapping.mapclassify_bin`.
157 | rgb_mapclassify : dict, optional
158 | Keywords used for binning input values and
159 | classifying rgb values with `mapclassify`.
160 | Note: valid keywords are e.g. dict(classifier='quantiles', k=5,
161 | hinge=1.5). For other options check `splot.mapping.mapclassify_bin`.
162 | ax : matplotlib Axes instance, optional
163 | Axes in which to plot the figure in multiple Axes layout.
164 | Default = None
165 | legend : bool, optional
166 | Adds a legend.
167 | Note: currently only available if data is classified,
168 | hence if `alpha_mapclassify` and `rgb_mapclassify` are used.
169 |
170 | Returns
171 | -------
172 | fig : matplotlib Figure instance
173 | Figure of Value by Alpha choropleth
174 | ax : matplotlib Axes instance
175 | Axes in which the figure is plotted
176 |
177 | Examples
178 | --------
179 |
180 | Imports
181 |
182 | >>> from libpysal import examples
183 | >>> import geopandas as gpd
184 | >>> import matplotlib.pyplot as plt
185 | >>> import matplotlib
186 | >>> import numpy as np
187 | >>> from splot.mapping import vba_choropleth
188 |
189 | Load Example Data
190 |
191 | >>> link_to_data = examples.get_path('columbus.shp')
192 | >>> gdf = gpd.read_file(link_to_data)
193 |
194 | Plot a Value-by-Alpha map
195 |
196 | >>> fig, _ = vba_choropleth('HOVAL', 'CRIME', gdf)
197 | >>> plt.show()
198 |
199 | Plot a Value-by-Alpha map with reverted alpha values
200 |
201 | >>> fig, _ = vba_choropleth('HOVAL', 'CRIME', gdf, cmap='RdBu',
202 | ... revert_alpha=True)
203 | >>> plt.show()
204 |
205 | Plot a Value-by-Alpha map with classified alpha and rgb values
206 |
207 | >>> fig, axs = plt.subplots(2,2, figsize=(20,10))
208 | >>> vba_choropleth('HOVAL', 'CRIME', gdf, cmap='viridis', ax = axs[0,0],
209 | ... rgb_mapclassify=dict(classifier='quantiles', k=3),
210 | ... alpha_mapclassify=dict(classifier='quantiles', k=3))
211 | >>> vba_choropleth('HOVAL', 'CRIME', gdf, cmap='viridis', ax = axs[0,1],
212 | ... rgb_mapclassify=dict(classifier='natural_breaks'),
213 | ... alpha_mapclassify=dict(classifier='natural_breaks'))
214 | >>> vba_choropleth('HOVAL', 'CRIME', gdf, cmap='viridis', ax = axs[1,0],
215 | ... rgb_mapclassify=dict(classifier='std_mean'),
216 | ... alpha_mapclassify=dict(classifier='std_mean'))
217 | >>> vba_choropleth('HOVAL', 'CRIME', gdf, cmap='viridis', ax = axs[1,1],
218 | ... rgb_mapclassify=dict(classifier='fisher_jenks', k=3),
219 | ... alpha_mapclassify=dict(classifier='fisher_jenks', k=3))
220 | >>> plt.show()
221 |
222 | Pass in a list of colors instead of a cmap
223 |
224 | >>> color_list = ['#a1dab4','#41b6c4','#225ea8']
225 | >>> vba_choropleth('HOVAL', 'CRIME', gdf, cmap=color_list,
226 | ... rgb_mapclassify=dict(classifier='quantiles', k=3),
227 | ... alpha_mapclassify=dict(classifier='quantiles'))
228 | >>> plt.show()
229 |
230 | Add a legend and use divergent alpha values
231 |
232 | >>> fig = plt.figure(figsize=(15,10))
233 | >>> ax = fig.add_subplot(111)
234 | >>> vba_choropleth('HOVAL', 'CRIME', gdf, divergent=True,
235 | ... alpha_mapclassify=dict(classifier='quantiles', k=5),
236 | ... rgb_mapclassify=dict(classifier='quantiles', k=5),
237 | ... legend=True, ax=ax)
238 | >>> plt.show()
239 |
240 | """
241 |
242 | if isinstance(x_var, str):
243 | x = np.array(gdf[x_var])
244 | else:
245 | x = x_var
246 |
247 | if isinstance(y_var, str):
248 | y = np.array(gdf[y_var])
249 | else:
250 | y = y_var
251 |
252 | if ax is None:
253 | fig = plt.figure()
254 | ax = fig.add_subplot(111)
255 | else:
256 | fig = ax.get_figure()
257 |
258 | if rgb_mapclassify is not None:
259 | rgb_mapclassify.setdefault("k", 5)
260 | rgb_mapclassify.setdefault("hinge", 1.5)
261 | rgb_mapclassify.setdefault("multiples", [-2, -1, 1, 2])
262 | rgb_mapclassify.setdefault("mindiff", 0)
263 | rgb_mapclassify.setdefault("initial", 100)
264 | rgb_mapclassify.setdefault("bins", [20, max(x)])
265 | classifier = rgb_mapclassify["classifier"]
266 | k = rgb_mapclassify["k"]
267 | hinge = rgb_mapclassify["hinge"]
268 | multiples = rgb_mapclassify["multiples"]
269 | mindiff = rgb_mapclassify["mindiff"]
270 | initial = rgb_mapclassify["initial"]
271 | bins = rgb_mapclassify["bins"]
272 | rgb_bins = mapclassify_bin(
273 | x,
274 | classifier,
275 | k=k,
276 | hinge=hinge,
277 | multiples=multiples,
278 | mindiff=mindiff,
279 | initial=initial,
280 | bins=bins,
281 | )
282 | x = rgb_bins.yb
283 |
284 | if alpha_mapclassify is not None:
285 | alpha_mapclassify.setdefault("k", 5)
286 | alpha_mapclassify.setdefault("hinge", 1.5)
287 | alpha_mapclassify.setdefault("multiples", [-2, -1, 1, 2])
288 | alpha_mapclassify.setdefault("mindiff", 0)
289 | alpha_mapclassify.setdefault("initial", 100)
290 | alpha_mapclassify.setdefault("bins", [20, max(y)])
291 | classifier = alpha_mapclassify["classifier"]
292 | k = alpha_mapclassify["k"]
293 | hinge = alpha_mapclassify["hinge"]
294 | multiples = alpha_mapclassify["multiples"]
295 | mindiff = alpha_mapclassify["mindiff"]
296 | initial = alpha_mapclassify["initial"]
297 | bins = alpha_mapclassify["bins"]
298 | # TODO: use the pct keyword here
299 | alpha_bins = mapclassify_bin(
300 | y,
301 | classifier,
302 | k=k,
303 | hinge=hinge,
304 | multiples=multiples,
305 | mindiff=mindiff,
306 | initial=initial,
307 | bins=bins,
308 | )
309 | y = alpha_bins.yb
310 |
311 | rgba, vba_cmap = value_by_alpha_cmap(
312 | x=x, y=y, cmap=cmap, divergent=divergent, revert_alpha=revert_alpha
313 | )
314 | gdf.plot(color=rgba, ax=ax)
315 | ax.set_axis_off()
316 | ax.set_aspect("equal")
317 |
318 | if legend:
319 | left, bottom, width, height = [0, 0.5, 0.2, 0.2]
320 | ax2 = fig.add_axes([left, bottom, width, height])
321 | vba_legend(rgb_bins, alpha_bins, vba_cmap, ax=ax2)
322 | return fig, ax
323 |
324 |
325 | def vba_legend(rgb_bins, alpha_bins, cmap, ax=None):
326 | """
327 | Creates Value by Alpha heatmap used as choropleth legend.
328 |
329 | Parameters
330 | ----------
331 | rgb_bins : pysal.mapclassify instance
332 | Object of classified values used for rgb.
333 | Can be created with `mapclassify_bin()`
334 | or `pysal.mapclassify`.
335 | alpha_bins : pysal.mapclassify instance
336 | Object of classified values used for alpha.
337 | Can be created with `mapclassify_bin()`
338 | or `pysal.mapclassify`.
339 | ax : matplotlib Axes instance, optional
340 | Axes in which to plot the figure in multiple Axes layout.
341 | Default = None
342 |
343 | Returns
344 | -------
345 | fig : matplotlib Figure instance
346 | Figure of Value by Alpha heatmap
347 | ax : matplotlib Axes instance
348 | Axes in which the figure is plotted
349 |
350 | Examples
351 | --------
352 | Imports
353 |
354 | >>> from libpysal import examples
355 | >>> import geopandas as gpd
356 | >>> import matplotlib.pyplot as plt
357 | >>> import matplotlib
358 | >>> import numpy as np
359 | >>> from splot.mapping import vba_legend, mapclassify_bin
360 |
361 | Load Example Data
362 |
363 | >>> link_to_data = examples.get_path('columbus.shp')
364 | >>> gdf = gpd.read_file(link_to_data)
365 | >>> x = gdf['HOVAL'].values
366 | >>> y = gdf['CRIME'].values
367 |
368 | Classify your data
369 |
370 | >>> rgb_bins = mapclassify_bin(x, 'quantiles')
371 | >>> alpha_bins = mapclassify_bin(y, 'quantiles')
372 |
373 | Plot your legend
374 |
375 | >>> fig, _ = vba_legend(rgb_bins, alpha_bins, cmap='RdBu')
376 | >>> plt.show()
377 |
378 | """
379 | # VALUES
380 | rgba, legend_cmap = value_by_alpha_cmap(rgb_bins.yb, alpha_bins.yb, cmap=cmap)
381 | # separate rgb and alpha values
382 | alpha = rgba[:, 3]
383 | # extract unique values for alpha and rgb
384 | alpha_vals = np.unique(alpha)
385 | rgb_vals = legend_cmap(
386 | (rgb_bins.bins - rgb_bins.bins.min())
387 | / (rgb_bins.bins.max() - rgb_bins.bins.min())
388 | )[:, 0:3]
389 |
390 | # PLOTTING
391 | if ax is None:
392 | fig = plt.figure()
393 | ax = fig.add_subplot(111)
394 | else:
395 | fig = ax.get_figure()
396 |
397 | for irow, alpha_val in enumerate(alpha_vals):
398 | for icol, rgb_val in enumerate(rgb_vals):
399 | rect = patches.Rectangle(
400 | (irow, icol),
401 | 1,
402 | 1,
403 | linewidth=3,
404 | edgecolor="none",
405 | facecolor=rgb_val,
406 | alpha=alpha_val,
407 | )
408 | ax.add_patch(rect)
409 |
410 | values_alpha, x_in_thousand = format_legend(alpha_bins.bins)
411 | values_rgb, y_in_thousand = format_legend(rgb_bins.bins)
412 | ax.plot([], [])
413 | ax.set_xlim([0, irow + 1])
414 | ax.set_ylim([0, icol + 1])
415 | ax.set_xticks(np.arange(irow + 1) + 0.5)
416 | ax.set_yticks(np.arange(icol + 1) + 0.5)
417 | ax.set_xticklabels(
418 | ["< %1.1f" % val for val in values_alpha],
419 | rotation=30,
420 | horizontalalignment="right",
421 | )
422 | ax.set_yticklabels(["$<$%1.1f" % val for val in values_rgb])
423 | # label each axis, noting when format_legend rescaled values to thousands
424 | xlabel = "alpha variable ($10^3$)" if x_in_thousand else "alpha variable"
425 | ylabel = "rgb variable ($10^3$)" if y_in_thousand else "rgb variable"
426 | ax.set_xlabel(xlabel)
427 | ax.set_ylabel(ylabel)
428 |
429 | # hide the spines surrounding the legend
430 | ax.spines["left"].set_visible(False)
431 | ax.spines["right"].set_visible(False)
432 | ax.spines["bottom"].set_visible(False)
433 | ax.spines["top"].set_visible(False)
434 | return fig, ax
435 |
436 |
437 | def mapclassify_bin(
438 | y,
439 | classifier,
440 | k=5,
441 | pct=[1, 10, 50, 90, 99, 100],
442 | hinge=1.5,
443 | multiples=[-2, -1, 1, 2],
444 | mindiff=0,
445 | initial=100,
446 | bins=None,
447 | ):
448 | """
449 | Classify your data with `pysal.mapclassify`
450 | Note: Input parameters are dependent on classifier used.
451 |
452 | Parameters
453 | ----------
454 | y : array
455 | (n,1), values to classify
456 | classifier : str
457 | pysal.mapclassify classification scheme
458 | k : int, optional
459 | The number of classes. Default=5.
460 | pct : array, optional
461 | Percentiles used for classification with `percentiles`.
462 | Default=[1,10,50,90,99,100]
463 | hinge : float, optional
464 | Multiplier for IQR when `BoxPlot` classifier used.
465 | Default=1.5.
466 | multiples : array, optional
467 | The multiples of the standard deviation to add/subtract from
468 | the sample mean to define the bins using `std_mean`.
469 | Default=[-2,-1,1,2].
470 | mindiff : float, optional
471 | The minimum difference between class breaks
472 | if using `maximum_breaks` classifier. Default =0.
473 | initial : int, optional
474 | Number of initial solutions to generate or number of runs
475 | when using `natural_breaks` or `max_p_classifier`.
476 | Default =100.
477 | Note: setting initial to 0 will result in the quickest
478 | calculation of bins.
479 | bins : array, optional
480 | (k,1), upper bounds of classes (have to be monotonically
481 | increasing) if using `user_defined` classifier.
482 | Default =None, Example =[20, max(y)].
483 |
484 | Returns
485 | -------
486 | bins : pysal.mapclassify instance
487 | Object containing bin ids for each observation (.yb),
488 | upper bounds of each class (.bins), number of classes (.k)
489 | and number of observations falling in each class (.counts)
490 |
491 | Note: Supported classifiers include: quantiles, box_plot, equal_interval,
492 | fisher_jenks, headtail_breaks, jenks_caspall, jenks_caspall_forced,
493 | max_p_classifier, maximum_breaks, natural_breaks, percentiles, std_mean,
494 | user_defined
495 |
496 | Examples
497 | --------
498 |
499 | Imports
500 |
501 | >>> from libpysal import examples
502 | >>> import geopandas as gpd
503 | >>> from splot.mapping import mapclassify_bin
504 |
505 | Load Example Data
506 |
507 | >>> link_to_data = examples.get_path('columbus.shp')
508 | >>> gdf = gpd.read_file(link_to_data)
509 | >>> x = gdf['HOVAL'].values
510 |
511 | Classify values by quantiles
512 |
513 | >>> quantiles = mapclassify_bin(x, 'quantiles')
514 |
515 | Classify values by box_plot and set hinge to 2
516 |
517 | >>> box_plot = mapclassify_bin(x, 'box_plot', hinge=2)
518 |
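Classify values with user-defined upper bounds; a minimal sketch
reusing the [20, max(y)] example from the `bins` description above

>>> user_defined = mapclassify_bin(x, 'user_defined', bins=[20, max(x)])
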
519 | """
520 | classifier = classifier.lower()
521 | if classifier not in _classifiers:
522 | raise ValueError(
523 | "Invalid scheme. Scheme must be in the" " set: %r" % _classifiers.keys()
524 | )
525 | elif classifier == "box_plot":
526 | bins = _classifiers[classifier](y, hinge)
527 | elif classifier == "headtail_breaks":
528 | bins = _classifiers[classifier](y)
529 | elif classifier == "percentiles":
530 | bins = _classifiers[classifier](y, pct)
531 | elif classifier == "std_mean":
532 | bins = _classifiers[classifier](y, multiples)
533 | elif classifier == "maximum_breaks":
534 | bins = _classifiers[classifier](y, k, mindiff)
535 | elif classifier in ["natural_breaks", "max_p_classifier"]:
536 | bins = _classifiers[classifier](y, k, initial)
537 | elif classifier == "user_defined":
538 | bins = _classifiers[classifier](y, bins)
539 | else:
540 | bins = _classifiers[classifier](y, k)
541 | return bins
542 |
--------------------------------------------------------------------------------
/splot/_viz_bokeh.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | # import pysal as ps
4 | import spreg
5 | from bokeh import palettes
6 | from bokeh.layouts import gridplot
7 | from bokeh.models import (
8 | CategoricalColorMapper,
9 | ColumnDataSource,
10 | GeoJSONDataSource,
11 | HoverTool,
12 | Span,
13 | )
14 | from bokeh.plotting import figure
15 | from esda.moran import Moran_Local
16 |
17 | from ._viz_utils import (
18 | add_legend,
19 | bin_labels_choropleth,
20 | calc_data_aspect,
21 | mask_local_auto,
22 | )
23 |
24 | """
25 | Lightweight interactive visualizations in Bokeh.
26 |
27 | TODO:
28 | * We are not re-projecting data into web-mercator at the moment,
29 | to allow plotting from raw coordinates.
30 | The user should, however, be aware of the projection of the data used.
31 | """
32 |
33 | __author__ = "Stefanie Lumnitz "
34 |
35 |
36 | def plot_choropleth(
37 | df,
38 | attribute,
39 | title=None,
40 | plot_width=500,
41 | plot_height=500,
42 | method="quantiles",
43 | k=5,
44 | reverse_colors=False,
45 | tools="",
46 | region_column="",
47 | ):
48 | """
49 | Plot Choropleth colored according to attribute
50 |
51 | Parameters
52 | ----------
53 | df : Geopandas dataframe
54 | Dataframe containing relevant shapes and attribute values.
55 | attribute : str
56 | Name of column containing attribute values of interest.
57 | title : str, optional
58 | Title of map. Default title=None
59 | plot_width : int, optional
60 | Width dimension of the figure in screen units/ pixels.
61 | Default = 500
62 | plot_height : int, optional
63 | Height dimension of the figure in screen units/ pixels.
64 | Default = 500
65 | method : str, optional
66 | Classification method to be used. Options supported:
67 | * 'quantiles' (default)
68 | * 'fisher_jenks'
69 | * 'equal_interval'
70 | k : int, optional
71 | Number of bins to assign values to. Default k=5.
72 | reverse_colors : bool, optional
73 | Reverses the color palette to show lightest colors for
74 | lowest values. Default reverse_colors=False
75 | tools : str, optional
76 | Tools used for bokeh plotting. Default = ''
77 | region_column : str, optional
78 | Column name containing region descriptions/names or polygon ids.
79 | Default = ''.
80 |
81 | Returns
82 | -------
83 | fig : Bokeh Figure instance
84 | Figure of Choropleth
85 |
86 | Examples
87 | --------
88 | >>> import libpysal.api as lp
89 | >>> from libpysal import examples
90 | >>> import geopandas as gpd
91 | >>> import esda
92 | >>> from splot.bk import plot_choropleth
93 | >>> from bokeh.io import show
94 |
95 | >>> link = examples.get_path('columbus.shp')
96 | >>> df = gpd.read_file(link)
97 | >>> w = lp.Queen.from_dataframe(df)
98 | >>> w.transform = 'r'
99 |
100 | >>> TOOLS = "tap,help"
101 | >>> fig = plot_choropleth(df, 'HOVAL', title='columbus',
102 | ... reverse_colors=True, tools=TOOLS)
103 | >>> show(fig)
104 | """
105 | # We're adding columns, do that on a copy rather than on the users' input
106 | df = df.copy()
107 |
108 | # Extract attribute values from df
109 | attribute_values = df[attribute].values
110 |
111 | # Create bin labels with bin_labels_choropleth()
112 | bin_labels = bin_labels_choropleth(df, attribute_values, method, k)
113 |
114 | # Initialize GeoJSONDataSource
115 | geo_source = GeoJSONDataSource(geojson=df.to_json())
116 |
117 | fig = _plot_choropleth_fig(
118 | geo_source,
119 | attribute,
120 | bin_labels,
121 | bounds=df.total_bounds,
122 | region_column=region_column,
123 | title=title,
124 | plot_width=plot_width,
125 | plot_height=plot_height,
126 | method=method,
127 | k=k,
128 | reverse_colors=reverse_colors,
129 | tools=tools,
130 | )
131 | return fig
132 |
133 |
134 | def _plot_choropleth_fig(
135 | geo_source,
136 | attribute,
137 | bin_labels,
138 | bounds,
139 | region_column="",
140 | title=None,
141 | plot_width=500,
142 | plot_height=500,
143 | method="quantiles",
144 | k=5,
145 | reverse_colors=False,
146 | tools="",
147 | ):
148 | colors = list(palettes.YlGnBu[k])
149 | if reverse_colors is True:
150 | colors.reverse() # lightest color for lowest values
151 |
152 | # make data aspect ratio match the figure aspect ratio
153 | # to avoid map distortion (1km=1km)
154 | x_min, x_max, y_min, y_max = calc_data_aspect(plot_height, plot_width, bounds)
155 |
156 | # Create figure
157 | fig = figure(
158 | title=title,
159 | plot_width=plot_width,
160 | plot_height=plot_height,
161 | tools=tools,
162 | x_range=(x_min, x_max),
163 | y_range=(y_min, y_max),
164 | )
165 | # The use of `nonselection_fill_*` shouldn't be necessary,
166 | # but currently it is. This looks like a bug in Bokeh
167 | # where gridplot plus taptool chooses the underlay from the figure
168 | # that is clicked and applies it to the other figure as well.
169 | fill_color = {
170 | "field": "labels_choro",
171 | "transform": CategoricalColorMapper(palette=colors, factors=bin_labels),
172 | }
173 | fig.patches(
174 | "xs",
175 | "ys",
176 | fill_alpha=0.7,
177 | fill_color=fill_color,
178 | line_color="white",
179 | nonselection_fill_alpha=0.2,
180 | nonselection_fill_color=fill_color,
181 | selection_line_color="firebrick",
182 | selection_fill_color=fill_color,
183 | line_width=0.5,
184 | source=geo_source,
185 | )
186 |
187 | # add hover tool
188 | if "hover" in tools:
189 | hover = fig.select_one(HoverTool)
190 | hover.point_policy = "follow_mouse"
191 | hover.tooltips = [
192 | ("Region", "@" + region_column),
193 | ("Attribute", "@" + attribute + "{0.0}"),
194 | ]
195 |
196 | # add legend with add_legend()
197 | add_legend(fig, bin_labels, colors)
198 |
199 | # change layout
200 | fig.xgrid.grid_line_color = None
201 | fig.ygrid.grid_line_color = None
202 | fig.axis.visible = None
203 | return fig
204 |
205 |
206 | def lisa_cluster(
207 | moran_loc,
208 | df,
209 | p=0.05,
210 | region_column="",
211 | title=None,
212 | plot_width=500,
213 | plot_height=500,
214 | tools="",
215 | ):
216 | """
217 | Lisa Cluster map, coloured by local spatial autocorrelation
218 |
219 | Parameters
220 | ----------
221 | moran_loc : esda.moran.Moran_Local instance
222 | values of Moran's Local Autocorrelation Statistic
223 | df : geopandas dataframe instance
224 | Dataframe containing relevant shapes and attribute values.
225 | Labels from mask_local_auto() are assigned per row; this is done
226 | on an internal copy, so the user-provided ``df`` is not modified.
227 | p : float, optional
228 | The p-value threshold for significance. Points will
229 | be colored by significance.
230 | title : str, optional
231 | Title of map. Default title=None
232 | plot_width : int, optional
233 | Width dimension of the figure in screen units/ pixels.
234 | Default = 500
235 | plot_height : int, optional
236 | Height dimension of the figure in screen units/ pixels.
237 | Default = 500
238 |
239 | Returns
240 | -------
241 | fig : Bokeh figure instance
242 | Figure of LISA cluster map, colored by local spatial autocorrelation
243 |
244 | Examples
245 | --------
246 | >>> import libpysal.api as lp
247 | >>> from libpysal import examples
248 | >>> import geopandas as gpd
249 | >>> from esda.moran import Moran_Local
250 | >>> from splot.bk import lisa_cluster
251 | >>> from bokeh.io import show
252 |
253 | >>> link = examples.get_path('columbus.shp')
254 | >>> df = gpd.read_file(link)
255 | >>> y = df['HOVAL'].values
256 | >>> w = lp.Queen.from_dataframe(df)
257 | >>> w.transform = 'r'
258 | >>> moran_loc = Moran_Local(y, w)
259 |
260 | >>> TOOLS = "tap,reset,help"
261 | >>> fig = lisa_cluster(moran_loc, df, p=0.05, tools=TOOLS)
262 | >>> show(fig)
263 | """
264 | # We're adding columns, do that on a copy rather than on the users' input
265 | df = df.copy()
266 |
267 | # add cluster_labels and colors5 in mask_local_auto
268 | cluster_labels, colors5, _, labels = mask_local_auto(moran_loc, p=p)
269 | df["labels_lisa"] = labels
270 | df["moranloc_psim"] = moran_loc.p_sim
271 | df["moranloc_q"] = moran_loc.q
272 |
273 | # load df into bokeh data source
274 | geo_source = GeoJSONDataSource(geojson=df.to_json())
275 |
276 | fig = _lisa_cluster_fig(
277 | geo_source,
278 | moran_loc,
279 | cluster_labels,
280 | colors5,
281 | bounds=df.total_bounds,
282 | region_column=region_column,
283 | title=title,
284 | plot_width=plot_width,
285 | plot_height=plot_height,
286 | tools=tools,
287 | )
288 | return fig
289 |
290 |
291 | def _lisa_cluster_fig(
292 | geo_source,
293 | moran_loc,
294 | cluster_labels,
295 | colors5,
296 | bounds,
297 | region_column="",
298 | title=None,
299 | plot_width=500,
300 | plot_height=500,
301 | tools="",
302 | ):
303 | # make data aspect ratio match the figure aspect ratio
304 | # to avoid map distortion (1km=1km)
305 | x_min, x_max, y_min, y_max = calc_data_aspect(plot_height, plot_width, bounds)
306 |
307 | # Create figure
308 | fig = figure(
309 | title=title,
310 | toolbar_location="right",
311 | plot_width=plot_width,
312 | plot_height=plot_height,
313 | x_range=(x_min, x_max),
314 | y_range=(y_min, y_max),
315 | tools=tools,
316 | )
317 | fill_color = {
318 | "field": "labels_lisa",
319 | "transform": CategoricalColorMapper(palette=colors5, factors=cluster_labels),
320 | }
321 | fig.patches(
322 | "xs",
323 | "ys",
324 | fill_color=fill_color,
325 | fill_alpha=0.8,
326 | nonselection_fill_alpha=0.2,
327 | nonselection_fill_color=fill_color,
328 | line_color="white",
329 | selection_line_color="firebrick",
330 | selection_fill_color=fill_color,
331 | line_width=0.5,
332 | source=geo_source,
333 | )
334 |
335 | if "hover" in tools:
336 | # add hover tool
337 | hover = fig.select_one(HoverTool)
338 | hover.point_policy = "follow_mouse"
339 | hover.tooltips = [
340 | ("Region", "@" + region_column),
341 | ("Significance", "@moranloc_psim{0.00}"),
342 | ("Quadrant", "@moranloc_q{0}"),
343 | ]
344 |
345 | # add legend with add_legend()
346 | add_legend(fig, cluster_labels, colors5)
347 |
348 | # change layout
349 | fig.xgrid.grid_line_color = None
350 | fig.ygrid.grid_line_color = None
351 | fig.axis.visible = None
352 | return fig
353 |
354 |
355 | def moran_scatterplot(
356 | moran_loc, p=None, region_column="", plot_width=500, plot_height=500, tools=""
357 | ):
358 | """
359 | Moran Scatterplot, optional coloured by local spatial autocorrelation
360 |
361 | Parameters
362 | ----------
363 | moran_loc : esda.moran.Moran_Local instance
364 | values of Moran's Local Autocorrelation Statistic
365 | p : float, optional
366 | The p-value threshold for significance. Points will
367 | be colored by significance.
368 | plot_width : int, optional
369 | Width dimension of the figure in screen units/ pixels.
370 | Default = 500
371 | plot_height : int, optional
372 | Height dimension of the figure in screen units/ pixels.
373 | Default = 500
374 |
375 | Returns
376 | -------
377 | fig : Bokeh figure instance
378 | Figure of Moran Scatterplot, optionally colored by
379 | local spatial autocorrelation
380 |
381 | Examples
382 | --------
383 | >>> import libpysal.api as lp
384 | >>> from libpysal import examples
385 | >>> import geopandas as gpd
386 | >>> from esda.moran import Moran_Local
387 | >>> from splot.bk import moran_scatterplot
388 | >>> from bokeh.io import show
389 |
390 | >>> link = examples.get_path('columbus.shp')
391 | >>> df = gpd.read_file(link)
392 | >>> y = df['HOVAL'].values
393 | >>> w = lp.Queen.from_dataframe(df)
394 | >>> w.transform = 'r'
395 | >>> moran_loc = Moran_Local(y, w)
396 |
397 | >>> fig = moran_scatterplot(moran_loc, p=0.05)
398 | >>> show(fig)
399 | """
400 | data = _moran_scatterplot_calc(moran_loc, p)
401 | source = ColumnDataSource(pd.DataFrame(data))
402 | fig = _moran_scatterplot_fig(
403 | source,
404 | p=p,
405 | region_column=region_column,
406 | plot_width=plot_width,
407 | plot_height=plot_height,
408 | tools=tools,
409 | )
410 | return fig
411 |
412 |
413 | def _moran_scatterplot_calc(moran_loc, p):
414 | lag = spreg.lag_spatial(moran_loc.w, moran_loc.z)
415 | fit = spreg.OLS(moran_loc.z[:, None], lag[:, None])
416 | if p is not None:
417 | if not isinstance(moran_loc, Moran_Local):
418 | raise ValueError("`moran_loc` is not a esda.moran.Moran_Local instance")
419 |
420 | _, _, colors, _ = mask_local_auto(moran_loc, p=p)
421 | else:
422 | colors = "black"
423 |
424 | data = {
425 | "moran_z": moran_loc.z,
426 | "lag": lag,
427 | "colors": colors,
428 | "fit_y": fit.predy.flatten(),
429 | "moranloc_psim": moran_loc.p_sim,
430 | "moranloc_q": moran_loc.q,
431 | }
432 | return data
433 |
434 |
435 | def _moran_scatterplot_fig(
436 | source,
437 | p=None,
438 | title="Moran Scatterplot",
439 | region_column="",
440 | plot_width=500,
441 | plot_height=500,
442 | tools="",
443 | ):
444 | """
445 | Parameters
446 | ----------
447 | source : Bokeh ColumnDatasource or GeoJSONDataSource instance
448 | The data source; it should contain the columns ``moran_z`` and ``lag``,
449 | which will be used as x and y inputs of the scatterplot.
450 | """
451 | # Vertical line
452 | vline = Span(
453 | location=0,
454 | dimension="height",
455 | line_color="lightskyblue",
456 | line_width=2,
457 | line_dash="dashed",
458 | )
459 | # Horizontal line
460 | hline = Span(
461 | location=0,
462 | dimension="width",
463 | line_color="lightskyblue",
464 | line_width=2,
465 | line_dash="dashed",
466 | )
467 |
468 | # Create figure
469 | fig = figure(
470 | title=title,
471 | x_axis_label="Response",
472 | y_axis_label="Spatial Lag",
473 | toolbar_location="left",
474 | plot_width=plot_width,
475 | plot_height=plot_height,
476 | tools=tools,
477 | )
478 | fig.scatter(
479 | x="moran_z",
480 | y="lag",
481 | source=source,
482 | color="colors",
483 | size=8,
484 | fill_alpha=0.6,
485 | selection_fill_alpha=1,
486 | selection_line_color="firebrick",
487 | selection_fill_color="colors",
488 | )
489 | fig.renderers.extend([vline, hline])
490 | fig.xgrid.grid_line_color = None
491 | fig.ygrid.grid_line_color = None
492 | fig.line(x="lag", y="fit_y", source=source, line_width=2) # fit line
493 |
494 | if "hover" in tools:
495 | hover = fig.select_one(HoverTool)
496 | hover.point_policy = "follow_mouse"
497 | hover.tooltips = [
498 | ("Region", "@" + region_column),
499 | ("Significance", "@moranloc_psim{0.00}"),
500 | ("Quadrant", "@moranloc_q{0}"),
501 | ]
502 | return fig
503 |
504 |
505 | def plot_local_autocorrelation(
506 | moran_loc,
507 | df,
508 | attribute,
509 | p=0.05,
510 | region_column="",
511 | plot_width=350,
512 | plot_height=400,
513 | method="quantiles",
514 | k=5,
515 | reverse_colors=False,
516 | ):
517 | """
518 | Plot Moran Scatterplot, LISA cluster and Choropleth
519 | for Local Spatial Autocorrelation Analysis
520 |
521 | Parameters
522 | ----------
523 | moran_loc : esda.moran.Moran_Local instance
524 | values of Moran's Local Autocorrelation Statistic
525 | df : Geopandas dataframe
526 | Dataframe containing relevant polygon and attribute values.
527 | attribute : str
528 | Name of column containing attribute values of interest.
529 | plot_width : int, optional
530 | Width dimension of the figure in screen units/ pixels.
531 | Default = 350
532 | plot_height : int, optional
533 | Height dimension of the figure in screen units/ pixels.
534 | Default = 400
535 | method : str, optional
536 | Classification method to be used. Options supported:
537 | * 'quantiles' (default)
538 | * 'fisher_jenks'
539 | * 'equal_interval'
540 | k : int, optional
541 | Number of bins to assign values to. Default k=5.
542 | reverse_colors : bool, optional
543 | Reverses the color palette to show lightest colors for
544 | lowest values in Choropleth map. Default reverse_colors=False
545 |
546 | Returns
547 | -------
548 | fig : Bokeh Figure instance
549 | Grid figure of Moran Scatterplot, LISA cluster map and Choropleth
550 |
551 | Examples
552 | --------
553 | >>> import libpysal.api as lp
554 | >>> from libpysal import examples
555 | >>> import geopandas as gpd
556 | >>> from esda.moran import Moran_Local
557 | >>> from splot.bk import plot_local_autocorrelation
558 | >>> from bokeh.io import show
559 |
560 | >>> link = examples.get_path('columbus.shp')
561 | >>> df = gpd.read_file(link)
562 | >>> y = df['HOVAL'].values
563 | >>> w = lp.Queen.from_dataframe(df)
564 | >>> w.transform = 'r'
565 | >>> moran_loc = Moran_Local(y, w)
566 |
567 | >>> fig = plot_local_autocorrelation(moran_loc, df, 'HOVAL',
568 | ... reverse_colors=True)
569 | >>> show(fig)
570 | """
571 | # We're adding columns, do that on a copy rather than on the users' input
572 | df = df.copy()
573 |
574 | # Add relevant results for moran_scatterplot as columns to geodataframe
575 | moran_scatterplot_data = _moran_scatterplot_calc(moran_loc, p)
576 | for key in moran_scatterplot_data:
577 | df[key] = moran_scatterplot_data[key]
578 |
579 | # add cluster_labels and colors5 in mask_local_auto
580 | cluster_labels, colors5, _, labels = mask_local_auto(moran_loc, p=p)
581 | df["labels_lisa"] = labels
582 | df["moranloc_psim"] = moran_loc.p_sim
583 | df["moranloc_q"] = moran_loc.q
584 | # Extract attribute values from df
585 | attribute_values = df[attribute].values
586 | # Create bin labels with bin_labels_choropleth()
587 | bin_labels = bin_labels_choropleth(df, attribute_values, method, k)
588 |
589 | # load df into bokeh data source
590 | geo_source = GeoJSONDataSource(geojson=df.to_json())
591 |
592 | TOOLS = "tap,reset,help,hover"
593 |
594 | scatter = _moran_scatterplot_fig(
595 | geo_source,
596 | p=p,
597 | region_column=region_column,
598 | title="Local Spatial Autocorrelation",
599 | plot_width=int(plot_width * 1.15),
600 | plot_height=plot_height,
601 | tools=TOOLS,
602 | )
603 | LISA = _lisa_cluster_fig(
604 | geo_source,
605 | moran_loc,
606 | cluster_labels,
607 | colors5,
608 | bounds=df.total_bounds,
609 | region_column=region_column,
610 | plot_width=plot_width,
611 | plot_height=plot_height,
612 | tools=TOOLS,
613 | )
614 | choro = _plot_choropleth_fig(
615 | geo_source,
616 | attribute,
617 | bin_labels,
618 | bounds=df.total_bounds,
619 | region_column=region_column,
620 | reverse_colors=reverse_colors,
621 | plot_width=plot_width,
622 | plot_height=plot_height,
623 | tools=TOOLS,
624 | )
625 |
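    # the three figures share geo_source, so a tap selection in one panel is
    # mirrored in the other two (bokeh links selections through a shared source)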
626 | fig = gridplot([[scatter, LISA, choro]], sizing_mode="scale_width")
627 | return fig
628 |
--------------------------------------------------------------------------------
/splot/_viz_giddy_mpl.py:
--------------------------------------------------------------------------------
1 | import matplotlib as mpl
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import seaborn as sns
5 | from esda.moran import Moran_Local
6 | from giddy.directional import Rose
7 |
8 | from ._viz_esda_mpl import lisa_cluster
9 | from ._viz_utils import moran_hot_cold_spots
10 |
11 | """
12 | Lightweight visualizations for pysal dynamics using Matplotlib and Geopandas
13 |
14 | TODO
15 | * implement LIMA
16 | * allow for different patterns or list of str
17 | in dynamic_lisa_composite_explore()
18 | """
19 |
20 | __author__ = "Stefanie Lumnitz"
21 |
22 |
23 | def _dynamic_lisa_heatmap_data(moran_locy, moran_locx, p=0.05):
24 | """
25 | Utility function to calculate dynamic lisa heatmap table
26 | and diagonal color mask
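
    Cell [row, col] counts observations classified as quadrant `row` at the
    start time and `col` at the end time; codes 0-4 match the tick labels
    used in `_dynamic_lisa_heatmap` ("ns", "HH", "HL", "LH", "LL").
    A toy sketch of the counting rule (hypothetical codes):

    >>> import numpy as np
    >>> clustery = np.array([0, 1, 1, 4])  # start-time codes
    >>> clusterx = np.array([0, 1, 4, 4])  # end-time codes
    >>> table = np.zeros((5, 5), dtype=int)
    >>> for row in range(5):
    ...     for col in range(5):
    ...         table[row, col] = ((clustery == row) & (clusterx == col)).sum()
    >>> int(table[1, 4])  # one observation moved from "HH" to "LL"
    1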
27 | """
28 | clustery = moran_hot_cold_spots(moran_locy, p=p)
29 | clusterx = moran_hot_cold_spots(moran_locx, p=p)
30 |
31 |     # table to pass into the seaborn heatmap; the diagonal (no transition)
32 |     # is masked so the off-diagonal cells are easier to see
33 | heatmap_data = np.zeros((5, 5), dtype=int)
34 | mask = np.zeros((5, 5), dtype=bool)
35 | for row in range(5):
36 | for col in range(5):
37 | yr1 = clustery == row
38 | yr2 = clusterx == col
39 | heatmap_data[row, col] = (yr1 & yr2).sum()
40 | if row == col:
41 | mask[row, col] = True
42 | return heatmap_data, mask
43 |
44 |
45 | def _moran_loc_from_rose_calc(rose):
46 | """
47 | Calculate esda.moran.Moran_Local values from giddy.rose object
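
    The random state is saved and restored so both `Moran_Local` runs draw
    identical permutations, keeping the pseudo p-values of the two points
    in time comparable. A minimal numpy sketch of the same save-and-rewind
    pattern:

    >>> import numpy as np
    >>> state = np.random.get_state()
    >>> a = np.random.permutation(3)
    >>> np.random.set_state(state)
    >>> b = np.random.permutation(3)
    >>> bool((a == b).all())
    True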
48 | """
49 | old_state = np.random.get_state()
50 | moran_locy = Moran_Local(rose.Y[:, 0], rose.w)
51 | np.random.set_state(old_state)
52 | moran_locx = Moran_Local(rose.Y[:, 1], rose.w)
53 | np.random.set_state(old_state)
54 | return moran_locy, moran_locx
55 |
56 |
57 | def dynamic_lisa_heatmap(rose, p=0.05, ax=None, **kwargs):
58 | """
59 |     Heatmap indicating significant transitions of LISA values
60 |     over time between Moran Scatterplot quadrants.
61 |
62 | Parameters
63 | ----------
64 | rose : giddy.directional.Rose instance
65 | A ``Rose`` object, which contains (among other attributes) LISA
66 | values at two points in time, and a method
67 | to perform inference on those.
68 | p : float, optional
69 | The p-value threshold for significance. Default =0.05
70 | ax : Matplotlib Axes instance, optional
71 | If given, the figure will be created inside this axis.
72 | Default =None.
73 | **kwargs : keyword arguments, optional
74 | Keywords used for creating and designing the heatmap.
75 | These are passed on to `seaborn.heatmap()`.
76 | See `seaborn` documentation for valid keywords.
77 | Note: "Start time" refers to `y1` in `Y = np.array([y1, y2]).T`
78 |     with `giddy.Rose(Y, w, k=5)`, "End time" refers to `y2`.
79 |
80 | Returns
81 | -------
82 | fig : Matplotlib Figure instance
83 | Heatmap figure
84 | ax : matplotlib Axes instance
85 | Axes in which the figure is plotted
86 |
87 | Examples
88 | --------
89 | >>> import geopandas as gpd
90 | >>> import pandas as pd
91 | >>> from libpysal.weights.contiguity import Queen
92 | >>> from libpysal import examples
93 | >>> import numpy as np
94 | >>> import matplotlib.pyplot as plt
95 | >>> from giddy.directional import Rose
96 | >>> from splot.giddy import dynamic_lisa_heatmap
97 |
98 | get csv and shp files
99 |
100 | >>> shp_link = examples.get_path('us48.shp')
101 | >>> df = gpd.read_file(shp_link)
102 | >>> income_table = pd.read_csv(examples.get_path("usjoin.csv"))
103 |
104 | calculate relative values
105 |
106 | >>> for year in range(1969, 2010):
107 | ... income_table[str(year) + '_rel'] = (
108 | ... income_table[str(year)] / income_table[str(year)].mean())
109 |
110 | merge to one gdf
111 |
112 |     >>> gdf = df.merge(income_table, left_on='STATE_NAME', right_on='Name')
113 |
114 | retrieve spatial weights and data for two points in time
115 |
116 | >>> w = Queen.from_dataframe(gdf)
117 | >>> w.transform = 'r'
118 | >>> y1 = gdf['1969_rel'].values
119 | >>> y2 = gdf['2000_rel'].values
120 |
121 | calculate rose Object
122 |
123 | >>> Y = np.array([y1, y2]).T
124 | >>> rose = Rose(Y, w, k=5)
125 |
126 | plot
127 |
128 | >>> dynamic_lisa_heatmap(rose)
129 | >>> plt.show()
130 |
131 | customize plot
132 |
133 |     >>> dynamic_lisa_heatmap(rose, cmap='GnBu')
134 | >>> plt.show()
135 |
136 | """
137 | moran_locy, moran_locx = _moran_loc_from_rose_calc(rose)
138 | fig, ax = _dynamic_lisa_heatmap(moran_locy, moran_locx, p=p, ax=ax, **kwargs)
139 | return fig, ax
140 |
141 |
142 | def _dynamic_lisa_heatmap(moran_locy, moran_locx, p, ax, **kwargs):
143 | """
144 | Create dynamic_lisa_heatmap figure from esda.moran.Moran_local values
145 | """
146 | heatmap_data, diagonal_mask = _dynamic_lisa_heatmap_data(moran_locy, moran_locx, p)
147 | # set default plot style
148 | annot = kwargs.pop("annot", True)
149 | cmap = kwargs.pop("cmap", "YlGnBu")
150 | mask = kwargs.pop("mask", diagonal_mask)
151 | cbar = kwargs.pop("cbar", False)
152 | square = kwargs.pop("square", True)
153 |
154 | # set name for tick labels
155 | xticklabels = kwargs.pop("xticklabels", ["ns", "HH", "HL", "LH", "LL"])
156 | yticklabels = kwargs.pop("yticklabels", ["ns", "HH", "HL", "LH", "LL"])
157 |
158 | ax = sns.heatmap(
159 | heatmap_data,
160 | annot=annot,
161 | cmap=cmap,
162 | xticklabels=xticklabels,
163 | yticklabels=yticklabels,
164 | mask=mask,
165 | ax=ax,
166 | cbar=cbar,
167 | square=square,
168 | **kwargs
169 | )
170 | ax.set_xlabel("End time")
171 | ax.set_ylabel("Start time")
172 | fig = ax.get_figure()
173 | return fig, ax
174 |
175 |
176 | def dynamic_lisa_rose(rose, attribute=None, ax=None, **kwargs):
177 | """
178 | Plot dynamic LISA values in a rose diagram.
179 |
180 | Parameters
181 | ----------
182 | rose : giddy.directional.Rose instance
183 | A ``Rose`` object, which contains (among other attributes) LISA
184 | values at two points in time, and a method
185 | to perform inference on those.
186 | attribute : (n,) ndarray, optional
187 | Points will be colored by chosen attribute values.
188 | Variable to specify colors of the colorbars. Default =None.
189 | ax : Matplotlib Axes instance, optional
190 | If given, the figure will be created inside this axis.
191 | Default =None. Note: This axis should have a polar projection.
192 | **kwargs : keyword arguments, optional
193 |         Keywords passed on to
194 |         `matplotlib.pyplot.scatter()`.
195 | Note: 'c' and 'color' cannot be passed when attribute is not None.
196 |
197 | Returns
198 | -------
199 | fig : Matplotlib Figure instance
200 | LISA rose plot figure
201 | ax : matplotlib Axes instance
202 | Axes in which the figure is plotted
203 |
204 | Examples
205 | --------
206 | >>> import geopandas as gpd
207 | >>> import pandas as pd
208 | >>> from libpysal.weights.contiguity import Queen
209 | >>> from libpysal import examples
210 | >>> import numpy as np
211 | >>> import matplotlib.pyplot as plt
212 | >>> from giddy.directional import Rose
213 | >>> from splot.giddy import dynamic_lisa_rose
214 |
215 | get csv and shp files
216 |
217 | >>> shp_link = examples.get_path('us48.shp')
218 | >>> df = gpd.read_file(shp_link)
219 | >>> income_table = pd.read_csv(examples.get_path("usjoin.csv"))
220 |
221 | calculate relative values
222 |
223 | >>> for year in range(1969, 2010):
224 | ... income_table[str(year) + '_rel'] = (
225 | ... income_table[str(year)] / income_table[str(year)].mean())
226 |
227 | merge to one gdf
228 |
229 |     >>> gdf = df.merge(income_table, left_on='STATE_NAME', right_on='Name')
230 |
231 | retrieve spatial weights and data for two points in time
232 |
233 | >>> w = Queen.from_dataframe(gdf)
234 | >>> w.transform = 'r'
235 | >>> y1 = gdf['1969_rel'].values
236 | >>> y2 = gdf['2000_rel'].values
237 |
238 | calculate rose Object
239 |
240 | >>> Y = np.array([y1, y2]).T
241 | >>> rose = Rose(Y, w, k=5)
242 |
243 | plot
244 |
245 | >>> dynamic_lisa_rose(rose, attribute=y1)
246 | >>> plt.show()
247 |
248 | customize plot
249 |
250 | >>> dynamic_lisa_rose(rose, c='r')
251 | >>> plt.show()
252 |
253 | """
254 |     # save old default values
255 | old_gridcolor = mpl.rcParams["grid.color"]
256 | old_facecolor = mpl.rcParams["axes.facecolor"]
257 | old_edgecolor = mpl.rcParams["axes.edgecolor"]
258 | # define plotting style
259 | mpl.rcParams["grid.color"] = "w"
260 | mpl.rcParams["axes.edgecolor"] = "w"
261 | mpl.rcParams["axes.facecolor"] = "#E5E5E5"
262 | alpha = kwargs.pop("alpha", 0.9)
263 | cmap = kwargs.pop("cmap", "YlGnBu")
264 |
265 | if ax is None:
266 | fig = plt.figure()
267 | ax = fig.add_subplot(111, projection="polar")
268 | can_insert_colorbar = True
269 | else:
270 | fig = ax.get_figure()
271 | can_insert_colorbar = False
272 |
273 | ax.set_rlabel_position(315)
274 |
275 | if attribute is None:
276 | c = ax.scatter(rose.theta, rose.r, alpha=alpha, cmap=cmap, **kwargs)
277 | else:
278 | if "c" in kwargs.keys() or "color" in kwargs.keys():
279 | raise ValueError(
280 | "c and color are not valid keywords here; "
281 | "attribute is used for coloring"
282 | )
283 |
284 | c = ax.scatter(
285 | rose.theta, rose.r, c=attribute, alpha=alpha, cmap=cmap, **kwargs
286 | )
287 | if can_insert_colorbar:
288 | fig.colorbar(c)
289 |
290 | # reset style to old default values
291 | mpl.rcParams["grid.color"] = old_gridcolor
292 | mpl.rcParams["axes.facecolor"] = old_facecolor
293 | mpl.rcParams["axes.edgecolor"] = old_edgecolor
294 | return fig, ax
295 |
296 |
297 | def _add_arrow(line, position=None, direction="right", size=15, color=None):
298 |     """
299 |     Add an arrow to a line, pointing from its first to its second vertex.
300 | 
301 |     Parameters
302 |     ----------
303 |     line : matplotlib.lines.Line2D
304 |         Line to annotate with an arrowhead.
305 |     position : float, optional
306 |         x-position of the arrow (currently unused).
307 |     direction : str, optional
308 |         'left' or 'right' (currently unused).
309 |     size : int, optional
310 |         Size of the arrow in fontsize points. Default =15.
311 |     color : str, optional
312 |         If None, the line color is taken. Default =None.
313 | 
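    Examples
    --------
    A small usage sketch (toy coordinates):

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> (line,) = ax.plot([0.0, 1.0], [0.0, 0.5])
    >>> _add_arrow(line)  # arrowhead drawn from the first to the second vertex
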
314 | """
315 | if color is None:
316 | color = line.get_color()
317 |
318 | xdata = line.get_xdata()
319 | ydata = line.get_ydata()
320 | line.axes.annotate(
321 | "",
322 | xytext=(xdata[0], ydata[0]),
323 | xy=(xdata[1], ydata[1]),
324 | arrowprops=dict(arrowstyle="->", color=color),
325 | size=size,
326 | )
327 |
328 |
329 | def dynamic_lisa_vectors(rose, ax=None, arrows=True, **kwargs):
330 | """
331 | Plot vectors of positional transition of LISA values
332 | in Moran scatterplot
333 |
334 | Parameters
335 | ----------
336 | rose : giddy.directional.Rose instance
337 | A ``Rose`` object, which contains (among other attributes) LISA
338 | values at two points in time, and a method
339 | to perform inference on those.
340 | ax : Matplotlib Axes instance, optional
341 | If given, the figure will be created inside this axis.
342 | Default =None.
343 | arrows : boolean, optional
344 | If True show arrowheads of vectors. Default =True
345 | **kwargs : keyword arguments, optional
346 |         Keywords passed on to `matplotlib.pyplot.plot()`.
347 |
348 | Returns
349 | -------
350 | fig : Matplotlib Figure instance
351 | Figure of dynamic LISA vectors
352 | ax : matplotlib Axes instance
353 | Axes in which the figure is plotted
354 |
355 | Examples
356 | --------
357 | >>> import geopandas as gpd
358 | >>> import pandas as pd
359 | >>> from libpysal.weights.contiguity import Queen
360 | >>> from libpysal import examples
361 | >>> import numpy as np
362 | >>> import matplotlib.pyplot as plt
363 |
364 | >>> from giddy.directional import Rose
365 | >>> from splot.giddy import dynamic_lisa_vectors
366 |
367 | get csv and shp files
368 |
369 | >>> shp_link = examples.get_path('us48.shp')
370 | >>> df = gpd.read_file(shp_link)
371 | >>> income_table = pd.read_csv(examples.get_path("usjoin.csv"))
372 |
373 | calculate relative values
374 |
375 | >>> for year in range(1969, 2010):
376 | ... income_table[str(year) + '_rel'] = (
377 | ... income_table[str(year)] / income_table[str(year)].mean())
378 |
379 | merge to one gdf
380 |
381 |     >>> gdf = df.merge(income_table, left_on='STATE_NAME', right_on='Name')
382 |
383 | retrieve spatial weights and data for two points in time
384 |
385 | >>> w = Queen.from_dataframe(gdf)
386 | >>> w.transform = 'r'
387 | >>> y1 = gdf['1969_rel'].values
388 | >>> y2 = gdf['2000_rel'].values
389 |
390 | calculate rose Object
391 |
392 | >>> Y = np.array([y1, y2]).T
393 | >>> rose = Rose(Y, w, k=5)
394 |
395 | plot
396 |
397 | >>> dynamic_lisa_vectors(rose)
398 | >>> plt.show()
399 |
400 | customize plot
401 |
402 | >>> dynamic_lisa_vectors(rose, arrows=False, color='r')
403 | >>> plt.show()
404 |
405 | """
406 |     if ax is None:
407 |         fig = plt.figure()
408 |         ax = fig.add_subplot(111)
409 |     else:
410 |         fig = ax.get_figure()
413 |
414 | xlim = [rose.Y.min(), rose.Y.max()]
415 | ylim = [rose.wY.min(), rose.wY.max()]
416 |
417 |     # 'c' is accepted as an alias for matplotlib's 'color' keyword;
418 |     # ax.plot() below only understands 'color'
419 |     if "c" in kwargs.keys():
420 |         color = kwargs.pop("c", "b")
421 |     else:
422 |         color = kwargs.pop("color", "b")
423 |
424 | xs = []
425 | ys = []
426 | for i in range(len(rose.Y)):
427 | # Plot a vector from xy_start to xy_end
428 | xs.append(rose.Y[i, :])
429 | ys.append(rose.wY[i, :])
430 |
431 | xs = np.asarray(xs).T
432 | ys = np.asarray(ys).T
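    # after transposing, column i of xs/ys holds observation i's start- and
    # end-time coordinates, so ax.plot draws one segment per observation in
    # Moran scatterplot space (values Y against spatial lag wY)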
433 | lines = ax.plot(xs, ys, color=color, **kwargs)
434 |     # note: no colorbar is drawn here; ax.plot() returns Line2D artists,
435 |     # which are not mappable and so cannot feed fig.colorbar()
436 |
437 | if arrows:
438 | for line in lines:
439 | _add_arrow(line)
440 |
441 | ax.axis("equal")
442 | ax.set_xlim(xlim)
443 | ax.set_ylim(ylim)
444 | return fig, ax
445 |
446 |
447 | def dynamic_lisa_composite(rose, gdf, p=0.05, figsize=(13, 10)):
448 | """
449 |     Composite visualisation of dynamic LISA values over two points in time.
450 |     Includes a dynamic LISA heatmap, a dynamic LISA rose plot,
451 |     and LISA cluster plots for both compared points in time.
452 |
453 | Parameters
454 | ----------
455 | rose : giddy.directional.Rose instance
456 | A ``Rose`` object, which contains (among other attributes) LISA
457 | values at two points in time, and a method
458 | to perform inference on those.
459 | gdf : geopandas dataframe instance
460 | The GeoDataFrame containing information and polygons to plot.
461 | p : float, optional
462 | The p-value threshold for significance. Default =0.05.
463 | figsize: tuple, optional
464 | W, h of figure. Default =(13,10)
465 |
466 | Returns
467 | -------
468 | fig : Matplotlib Figure instance
469 | Dynamic lisa composite figure.
470 | axs : matplotlib Axes instance
471 | Axes in which the figure is plotted.
472 |
473 | Examples
474 | --------
475 | >>> import geopandas as gpd
476 | >>> import pandas as pd
477 | >>> from libpysal.weights.contiguity import Queen
478 | >>> from libpysal import examples
479 | >>> import numpy as np
480 | >>> import matplotlib.pyplot as plt
481 | >>> from giddy.directional import Rose
482 | >>> from splot.giddy import dynamic_lisa_composite
483 |
484 | get csv and shp files
485 |
486 | >>> shp_link = examples.get_path('us48.shp')
487 | >>> df = gpd.read_file(shp_link)
488 | >>> income_table = pd.read_csv(examples.get_path("usjoin.csv"))
489 |
490 | calculate relative values
491 |
492 | >>> for year in range(1969, 2010):
493 | ... income_table[str(year) + '_rel'] = (
494 | ... income_table[str(year)] / income_table[str(year)].mean())
495 |
496 | merge to one gdf
497 |
498 |     >>> gdf = df.merge(income_table, left_on='STATE_NAME', right_on='Name')
499 |
500 | retrieve spatial weights and data for two points in time
501 |
502 | >>> w = Queen.from_dataframe(gdf)
503 | >>> w.transform = 'r'
504 | >>> y1 = gdf['1969_rel'].values
505 | >>> y2 = gdf['2000_rel'].values
506 |
507 | calculate rose Object
508 |
509 | >>> Y = np.array([y1, y2]).T
510 | >>> rose = Rose(Y, w, k=5)
511 |
512 | plot
513 |
514 | >>> dynamic_lisa_composite(rose, gdf)
515 | >>> plt.show()
516 |
517 | customize plot
518 |
519 | >>> fig, axs = dynamic_lisa_composite(rose, gdf)
520 |     >>> axs[0].set_ylabel('1969')
521 |     >>> axs[0].set_xlabel('2000')
522 |     >>> axs[1].set_title('LISA cluster for 1969')
523 |     >>> axs[3].set_title('LISA cluster for 2000')
524 | >>> plt.show()
525 |
526 | """
527 | # Moran_Local uses random numbers,
528 | # which we cannot change between the two years!
529 | moran_locy, moran_locx = _moran_loc_from_rose_calc(rose)
530 |
531 | # initialize figure
532 | fig = plt.figure(figsize=figsize)
533 | fig.suptitle("Space-time autocorrelation", fontsize=20)
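    # 2x2 layout: axs[0] heatmap (top left), axs[1] start-time LISA map
    # (top right), axs[2] rose diagram (bottom left, polar projection),
    # axs[3] end-time LISA map (bottom right)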
534 | axs = []
535 | axs.append(plt.subplot(221))
536 | axs.append(plt.subplot(222))
537 |     # save old default values
538 | old_gridcolor = mpl.rcParams["grid.color"]
539 | old_facecolor = mpl.rcParams["axes.facecolor"]
540 | old_edgecolor = mpl.rcParams["axes.edgecolor"]
541 | # define plotting style
542 | mpl.rcParams["grid.color"] = "w"
543 | mpl.rcParams["axes.edgecolor"] = "w"
544 | mpl.rcParams["axes.facecolor"] = "#E5E5E5"
545 | # define axs[2]
546 | axs.append(plt.subplot(223, projection="polar"))
547 | # reset style to old default values
548 | mpl.rcParams["grid.color"] = old_gridcolor
549 | mpl.rcParams["axes.facecolor"] = old_facecolor
550 | mpl.rcParams["axes.edgecolor"] = old_edgecolor
551 | # define axs[3]
552 | axs.append(plt.subplot(224))
553 |
554 | # space_time_heatmap
555 | _dynamic_lisa_heatmap(moran_locy, moran_locx, p=p, ax=axs[0])
556 | axs[0].xaxis.set_ticks_position("top")
557 | axs[0].xaxis.set_label_position("top")
558 |
559 | # Lisa_cluster maps
560 | lisa_cluster(
561 | moran_locy,
562 | gdf,
563 | p=p,
564 | ax=axs[1],
565 | legend=True,
566 | legend_kwds={"loc": "upper left", "bbox_to_anchor": (0.92, 1.05)},
567 | )
568 | axs[1].set_title("Start time")
569 | lisa_cluster(
570 | moran_locx,
571 | gdf,
572 | p=p,
573 | ax=axs[3],
574 | legend=True,
575 | legend_kwds={"loc": "upper left", "bbox_to_anchor": (0.92, 1.05)},
576 | )
577 | axs[3].set_title("End time")
578 |
579 | # Rose diagram: Moran movement vectors:
580 | dynamic_lisa_rose(rose, ax=axs[2])
581 | return fig, axs
582 |
583 |
584 | def _dynamic_lisa_widget_update(
585 | rose, gdf, start_time, end_time, p=0.05, figsize=(13, 10)
586 | ):
587 | """
588 | Update rose values if widgets are used
589 | """
590 | # determine rose object for (timex, timey),
591 | # which comes from interact widgets
592 | y1 = gdf[start_time].values
593 | y2 = gdf[end_time].values
594 | Y = np.array([y1, y2]).T
595 | rose_update = Rose(Y, rose.w, k=5)
596 |
597 |     dynamic_lisa_composite(rose_update, gdf, p=p, figsize=figsize)
598 |
599 |
600 | def dynamic_lisa_composite_explore(rose, gdf, pattern="", p=0.05, figsize=(13, 10)):
601 | """
602 | Interactive exploration of dynamic LISA values
603 | for different dates in a dataframe.
604 |     Note: only possible in Jupyter notebooks.
605 |
606 | Parameters
607 | ----------
608 | rose : giddy.directional.Rose instance
609 | A ``Rose`` object, which contains (among other attributes)
610 | weights to calculate `esda.moran.Moran_local` values
611 | gdf : geopandas dataframe instance
612 | The Dataframe containing information and polygons to plot.
613 | pattern : str, optional
614 | Option to extract all columns ending with a specific pattern.
615 | Only extracted columns will be used for comparison.
616 | p : float, optional
617 | The p-value threshold for significance. Default =0.05
618 | figsize: tuple, optional
619 | W, h of figure. Default =(13,10)
620 |
621 | Returns
622 | -------
623 | None
624 |
625 | Examples
626 | --------
627 | **Note**: this function creates Jupyter notebook widgets, so is meant only
628 | to run in a notebook.
629 |
630 | >>> import geopandas as gpd
631 | >>> import pandas as pd
632 | >>> from libpysal.weights.contiguity import Queen
633 | >>> from libpysal import examples
634 | >>> import numpy as np
635 | >>> import matplotlib.pyplot as plt
636 |
637 | If you want to see figures embedded inline in a Jupyter notebook,
638 | add a line ``%matplotlib inline`` at the top of your notebook.
639 |
640 | >>> from giddy.directional import Rose
641 | >>> from splot.giddy import dynamic_lisa_composite_explore
642 |
643 | get csv and shp files
644 |
645 | >>> shp_link = examples.get_path('us48.shp')
646 | >>> df = gpd.read_file(shp_link)
647 | >>> income_table = pd.read_csv(examples.get_path("usjoin.csv"))
648 |
649 | calculate relative values
650 |
651 | >>> for year in range(1969, 2010):
652 | ... income_table[str(year) + '_rel'] = (
653 | ... income_table[str(year)] / income_table[str(year)].mean())
654 |
655 | merge to one gdf
656 |
657 |     >>> gdf = df.merge(income_table, left_on='STATE_NAME', right_on='Name')
658 |
659 | retrieve spatial weights and data for two points in time
660 |
661 | >>> w = Queen.from_dataframe(gdf)
662 | >>> w.transform = 'r'
663 | >>> y1 = gdf['1969_rel'].values
664 | >>> y2 = gdf['2000_rel'].values
665 |
666 | calculate rose Object
667 |
668 | >>> Y = np.array([y1, y2]).T
669 | >>> rose = Rose(Y, w, k=5)
670 |
671 | plot
672 |
673 |     >>> dynamic_lisa_composite_explore(rose, gdf, pattern='rel')
674 | >>> # plt.show()
675 |
676 | """
677 | try:
678 | from ipywidgets import fixed, interact
679 | except (ImportError, ModuleNotFoundError):
680 | raise ImportError(
681 | "`ipywidgets` package is required to use "
682 |             "dynamic_lisa_composite_explore. "
683 | "You can install it using `conda install ipywidgets` "
684 | "or `pip install ipywidgets`."
685 | )
686 | coldict = {col: col for col in gdf.columns if col.endswith(pattern)}
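    # passing a dict for start_time/end_time makes `interact` render them as
    # dropdowns of the matching column names; each change re-plots the composite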
687 | interact(
688 | _dynamic_lisa_widget_update,
689 | start_time=coldict,
690 | end_time=coldict,
691 | rose=fixed(rose),
692 | gdf=fixed(gdf),
693 | p=fixed(p),
694 | figsize=fixed(figsize),
695 | )
696 |
--------------------------------------------------------------------------------
/splot/_version.py:
--------------------------------------------------------------------------------
1 | # This file helps to compute a version number in source trees obtained from
2 | # git-archive tarball (such as those provided by GitHub's download-from-tag
3 | # feature). Distribution tarballs (built by setup.py sdist) and build
4 | # directories (produced by setup.py build) will contain a much shorter file
5 | # that just contains the computed version number.
6 |
7 | # This file is released into the public domain. Generated by
8 | # versioneer-0.20 (https://github.com/python-versioneer/python-versioneer)
9 |
10 | """Git implementation of _version.py."""
11 |
12 | import errno
13 | import os
14 | import re
15 | import subprocess
16 | import sys
17 |
18 |
19 | def get_keywords():
20 | """Get the keywords needed to look up the version information."""
21 | # these strings will be replaced by git during git-archive.
22 | # setup.py/versioneer.py will grep for the variable names, so they must
23 | # each be defined on a line of their own. _version.py will just call
24 | # get_keywords().
25 | git_refnames = " (HEAD -> main)"
26 | git_full = "b8361cb5f4685d0945e08cbf9172ba701ce57c44"
27 | git_date = "2025-06-19 20:45:36 -0400"
28 | keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
29 | return keywords
30 |
31 |
32 | class VersioneerConfig: # pylint: disable=too-few-public-methods
33 | """Container for Versioneer configuration parameters."""
34 |
35 |
36 | def get_config():
37 | """Create, populate and return the VersioneerConfig() object."""
38 | # these strings are filled in when 'setup.py versioneer' creates
39 | # _version.py
40 | cfg = VersioneerConfig()
41 | cfg.VCS = "git"
42 | cfg.style = "pep440"
43 | cfg.tag_prefix = "v"
44 | cfg.parentdir_prefix = "splot-"
45 | cfg.versionfile_source = "splot/_version.py"
46 | cfg.verbose = False
47 | return cfg
48 |
49 |
50 | class NotThisMethod(Exception):
51 | """Exception raised if a method is not valid for the current scenario."""
52 |
53 |
54 | LONG_VERSION_PY = {}
55 | HANDLERS = {}
56 |
57 |
58 | def register_vcs_handler(vcs, method): # decorator
59 | """Create decorator to mark a method as the handler of a VCS."""
60 |
61 | def decorate(f):
62 | """Store f in HANDLERS[vcs][method]."""
63 | if vcs not in HANDLERS:
64 | HANDLERS[vcs] = {}
65 | HANDLERS[vcs][method] = f
66 | return f
67 |
68 | return decorate
69 |
70 |
71 | # pylint:disable=too-many-arguments,consider-using-with # noqa
72 | def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
73 | """Call the given command(s)."""
74 | assert isinstance(commands, list)
75 | process = None
76 | for command in commands:
77 | try:
78 | dispcmd = str([command] + args)
79 | # remember shell=False, so use git.cmd on windows, not just git
80 | process = subprocess.Popen(
81 | [command] + args,
82 | cwd=cwd,
83 | env=env,
84 | stdout=subprocess.PIPE,
85 | stderr=(subprocess.PIPE if hide_stderr else None),
86 | )
87 | break
88 | except EnvironmentError:
89 | e = sys.exc_info()[1]
90 | if e.errno == errno.ENOENT:
91 | continue
92 | if verbose:
93 | print("unable to run %s" % dispcmd)
94 | print(e)
95 | return None, None
96 | else:
97 | if verbose:
98 | print("unable to find command, tried %s" % (commands,))
99 | return None, None
100 | stdout = process.communicate()[0].strip().decode()
101 | if process.returncode != 0:
102 | if verbose:
103 | print("unable to run %s (error)" % dispcmd)
104 | print("stdout was %s" % stdout)
105 | return None, process.returncode
106 | return stdout, process.returncode
107 |
108 |
109 | def versions_from_parentdir(parentdir_prefix, root, verbose):
110 | """Try to determine the version from the parent directory name.
111 |
112 | Source tarballs conventionally unpack into a directory that includes both
113 | the project name and a version string. We will also support searching up
114 | two directory levels for an appropriately named parent directory
115 | """
116 | rootdirs = []
117 |
118 | for _ in range(3):
119 | dirname = os.path.basename(root)
120 | if dirname.startswith(parentdir_prefix):
121 | return {
122 | "version": dirname[len(parentdir_prefix) :],
123 | "full-revisionid": None,
124 | "dirty": False,
125 | "error": None,
126 | "date": None,
127 | }
128 | rootdirs.append(root)
129 | root = os.path.dirname(root) # up a level
130 |
131 | if verbose:
132 | print(
133 | "Tried directories %s but none started with prefix %s"
134 | % (str(rootdirs), parentdir_prefix)
135 | )
136 | raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
137 |
138 |
139 | @register_vcs_handler("git", "get_keywords")
140 | def git_get_keywords(versionfile_abs):
141 | """Extract version information from the given file."""
142 | # the code embedded in _version.py can just fetch the value of these
143 | # keywords. When used from setup.py, we don't want to import _version.py,
144 | # so we do it with a regexp instead. This function is not used from
145 | # _version.py.
146 | keywords = {}
147 | try:
148 | with open(versionfile_abs, "r") as fobj:
149 | for line in fobj:
150 | if line.strip().startswith("git_refnames ="):
151 | mo = re.search(r'=\s*"(.*)"', line)
152 | if mo:
153 | keywords["refnames"] = mo.group(1)
154 | if line.strip().startswith("git_full ="):
155 | mo = re.search(r'=\s*"(.*)"', line)
156 | if mo:
157 | keywords["full"] = mo.group(1)
158 | if line.strip().startswith("git_date ="):
159 | mo = re.search(r'=\s*"(.*)"', line)
160 | if mo:
161 | keywords["date"] = mo.group(1)
162 | except EnvironmentError:
163 | pass
164 | return keywords
165 |
166 |
167 | @register_vcs_handler("git", "keywords")
168 | def git_versions_from_keywords(keywords, tag_prefix, verbose):
169 | """Get version information from git keywords."""
170 | if "refnames" not in keywords:
171 | raise NotThisMethod("Short version file found")
172 | date = keywords.get("date")
173 | if date is not None:
174 | # Use only the last line. Previous lines may contain GPG signature
175 | # information.
176 | date = date.splitlines()[-1]
177 |
178 |     # git-2.2.0 added "%cI", which expands to an ISO-8601-compliant
179 |     # datestamp. However we prefer "%ci" (which expands to an "ISO-8601-
180 |     # like" string, which we must then edit to make compliant), because
181 | # it's been around since git-1.5.3, and it's too difficult to
182 | # discover which version we're using, or to work around using an
183 | # older one.
184 | date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
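        # e.g. "2025-06-19 20:45:36 -0400" -> "2025-06-19T20:45:36-0400"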
185 | refnames = keywords["refnames"].strip()
186 | if refnames.startswith("$Format"):
187 | if verbose:
188 | print("keywords are unexpanded, not using")
189 | raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
190 | refs = {r.strip() for r in refnames.strip("()").split(",")}
191 | # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
192 | # just "foo-1.0". If we see a "tag: " prefix, prefer those.
193 | TAG = "tag: "
194 | tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
195 | if not tags:
196 | # Either we're using git < 1.8.3, or there really are no tags. We use
197 | # a heuristic: assume all version tags have a digit. The old git %d
198 | # expansion behaves like git log --decorate=short and strips out the
199 | # refs/heads/ and refs/tags/ prefixes that would let us distinguish
200 | # between branches and tags. By ignoring refnames without digits, we
201 | # filter out many common branch names like "release" and
202 | # "stabilization", as well as "HEAD" and "master".
203 | tags = {r for r in refs if re.search(r"\d", r)}
204 | if verbose:
205 | print("discarding '%s', no digits" % ",".join(refs - tags))
206 | if verbose:
207 | print("likely tags: %s" % ",".join(sorted(tags)))
208 | for ref in sorted(tags):
209 | # sorting will prefer e.g. "2.0" over "2.0rc1"
210 | if ref.startswith(tag_prefix):
211 | r = ref[len(tag_prefix) :]
212 | # Filter out refs that exactly match prefix or that don't start
213 | # with a number once the prefix is stripped (mostly a concern
214 | # when prefix is '')
215 | if not re.match(r"\d", r):
216 | continue
217 | if verbose:
218 | print("picking %s" % r)
219 | return {
220 | "version": r,
221 | "full-revisionid": keywords["full"].strip(),
222 | "dirty": False,
223 | "error": None,
224 | "date": date,
225 | }
226 | # no suitable tags, so version is "0+unknown", but full hex is still there
227 | if verbose:
228 | print("no suitable tags, using unknown + full revision id")
229 | return {
230 | "version": "0+unknown",
231 | "full-revisionid": keywords["full"].strip(),
232 | "dirty": False,
233 | "error": "no suitable tags",
234 | "date": None,
235 | }
236 |
237 |
238 | @register_vcs_handler("git", "pieces_from_vcs")
239 | def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
240 | """Get version from 'git describe' in the root of the source tree.
241 |
242 | This only gets called if the git-archive 'subst' keywords were *not*
243 | expanded, and _version.py hasn't already been rewritten with a short
244 | version string, meaning we're inside a checked out source tree.
245 | """
246 | GITS = ["git"]
247 | if sys.platform == "win32":
248 | GITS = ["git.cmd", "git.exe"]
249 |
250 | _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
251 | if rc != 0:
252 | if verbose:
253 | print("Directory %s not under git control" % root)
254 | raise NotThisMethod("'git rev-parse --git-dir' returned error")
255 |
256 | # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
257 | # if there isn't one, this yields HEX[-dirty] (no NUM)
258 | describe_out, rc = runner(
259 | GITS,
260 | [
261 | "describe",
262 | "--tags",
263 | "--dirty",
264 | "--always",
265 | "--long",
266 | "--match",
267 | "%s*" % tag_prefix,
268 | ],
269 | cwd=root,
270 | )
271 | # --long was added in git-1.5.5
272 | if describe_out is None:
273 | raise NotThisMethod("'git describe' failed")
274 | describe_out = describe_out.strip()
275 | full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
276 | if full_out is None:
277 | raise NotThisMethod("'git rev-parse' failed")
278 | full_out = full_out.strip()
279 |
280 | pieces = {}
281 | pieces["long"] = full_out
282 | pieces["short"] = full_out[:7] # maybe improved later
283 | pieces["error"] = None
284 |
285 | branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
286 | # --abbrev-ref was added in git-1.6.3
287 | if rc != 0 or branch_name is None:
288 | raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
289 | branch_name = branch_name.strip()
290 |
291 | if branch_name == "HEAD":
292 | # If we aren't exactly on a branch, pick a branch which represents
293 | # the current commit. If all else fails, we are on a branchless
294 | # commit.
295 | branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
296 | # --contains was added in git-1.5.4
297 | if rc != 0 or branches is None:
298 | raise NotThisMethod("'git branch --contains' returned error")
299 | branches = branches.split("\n")
300 |
301 | # Remove the first line if we're running detached
302 | if "(" in branches[0]:
303 | branches.pop(0)
304 |
305 | # Strip off the leading "* " from the list of branches.
306 | branches = [branch[2:] for branch in branches]
307 | if "master" in branches:
308 | branch_name = "master"
309 | elif not branches:
310 | branch_name = None
311 | else:
312 | # Pick the first branch that is returned. Good or bad.
313 | branch_name = branches[0]
314 |
315 | pieces["branch"] = branch_name
316 |
317 | # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
318 | # TAG might have hyphens.
319 | git_describe = describe_out
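    # e.g. "v1.1.5-3-g0123abc-dirty": tag "v1.1.5", 3 commits since the tag,
    # short hash "0123abc", modified working tree (hypothetical values)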
320 |
321 | # look for -dirty suffix
322 | dirty = git_describe.endswith("-dirty")
323 | pieces["dirty"] = dirty
324 | if dirty:
325 | git_describe = git_describe[: git_describe.rindex("-dirty")]
326 |
327 | # now we have TAG-NUM-gHEX or HEX
328 |
329 | if "-" in git_describe:
330 | # TAG-NUM-gHEX
331 | mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
332 | if not mo:
333 | # unparseable. Maybe git-describe is misbehaving?
334 | pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
335 | return pieces
336 |
337 | # tag
338 | full_tag = mo.group(1)
339 | if not full_tag.startswith(tag_prefix):
340 | if verbose:
341 | fmt = "tag '%s' doesn't start with prefix '%s'"
342 | print(fmt % (full_tag, tag_prefix))
343 | pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
344 | full_tag,
345 | tag_prefix,
346 | )
347 | return pieces
348 | pieces["closest-tag"] = full_tag[len(tag_prefix) :]
349 |
350 | # distance: number of commits since tag
351 | pieces["distance"] = int(mo.group(2))
352 |
353 | # commit: short hex revision ID
354 | pieces["short"] = mo.group(3)
355 |
356 | else:
357 | # HEX: no tags
358 | pieces["closest-tag"] = None
359 | count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
360 | pieces["distance"] = int(count_out) # total number of commits
361 |
362 | # commit date: see ISO-8601 comment in git_versions_from_keywords()
363 | date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
364 | # Use only the last line. Previous lines may contain GPG signature
365 | # information.
366 | date = date.splitlines()[-1]
367 | pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
368 |
369 | return pieces
370 |
371 |
372 | def plus_or_dot(pieces):
373 | """Return a + if we don't already have one, else return a ."""
374 | if "+" in pieces.get("closest-tag", ""):
375 | return "."
376 | return "+"
377 |
378 |
379 | def render_pep440(pieces):
380 | """Build up version string, with post-release "local version identifier".
381 |
382 | Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
383 | get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
384 |
385 | Exceptions:
386 | 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
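
    For example, with hypothetical pieces describing a checkout three
    commits past tag "v1.2" with local modifications:

    >>> render_pep440({"closest-tag": "1.2", "distance": 3,
    ...                "short": "0123abc", "dirty": True})
    '1.2+3.g0123abc.dirty'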
387 | """
388 | if pieces["closest-tag"]:
389 | rendered = pieces["closest-tag"]
390 | if pieces["distance"] or pieces["dirty"]:
391 | rendered += plus_or_dot(pieces)
392 | rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
393 | if pieces["dirty"]:
394 | rendered += ".dirty"
395 | else:
396 | # exception #1
397 | rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
398 | if pieces["dirty"]:
399 | rendered += ".dirty"
400 | return rendered
401 |
402 |
403 | def render_pep440_branch(pieces):
404 | """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
405 |
406 | The ".dev0" means not master branch. Note that .dev0 sorts backwards
407 | (a feature branch will appear "older" than the master branch).
408 |
409 | Exceptions:
410 | 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
411 | """
412 | if pieces["closest-tag"]:
413 | rendered = pieces["closest-tag"]
414 | if pieces["distance"] or pieces["dirty"]:
415 | if pieces["branch"] != "master":
416 | rendered += ".dev0"
417 | rendered += plus_or_dot(pieces)
418 | rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
419 | if pieces["dirty"]:
420 | rendered += ".dirty"
421 | else:
422 | # exception #1
423 | rendered = "0"
424 | if pieces["branch"] != "master":
425 | rendered += ".dev0"
426 | rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
427 | if pieces["dirty"]:
428 | rendered += ".dirty"
429 | return rendered
430 |
431 |
432 | def render_pep440_pre(pieces):
433 | """TAG[.post0.devDISTANCE] -- No -dirty.
434 |
435 | Exceptions:
436 | 1: no tags. 0.post0.devDISTANCE
437 | """
438 | if pieces["closest-tag"]:
439 | rendered = pieces["closest-tag"]
440 | if pieces["distance"]:
441 | rendered += ".post0.dev%d" % pieces["distance"]
442 | else:
443 | # exception #1
444 | rendered = "0.post0.dev%d" % pieces["distance"]
445 | return rendered
446 |
447 |
448 | def render_pep440_post(pieces):
449 | """TAG[.postDISTANCE[.dev0]+gHEX] .
450 |
451 | The ".dev0" means dirty. Note that .dev0 sorts backwards
452 | (a dirty tree will appear "older" than the corresponding clean one),
453 | but you shouldn't be releasing software with -dirty anyways.
454 |
455 | Exceptions:
456 | 1: no tags. 0.postDISTANCE[.dev0]
457 | """
458 | if pieces["closest-tag"]:
459 | rendered = pieces["closest-tag"]
460 | if pieces["distance"] or pieces["dirty"]:
461 | rendered += ".post%d" % pieces["distance"]
462 | if pieces["dirty"]:
463 | rendered += ".dev0"
464 | rendered += plus_or_dot(pieces)
465 | rendered += "g%s" % pieces["short"]
466 | else:
467 | # exception #1
468 | rendered = "0.post%d" % pieces["distance"]
469 | if pieces["dirty"]:
470 | rendered += ".dev0"
471 | rendered += "+g%s" % pieces["short"]
472 | return rendered
473 |
474 |
475 | def render_pep440_post_branch(pieces):
476 | """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
477 |
478 | The ".dev0" means not master branch.
479 |
480 | Exceptions:
481 | 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
482 | """
483 | if pieces["closest-tag"]:
484 | rendered = pieces["closest-tag"]
485 | if pieces["distance"] or pieces["dirty"]:
486 | rendered += ".post%d" % pieces["distance"]
487 | if pieces["branch"] != "master":
488 | rendered += ".dev0"
489 | rendered += plus_or_dot(pieces)
490 | rendered += "g%s" % pieces["short"]
491 | if pieces["dirty"]:
492 | rendered += ".dirty"
493 | else:
494 | # exception #1
495 | rendered = "0.post%d" % pieces["distance"]
496 | if pieces["branch"] != "master":
497 | rendered += ".dev0"
498 | rendered += "+g%s" % pieces["short"]
499 | if pieces["dirty"]:
500 | rendered += ".dirty"
501 | return rendered
502 |
503 |
504 | def render_pep440_old(pieces):
505 | """TAG[.postDISTANCE[.dev0]] .
506 |
507 | The ".dev0" means dirty.
508 |
509 | Exceptions:
510 | 1: no tags. 0.postDISTANCE[.dev0]
511 | """
512 | if pieces["closest-tag"]:
513 | rendered = pieces["closest-tag"]
514 | if pieces["distance"] or pieces["dirty"]:
515 | rendered += ".post%d" % pieces["distance"]
516 | if pieces["dirty"]:
517 | rendered += ".dev0"
518 | else:
519 | # exception #1
520 | rendered = "0.post%d" % pieces["distance"]
521 | if pieces["dirty"]:
522 | rendered += ".dev0"
523 | return rendered
524 |
525 |
526 | def render_git_describe(pieces):
527 | """TAG[-DISTANCE-gHEX][-dirty].
528 |
529 | Like 'git describe --tags --dirty --always'.
530 |
531 | Exceptions:
532 | 1: no tags. HEX[-dirty] (note: no 'g' prefix)
533 | """
534 | if pieces["closest-tag"]:
535 | rendered = pieces["closest-tag"]
536 | if pieces["distance"]:
537 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
538 | else:
539 | # exception #1
540 | rendered = pieces["short"]
541 | if pieces["dirty"]:
542 | rendered += "-dirty"
543 | return rendered
544 |
545 |
546 | def render_git_describe_long(pieces):
547 | """TAG-DISTANCE-gHEX[-dirty].
548 |
549 |     Like 'git describe --tags --dirty --always --long'.
550 | The distance/hash is unconditional.
551 |
552 | Exceptions:
553 | 1: no tags. HEX[-dirty] (note: no 'g' prefix)
554 | """
555 | if pieces["closest-tag"]:
556 | rendered = pieces["closest-tag"]
557 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
558 | else:
559 | # exception #1
560 | rendered = pieces["short"]
561 | if pieces["dirty"]:
562 | rendered += "-dirty"
563 | return rendered
564 |
565 |
566 | def render(pieces, style):
567 | """Render the given version pieces into the requested style."""
568 | if pieces["error"]:
569 | return {
570 | "version": "unknown",
571 | "full-revisionid": pieces.get("long"),
572 | "dirty": None,
573 | "error": pieces["error"],
574 | "date": None,
575 | }
576 |
577 | if not style or style == "default":
578 | style = "pep440" # the default
579 |
580 | if style == "pep440":
581 | rendered = render_pep440(pieces)
582 | elif style == "pep440-branch":
583 | rendered = render_pep440_branch(pieces)
584 | elif style == "pep440-pre":
585 | rendered = render_pep440_pre(pieces)
586 | elif style == "pep440-post":
587 | rendered = render_pep440_post(pieces)
588 | elif style == "pep440-post-branch":
589 | rendered = render_pep440_post_branch(pieces)
590 | elif style == "pep440-old":
591 | rendered = render_pep440_old(pieces)
592 | elif style == "git-describe":
593 | rendered = render_git_describe(pieces)
594 | elif style == "git-describe-long":
595 | rendered = render_git_describe_long(pieces)
596 | else:
597 | raise ValueError("unknown style '%s'" % style)
598 |
599 | return {
600 | "version": rendered,
601 | "full-revisionid": pieces["long"],
602 | "dirty": pieces["dirty"],
603 | "error": None,
604 | "date": pieces.get("date"),
605 | }
606 |
607 |
608 | def get_versions():
609 | """Get version information or return default if unable to do so."""
610 | # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
611 | # __file__, we can work backwards from there to the root. Some
612 | # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
613 | # case we can only use expanded keywords.
614 |
615 | cfg = get_config()
616 | verbose = cfg.verbose
617 |
618 | try:
619 | return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
620 | except NotThisMethod:
621 | pass
622 |
623 | try:
624 | root = os.path.realpath(__file__)
625 | # versionfile_source is the relative path from the top of the source
626 | # tree (where the .git directory might live) to this file. Invert
627 | # this to find the root from __file__.
628 | for _ in cfg.versionfile_source.split("/"):
629 | root = os.path.dirname(root)
630 | except NameError:
631 | return {
632 | "version": "0+unknown",
633 | "full-revisionid": None,
634 | "dirty": None,
635 | "error": "unable to find root of source tree",
636 | "date": None,
637 | }
638 |
639 | try:
640 | pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
641 | return render(pieces, cfg.style)
642 | except NotThisMethod:
643 | pass
644 |
645 | try:
646 | if cfg.parentdir_prefix:
647 | return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
648 | except NotThisMethod:
649 | pass
650 |
651 | return {
652 | "version": "0+unknown",
653 | "full-revisionid": None,
654 | "dirty": None,
655 | "error": "unable to compute version",
656 | "date": None,
657 | }
658 |
--------------------------------------------------------------------------------