├── .gitignore
├── .images
│   ├── agents.png
│   ├── metric.png
│   ├── states.png
│   ├── topological.png
│   ├── video.png
│   ├── visual.png
│   └── voronoi.png
├── LICENSE
├── README.md
├── figures
│   ├── agent_positions.csv
│   ├── fig_01_pairwise_potential.ipynb
│   ├── fig_02_ref_distance_scalability.ipynb
│   ├── fig_03_neighbor_selection.ipynb
│   ├── fig_04_group_size.ipynb
│   ├── fig_05_trajectories.ipynb
│   ├── fig_06_visual_switching.ipynb
│   ├── fig_07_density.ipynb
│   ├── fig_08_range_noise.ipynb
│   ├── fig_10_realistic.ipynb
│   └── fig_11_realistic_trajectories.ipynb
├── requirements.txt
├── scripts
│   ├── params.yaml
│   ├── vmerge
│   ├── vmetric
│   └── vmodel
├── setup.py
├── test
│   └── test.py
└── vmodel
    ├── __init__.py
    ├── core.py
    ├── dataset.py
    ├── flocking
    │   ├── __init__.py
    │   ├── gregoire.py
    │   ├── leonard.py
    │   ├── olfati_saber.py
    │   ├── olfati_saber_simplified.py
    │   ├── olfati_saber_unified.py
    │   └── reynolds.py
    ├── geometry.py
    ├── liveplot.py
    ├── math.py
    ├── metrics.py
    ├── plot.py
    ├── random.py
    ├── util
    │   ├── __init__.py
    │   ├── args.py
    │   ├── color.py
    │   ├── mpl.py
    │   ├── multiprocessing.py
    │   ├── numpy.py
    │   ├── util.py
    │   └── xarray.py
    └── visibility.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Own ignores
2 | *.pdf
3 | *.mp4
4 | *.lprof
5 |
6 | # Byte-compiled / optimized / DLL files
7 | __pycache__/
8 | *.py[cod]
9 | *$py.class
10 |
11 | # C extensions
12 | *.so
13 |
14 | # Distribution / packaging
15 | .Python
16 | build/
17 | develop-eggs/
18 | dist/
19 | downloads/
20 | eggs/
21 | .eggs/
22 | lib/
23 | lib64/
24 | parts/
25 | sdist/
26 | var/
27 | wheels/
28 | pip-wheel-metadata/
29 | share/python-wheels/
30 | *.egg-info/
31 | .installed.cfg
32 | *.egg
33 | MANIFEST
34 |
35 | # PyInstaller
36 | # Usually these files are written by a python script from a template
37 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
38 | *.manifest
39 | *.spec
40 |
41 | # Installer logs
42 | pip-log.txt
43 | pip-delete-this-directory.txt
44 |
45 | # Unit test / coverage reports
46 | htmlcov/
47 | .tox/
48 | .nox/
49 | .coverage
50 | .coverage.*
51 | .cache
52 | nosetests.xml
53 | coverage.xml
54 | *.cover
55 | *.py,cover
56 | .hypothesis/
57 | .pytest_cache/
58 |
59 | # Translations
60 | *.mo
61 | *.pot
62 |
63 | # Django stuff:
64 | *.log
65 | local_settings.py
66 | db.sqlite3
67 | db.sqlite3-journal
68 |
69 | # Flask stuff:
70 | instance/
71 | .webassets-cache
72 |
73 | # Scrapy stuff:
74 | .scrapy
75 |
76 | # Sphinx documentation
77 | docs/_build/
78 |
79 | # PyBuilder
80 | target/
81 |
82 | # Jupyter Notebook
83 | .ipynb_checkpoints
84 |
85 | # IPython
86 | profile_default/
87 | ipython_config.py
88 |
89 | # pyenv
90 | .python-version
91 |
92 | # pipenv
93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
96 | # install all needed dependencies.
97 | #Pipfile.lock
98 |
99 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
100 | __pypackages__/
101 |
102 | # Celery stuff
103 | celerybeat-schedule
104 | celerybeat.pid
105 |
106 | # SageMath parsed files
107 | *.sage.py
108 |
109 | # Environments
110 | .env
111 | .venv
112 | env/
113 | venv/
114 | ENV/
115 | env.bak/
116 | venv.bak/
117 |
118 | # Spyder project settings
119 | .spyderproject
120 | .spyproject
121 |
122 | # Rope project settings
123 | .ropeproject
124 |
125 | # mkdocs documentation
126 | /site
127 |
128 | # mypy
129 | .mypy_cache/
130 | .dmypy.json
131 | dmypy.json
132 |
133 | # Pyre type checker
134 | .pyre/
135 |
--------------------------------------------------------------------------------
/.images/agents.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lis-epfl/vmodel/d8860087b94ffc3582f187523864550ecf135b24/.images/agents.png
--------------------------------------------------------------------------------
/.images/metric.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lis-epfl/vmodel/d8860087b94ffc3582f187523864550ecf135b24/.images/metric.png
--------------------------------------------------------------------------------
/.images/states.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lis-epfl/vmodel/d8860087b94ffc3582f187523864550ecf135b24/.images/states.png
--------------------------------------------------------------------------------
/.images/topological.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lis-epfl/vmodel/d8860087b94ffc3582f187523864550ecf135b24/.images/topological.png
--------------------------------------------------------------------------------
/.images/video.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lis-epfl/vmodel/d8860087b94ffc3582f187523864550ecf135b24/.images/video.png
--------------------------------------------------------------------------------
/.images/visual.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lis-epfl/vmodel/d8860087b94ffc3582f187523864550ecf135b24/.images/visual.png
--------------------------------------------------------------------------------
/.images/voronoi.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lis-epfl/vmodel/d8860087b94ffc3582f187523864550ecf135b24/.images/voronoi.png
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Fabian Schilling
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # vmodel
2 |
3 | The `vmodel` package implements an agent-based model to simulate visual-detection-based swarms in [Python](https://www.python.org/).
4 | It is designed to generate statistical data of swarms with large group sizes (~1000s of agents) and random initial conditions.
5 | It features several [flocking algorithms](#flocking-algorithms), [neighbor selection methods](#neighbor-selection), and [sensor noise parameters](#sensor-noise).
6 | The simulation can be run as a [graphical](#running-the-simulation-graphical) session with visualizations for rapid prototyping or in [headless](#running-the-simulation-headless) mode to quickly generate statistical results from repeated runs.
7 |
8 | This repository contains the source code accompanying the article:
9 |
10 | F. Schilling, E. Soria, and D. Floreano, "**On the Scalability of Vision-based Drone Swarms in the Presence of Occlusions**," *IEEE Access*, vol. 10, pp. 1-14, 2022. [[**IEEE** *Xplore*](https://ieeexplore.ieee.org/abstract/document/9732989)] [[Citation](#citation)]
11 |
12 | The following [video](https://youtu.be/3-O85lB_DJQ) gives a high-level explanation of the findings of the article:
13 |
14 |
15 |
16 |
17 |
18 | ## Requirements
19 |
20 | - [Git](https://git-scm.com/downloads)
21 | - [Python](https://www.python.org/downloads/) (at least version `3.6.9`)
22 |
23 | ## Getting started
24 |
25 | Clone this repository and move into its root folder:
26 |
27 | ```bash
28 | git clone git@github.com:lis-epfl/vmodel.git
29 | cd vmodel
30 | ```
31 |
32 | ### Installation
33 |
34 | Use `pip` to install the `vmodel` package (preferably in a newly created [virtual environment](https://docs.python.org/3/tutorial/venv.html)):
35 |
36 | ```bash
37 | python3 -m pip install --editable .
38 | ```
39 |
40 | This command will install all necessary requirements and add the `vmodel` simulator script (among others) to your `$PATH`.
41 |
42 | The `--editable` flag ensures that any changes you make to the code in this repository will be reflected when you run the simulation.
43 |
44 | ### Running the simulation (graphical)
45 |
46 | To run the swarm simulation with live visualizations, issue the following example command:
47 |
48 | ```bash
49 | vmodel --plot --plot-metrics --migration-dir 1 0
50 | ```
51 |
52 | The `--plot` flag shows a live visualization of the swarm state (agent positions and velocities) and the `--plot-metrics` flag shows an additional time series of useful swarm metrics.
53 | The `--migration-dir x y` argument gives the swarm an optional migration direction (a vector with `x` and `y` components).
54 |
55 | You should see two new windows on your screen, one showing a 2D visualization of the agents (left), and the other showing the time series plots (right):
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 | The agents will migrate forever until the `q` key is pressed to quit the simulation.
64 |
65 | If the windows are unresponsive, only one window is showing, or the plots are not updating, try using a different [Matplotlib backend](https://matplotlib.org/devdocs/users/explain/backends.html).
66 | The plotting code has only been tested on Ubuntu and macOS (the latter only with [XQuartz](https://www.xquartz.org/)).
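
One way to try a different backend is to set Matplotlib's `MPLBACKEND` environment variable for a single invocation (a minimal sketch; `TkAgg` is just one possible backend and must be available in your Python environment):

```bash
# Force a specific Matplotlib backend for this run only (TkAgg is an example)
MPLBACKEND=TkAgg vmodel --plot --plot-metrics --migration-dir 1 0
```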
67 |
68 | The graphical mode is useful for quick prototyping and to visualize the effects of the different simulation configurations.
69 | To easily produce statistical experiments, the simulation can also be run headless, as described below.
70 |
71 | ### Running the simulation (headless)
72 |
73 | To run the swarm simulation headless and write the results to disk, issue the following example command:
74 |
75 | ```bash
76 | vmodel --num-runs 3 --num-timesteps 1000 --verbose --progress --migration-dir 1 0
77 | ```
78 |
79 | The `--num-runs N` argument runs the simulation `N` times with the same (random) initial conditions.
80 | The `--num-timesteps N` argument runs the simulation for `N` isochronous timesteps (0.1 seconds each by default; configurable with `--delta-time`) before starting a new run.
81 | The `--verbose` flag prints additional information to the console and the `--progress` flag shows the simulation progress.
82 |
83 | By default, all runs are processed in parallel using the built-in `multiprocessing` package (use the `--no-parallel-runs` flag to simulate runs sequentially instead).
84 | To run multiple simulations with different arguments (and possibly with multiple runs each), we recommend the excellent [GNU Parallel](https://www.gnu.org/software/parallel/) command-line utility (see the example below).
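
As a rough sketch (assuming GNU Parallel is installed), a sweep over several group sizes could look like this; the specific values are arbitrary:

```bash
# Run three simulations (10, 100, and 1000 agents) in parallel, each with 3 runs
parallel vmodel --num-agents {} --num-runs 3 --num-timesteps 1000 --migration-dir 1 0 ::: 10 100 1000
```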
85 |
86 | When the simulation is done, two files will have been added to the current working directory:
87 |
88 | 1. a `.nc` file containing the states (positions, velocities, etc.) of the agents over time, and
89 | 2. a `.yaml` file with the configuration arguments of the simulation.
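
These outputs can then be loaded for analysis, for example as follows (a minimal sketch; the file names are placeholders for whatever the simulation actually wrote):

```python
import xarray as xr
import yaml

# Load the simulation states (positions, velocities, etc.) over time
states = xr.open_dataset('states.nc')  # placeholder file name

# Load the configuration arguments used for the simulation
with open('config.yaml') as f:  # placeholder file name
    config = yaml.safe_load(f)
```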
90 |
91 | ## Simulation configuration
92 |
93 | The swarm behavior can be configured in a variety of ways.
94 | Issue the following command to get an overview of all the possible command line arguments:
95 |
96 | ```bash
97 | vmodel --help
98 | ```
99 |
100 | The most important command-line arguments are listed below (an example command follows the list):
101 |
102 | - *Number of agents* (`--num-agents N`): the group size (default: `10`)
103 | - *Reference distance* (`--ref-distance F`): the desired equilibrium distance between agents (default: `1`, in *meters*!)
104 | - *Random seed* (`--seed SEED`): random seed for repeatable experiments (default: `None` for a random seed; pass an *integer* for repeatability)
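
For example, the following command (using only the flags documented above; the values are arbitrary) simulates 100 agents with a 2-meter reference distance and a fixed seed:

```bash
vmodel --num-agents 100 --ref-distance 2 --seed 42 --plot --migration-dir 1 0
```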
105 |
106 | ### Flocking algorithms
107 |
108 | The `vmodel` package comes with several flocking algorithms, e.g.,
109 |
110 | - *Reynolds* (`--algorithm reynolds`): Craig Reynolds' boids algorithm [[**ACM** *Digital Library*]](https://dl.acm.org/doi/10.1145/37402.37406)
111 | - *Olfati* (`--algorithm olfati`): Reza Olfati-Saber's algorithm [[**IEEE** *Xplore*]](https://ieeexplore.ieee.org/document/1605401)
112 | - Others: see [flocking](./vmodel/flocking) folder (not as extensively tested as the two above)
113 |
114 | ### Neighbor selection
115 |
116 | The `vmodel` package supports several different *neighbor selection* methods, e.g., *metric*, *visual*, *topological*, and *voronoi* (from left to right):
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 | - *Metric* (`--perception-radius F`): select only agents in a given metric radius `F`
129 | - *Visual* (`--filter-occluded`): select only agents within the perception radius that are not visually occluded by closer ones
130 | - *Topological* (`--max-agents N`): select only the `N` closest agents, irrespective of their metric distance
131 | - *Voronoi* (`--filter-voronoi`): select only the agents that share a Voronoi border with the focal agent
132 |
133 | The neighbor selection methods can be freely combined with each other.
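
As an illustrative sketch (the specific values are arbitrary), a metric + visual + topological combination could be configured like this:

```bash
# Keep only non-occluded agents within a 3 m radius, and at most the 6 closest of those
vmodel --perception-radius 3 --filter-occluded --max-agents 6 --migration-dir 1 0
```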
134 |
135 | ### Sensor noise
136 |
137 | The `vmodel` package models noisy visual detections in two components (an example command follows the list):
138 |
139 | - *Range uncertainty* (`--range-std STD`): models the standard deviation `STD` of the distance to other agents (in *meters*!)
140 | - *Bearing uncertainty* (`--bearing-std STD`): models the standard deviation `STD` of the bearing towards other agents (in *degrees*!)
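
For instance, the following (arbitrary) values add moderate range and bearing noise to an otherwise default simulation:

```bash
vmodel --range-std 0.1 --bearing-std 5 --plot --migration-dir 1 0
```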
141 |
142 | ## Dataset
143 |
144 | The `vmodel` dataset (ca. 700 MB zipped, ca. 6 GB when extracted) can be downloaded here: [[Google Drive](https://drive.google.com/file/d/1AAGuwprqAA7-n2VAQgh2Qv89Yp3DpoFA)] [[SWITCHdrive](https://drive.switch.ch/index.php/s/PYEjRdu6LVzE6Qg)].
145 |
146 | The dataset is composed of several multi-dimensional arrays (including metadata) in the [netCDF4](https://unidata.github.io/netcdf4-python/) format (with `.nc` file extension).
147 | The files can be opened, e.g., using the excellent [xarray](https://xarray.pydata.org) library, and converted to [NumPy](https://numpy.org/) arrays.
148 | In the dataset, the different dimensions correspond, e.g., to the number of agents, the reference distance, the neighbor selection method, etc.
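
A minimal sketch of exploring one of the dataset files with xarray (the file name is a placeholder; the variable and coordinate names follow those used in the figure notebooks, e.g. `nndist`, `agent`, `time`, and `run`):

```python
import xarray as xr

ds = xr.open_dataset('merged_example.nc')  # placeholder file name
print(ds)  # lists the variables, dimensions, and coordinates

# Example: minimum nearest-neighbor distance, averaged over the last timesteps and all runs
nndist_min = ds.nndist.min('agent').isel(time=slice(400, None)).mean(['time', 'run'])
```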
149 |
150 | To generate the figures contained in the [article](https://ieeexplore.ieee.org/abstract/document/9732989), download the dataset and run the Jupyter notebooks in the [figures](./figures/) folder (adjusting the `data_dir` path to where you saved the dataset locally).
151 |
152 | ## Testing
153 |
154 | To run the unit tests, move to the [test](./test/) folder and run the following command:
155 |
156 | ```bash
157 | python3 -m unittest -v test
158 | ```
159 |
160 | *Note*: the tests only cover the most important functions related to geometry and agent-to-agent visibility.
161 | We do not aim for full test coverage.
162 |
163 | ## Citation
164 |
165 | If you use this work in an academic context, please cite the following article:
166 |
167 | ```bibtex
168 | @article{schilling_vmodel_2022,
169 | title = {On the Scalability of Vision-based Drone Swarms in the Presence of Occlusions},
170 | author = {Schilling, Fabian and Soria, Enrica and Floreano, Dario},
171 | journal = {IEEE Access},
172 | year = {2022},
173 | volume = {10},
174 | pages = {1--14},
175 | doi = {10.1109/ACCESS.2022.3158758},
176 | issn = {2169-3536}
177 | }
178 | ```
179 |
180 | ## Acknowledgments
181 |
182 | Special thanks to:
183 |
184 | - [SwarmLab](https://github.com/lis-epfl/swarmlab) for inspiration regarding flocking algorithms and visualizations,
185 | - [xarray](https://xarray.pydata.org/) for making the analysis of multi-dimensional data fun again, and
186 | - [Numba](https://numba.pydata.org/) for speeding up numerical code and letting me be lazy with code vectorization.
187 |
188 | ## License
189 |
190 | This project is released under the [MIT License](https://opensource.org/licenses/MIT).
191 | Please refer to the [`LICENSE`](LICENSE) for more details.
192 |
--------------------------------------------------------------------------------
/figures/agent_positions.csv:
--------------------------------------------------------------------------------
1 | -9.408344077536318295e+00 -2.569861566155757338e+00
2 | -9.856140301823248961e+00 -9.373831531010523577e-02
3 | -9.544861313191621122e+00 1.036565505767935491e+00
4 | -9.536531704206359095e+00 2.560726952459233985e+00
5 | -8.642822922538030639e+00 -3.592270986094132645e+00
6 | -9.184412057052643519e+00 -1.215772383781635213e+00
7 | -8.858495851646228658e+00 5.981688787438876886e-02
8 | -8.628017292648403114e+00 3.025688068477832005e+00
9 | -8.924573862811056912e+00 4.187831749271111903e+00
10 | -8.021705755014099992e+00 -5.674405137530253995e+00
11 | -7.952856120217571601e+00 -4.553336444704524588e+00
12 | -8.544080769769601957e+00 -1.986082864759660893e+00
13 | -8.520899962092222779e+00 1.870807102081109008e+00
14 | -7.583082039896211768e+00 -1.340108906714407411e+00
15 | -7.731899055254964459e+00 2.897910053629431104e-01
16 | -7.508103261436083642e+00 2.182166617081767512e+00
17 | -7.828941795469104292e+00 3.650001527011825786e+00
18 | -7.581764018676776296e+00 5.316221757424340666e+00
19 | -6.792888743745534086e+00 -6.377400070942473853e+00
20 | -7.097569564155676858e+00 -3.022865313109371677e+00
21 | -6.508473135320105740e+00 -1.723106417864284978e+00
22 | -6.723983180463241283e+00 6.084849394004329781e+00
23 | -5.838799283991966682e+00 -7.147161866196696245e+00
24 | -6.336294160059974701e+00 -5.263259468273647101e+00
25 | -6.306264962832008436e+00 -4.081313607568277213e+00
26 | -5.829416790903742651e+00 -9.084458537980530934e-01
27 | -6.063836767220703194e+00 1.202270108872358634e-01
28 | -6.138007372952739793e+00 1.179796888483144457e+00
29 | -6.265024212330267517e+00 2.456518281141240934e+00
30 | -6.044627747637158599e+00 3.583207515777491636e+00
31 | -6.339021902793251506e+00 4.752663566427919761e+00
32 | -6.000430888130815710e+00 7.524291497903604409e+00
33 | -5.276480009740603272e+00 -8.469997332535919909e+00
34 | -5.055539863055695804e+00 -4.404386866363632080e+00
35 | -5.310455755880556517e+00 -3.065600031316320084e+00
36 | -5.163487481216178843e+00 -2.041221332343321038e+00
37 | -5.232086485810436116e+00 6.127981966506592215e+00
38 | -5.138394769828543041e+00 8.462764149084414100e+00
39 | -4.441574278563973799e+00 -7.386299875083256694e+00
40 | -4.866863803491032314e+00 -5.852753755796633151e+00
41 | -4.608047237114946348e+00 -2.901851195606237610e-01
42 | -5.045854881904542566e+00 1.221229286104234646e+00
43 | -4.621196815784140099e+00 2.373929174532458219e+00
44 | -5.001174670690694768e+00 3.911664554325865240e+00
45 | -4.581840747516875645e+00 4.965946723700900733e+00
46 | -4.555691149757801206e+00 7.495527875146350993e+00
47 | -4.037995324425493138e+00 -8.320428687472089635e+00
48 | -3.996790732962209880e+00 -4.465199117018427977e+00
49 | -4.322821693241089669e+00 -1.493110426877695573e+00
50 | -3.784772706993061675e+00 7.908611409932326808e-01
51 | -3.723713148871592438e+00 3.576770614652090785e+00
52 | -4.056298999944517369e+00 5.905454753194836925e+00
53 | -2.990813501118667617e+00 -8.878726151102620534e+00
54 | -3.391185835054153941e+00 -6.099580962751095292e+00
55 | -3.460527823264809122e+00 -2.968854913737216883e+00
56 | -3.316027473762570565e+00 -5.340193778811990200e-01
57 | -3.444124397431480133e+00 1.861621605723279060e+00
58 | -3.028330193483099286e+00 4.893246471577837653e+00
59 | -3.558784269525323118e+00 7.334894935260003024e+00
60 | -3.064817762288026159e+00 8.540541582282124011e+00
61 | -2.744513335389896724e+00 -7.045405704650066880e+00
62 | -2.267745677753405964e+00 -4.449117580040073960e+00
63 | -2.299139890290708976e+00 -3.447109151990487064e+00
64 | -2.784463749321783865e+00 -2.155490803886988438e+00
65 | -2.384567349092987953e+00 -7.476650154066888376e-02
66 | -2.602092296697353646e+00 1.069775107807496184e+00
67 | -2.421558796254452695e+00 3.132202260643353497e+00
68 | -2.843327872076597984e+00 6.565407176312898940e+00
69 | -1.920510181393755289e+00 -1.536055638913550681e+00
70 | -1.516235222956137108e+00 1.500339722391466069e+00
71 | -1.718075669236421632e+00 4.120131005470323515e+00
72 | -1.697443017139152133e+00 6.099257124346806336e+00
73 | -1.705054069061569422e+00 7.785285144971794580e+00
74 | -1.842891551436641961e+00 9.108852343091946580e+00
75 | -1.217337569082687310e+00 -9.729943672082736583e+00
76 | -1.381663243438067568e+00 -7.945116339517814374e+00
77 | -9.543219131744091754e-01 -6.982469043380301166e+00
78 | -1.502908212420917877e+00 -6.014819418295612508e+00
79 | -1.141250511480381036e+00 -4.419274792078326186e+00
80 | -1.039101342742258183e+00 -2.476143192553836414e+00
81 | -1.088642978157887242e+00 -9.395191152437014637e-01
82 | -1.020263646502616695e+00 1.466812026344808118e-01
83 | -1.351002027049938548e+00 2.531546793098028658e+00
84 | -9.737438590130906846e-01 5.000790715912572182e+00
85 | -7.884480936109792282e-01 -8.787258569428182398e+00
86 | -3.768653359561344729e-01 -5.069725622102008167e+00
87 | -3.351794607287832406e-01 -1.713572965444548402e+00
88 | -6.124711537781646342e-01 3.306930128531172741e+00
89 | -4.590371737756075987e-01 7.616933826580705613e+00
90 | -6.839879346952102424e-01 8.854286229606962877e+00
91 | -7.456126782763980998e-01 9.884558232376903675e+00
92 | 3.944309537632939566e-01 -9.410479694329270117e+00
93 | 9.009949894262803127e-02 -8.261128519930208469e+00
94 | 2.293964997094590785e-01 -7.047955915126734361e+00
95 | -7.531616668185137087e-02 -6.069529203714646215e+00
96 | -5.184079073841019181e-02 -3.438774765122412980e+00
97 | 0.000000000000000000e+00 0.000000000000000000e+00
98 | -1.771226875578513216e-02 1.464729806850543881e+00
99 | 3.269060326828157059e-01 2.581396890732872151e+00
100 | 4.456266839205813568e-01 3.982790677304500093e+00
101 | 1.485988294182263303e-01 5.103915973335004352e+00
102 | 1.049400678135139486e-01 6.312019895266089975e+00
103 | 3.335150695211925864e-01 8.471662580153477506e+00
104 | 5.659197475256423360e-01 9.463510618371145711e+00
105 | 1.223224733733594860e+00 -8.071926502474283893e+00
106 | 1.088959517943566269e+00 -5.770402427036555260e+00
107 | 1.199648899172000682e+00 -2.941885332242683226e+00
108 | 8.598188543686973162e-01 -1.724274766027610539e+00
109 | 1.052237837933171605e+00 -4.443979908967730097e-01
110 | 1.279504444529289131e+00 1.309620369594245659e+00
111 | 1.236489196415783809e+00 3.190894272505730100e+00
112 | 1.905879423881822632e+00 -8.868232577303455599e+00
113 | 1.782838314691042925e+00 -6.671039852146684268e+00
114 | 1.476438268495527950e+00 -4.333295239851334912e+00
115 | 1.802449414475363554e+00 2.827763627335961871e-01
116 | 1.445445332459945220e+00 5.006253333111626347e+00
117 | 1.762860895969156516e+00 6.967426575815238721e+00
118 | 1.449722094843213682e+00 8.011324273712844501e+00
119 | 1.688657763357230834e+00 9.836162363627721561e+00
120 | 2.282335461729621429e+00 -3.436757338788686056e+00
121 | 2.242650991588774900e+00 -6.635152446593917119e-01
122 | 2.709767883507092634e+00 3.360387566277271532e+00
123 | 2.195624949449554109e+00 4.225093972830277878e+00
124 | 2.572700730851574136e+00 6.380546064004239071e+00
125 | 2.717355690414285263e+00 7.877574627194455559e+00
126 | 2.118228625243302687e+00 8.845493034180101688e+00
127 | 3.013721656826682249e+00 -9.496490088902231008e+00
128 | 3.017671438860269006e+00 -7.927863394070554293e+00
129 | 3.208402919586244906e+00 -6.692584439883171754e+00
130 | 2.747090151080676890e+00 -5.801390377162766399e+00
131 | 2.935262100138476882e+00 -4.615577783657024646e+00
132 | 2.960757460125677198e+00 -2.188461241898751908e+00
133 | 2.811689376712555699e+00 4.793713653136659758e-01
134 | 2.784928346826953671e+00 2.017423865956091689e+00
135 | 3.293767607002969555e+00 4.418158974962887342e+00
136 | 3.356775221910329066e+00 9.192125506456878981e+00
137 | 3.701349871793157220e+00 -8.725877026275021819e+00
138 | 4.033599731495394636e+00 -5.543935900647180226e+00
139 | 4.136782947892230666e+00 -4.267558537330246260e+00
140 | 3.934434627834654918e+00 -1.571321450097560302e+00
141 | 3.531228798750623454e+00 -3.656260959871051597e-01
142 | 3.849466925079719104e+00 2.126120589161168084e+00
143 | 3.980902049366381590e+00 3.181042750489655901e+00
144 | 3.610713554822412519e+00 5.373886487902822751e+00
145 | 4.084368959145381339e+00 6.547346614880463989e+00
146 | 3.756249161134988412e+00 7.534306172060034612e+00
147 | 4.815394884928126729e+00 -8.651773361481261304e+00
148 | 4.178372691759843605e+00 -7.071458723702871652e+00
149 | 4.207198156482935048e+00 -3.260531586966389561e+00
150 | 4.495480344092944947e+00 -8.322822788144890183e-03
151 | 4.547111738525051905e+00 1.265712430694780011e+00
152 | 4.733449658798171811e+00 5.778545093284579437e+00
153 | 4.269609526194887650e+00 8.739253369115942860e+00
154 | 5.461872294342118295e+00 -6.254341045902499729e+00
155 | 5.381068885691272285e+00 -2.975062480152047328e+00
156 | 4.996733083003135789e+00 -1.725488008049374855e+00
157 | 5.409085939922757547e+00 -4.361788704811093709e-01
158 | 5.472608554846271289e+00 1.725760638695348348e+00
159 | 5.476643725911767291e+00 2.824921184532428242e+00
160 | 4.981164314652469116e+00 4.675531720051143481e+00
161 | 5.207898194373106193e+00 8.365869578545730434e+00
162 | 5.940657618452144462e+00 -7.574874313612247612e+00
163 | 5.832019229770756041e+00 -4.869796635173601729e+00
164 | 5.984727729951147879e+00 -3.836951368824665032e+00
165 | 5.856884923810376620e+00 6.677798921144724176e-01
166 | 5.609399492368661200e+00 6.417898626925577332e+00
167 | 6.464767611988143869e+00 -6.260979593024005752e+00
168 | 6.774863437271189781e+00 -2.441377249414486528e+00
169 | 6.342703328189241319e+00 -1.101176055924483421e+00
170 | 6.962583306517544202e+00 2.227119494277509659e-01
171 | 6.879706795186109503e+00 1.511404550315894824e+00
172 | 6.666483541267211876e+00 2.501257235084082708e+00
173 | 6.519890889330142159e+00 3.522146897992694647e+00
174 | 6.701699850681556825e+00 5.253902251814315250e+00
175 | 6.462346346664908481e+00 7.368444317388508580e+00
176 | 7.286133207425905312e+00 -6.847710441778111168e+00
177 | 7.093327362494978416e+00 -5.405175116156916815e+00
178 | 7.310965122335293387e+00 -4.301186677370059108e+00
179 | 7.479802734254640484e+00 -1.165135574246621175e+00
180 | 7.249007753856613334e+00 4.276228527824923020e+00
181 | 7.219153428383471294e+00 6.230265429240045449e+00
182 | 7.985611352464047741e+00 -5.966208074349527379e+00
183 | 8.018712789583382516e+00 -2.761126875771946487e+00
184 | 8.059354568334931912e+00 2.118997555508279618e+00
185 | 8.268880283657686903e+00 4.323417179178923320e+00
186 | 8.034263941205235682e+00 5.439200778651590795e+00
187 | 9.000363563185906912e+00 -4.344313803500214455e+00
188 | 9.082003377056533822e+00 -2.916628229954246976e+00
189 | 8.407332868143672755e+00 -1.769374495164727890e+00
190 | 8.527649329614650497e+00 -5.521394357416742338e-01
191 | 8.558738339901992731e+00 8.620727388432225524e-01
192 | 8.751145388017370408e+00 2.941953272259366869e+00
193 | 9.645568768006743454e+00 -1.981545246540248328e+00
194 | 9.519423384476112915e+00 -8.487449697834925644e-01
195 | 9.738153437965983983e+00 5.894024734137062183e-01
196 | 9.622970447992241105e+00 2.279222234059965047e+00
197 |
--------------------------------------------------------------------------------
/figures/fig_01_pairwise_potential.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np\n",
10 | "import matplotlib.pyplot as plt"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": null,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "plt.rcParams['figure.autolayout'] = True\n",
20 | "plt.rcParams['figure.figsize'] = [6, 3]"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "metadata": {},
27 | "outputs": [],
28 | "source": [
29 | "start, end = 1e-9, 10\n",
30 | "dist = np.linspace(start, end, 100)\n"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "metadata": {},
37 | "outputs": [],
38 | "source": [
39 | "d_ref = 3\n",
40 | "sep_gain = d_ref ** 2"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": null,
46 | "metadata": {},
47 | "outputs": [],
48 | "source": [
49 | "rep = 1 / dist * sep_gain\n",
50 | "coh = dist.copy()\n",
51 | "tot = rep + coh"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "fig, ax = plt.subplots()\n",
61 | "color_sep, color_coh = 'tab:red', 'tab:green'\n",
62 | "color_eq = 'black'\n",
63 | "ax.plot(dist, rep, color=color_sep)\n",
64 | "ax.plot(dist, coh, color=color_coh)\n",
65 | "# ax.plot(dist, tot, color='tab:blue')\n",
66 | "ax.text(0.75, 20, 'separation', color=color_sep)\n",
67 | "ax.text(6, 9, 'cohesion', color=color_coh)\n",
68 | "\n",
69 | "ax.text(3, 12, 'equilibrium\\n distance', color='black', ha='center', va='center')\n",
70 | "ax.arrow(3, 9, 0, -4, width=0.01, head_width=0.1, head_length=0.5, length_includes_head=True, ec='black', fc='black')\n",
71 | "ax.set(xlim=(0, end), ylim=(0, 30))\n",
72 | "ax.set(xlabel=r'inter-agent distance $d_{ij}$ [$m$]', ylabel=r'pairwise potential')\n",
73 | "fig.savefig('pairwise_potential.pdf')\n",
74 | "# ax.grid()\n",
75 | "# ax.set(xticklabels=[], yticklabels=[])\n",
76 | "pass"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {},
83 | "outputs": [],
84 | "source": []
85 | }
86 | ],
87 | "metadata": {
88 | "interpreter": {
89 | "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
90 | },
91 | "kernelspec": {
92 | "display_name": "Python 3.6.9 64-bit",
93 | "name": "python3"
94 | },
95 | "language_info": {
96 | "codemirror_mode": {
97 | "name": "ipython",
98 | "version": 3
99 | },
100 | "file_extension": ".py",
101 | "mimetype": "text/x-python",
102 | "name": "python",
103 | "nbconvert_exporter": "python",
104 | "pygments_lexer": "ipython3",
105 | "version": "3.6.9"
106 | },
107 | "orig_nbformat": 4
108 | },
109 | "nbformat": 4,
110 | "nbformat_minor": 2
111 | }
112 |
--------------------------------------------------------------------------------
/figures/fig_02_ref_distance_scalability.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import glob\n",
10 | "import os\n",
11 | "\n",
12 | "from addict import Dict\n",
13 | "import functools\n",
14 | "import xarray as xr\n",
15 | "import numpy as np\n",
16 | "import matplotlib.pyplot as plt\n",
17 | "import vmodel.util.xarray as xrutil\n",
18 | "from vmodel import plot\n",
19 | "from vmodel import metrics\n",
20 | "\n",
21 | "%load_ext autoreload\n",
22 | "%autoreload 2"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": null,
28 | "metadata": {},
29 | "outputs": [],
30 | "source": [
31 | "plt.rcParams['figure.autolayout'] = True\n",
32 | "plt.rcParams['figure.figsize'] = [6, 3.5]"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "# Each subfolder contains an experiment\n",
42 | "expdir = '/home/fabian/vmodel_datasets/ref_distance_figure'\n",
43 | "plotname = expdir.split(os.path.sep)[-1]\n",
44 | "plotname"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": null,
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "paths = sorted(glob.glob(f'{expdir}/**/*merged*.nc', recursive=True))\n",
54 | "paths[0], len(paths)"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": null,
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "# Fill experimental dictionary: experiment -> num_agents -> ds\n",
64 | "expdict = Dict()\n",
65 | "key = 'num_agents'\n",
66 | "for path in paths:\n",
67 | " # ds = pickle.load(open(path, 'rb'))\n",
68 | " ds = xr.open_dataset(path)\n",
69 | " *_, expname, fname = path.split(os.path.sep)\n",
70 | " expdict[float(expname.replace('dist', ''))] = ds"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": null,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "ds = xr.concat(expdict.values(), 'dist')\n",
80 | "ds.coords['dist'] = list(expdict.keys())"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": null,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "sepgains = [d ** 2 / 2 for d in ds.dist.data]\n",
90 | "sepgains"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {},
97 | "outputs": [],
98 | "source": [
99 | "fig, ax = plt.subplots()\n",
100 | "mindist = ds.nndist.min('agent').isel(time=slice(400, None)).mean('time')\n",
101 | "for dist, dsagents in mindist.groupby('dist'):\n",
102 | " xs = dsagents.nagents.data\n",
103 | " ys, yerrs = dsagents.mean('run'), dsagents.std('run')\n",
104 | " ax.errorbar(xs, ys, yerrs, fmt='-o', capsize=3)\n",
105 | "ax.grid()\n",
106 | "# ax.legend()\n",
107 | "ax.set(xscale='log')\n",
108 | "ax.set(xticks=ds.nagents.data, xticklabels=ds.nagents.data)\n",
109 | "ax.set(yticks=ds.dist.data, yticklabels=ds.dist.data)\n",
110 | "# ax.set(ylim=(0, None))\n",
111 | "secax = ax.secondary_yaxis('right')\n",
112 | "ax.set(xlabel=r'number of agents $N$', ylabel=r'min. distance $d^\\mathrm{min}$ [$m$]')\n",
113 | "secax.set(ylabel=r'separation gain $k^\\mathrm{sep}$ [$m s^{-1}$]')\n",
114 | "secax.set(yticks=ds.dist.data, yticklabels=sepgains)\n",
115 | "pass\n",
116 | "fig.savefig(f'ref_distance_scalability.pdf', bbox_inches='tight')"
117 | ]
118 | },
119 | {
120 | "cell_type": "code",
121 | "execution_count": null,
122 | "metadata": {},
123 | "outputs": [],
124 | "source": []
125 | }
126 | ],
127 | "metadata": {
128 | "interpreter": {
129 | "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
130 | },
131 | "kernelspec": {
132 | "display_name": "Python 3.6.9 64-bit",
133 | "name": "python3"
134 | },
135 | "language_info": {
136 | "codemirror_mode": {
137 | "name": "ipython",
138 | "version": 3
139 | },
140 | "file_extension": ".py",
141 | "mimetype": "text/x-python",
142 | "name": "python",
143 | "nbconvert_exporter": "python",
144 | "pygments_lexer": "ipython3",
145 | "version": "3.6.9"
146 | },
147 | "orig_nbformat": 4
148 | },
149 | "nbformat": 4,
150 | "nbformat_minor": 2
151 | }
152 |
--------------------------------------------------------------------------------
/figures/fig_03_neighbor_selection.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np\n",
10 | "import matplotlib.pyplot as plt\n",
11 | "\n",
12 | "import vmodel.random as vrandom\n",
13 | "import vmodel.liveplot as vplot\n",
14 | "import vmodel.geometry as vgeom\n",
15 | "import vmodel.visibility as vvis\n",
16 | "import vmodel.util.color as vcolor\n",
17 | "\n",
18 | "from scipy.spatial import Voronoi, Delaunay, ConvexHull\n",
19 | "\n",
20 | "from vmodel.plot import voronoi_plot_2d\n",
21 | "\n",
22 | "%load_ext autoreload\n",
23 | "%autoreload 2"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "num_agents = 200\n",
33 | "radius_agent = 0.25\n",
34 | "radius_arena = 10\n",
35 | "radius_perc = 3\n",
36 | "min_dist = 1\n",
37 | "maxlim = radius_arena + radius_agent * 2\n",
38 | "k = np.sqrt(2)\n",
39 | "lim = (-radius_perc * k, radius_perc * k)"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "# positions = vrandom.poisson_disk_spherical(radius_arena, min_dist, num_agents, candidate='first')\n",
49 | "# positions.shape"
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": null,
55 | "metadata": {},
56 | "outputs": [],
57 | "source": [
58 | "# Save random positions to CSV for reproducibility\n",
59 | "# np.savetxt('agent_positions.csv', positions)"
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": null,
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "positions = np.loadtxt('agent_positions.csv')"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": null,
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "# Find pos self!\n",
78 | "index = [i for i in range(len(positions)) if positions[i][0] == 0][0]\n",
79 | "index"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "pos_self = positions[index]\n",
89 | "pos_others = np.delete(positions, index, axis=0)"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": null,
95 | "metadata": {},
96 | "outputs": [],
97 | "source": [
98 | "visibility = vvis.visibility_set(pos_others, radius_agent)\n",
99 | "visibility.shape"
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": null,
105 | "metadata": {},
106 | "outputs": [],
107 | "source": [
108 | "max_agents = 6\n",
109 | "distances = np.linalg.norm(pos_others, axis=1)\n",
110 | "indices = distances.argsort()[:max_agents]"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "metadata": {},
117 | "outputs": [],
118 | "source": [
119 | "color_gray = vcolor.grey\n",
120 | "color_blue = vcolor.blue\n",
121 | "color_lgray = vcolor.lightgrey\n",
122 | "figsize = (5, 5)"
123 | ]
124 | },
125 | {
126 | "cell_type": "markdown",
127 | "metadata": {},
128 | "source": [
129 | "# Metric"
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "metadata": {},
136 | "outputs": [],
137 | "source": [
138 | "fig, ax = plt.subplots(figsize=figsize)\n",
139 | "\n",
140 | "# Add background rectangle\n",
141 | "xy, size = (-maxlim, -maxlim), 2 * maxlim\n",
142 | "background = plt.Rectangle(xy, size, size, color=color_lgray)\n",
143 | "ax.add_patch(background)\n",
144 | "\n",
145 | "perc_radius = plt.Circle((0, 0), radius=radius_perc, color='white')\n",
146 | "ax.add_patch(perc_radius)\n",
147 | "\n",
148 | "perc_circle = plt.Circle((0, 0), radius=radius_perc, fill=False, ls=':', lw=0.5, ec='grey', zorder=100)\n",
149 | "ax.add_patch(perc_circle)\n",
150 | "\n",
151 | "for i, pos in enumerate(pos_others):\n",
152 | "\n",
153 | " too_far = distances[i] > radius_perc\n",
154 | " color = color_gray if too_far else color_blue\n",
155 | "\n",
156 | " vplot.plot_circle(ax, pos, radius=radius_agent, color=color, zorder=2, fill=True)\n",
157 | "vplot.plot_circle(ax, (0, 0), radius=radius_agent, color='tab:red', zorder=99)\n",
158 | "ax.set(aspect='equal')\n",
159 | "ax.set(xlim=lim, ylim=lim)\n",
160 | "ax.set(xticks=[], yticks=[])\n",
161 | "fig.savefig(f'1_neighbor_selection_metric.pdf', bbox_inches='tight')"
162 | ]
163 | },
164 | {
165 | "cell_type": "markdown",
166 | "metadata": {},
167 | "source": [
168 | "# Visual"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": null,
174 | "metadata": {},
175 | "outputs": [],
176 | "source": [
177 | "fig, ax = plt.subplots(figsize=figsize)\n",
178 | "scale = 100\n",
179 | "\n",
180 | "# Add background rectangle\n",
181 | "xy, size = (-maxlim, -maxlim), 2 * maxlim\n",
182 | "\n",
183 | "# Add background rectangle\n",
184 | "xy, size = (-maxlim, -maxlim), 2 * maxlim\n",
185 | "background = plt.Rectangle(xy, size, size, color=color_lgray)\n",
186 | "ax.add_patch(background)\n",
187 | "\n",
188 | "perc_radius = plt.Circle((0, 0), radius=radius_perc, color='white')\n",
189 | "ax.add_patch(perc_radius)\n",
190 | "\n",
191 | "perc_circle = plt.Circle((0, 0), radius=radius_perc, fill=False, ls=':', lw=0.5, ec='grey', zorder=100)\n",
192 | "ax.add_patch(perc_circle)\n",
193 | "\n",
194 | "for i, pos in enumerate(pos_others):\n",
195 | "\n",
196 | " # Draw tangent points\n",
197 | " p1, p2 = vgeom.tangent_points_to_circle(pos, radius_agent)\n",
198 | " p1, p2 = np.array(p1), np.array(p2)\n",
199 | "\n",
200 | " ps1, ps2 = p1 * scale, p2 * scale\n",
201 | " d1, d2 = np.linalg.norm(p1), np.linalg.norm(p2)\n",
202 | "\n",
203 | " poly = np.array([p1, ps1, ps2, p2])\n",
204 | " polygon = plt.Polygon(poly, color=color_lgray, zorder=1)\n",
205 | " ax.add_patch(polygon)\n",
206 | "\n",
207 | " isvisible = visibility[i]\n",
208 | " isclose = distances[i] < radius_perc\n",
209 | " isneighbor = isvisible and isclose\n",
210 | "\n",
211 | " color = color_blue if isneighbor else color_gray\n",
212 | "\n",
213 | " vplot.plot_circle(ax, pos, radius=radius_agent, color=color, zorder=2, fill=True)\n",
214 | "vplot.plot_circle(ax, (0, 0), radius=radius_agent, color='tab:red', zorder=99)\n",
215 | "ax.set(aspect='equal')\n",
216 | "ax.set(xlim=lim, ylim=lim)\n",
217 | "ax.set(xticks=[], yticks=[])\n",
218 | "fig.savefig(f'2_neighbor_selection_visual.pdf', bbox_inches='tight')"
219 | ]
220 | },
221 | {
222 | "cell_type": "markdown",
223 | "metadata": {},
224 | "source": [
225 | "# Topological"
226 | ]
227 | },
228 | {
229 | "cell_type": "code",
230 | "execution_count": null,
231 | "metadata": {},
232 | "outputs": [],
233 | "source": [
234 | "fig, ax = plt.subplots(figsize=figsize)\n",
235 | "\n",
236 | "# Add background rectangle\n",
237 | "xy, size = (-maxlim, -maxlim), 2 * maxlim\n",
238 | "background = plt.Rectangle(xy, size, size, color=color_lgray)\n",
239 | "ax.add_patch(background)\n",
240 | "\n",
241 | "# Sort by polar coordinates\n",
242 | "pos_topo = [p for p in pos_others[indices]]\n",
243 | "pos_topo.sort(key=lambda p: np.arctan2(p[1], p[0]))\n",
244 | "polygon = plt.Polygon(pos_topo, color=vcolor.white)\n",
245 | "border = plt.Polygon(pos_topo, color=vcolor.grey, ls=':', lw=0.5, fill=False)\n",
246 | "ax.add_patch(polygon)\n",
247 | "ax.add_patch(border)\n",
248 | "\n",
249 | "for i, pos in enumerate(pos_others):\n",
250 | "\n",
251 | " include = (i in indices)\n",
252 | "\n",
253 | " color = color_blue if include else color_gray\n",
254 | "\n",
255 | " if include:\n",
256 | " x, y = pos\n",
257 | " \n",
258 | " ax.plot([0, x], [0, y], color=color_blue)\n",
259 | "\n",
260 | " vplot.plot_circle(ax, pos, radius=radius_agent, color=color, zorder=2, fill=True)\n",
261 | "vplot.plot_circle(ax, (0, 0), radius=radius_agent, color=vcolor.focal, zorder=99)\n",
262 | "ax.set(aspect='equal')\n",
263 | "ax.set(xlim=lim, ylim=lim)\n",
264 | "ax.set(xticks=[], yticks=[])\n",
265 | "fig.savefig(f'3_neighbor_selection_topological.pdf', bbox_inches='tight')"
266 | ]
267 | },
268 | {
269 | "cell_type": "markdown",
270 | "metadata": {},
271 | "source": [
272 | "# Voronoi"
273 | ]
274 | },
275 | {
276 | "cell_type": "code",
277 | "execution_count": null,
278 | "metadata": {},
279 | "outputs": [],
280 | "source": [
281 | "fig, ax = plt.subplots(figsize=figsize)\n",
282 | "\n",
283 | "pos_all = np.insert(pos_others, 0, np.zeros(2), axis=0)\n",
284 | "vor = Voronoi(pos_all)\n",
285 | "tri = Delaunay(pos_all)\n",
286 | "neighbors = np.array(vgeom.voronoi_neighbors(pos_all)[0]) - 1\n",
287 | "fig = voronoi_plot_2d(vor, ax=ax, show_vertices=False, point_size=0, line_colors=vcolor.grey, line_width=0.5, line_style=':')\n",
288 | "\n",
289 | "# Color all non neighbor regions light grey\n",
290 | "for index, r in enumerate(vor.point_region):\n",
291 | " region = vor.regions[r]\n",
292 | " if index - 1 in neighbors or index == 0:\n",
293 | " continue\n",
294 | " if not -1 in region:\n",
295 | " polygon = [vor.vertices[i] for i in region]\n",
296 | " ax.fill(*zip(*polygon), color=vcolor.lightgrey)\n",
297 | "\n",
298 | "for i, pos in enumerate(pos_others):\n",
299 | "\n",
300 | " isneighbor = (i in neighbors)\n",
301 | "\n",
302 | " color = color_blue if isneighbor else color_gray\n",
303 | " vplot.plot_circle(ax, pos, radius=radius_agent, color=color, zorder=2, fill=True)\n",
304 | " \n",
305 | "vplot.plot_circle(ax, (0, 0), radius=radius_agent, color=vcolor.focal, zorder=99)\n",
306 | "ax.set(aspect='equal')\n",
307 | "ax.set(xlim=lim, ylim=lim)\n",
308 | "ax.set(xticks=[], yticks=[])\n",
309 | "fig.savefig(f'4_neighbor_selection_voronoi.pdf', bbox_inches='tight')"
310 | ]
311 | },
312 | {
313 | "cell_type": "code",
314 | "execution_count": null,
315 | "metadata": {},
316 | "outputs": [],
317 | "source": []
318 | }
319 | ],
320 | "metadata": {
321 | "interpreter": {
322 | "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
323 | },
324 | "kernelspec": {
325 | "display_name": "Python 3.8.2 64-bit",
326 | "name": "python3"
327 | },
328 | "language_info": {
329 | "codemirror_mode": {
330 | "name": "ipython",
331 | "version": 3
332 | },
333 | "file_extension": ".py",
334 | "mimetype": "text/x-python",
335 | "name": "python",
336 | "nbconvert_exporter": "python",
337 | "pygments_lexer": "ipython3",
338 | "version": "3.6.9"
339 | },
340 | "orig_nbformat": 4
341 | },
342 | "nbformat": 4,
343 | "nbformat_minor": 2
344 | }
345 |
--------------------------------------------------------------------------------
/figures/fig_04_group_size.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import xarray as xr\n",
10 | "import matplotlib.pyplot as plt\n",
11 | "import numpy as np\n",
12 | "import glob\n",
13 | "import os\n",
14 | "\n",
15 | "from addict import Dict\n",
16 | "\n",
17 | "from vmodel.util import color as vcolor"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": null,
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "plt.rcParams['figure.figsize'] = [4, 3.5]\n",
27 | "plt.rcParams['figure.autolayout'] = True"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "data_dir = '/home/fabian/vmodel_datasets/neighbor_selection/pointmass'"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "paths = sorted(glob.glob(data_dir + '/**/merged_*.nc', recursive=True))\n",
46 | "len(paths)"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "# Read datasets\n",
56 | "expdict = Dict()\n",
57 | "for path in paths:\n",
58 | " dirname = os.path.dirname(path)\n",
59 | " dist_str, exp = dirname.split(os.path.sep)[-2:]\n",
60 | " dist = int(dist_str.replace('dist', ''))\n",
61 | " ds = xr.open_dataset(path)\n",
62 | " ds['nndist_min'] = ds.nndist.min('agent')\n",
63 | " del ds['nndist']\n",
64 | " ds['nvisible_mean'] = ds.nvisible.mean('agent')\n",
65 | " del ds['nvisible']\n",
66 | " expdict[dist][exp] = ds"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": null,
72 | "metadata": {},
73 | "outputs": [],
74 | "source": [
75 | "distdict = Dict()\n",
76 | "for dist in expdict:\n",
77 | " ds = xr.concat(expdict[dist].values(), 'exp')\n",
78 | " ds.coords['exp'] = list(expdict[dist].keys())\n",
79 | " distdict[dist] = ds"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "# Concatenate datasets along distance\n",
89 | "ds = xr.concat(distdict.values(), 'dist')\n",
90 | "ds.coords['dist'] = list(distdict.keys())"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {},
97 | "outputs": [],
98 | "source": [
99 | "if 'agent' in ds.coords:\n",
100 | " del ds.coords['agent']"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "labels = {\n",
110 | " 'metric': 'metric',\n",
111 | " 'visual': 'visual',\n",
112 | " 'visual_myopic': 'visual + myopic',\n",
113 | " 'visual_topo6': 'visual + topological',\n",
114 | " 'visual_voronoi': 'visual + voronoi'\n",
115 | "}"
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": null,
121 | "metadata": {},
122 | "outputs": [],
123 | "source": [
124 | "colors = {\n",
125 | " 'metric': vcolor.metric,\n",
126 | " 'visual': vcolor.visual,\n",
127 | " 'visual_myopic': vcolor.myopic,\n",
128 | " 'visual_topo6': vcolor.topological,\n",
129 | " 'visual_voronoi': vcolor.voronoi\n",
130 | "}"
131 | ]
132 | },
133 | {
134 | "cell_type": "code",
135 | "execution_count": null,
136 | "metadata": {},
137 | "outputs": [],
138 | "source": [
139 | "timeslice = slice(150, 200)"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": null,
145 | "metadata": {},
146 | "outputs": [],
147 | "source": [
148 | "dst = ds.isel(time=timeslice).mean('time')"
149 | ]
150 | },
151 | {
152 | "cell_type": "code",
153 | "execution_count": null,
154 | "metadata": {},
155 | "outputs": [],
156 | "source": [
157 | "# Only select dist = 1\n",
158 | "dssize = dst.sel(dist=1)"
159 | ]
160 | },
161 | {
162 | "cell_type": "code",
163 | "execution_count": null,
164 | "metadata": {},
165 | "outputs": [],
166 | "source": [
167 | "def plot_size(ax, da):\n",
168 | " for exp in da.exp.data:\n",
169 | " data = da.sel(exp=exp)\n",
170 | " xs, ys, yerrs = data.nagents, data.mean('run'), data.std('run')\n",
171 | " label, color = labels[exp], colors[exp]\n",
172 | " ax.errorbar(xs, ys, yerrs, fmt='-o', capsize=3, label=label, color=color)\n",
173 | " ax.set(xscale='log')\n",
174 | " ax.set(xticks=ds.nagents, xticklabels=ds.nagents.data)\n",
175 | " ax.set(xlabel=r'number of agents $N$')\n",
176 | " ax.grid()\n",
177 | " ax.legend()"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {},
183 | "source": [
184 | "## Distance"
185 | ]
186 | },
187 | {
188 | "cell_type": "code",
189 | "execution_count": null,
190 | "metadata": {},
191 | "outputs": [],
192 | "source": [
193 | "fig, ax = plt.subplots()\n",
194 | "plot_size(ax, dssize.nndist_min)\n",
195 | "ax.set(ylabel=r'min. distance $d^\\mathrm{min}$ [$m$]')\n",
196 | "ax.set(ylim=(0.0, None))\n",
197 | "fig.savefig(f'size_nndist_min.pdf')"
198 | ]
199 | },
200 | {
201 | "cell_type": "markdown",
202 | "metadata": {},
203 | "source": [
204 | "## Order"
205 | ]
206 | },
207 | {
208 | "cell_type": "code",
209 | "execution_count": null,
210 | "metadata": {},
211 | "outputs": [],
212 | "source": [
213 | "fig, ax = plt.subplots()\n",
214 | "plot_size(ax, dssize.order)\n",
215 | "ax.set(ylabel=r'avg. order $\\phi^\\mathrm{order}$')\n",
216 | "ax.set(ylim=(0.80, None))\n",
217 | "fig.savefig(f'size_order.pdf')"
218 | ]
219 | },
220 | {
221 | "cell_type": "markdown",
222 | "metadata": {},
223 | "source": [
224 | "## Union"
225 | ]
226 | },
227 | {
228 | "cell_type": "code",
229 | "execution_count": null,
230 | "metadata": {},
231 | "outputs": [],
232 | "source": [
233 | "fig, ax = plt.subplots()\n",
234 | "plot_size(ax, dssize.union)\n",
235 | "ax.set(ylabel=r'avg. union $\\phi^\\mathrm{union}$')\n",
236 | "fig.savefig(f'size_union.pdf')"
237 | ]
238 | },
239 | {
240 | "cell_type": "markdown",
241 | "metadata": {},
242 | "source": [
243 | "## Neighbors"
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "execution_count": null,
249 | "metadata": {},
250 | "outputs": [],
251 | "source": [
252 | "fig, ax = plt.subplots()\n",
253 | "plot_size(ax, dssize.nvisible_mean)\n",
254 | "ax.set(ylabel=r'avg. number of neighbors $N_i$')\n",
255 | "ax.set(yscale='log')\n",
256 | "ax.set(yticks=dssize.nagents, yticklabels=dssize.nagents.data)\n",
257 | "ax.set(ylim=(1, 3000))\n",
258 | "ax.legend(loc='upper left')\n",
259 | "fig.savefig(f'size_nvisible_mean.pdf')"
260 | ]
261 | },
262 | {
263 | "cell_type": "code",
264 | "execution_count": null,
265 | "metadata": {},
266 | "outputs": [],
267 | "source": []
268 | }
269 | ],
270 | "metadata": {
271 | "interpreter": {
272 | "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
273 | },
274 | "kernelspec": {
275 | "display_name": "Python 3.6.9 64-bit",
276 | "name": "python3"
277 | },
278 | "language_info": {
279 | "codemirror_mode": {
280 | "name": "ipython",
281 | "version": 3
282 | },
283 | "file_extension": ".py",
284 | "mimetype": "text/x-python",
285 | "name": "python",
286 | "nbconvert_exporter": "python",
287 | "pygments_lexer": "ipython3",
288 | "version": "3.6.9"
289 | },
290 | "orig_nbformat": 4
291 | },
292 | "nbformat": 4,
293 | "nbformat_minor": 2
294 | }
295 |
--------------------------------------------------------------------------------
/figures/fig_05_trajectories.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import glob\n",
10 | "import os\n",
11 | "\n",
12 | "from addict import Dict\n",
13 | "import xarray as xr\n",
14 | "import matplotlib.pyplot as plt\n",
15 | "\n",
16 | "from vmodel import plot as vplot\n",
17 | "from vmodel.util import color as vcolor\n",
18 | "from vmodel.util import mpl as vmpl\n",
19 | "\n",
20 | "%load_ext autoreload\n",
21 | "%autoreload 2\n"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": null,
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "data_dir = '/home/fabian/vmodel_datasets/trajectories/pointmass'"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "metadata": {},
37 | "outputs": [],
38 | "source": [
39 | "num_agents = 30"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "paths = sorted(glob.glob(f'{data_dir}/**/agents_{num_agents}_*.nc', recursive=True))\n",
49 | "paths = [p for p in paths if '.metrics.nc' not in p]\n",
50 | "len(paths)"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {},
57 | "outputs": [],
58 | "source": [
59 | "# Read datasets\n",
60 | "expdict = Dict()\n",
61 | "for path in paths:\n",
62 | " expname = os.path.split(os.path.dirname(path))[-1]\n",
63 | " ds = xr.open_dataset(path)\n",
64 | " expdict[expname] = ds"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "metadata": {},
71 | "outputs": [],
72 | "source": [
73 | "expdict.keys()"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "ds = xr.concat(expdict.values(), 'exp')\n",
83 | "ds.coords['exp'] = list(expdict.keys())"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": null,
89 | "metadata": {},
90 | "outputs": [],
91 | "source": [
92 | "timeslice = slice(0, 60)"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": null,
98 | "metadata": {},
99 | "outputs": [],
100 | "source": [
101 | "ds = ds.sel(run=1).isel(time=timeslice)"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": null,
107 | "metadata": {},
108 | "outputs": [],
109 | "source": [
110 | "figsize = (10, 3)"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "metadata": {},
117 | "outputs": [],
118 | "source": [
119 | "def plot_trajectories(ax, ds, focal_agent=1, exp='visual', focal_color='black'):\n",
120 | " \"\"\"Plot trajectories of positions over time\n",
121 | " Args:\n",
122 | " ds (dataset): timestep x agent x state\n",
123 | " \"\"\"\n",
124 | " start, mid, end = 0, len(ds.time.data) // 2, -1\n",
125 | "\n",
126 | " putlabel = True\n",
127 | " for agent in ds.agent.data:\n",
128 | "\n",
129 | " # Check if focal agent\n",
130 | " isfocal = agent == focal_agent\n",
131 | "\n",
132 | " xs, ys = ds.position.sel(agent=agent)\n",
133 | "\n",
134 | " # alpha = 1.0 if isfocal else 0.5\n",
135 | " # lw = 2.0 if isfocal else 0.5\n",
136 | " alpha = 1.0\n",
137 | " lw = 2.0 if isfocal else 1.0\n",
138 | " color = focal_color if isfocal else vcolor.grey\n",
139 | " zorder = 1 if isfocal else 0\n",
140 | "\n",
141 | " if isfocal:\n",
142 | " label = f'{exp} (focal)'\n",
143 | " elif not isfocal and putlabel:\n",
144 | " label = f'{exp} (others)'\n",
145 | " putlabel = False\n",
146 | " else:\n",
147 | " label = None\n",
148 | "\n",
149 | " # Plot trajectory\n",
150 | " line = ax.plot(xs, ys, color=color, lw=lw, alpha=alpha, zorder=zorder, label=label)\n",
151 | " # color = line[0].get_color()\n",
152 | "\n",
153 | " # Plot trajectory start\n",
154 | " x0, y0 = xs.isel(time=start), ys.isel(time=start)\n",
155 | " ax.plot(x0, y0, color=color, marker='s', alpha=alpha, lw=lw, zorder=zorder)\n",
156 | "\n",
157 | " # Plot mid point\n",
158 | " xm, ym = xs.isel(time=mid), ys.isel(time=mid)\n",
159 | " ax.plot(xm, ym, color=color, marker='>', alpha=alpha, lw=lw, zorder=zorder)\n",
160 | "\n",
161 | " # Plot trajectory end\n",
162 | " xt, yt = xs.isel(time=end), ys.isel(time=end)\n",
163 | " circle = plt.Circle((xt, yt), color=color, radius=0.25, alpha=alpha, lw=lw, zorder=zorder)\n",
164 | " ax.add_patch(circle)\n",
165 | " # ax.plot(xt, yt, color=color, marker='o', alpha=alpha, lw=lw, zorder=zorder)\n",
166 | "\n",
167 | " # ax.grid()\n",
168 | " offset = 7\n",
169 | " ax.set(ylim=(-offset, offset), xlim=(-offset, 29 + offset))\n",
170 | " ax.set(xlabel=r'$x$ position [$m$]', ylabel=r'$y$ position [$m$]')\n",
171 | " ax.set(aspect='equal')\n",
172 | "\n",
173 | " handles, labels = ax.get_legend_handles_labels()\n",
174 | " order = [1, 0]\n",
175 | " newhandles = [handles[i] for i in order]\n",
176 | " newlabels = [labels[i] for i in order]\n",
177 | " ax.legend(newhandles, newlabels, loc='upper center', ncol=2)\n",
178 | " ax.locator_params(axis='y', nbins=5)"
179 | ]
180 | },
181 | {
182 | "cell_type": "code",
183 | "execution_count": null,
184 | "metadata": {},
185 | "outputs": [],
186 | "source": [
187 | "focal_agent = 12"
188 | ]
189 | },
190 | {
191 | "cell_type": "markdown",
192 | "metadata": {},
193 | "source": [
194 | "## Visual"
195 | ]
196 | },
197 | {
198 | "cell_type": "code",
199 | "execution_count": null,
200 | "metadata": {},
201 | "outputs": [],
202 | "source": [
203 | "fig, ax = plt.subplots(figsize=figsize)\n",
204 | "exp = 'visual'\n",
205 | "plot_trajectories(ax, ds.sel(exp='visual'), focal_agent=focal_agent, exp=exp, focal_color=vcolor.visual)\n",
206 | "fig.savefig(f'trajectories_visual.pdf', bbox_inches='tight')"
207 | ]
208 | },
209 | {
210 | "cell_type": "markdown",
211 | "metadata": {},
212 | "source": [
213 | "## Visual + voronoi"
214 | ]
215 | },
216 | {
217 | "cell_type": "code",
218 | "execution_count": null,
219 | "metadata": {},
220 | "outputs": [],
221 | "source": [
222 | "fig, ax = plt.subplots(figsize=figsize)\n",
223 | "exp = 'visual + voronoi'\n",
224 | "plot_trajectories(ax, ds.sel(exp='voronoi'), focal_agent=focal_agent, exp=exp, focal_color=vcolor.voronoi)\n",
225 | "fig.savefig(f'trajectories_visual_voronoi.pdf', bbox_inches='tight')"
226 | ]
227 | },
228 | {
229 | "cell_type": "markdown",
230 | "metadata": {},
231 | "source": [
232 | "## Visual + myopic"
233 | ]
234 | },
235 | {
236 | "cell_type": "code",
237 | "execution_count": null,
238 | "metadata": {},
239 | "outputs": [],
240 | "source": [
241 | "fig, ax = plt.subplots(figsize=figsize)\n",
242 | "exp = 'visual + myopic'\n",
243 | "plot_trajectories(ax, ds.sel(exp='myopic'), focal_agent=focal_agent, exp=exp, focal_color=vcolor.myopic)\n",
244 | "fig.savefig(f'trajectories_visual_myopic.pdf', bbox_inches='tight')"
245 | ]
246 | },
247 | {
248 | "cell_type": "markdown",
249 | "metadata": {},
250 | "source": [
251 | "## Visual + topological"
252 | ]
253 | },
254 | {
255 | "cell_type": "code",
256 | "execution_count": null,
257 | "metadata": {},
258 | "outputs": [],
259 | "source": [
260 | "fig, ax = plt.subplots(figsize=figsize)\n",
261 | "exp = 'visual + topological'\n",
262 | "plot_trajectories(ax, ds.sel(exp='topo'), focal_agent=focal_agent, exp=exp, focal_color=vcolor.topological)\n",
263 | "fig.savefig(f'trajectories_visual_topo.pdf', bbox_inches='tight')"
264 | ]
265 | },
266 | {
267 | "cell_type": "code",
268 | "execution_count": null,
269 | "metadata": {},
270 | "outputs": [],
271 | "source": []
272 | }
273 | ],
274 | "metadata": {
275 | "interpreter": {
276 | "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
277 | },
278 | "kernelspec": {
279 | "display_name": "Python 3.6.9 64-bit",
280 | "name": "python3"
281 | },
282 | "language_info": {
283 | "codemirror_mode": {
284 | "name": "ipython",
285 | "version": 3
286 | },
287 | "file_extension": ".py",
288 | "mimetype": "text/x-python",
289 | "name": "python",
290 | "nbconvert_exporter": "python",
291 | "pygments_lexer": "ipython3",
292 | "version": "3.6.9"
293 | },
294 | "orig_nbformat": 4
295 | },
296 | "nbformat": 4,
297 | "nbformat_minor": 2
298 | }
299 |
--------------------------------------------------------------------------------
/figures/fig_06_visual_switching.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import xarray as xr\n",
10 | "import numpy as np\n",
11 | "import matplotlib.pyplot as plt\n",
12 | "\n",
13 | "from vmodel.visibility import visibility_set\n",
14 | "import vmodel.geometry as vgeom\n",
15 | "\n",
16 | "import vmodel.util.color as vcolor\n",
17 | "from addict import Dict"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": null,
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "plt.rcParams['figure.figsize'] = [4, 4]\n",
27 | "plt.rcParams['figure.autolayout'] = True"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "path = '/home/fabian/vmodel_datasets/visual_switching/agents_300_runs_1_times_100_dist_1.0_perc_10.0_topo_0_rngstd_0.0.states.nc'"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "ds = xr.open_dataset(path).sel(run=1)"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "# Only every 10th timestep\n",
55 | "vis = ds.visibility"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "metadata": {},
62 | "outputs": [],
63 | "source": [
64 | "# Create switches array (shift by one timestep and apply XOR)\n",
65 | "switches = np.logical_xor(vis, vis.shift(time=1, fill_value=False)).sum('agent2')"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "timestep = 12"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "def plot(ax, ds, timestep=1, focal_agent=1, center_agent=True, plot_text=False, plot_appearance=True):\n",
84 | "\n",
85 | " dst = ds.isel(time=timestep)\n",
86 | "\n",
87 | " margin = dst.perception_radius # margin around focal agent\n",
88 | " perception_radius = dst.perception_radius\n",
89 | " radius = float(ds.radius)\n",
90 | "\n",
91 | " pos_self = dst.position.sel(agent=focal_agent)\n",
92 | " px, py = pos_self.data\n",
93 | " px, py\n",
94 | "\n",
95 | " # Position computation\n",
96 | " positions = dst.position.data\n",
97 | " pos_self = positions[focal_agent - 1]\n",
98 | " pos_others = np.delete(positions, focal_agent - 1, axis=0) - pos_self\n",
99 | " distances = np.insert(np.linalg.norm(pos_others, axis=1), focal_agent - 1, float('inf'))\n",
100 | " pos_others_me = np.insert(pos_others, focal_agent - 1, np.array([0, 0]), axis=0)\n",
101 | "\n",
102 | " too_far = distances > perception_radius\n",
103 | "\n",
104 | " # Center figure around agent or swarm\n",
105 | " if center_agent:\n",
106 | " xlim = (px - margin, px + margin)\n",
107 | " ylim = (py - margin, py + margin)\n",
108 | " else:\n",
109 | " xlim = (positions[:, 0].min(), positions[:, 0].max())\n",
110 | " ylim = (positions[:, 1].min(), positions[:, 1].max())\n",
111 | "\n",
112 | "\n",
113 | " # Add background rectangle\n",
114 | " perc_radius = plt.Circle((px, py), radius=perception_radius, color='white', zorder=1)\n",
115 | " ax.add_patch(perc_radius)\n",
116 | "\n",
117 | " perc_circle = plt.Circle((px, py), radius=perception_radius, fill=False, ls=':', lw=0.5, ec='grey', zorder=100)\n",
118 | " ax.add_patch(perc_circle)\n",
119 | "\n",
120 | " # Loop over other agents\n",
121 | " for a in dst.agent.data:\n",
122 | " dsa = dst.sel(agent=a)\n",
123 | "\n",
124 | " istoofar = too_far[a - 1]\n",
125 | " isfocal = (a == focal_agent)\n",
126 | " isvisible = dst.visibility.sel(agent=focal_agent, agent2=a).data\n",
127 | "\n",
128 | " if isfocal:\n",
129 | " color = vcolor.focal\n",
130 | " elif istoofar:\n",
131 | " color = vcolor.invisible\n",
132 | " elif not istoofar and isvisible:\n",
133 | " color = vcolor.visual\n",
134 | " elif not istoofar and not isvisible:\n",
135 | " color = vcolor.grey\n",
136 | "\n",
137 | " x, y = dsa.position\n",
138 | " dx, dy = dsa.velocity\n",
139 | " speed = np.linalg.norm([dx, dy])\n",
140 | "\n",
141 | " timeslice = slice(timestep - 10, timestep)\n",
142 | " \n",
143 | " if plot_appearance:\n",
144 | " wasvisible = ds.visibility.isel(time=slice(timestep - 10, timestep)).mean('time').sel(agent=focal_agent, agent2=a).data.round().astype(bool)\n",
145 | " # wasvisible = ds.visibility.isel(time=timestep -1).sel(agent=focal_agent, agent2=a).data.astype(bool)\n",
146 | " didappear = not wasvisible and isvisible\n",
147 | " diddisappear = wasvisible and not isvisible\n",
148 | " if didappear:\n",
149 | " color = vcolor.voronoi\n",
150 | " elif diddisappear:\n",
151 | " color = vcolor.topological\n",
152 | "\n",
153 | " # Plot tail\n",
154 | " xs, ys = ds.position.sel(agent=a).isel(time=timeslice).data\n",
155 | " ax.plot(xs, ys, color=color, alpha=0.5, zorder=99)\n",
156 | "\n",
157 | " # Plot position\n",
158 | " ax.plot(x, y)\n",
159 | " circle = plt.Circle((x, y), radius=radius, color=color, zorder=99)\n",
160 | " ax.add_patch(circle)\n",
161 | "\n",
162 | " # Plot velocity\n",
163 | " # head_length = 0.3 * speed\n",
164 | " # head_width = head_length / 2\n",
165 | " # ax.arrow(x, y, dx, dy, color=color, head_length=head_length, head_width=head_width,\n",
166 | " # length_includes_head=True, zorder=99)\n",
167 | "\n",
168 | " # Plot agent number\n",
169 | " if plot_text:\n",
170 | " ax.text(x, y, s=f'{a}', ha='center', va='center', zorder=100)\n",
171 | " if not isfocal:\n",
172 | " p1, p2 = vgeom.tangent_points_to_circle(pos_others_me[a - 1], radius)\n",
173 | " p1, p2 = np.array(p1), np.array(p2)\n",
174 | " ps1, ps2 = p1 * perception_radius * 4, p2 * perception_radius * 4\n",
175 | " origin = np.array([px, py]) # need to translate by origin!!\n",
176 | " poly = np.array([p1, ps1, ps2, p2]) + origin\n",
177 | " polygon = plt.Polygon(poly, color=vcolor.lightgrey, zorder=1)\n",
178 | " ax.add_patch(polygon)\n",
179 | " \n",
180 | " ax.set(xlim=xlim, ylim=ylim)\n",
181 | " ax.set(aspect='equal')\n",
182 | " ax.set(xlabel='x [m]', ylabel='y [m]')\n",
183 | " ax.set(facecolor=vcolor.lightgrey)\n",
184 | " # ax.set(title=f'T = {timestep}s')\n"
185 | ]
186 | },
187 | {
188 | "cell_type": "code",
189 | "execution_count": null,
190 | "metadata": {},
191 | "outputs": [],
192 | "source": [
193 | "# fig, axes = plt.subplots(ncols=3, figsize=(20, 20), sharey=True)\n",
194 | "focal_agent = 65 # take some agent as focal agent\n",
195 | "plot_text = False\n",
196 | "start = 10\n",
197 | "offset = 10\n",
198 | "timesteps = [start, start + offset, start + 2 * offset, start + 3 * offset]\n",
199 | "for i in range(len(timesteps)):\n",
200 | " # ax = axes[i]\n",
201 | " timestep = timesteps[i]\n",
202 | " fig, ax = plt.subplots()\n",
203 | " plot(ax, ds, timestep=timestep, focal_agent=focal_agent, plot_text=False)\n",
204 | " ax.locator_params(axis='y', nbins=4)\n",
205 | " ax.grid()\n",
206 | " fig.savefig(f'visual_switching_{i + 1}.pdf')\n"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": null,
212 | "metadata": {},
213 | "outputs": [],
214 | "source": []
215 | }
216 | ],
217 | "metadata": {
218 | "interpreter": {
219 | "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
220 | },
221 | "kernelspec": {
222 | "display_name": "Python 3.6.9 64-bit",
223 | "name": "python3"
224 | },
225 | "language_info": {
226 | "codemirror_mode": {
227 | "name": "ipython",
228 | "version": 3
229 | },
230 | "file_extension": ".py",
231 | "mimetype": "text/x-python",
232 | "name": "python",
233 | "nbconvert_exporter": "python",
234 | "pygments_lexer": "ipython3",
235 | "version": "3.6.9"
236 | },
237 | "orig_nbformat": 4
238 | },
239 | "nbformat": 4,
240 | "nbformat_minor": 2
241 | }
242 |
--------------------------------------------------------------------------------
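The switching cell in fig_06_visual_switching.ipynb above detects neighbor appearance and disappearance events by XOR-ing the boolean visibility array with a copy shifted by one timestep. A minimal, self-contained sketch of the same idea on made-up data (the array below is hypothetical and much smaller than the 300-agent dataset):

import numpy as np
import xarray as xr

# Hypothetical visibility of 3 neighbors over 5 timesteps (True = visible)
vis = xr.DataArray(
    np.array([[1, 1, 0, 0, 1],
              [0, 0, 0, 1, 1],
              [1, 1, 1, 1, 1]], dtype=bool),
    dims=('agent2', 'time'),
)

# XOR with the one-step shifted copy flags every visibility switch (0->1 or 1->0)
switches = np.logical_xor(vis, vis.shift(time=1, fill_value=False))

# Number of neighbors that switched state at each timestep
print(switches.sum('agent2').values)  # -> [2 0 1 1 1]

--------------------------------------------------------------------------------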
/figures/fig_07_density.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import xarray as xr\n",
10 | "import matplotlib.pyplot as plt\n",
11 | "import numpy as np\n",
12 | "import glob\n",
13 | "import os\n",
14 | "\n",
15 | "from addict import Dict\n",
16 | "\n",
17 | "from vmodel.util import color as vcolor"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": null,
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "plt.rcParams['figure.figsize'] = [4, 3.5]\n",
27 | "plt.rcParams['figure.autolayout'] = True"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "data_dir = '/home/fabian/vmodel_datasets/neighbor_selection/pointmass'"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "paths = sorted(glob.glob(data_dir + '/**/merged_*.nc', recursive=True))\n",
46 | "len(paths)"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "# Read datasets\n",
56 | "expdict = Dict()\n",
57 | "for path in paths:\n",
58 | " dirname = os.path.dirname(path)\n",
59 | " dist_str, exp = dirname.split(os.path.sep)[-2:]\n",
60 | " dist = int(dist_str.replace('dist', ''))\n",
61 | " ds = xr.open_dataset(path)\n",
62 | " ds['nndist_min'] = ds.nndist.min('agent')\n",
63 | " del ds['nndist']\n",
64 | " ds['nvisible_mean'] = ds.nvisible.mean('agent')\n",
65 | " del ds['nvisible']\n",
66 | " expdict[dist][exp] = ds"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": null,
72 | "metadata": {},
73 | "outputs": [],
74 | "source": [
75 | "distdict = Dict()\n",
76 | "for dist in expdict:\n",
77 | " ds = xr.concat(expdict[dist].values(), 'exp')\n",
78 | " ds.coords['exp'] = list(expdict[dist].keys())\n",
79 | " distdict[dist] = ds"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "# Concatenate datasets along distance\n",
89 | "ds = xr.concat(distdict.values(), 'dist')\n",
90 | "ds.coords['dist'] = list(distdict.keys())"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {},
97 | "outputs": [],
98 | "source": [
99 | "if 'agent' in ds.coords:\n",
100 | " del ds.coords['agent']"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "labels = {\n",
110 | " 'metric': 'metric',\n",
111 | " 'visual': 'visual',\n",
112 | " 'visual_myopic': 'visual + myopic',\n",
113 | " 'visual_topo6': 'visual + topological',\n",
114 | " 'visual_voronoi': 'visual + voronoi'\n",
115 | "}"
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": null,
121 | "metadata": {},
122 | "outputs": [],
123 | "source": [
124 | "colors = {\n",
125 | " 'metric': vcolor.metric,\n",
126 | " 'visual': vcolor.visual,\n",
127 | " 'visual_myopic': vcolor.myopic,\n",
128 | " 'visual_topo6': vcolor.topological,\n",
129 | " 'visual_voronoi': vcolor.voronoi\n",
130 | "}"
131 | ]
132 | },
133 | {
134 | "cell_type": "code",
135 | "execution_count": null,
136 | "metadata": {},
137 | "outputs": [],
138 | "source": [
139 | "timeslice = slice(150, 200)"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": null,
145 | "metadata": {},
146 | "outputs": [],
147 | "source": [
148 | "dst = ds.isel(time=timeslice).mean('time')"
149 | ]
150 | },
151 | {
152 | "cell_type": "code",
153 | "execution_count": null,
154 | "metadata": {},
155 | "outputs": [],
156 | "source": [
157 | "dsdens = dst.sel(nagents=100)"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": null,
163 | "metadata": {},
164 | "outputs": [],
165 | "source": [
166 | "def plot_density(ax, da):\n",
167 | " for exp in da.exp.data:\n",
168 | " data = da.sel(exp=exp)\n",
169 | " xs, ys, yerrs = data.dist, data.mean('run'), data.std('run')\n",
170 | " label, color = labels[exp], colors[exp]\n",
171 | " ax.errorbar(xs, ys, yerrs, fmt='-o', capsize=3, label=label, color=color)\n",
172 | " ax.set(xticks=ds.dist, xticklabels=ds.dist.data)\n",
173 | " ax.set(xlabel=r'reference distance $d^\\mathrm{ref}$ [$m$]')\n",
174 | " ax.grid()\n",
175 | " ax.legend()"
176 | ]
177 | },
178 | {
179 | "cell_type": "code",
180 | "execution_count": null,
181 | "metadata": {},
182 | "outputs": [],
183 | "source": [
184 | "dsdens['nndist_norm'] = dsdens.nndist_min / ds.dist"
185 | ]
186 | },
187 | {
188 | "cell_type": "markdown",
189 | "metadata": {},
190 | "source": [
191 | "## Distance"
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": null,
197 | "metadata": {},
198 | "outputs": [],
199 | "source": [
200 | "fig, ax = plt.subplots()\n",
201 | "plot_density(ax, dsdens.nndist_norm)\n",
202 | "ax.set(ylabel=r'norm. min. distance $d^\\mathrm{norm}$')\n",
203 | "ax.set(ylim=(0.3, None))\n",
204 | "fig.savefig(f'density_nndist_norm.pdf', bbox_inches='tight')\n",
205 | "pass"
206 | ]
207 | },
208 | {
209 | "cell_type": "markdown",
210 | "metadata": {},
211 | "source": [
212 | "## Order"
213 | ]
214 | },
215 | {
216 | "cell_type": "code",
217 | "execution_count": null,
218 | "metadata": {},
219 | "outputs": [],
220 | "source": [
221 | "fig, ax = plt.subplots()\n",
222 | "plot_density(ax, dsdens.order)\n",
223 | "ax.set(ylabel=r'avg. order $\\phi^{\\mathrm{order}}$')\n",
224 | "ax.set(ylim=(0.7, None))\n",
225 | "fig.savefig(f'density_order.pdf', bbox_inches='tight')\n",
226 | "pass"
227 | ]
228 | },
229 | {
230 | "cell_type": "markdown",
231 | "metadata": {},
232 | "source": [
233 | "## Union"
234 | ]
235 | },
236 | {
237 | "cell_type": "code",
238 | "execution_count": null,
239 | "metadata": {},
240 | "outputs": [],
241 | "source": [
242 | "fig, ax = plt.subplots()\n",
243 | "plot_density(ax, dsdens.union)\n",
244 | "ax.set(ylabel=r'avg. union $\\phi^{union}$')\n",
245 | "ax.set(ylim=(0.9, None))\n",
246 | "fig.savefig(f'density_union.pdf', bbox_inches='tight')\n",
247 | "pass"
248 | ]
249 | },
250 | {
251 | "cell_type": "markdown",
252 | "metadata": {},
253 | "source": [
254 | "## Neighbors"
255 | ]
256 | },
257 | {
258 | "cell_type": "code",
259 | "execution_count": null,
260 | "metadata": {},
261 | "outputs": [],
262 | "source": [
263 | "fig, ax = plt.subplots()\n",
264 | "plot_density(ax, dsdens.nvisible_mean)\n",
265 | "ax.set(ylabel=r'avg. number of neighbors $N_i$')\n",
266 | "ax.set(yscale='log')\n",
267 | "ax.set(yticks=ds.nagents, yticklabels=ds.nagents.data)\n",
268 | "ax.set(ylim=(None, 100))\n",
269 | "fig.savefig(f'density_nvisible_mean.pdf', bbox_inches='tight')\n",
270 | "pass"
271 | ]
272 | },
273 | {
274 | "cell_type": "code",
275 | "execution_count": null,
276 | "metadata": {},
277 | "outputs": [],
278 | "source": []
279 | }
280 | ],
281 | "metadata": {
282 | "interpreter": {
283 | "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
284 | },
285 | "kernelspec": {
286 | "display_name": "Python 3.6.9 64-bit",
287 | "name": "python3"
288 | },
289 | "language_info": {
290 | "codemirror_mode": {
291 | "name": "ipython",
292 | "version": 3
293 | },
294 | "file_extension": ".py",
295 | "mimetype": "text/x-python",
296 | "name": "python",
297 | "nbconvert_exporter": "python",
298 | "pygments_lexer": "ipython3",
299 | "version": "3.6.9"
300 | },
301 | "orig_nbformat": 4
302 | },
303 | "nbformat": 4,
304 | "nbformat_minor": 2
305 | }
306 |
--------------------------------------------------------------------------------
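The density figure above reports the swarm-wide minimum nearest-neighbor distance (nndist_min), normalized by the reference distance. The actual values come from vmetric / vmodel.metrics; the numpy sketch below only illustrates what such a quantity measures, using made-up positions and possibly differing in detail from the library's implementation:

import numpy as np

# Hypothetical 2D positions of five agents (one row per agent)
pos = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0], [3.0, 3.0], [1.5, 0.5]])

# Pairwise distances with the self-distance masked out
dist = np.linalg.norm(pos[:, None, :] - pos[None, :, :], axis=-1)
np.fill_diagonal(dist, np.inf)

nndist = dist.min(axis=1)       # nearest-neighbor distance per agent
nndist_min = nndist.min()       # swarm-wide minimum, as plotted in the figure
d_ref = 1.0                     # illustrative reference distance [m]
print(nndist_min / d_ref)       # normalized minimum distance

--------------------------------------------------------------------------------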
/figures/fig_08_range_noise.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import xarray as xr\n",
10 | "import matplotlib.pyplot as plt\n",
11 | "import numpy as np\n",
12 | "import glob\n",
13 | "import os\n",
14 | "\n",
15 | "from vmodel.util import color as vcolor"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "\n",
25 | "plt.rcParams['figure.figsize'] = [4, 3.5]\n",
26 | "plt.rcParams['figure.autolayout'] = True"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "metadata": {},
33 | "outputs": [],
34 | "source": [
35 | "data_dir = '/home/fabian/vmodel_datasets/range_noise'"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": null,
41 | "metadata": {},
42 | "outputs": [],
43 | "source": [
44 | "paths = sorted(glob.glob(data_dir + '/**/merged_*.nc', recursive=True))\n",
45 | "paths"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "# Read datasets\n",
55 | "expdict = {}\n",
56 | "for path in paths:\n",
57 | " expname = os.path.split(os.path.dirname(path))[-1]\n",
58 | " ds = xr.open_dataset(path)\n",
59 | " expdict[expname] = ds"
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": null,
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "# Concatenate datasets along experiment name\n",
69 | "ds = xr.concat(expdict.values(), 'exp')\n",
70 | "ds.coords['exp'] = list(expdict.keys())"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": null,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "# Precompute distance metrics\n",
80 | "ds['nndist_min'] = ds.nndist.min('agent')\n",
81 | "ds['nndist_mean'] = ds.nndist.mean('agent')\n",
82 | "ds['nndist_std'] = ds.nndist.std('agent')"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": null,
88 | "metadata": {},
89 | "outputs": [],
90 | "source": [
91 | "ds['nvisible_mean'] = ds.nvisible.mean('agent')\n",
92 | "ds['nvisible_std'] = ds.nvisible.std('agent')"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": null,
98 | "metadata": {},
99 | "outputs": [],
100 | "source": [
101 | "labels = {\n",
102 | " 'metric': 'metric',\n",
103 | " 'visual': 'visual',\n",
104 | " 'visual_myopic': 'visual + myopic',\n",
105 | " 'visual_topo6': 'visual + topological',\n",
106 | " 'visual_voronoi': 'visual + voronoi',\n",
107 | "}"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": null,
113 | "metadata": {},
114 | "outputs": [],
115 | "source": [
116 | "colors = {\n",
117 | " 'metric': vcolor.metric,\n",
118 | " 'visual': vcolor.visual,\n",
119 | " 'visual_myopic': vcolor.myopic,\n",
120 | " 'visual_topo6': vcolor.topological,\n",
121 | " 'visual_voronoi': vcolor.voronoi,\n",
122 | "}"
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "metadata": {},
129 | "outputs": [],
130 | "source": [
131 | "expname = 'range_std'"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "metadata": {},
138 | "outputs": [],
139 | "source": [
140 | "timeslice = slice(150, 200)"
141 | ]
142 | },
143 | {
144 | "cell_type": "code",
145 | "execution_count": null,
146 | "metadata": {},
147 | "outputs": [],
148 | "source": [
149 | "def plot(ax, da, verbose=False):\n",
150 | " for exp in da.exp.data:\n",
151 | " data = da.sel(exp=exp)\n",
152 | " xs, ys, yerrs = data.rngstd, data.mean('run'), data.std('run')\n",
153 | " label, color = labels[exp], colors[exp]\n",
154 | " ax.errorbar(xs, ys, yerrs, fmt='-o', capsize=3, label=label, color=color)\n",
155 | " if verbose:\n",
156 | " print(f'{exp}')\n",
157 | " print(f'xs: {xs.data}')\n",
158 | " print(f'ys: {np.round(ys.data, 2)}')\n",
159 | " print(f'ye: {np.round(yerrs.data, 2)}')\n",
160 | " print()\n",
161 | " # ax.set(xscale='log')\n",
162 | " ax.set(xticks=ds.rngstd, xticklabels=ds.rngstd.data)\n",
163 | " ax.set(xlabel=r'range noise $\\sigma_d$ [$m$]')\n",
164 | " ax.grid()\n",
165 | " ax.legend()"
166 | ]
167 | },
168 | {
169 | "cell_type": "markdown",
170 | "metadata": {},
171 | "source": [
172 | "## Distance"
173 | ]
174 | },
175 | {
176 | "cell_type": "code",
177 | "execution_count": null,
178 | "metadata": {},
179 | "outputs": [],
180 | "source": [
181 | "fig, ax = plt.subplots()\n",
182 | "plot(ax, ds.nndist_min.isel(time=timeslice).min('time'))\n",
183 | "ax.set(ylabel=r'min. nearest neighbor distance $d^\\mathrm{min}$ [$m$]')\n",
184 | "ax.set(ylim=(-0.15, None))\n",
185 | "# ax.plot(ds.nagents.data, np.full(len(ds.nagents), 0.5), label='collision distance', ls='--', color='tab:red')\n",
186 | "ax.legend()\n",
187 | "fig.savefig(f'{expname}_nndist_min.pdf')\n",
188 | "pass"
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "metadata": {},
194 | "source": [
195 | "## Order"
196 | ]
197 | },
198 | {
199 | "cell_type": "code",
200 | "execution_count": null,
201 | "metadata": {},
202 | "outputs": [],
203 | "source": [
204 | "fig, ax = plt.subplots()\n",
205 | "plot(ax, ds.order.isel(time=timeslice).mean('time'))\n",
206 | "ax.set(ylabel=r'avg. order $\\phi^{\\mathrm{order}}$')\n",
207 | "fig.savefig(f'{expname}_order.pdf')\n",
208 | "ax.set(ylim=(-0.15, None))\n",
209 | "pass"
210 | ]
211 | },
212 | {
213 | "cell_type": "markdown",
214 | "metadata": {},
215 | "source": [
216 | "## Union"
217 | ]
218 | },
219 | {
220 | "cell_type": "code",
221 | "execution_count": null,
222 | "metadata": {},
223 | "outputs": [],
224 | "source": [
225 | "fig, ax = plt.subplots()\n",
226 | "plot(ax, ds.union.isel(time=timeslice).mean('time'))\n",
227 | "ax.set(ylabel=r'avg. union $\\phi^{union}$')\n",
228 | "ax.set(ylim=(0.9, None))\n",
229 | "fig.savefig(f'{expname}_union.pdf')\n",
230 | "pass"
231 | ]
232 | },
233 | {
234 | "cell_type": "markdown",
235 | "metadata": {},
236 | "source": [
237 | "## Travel"
238 | ]
239 | },
240 | {
241 | "cell_type": "code",
242 | "execution_count": null,
243 | "metadata": {},
244 | "outputs": [],
245 | "source": [
246 | "# ds.traveldist.mean('run').plot(hue='exp')\n",
247 | "# ds.traveldist.mean('run').plot(hue='exp')\n",
248 | "fig, ax = plt.subplots()\n",
249 | "plot(ax, ds.traveldist)\n",
250 | "ax.set(ylabel=r'avg. travel distance $d^\\mathrm{travel}$ [$m$]')\n",
251 | "fig.savefig(f'{expname}_traveldist.pdf')\n",
252 | "pass"
253 | ]
254 | },
255 | {
256 | "cell_type": "code",
257 | "execution_count": null,
258 | "metadata": {},
259 | "outputs": [],
260 | "source": []
261 | }
262 | ],
263 | "metadata": {
264 | "interpreter": {
265 | "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
266 | },
267 | "kernelspec": {
268 | "display_name": "Python 3.8.2 64-bit",
269 | "name": "python3"
270 | },
271 | "language_info": {
272 | "codemirror_mode": {
273 | "name": "ipython",
274 | "version": 3
275 | },
276 | "file_extension": ".py",
277 | "mimetype": "text/x-python",
278 | "name": "python",
279 | "nbconvert_exporter": "python",
280 | "pygments_lexer": "ipython3",
281 | "version": "3.6.9"
282 | },
283 | "orig_nbformat": 4
284 | },
285 | "nbformat": 4,
286 | "nbformat_minor": 2
287 | }
288 |
--------------------------------------------------------------------------------
/figures/fig_11_realistic_trajectories.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import glob\n",
10 | "import os\n",
11 | "\n",
12 | "from addict import Dict\n",
13 | "import xarray as xr\n",
14 | "import matplotlib.pyplot as plt\n",
15 | "\n",
16 | "from vmodel import plot as vplot\n",
17 | "from vmodel.util import color as vcolor\n",
18 | "from vmodel.util import mpl as vmpl\n",
19 | "\n",
20 | "%load_ext autoreload\n",
21 | "%autoreload 2\n"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": null,
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "data_dir = '/home/fabian/vmodel_datasets/trajectories/pointmass_vs_quadcopter'"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "metadata": {},
37 | "outputs": [],
38 | "source": [
39 | "paths = glob.glob(data_dir + '/**/*.nc')\n",
40 | "len(paths)"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": null,
46 | "metadata": {},
47 | "outputs": [],
48 | "source": [
49 | "expdict = Dict()\n",
50 | "for path in paths:\n",
51 | " exp = path.split(os.sep)[-2]\n",
52 | " ds = xr.open_dataset(path)\n",
53 | " if exp == 'quadcopter':\n",
54 | " ds = ds.isel(time=slice(None, None, 10))\n",
55 | " expdict[exp] = ds"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "metadata": {},
62 | "outputs": [],
63 | "source": [
64 | "ds = xr.concat(expdict.values(), 'exp')\n",
65 | "ds.coords['exp'] = list(expdict.keys())"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "timeslice = slice(0, 60)"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "dsr = ds.sel(run=1).isel(time=timeslice, drop=True)"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": null,
89 | "metadata": {},
90 | "outputs": [],
91 | "source": [
92 | "figsize = (10, 3)"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": null,
98 | "metadata": {},
99 | "outputs": [],
100 | "source": [
101 | "def plot_trajectories(ax, ds, focal_agent=1, exp='visual', focal_color='black'):\n",
102 | " \"\"\"Plot trajectories of positions over time\n",
103 | " Args:\n",
104 | " ds (dataset): timestep x agent x state\n",
105 | " \"\"\"\n",
106 | " start, mid, end = 0, len(ds.time.data) // 2, -1\n",
107 | "\n",
108 | " putlabel = True\n",
109 | " for agent in ds.agent.data:\n",
110 | "\n",
111 | " # Check if focal agent\n",
112 | " isfocal = agent == focal_agent\n",
113 | "\n",
114 | " xs, ys = ds.position.sel(agent=agent)\n",
115 | "\n",
116 | " # alpha = 1.0 if isfocal else 0.5\n",
117 | " # lw = 2.0 if isfocal else 0.5\n",
118 | " alpha = 1.0\n",
119 | " lw = 2.0 if isfocal else 1.0\n",
120 | " color = focal_color if isfocal else vcolor.grey\n",
121 | " zorder = 1 if isfocal else 0\n",
122 | "\n",
123 | " if isfocal:\n",
124 | " label = f'{exp} (focal)'\n",
125 | " elif not isfocal and putlabel:\n",
126 | " label = f'{exp} (others)'\n",
127 | " putlabel = False\n",
128 | " else:\n",
129 | " label = None\n",
130 | "\n",
131 | " # Plot trajectory\n",
132 | " line = ax.plot(xs, ys, color=color, lw=lw, alpha=alpha, zorder=zorder, label=label)\n",
133 | " # color = line[0].get_color()\n",
134 | "\n",
135 | " # Plot trajectory start\n",
136 | " x0, y0 = xs.isel(time=start), ys.isel(time=start)\n",
137 | " ax.plot(x0, y0, color=color, marker='s', alpha=alpha, lw=lw, zorder=zorder)\n",
138 | "\n",
139 | " # Plot mid point\n",
140 | " xm, ym = xs.isel(time=mid), ys.isel(time=mid)\n",
141 | " ax.plot(xm, ym, color=color, marker='>', alpha=alpha, lw=lw, zorder=zorder)\n",
142 | "\n",
143 | " # Plot trajectory end\n",
144 | " xt, yt = xs.isel(time=end), ys.isel(time=end)\n",
145 | " circle = plt.Circle((xt, yt), color=color, radius=0.25, alpha=alpha, lw=lw, zorder=zorder)\n",
146 | " ax.add_patch(circle)\n",
147 | " # ax.plot(xt, yt, color=color, marker='o', alpha=alpha, lw=lw, zorder=zorder)\n",
148 | "\n",
149 | " # ax.grid()\n",
150 | " offset = 7\n",
151 | " ax.set(ylim=(-offset, offset), xlim=(-offset, 29 + offset))\n",
152 | " ax.set(xlabel=r'$x$ position [$m$]', ylabel=r'$y$ position [$m$]')\n",
153 | " ax.set(aspect='equal')\n",
154 | "\n",
155 | " handles, labels = ax.get_legend_handles_labels()\n",
156 | " order = [1, 0]\n",
157 | " newhandles = [handles[i] for i in order]\n",
158 | " newlabels = [labels[i] for i in order]\n",
159 | " ax.legend(newhandles, newlabels, loc='upper center', ncol=2)\n",
160 | " ax.locator_params(axis='y', nbins=5)"
161 | ]
162 | },
163 | {
164 | "cell_type": "code",
165 | "execution_count": null,
166 | "metadata": {},
167 | "outputs": [],
168 | "source": [
169 | "focal_agent = 12"
170 | ]
171 | },
172 | {
173 | "cell_type": "markdown",
174 | "metadata": {},
175 | "source": [
176 | "## Pointmass dynamics without noise"
177 | ]
178 | },
179 | {
180 | "cell_type": "code",
181 | "execution_count": null,
182 | "metadata": {},
183 | "outputs": [],
184 | "source": [
185 | "fig, ax = plt.subplots(figsize=figsize)\n",
186 | "plot_trajectories(ax, dsr.sel(exp='pointmass'), focal_agent=focal_agent, exp='point mass', focal_color=vcolor.pointmass)\n",
187 | "fig.savefig(f'trajectories_pointmass.pdf', bbox_inches='tight')"
188 | ]
189 | },
190 | {
191 | "cell_type": "markdown",
192 | "metadata": {},
193 | "source": [
194 | "## Quadcopter dynamics with noise"
195 | ]
196 | },
197 | {
198 | "cell_type": "code",
199 | "execution_count": null,
200 | "metadata": {},
201 | "outputs": [],
202 | "source": [
203 | "fig, ax = plt.subplots(figsize=figsize)\n",
204 | "plot_trajectories(ax, dsr.sel(exp='quadcopter'), focal_agent=focal_agent, exp='quadcopter', focal_color=vcolor.quadcopter)\n",
205 | "fig.savefig(f'trajectories_quadcopter.pdf', bbox_inches='tight')"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": null,
211 | "metadata": {},
212 | "outputs": [],
213 | "source": []
214 | }
215 | ],
216 | "metadata": {
217 | "interpreter": {
218 | "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
219 | },
220 | "kernelspec": {
221 | "display_name": "Python 3.6.9 64-bit",
222 | "name": "python3"
223 | },
224 | "language_info": {
225 | "codemirror_mode": {
226 | "name": "ipython",
227 | "version": 3
228 | },
229 | "file_extension": ".py",
230 | "mimetype": "text/x-python",
231 | "name": "python",
232 | "nbconvert_exporter": "python",
233 | "pygments_lexer": "ipython3",
234 | "version": "3.6.9"
235 | },
236 | "orig_nbformat": 4
237 | },
238 | "nbformat": 4,
239 | "nbformat_minor": 2
240 | }
241 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | gitpython>=3.1.18
2 | matplotlib>=3.3.4
3 | numba>=0.53.1
4 | numpy>=1.19.5
5 | pandas>=1.1.5
6 | scipy>=1.5.4
7 | scikit-learn>=0.24.2
8 | xarray>=0.16.2
9 | netcdf4>=1.5.7
10 | psutil>=5.9.0
11 | tqdm>=4.63.0
12 | pyyaml>=6.0
13 | addict>=2.4.0
14 |
--------------------------------------------------------------------------------
/scripts/params.yaml:
--------------------------------------------------------------------------------
1 | seed: 1337
2 | verbose: true
3 | progress: true
4 | dry_run: true
5 |
--------------------------------------------------------------------------------
/scripts/vmerge:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | """
4 | This script merges several netCDF4 (*.nc) datasets along a desired dimension,
5 | e.g., if several simulations have been performed with different group sizes.
6 | Example:
7 | vmerge -vP --attr num_agents --dim nagents dataset_*.nc
8 |
9 | The above invocation will create a file `merged_num_agents.nc` (default name) from
10 | several datasets, e.g., `dataset_agents_10.nc`, `dataset_agents_100.nc`, etc.
11 | """
12 |
13 | import argparse
14 | import os
15 | import sys
16 |
17 | import xarray as xr
18 |
19 | SCRIPT = os.path.basename(__file__)
20 |
21 |
22 | def main():
23 |
24 | formatter_class = argparse.ArgumentDefaultsHelpFormatter
25 | parser = argparse.ArgumentParser(description=SCRIPT,
26 | formatter_class=formatter_class)
27 |
28 | parser.add_argument('datasets', nargs='+', type=str, help='input datasets')
29 | parser.add_argument('-o', '--output', type=str, default=None,
30 | help='output dataset filename')
31 | parser.add_argument('-f', '--force', action='store_true', default=False,
32 | help='force overwriting of existing file')
33 | parser.add_argument('-v', '--verbose', action='store_true', default=False,
34 | help='verbose output')
35 | parser.add_argument('-P', '--progress', action='store_true', default=False,
36 | help='show progress bar')
37 | parser.add_argument('--attr', type=str, required=True,
38 | help='Attribute name')
39 | parser.add_argument('--dim', type=str, required=True,
40 | help='Name of new dimension')
41 |
42 | args = parser.parse_args()
43 |
44 | def vprint(*pargs, **kwargs):
45 | if args.verbose:
46 | print(f'[{SCRIPT}]', *pargs, **kwargs)
47 |     eprint = lambda *pargs, **kwargs: print(f'[{SCRIPT}]', *pargs, file=sys.stderr, **kwargs)
48 |
49 | if args.output is None:
50 | args.output = f'merged_{args.attr}.nc'
51 |
52 | if len(args.datasets) < 2:
53 | eprint('Need at least two datasets to merge. Exiting.')
54 | sys.exit(-1)
55 |
56 | vprint(f'Reading {len(args.datasets)} datasets')
57 | datasets = [xr.load_dataset(p) for p in args.datasets]
58 |
59 | # Add to dictionary and sort dataset
60 | dsdict = {d.attrs[args.attr]: d for d in datasets}
61 | dsdict = {k: d for k, d in sorted(dsdict.items())}
62 |
63 | # Concatenate into big dataset
64 | vprint('Merging datasets')
65 | merged = xr.concat(dsdict.values(), args.dim)
66 | merged.coords[args.dim] = list(dsdict.keys())
67 |
68 | if os.path.exists(args.output) and not args.force:
69 | eprint(f'Output dataset already exists: {args.output}. Overwrite with --force.')
70 | sys.exit(-1)
71 |
72 | vprint(f'Writing merged dataset: {args.output}')
73 | merged.to_netcdf(args.output)
74 |
75 |
76 | if __name__ == '__main__':
77 | try:
78 | main()
79 | except KeyboardInterrupt:
80 | sys.exit(0)
81 |
--------------------------------------------------------------------------------
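Stripped of argument parsing and overwrite checks, the merge performed by vmerge above boils down to an xarray concat along a new dimension whose coordinate values come from a per-file attribute. A rough sketch using the file names from the docstring example:

import xarray as xr

# Per-group-size datasets, as in the docstring example above
paths = ['dataset_agents_10.nc', 'dataset_agents_100.nc']
datasets = [xr.load_dataset(p) for p in paths]

# Key each dataset by the attribute of interest and sort by it
dsdict = {d.attrs['num_agents']: d for d in datasets}
dsdict = {k: d for k, d in sorted(dsdict.items())}

# Concatenate along the new 'nagents' dimension and label its coordinate
merged = xr.concat(dsdict.values(), 'nagents')
merged.coords['nagents'] = list(dsdict.keys())
merged.to_netcdf('merged_num_agents.nc')

--------------------------------------------------------------------------------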
/scripts/vmetric:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | """
4 | This script computes flocking metrics from simulation runs saved as netCDF4.
5 | Example:
6 | vmetric -vP dataset.nc
7 | The above invocation will create a new metric dataset `dataset.metrics.nc` from
8 | a simulation run contained in `dataset.nc`.
9 | """
10 |
11 | import argparse
12 | import os
13 | import sys
14 | import time
15 |
16 | import xarray as xr
17 | from dask.diagnostics import ProgressBar
18 | from vmodel import metrics as vmetr
19 | from vmodel.util import xarray as xmetr
20 |
21 | SCRIPT = os.path.basename(__file__)
22 |
23 |
24 | def compute_metrics(ds, dsmetric, args):
25 |
26 | # Slice of time to speed up computation
27 | ds = ds.isel(time=slice(None, None, args.time_slice))
28 |
29 | # Chunk dataset
30 | ds = ds.chunk({'time': 10})
31 |
32 | metric = 'nndist'
33 | if metric not in dsmetric:
34 | dsmetric[metric] = xmetr.nndist(ds.position)
35 |
36 | metric = 'order'
37 | if metric not in dsmetric:
38 | dims = ['agent', 'space']
39 | dsmetric[metric] = xmetr.metric_ufunc(ds.velocity, vmetr.order, dims)
40 |
41 | metric = 'union'
42 | if metric not in dsmetric:
43 | dims = ['agent', 'agent2']
44 | dsmetric[metric] = xmetr.metric_ufunc(ds.visibility, vmetr.union, dims)
45 |
46 |     # Computing connectivity takes a long time, hence it is commented out
47 | # metric = 'connectivity'
48 | # if metric not in dsmetric:
49 | # dims = ['agent', 'agent2']
50 | # dsmetric[metric] = xmetr.metric_ufunc(ds.visibility, vmetr.connectivity, dims)
51 |
52 |     metric = 'density' # technically a number density (num. agents / area)
53 | if metric not in dsmetric:
54 | dims = ['agent', 'space']
55 | dsmetric[metric] = xmetr.metric_ufunc(ds.position, vmetr.agent_density, dims)
56 |
57 |     metric = 'traveldist' # distance traveled by the mean agent position between the first and last timestep
58 | if metric not in dsmetric:
59 | meanpos = ds.position.mean('agent')
60 | traveldist = xmetr.norm(meanpos.isel(time=0) - meanpos.isel(time=-1), 'space')
61 | dsmetric[metric] = traveldist
62 |
63 | metric = 'nvisible'
64 | if metric not in dsmetric:
65 | # Convert to float to preserve NaNs when merging datasets!
66 | nvisible = ds.visibility.sum('agent2').astype(float)
67 | dsmetric[metric] = nvisible
68 |
69 | return dsmetric
70 |
71 |
72 | def main():
73 |
74 | formatter_class = argparse.ArgumentDefaultsHelpFormatter
75 | parser = argparse.ArgumentParser(description=SCRIPT,
76 | formatter_class=formatter_class)
77 |
78 | parser.add_argument('dataset', nargs='+', type=str, help='input dataset(s)')
79 | parser.add_argument('-a', '--append', action='store_true', default=False,
80 | help='append metrics to existing dataset')
81 | parser.add_argument('-f', '--force', action='store_true', default=False,
82 | help='force overwriting of existing file')
83 | parser.add_argument('-n', '--dry-run', action='store_true', default=False,
84 | help='dry run, do not save data')
85 | parser.add_argument('-v', '--verbose', action='store_true', default=False,
86 | help='verbose output')
87 | parser.add_argument('-P', '--progress', action='store_true', default=False,
88 | help='show progress bar')
89 | parser.add_argument('--time-slice', type=int, default=1,
90 | help='time slice')
91 |
92 | args = parser.parse_args()
93 |
94 | def vprint(*pargs, **kwargs):
95 | if args.verbose:
96 | print(f'[{SCRIPT}]', *pargs, **kwargs)
97 |     eprint = lambda *pargs, **kwargs: print(f'[{SCRIPT}]', *pargs, file=sys.stderr, **kwargs)
98 |
99 | start_time = time.time()
100 |
101 | for i, inpath in enumerate(args.dataset):
102 |
103 | # Skip files that are already metrics
104 | metric_ext = '.metrics.nc'
105 | if metric_ext in inpath:
106 | eprint(f'Skipping metric dataset: {inpath}')
107 | continue
108 |
109 | # Construct outpath
110 | path_without_ext = os.path.splitext(inpath)[0]
111 | outpath = f'{path_without_ext}{metric_ext}'
112 |
113 | # Check if metrics dataset already exists
114 | dsmetric = xr.Dataset()
115 | if os.path.exists(outpath):
116 | # Take existing dataset to append to
117 | if args.append:
118 | vprint(f'Appending to existing metric dataset: {outpath}')
119 | dsmetric = xr.load_dataset(outpath)
120 | if not (args.append or args.force):
121 | eprint(f'Metrics dataset already exists: {outpath}. Exiting.')
122 | sys.exit(-1)
123 |
124 | vprint(f'Reading dataset: {inpath}')
125 | ds = xr.open_dataset(inpath) # state dataset
126 | # If dataset has position, then it's a state dataset
127 | try:
128 | ds.position
129 | except AttributeError:
130 | continue
131 | dsmetric.attrs = ds.attrs # copy attrs
132 |
133 | vprint('Computing metrics...')
134 | dsmetric = compute_metrics(ds, dsmetric, args)
135 |
136 | if not args.dry_run:
137 | vprint(f'Writing output file: {outpath}')
138 | if args.progress:
139 | with ProgressBar():
140 | dsmetric.to_netcdf(outpath)
141 | else:
142 | dsmetric.to_netcdf(outpath)
143 |
144 | duration = time.time() - start_time
145 | vprint(f'Metrics computed in {duration:.2f}s')
146 |
147 |
148 | if __name__ == '__main__':
149 | try:
150 | main()
151 | except KeyboardInterrupt:
152 | sys.exit(0)
153 |
--------------------------------------------------------------------------------
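Downstream, the figure notebooks open the resulting *.metrics.nc files with xarray. A minimal sketch of inspecting one such file (the path is hypothetical, and the dimension layout is assumed to match the run/time structure used in the notebooks):

import xarray as xr
import matplotlib.pyplot as plt

# Hypothetical metrics file produced by: vmetric -vP dataset.nc
dsm = xr.open_dataset('dataset.metrics.nc')
print(list(dsm.data_vars))  # e.g. nndist, order, union, density, traveldist, nvisible

# Plot the order metric of a single run over time (assumes 'run' and 'time' dims)
dsm.order.sel(run=1).plot()
plt.xlabel('time [s]')
plt.ylabel('order')
plt.show()

--------------------------------------------------------------------------------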
/scripts/vmodel:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | """
4 | This script is used to run flocking simulations. Refer to the README or list
5 | existing options using:
6 | vmodel --help
7 | """
8 |
9 | import argparse
10 | import functools
11 | import itertools
12 | import os
13 | import sys
14 | import time
15 | import warnings
16 |
17 | import matplotlib.pyplot as plt
18 | import numpy as np
19 | import psutil
20 | import tqdm
21 | import yaml
22 | from vmodel import geometry
23 | from vmodel import liveplot as plot
24 | from vmodel import metrics
25 | from vmodel.core import update_single
26 | from vmodel.dataset import create_dataset, generate_filename, save_dataset
27 | from vmodel.flocking import olfati_saber_simplified, reynolds
28 | from vmodel.random import random_positions
29 | from vmodel.util.args import parse_vmodel_args
30 | from vmodel.util.multiprocessing import get_silent_pool
31 | from vmodel.util.util import human_readable as hr
32 | from vmodel.visibility import visibility_graph
33 |
34 | SCRIPT = os.path.basename(__file__)
35 |
36 |
37 | def run(r, args, func):
38 |
39 | # Set numpy random seed
40 | np.random.seed(args.seed[r])
41 |
42 | # Shorthands for arguments
43 | nagents, dt = args.num_agents, args.delta_time
44 |
45 | if args.progress:
46 | pbar = tqdm.tqdm(total=args.num_timesteps, unit=' timesteps',
47 | desc=f'[vmodel] [run {r + 1}]', position=r)
48 |
49 | data = argparse.Namespace()
50 | data.time, data.pos, data.vel, data.vis, data.dist = [], [], [], [], []
51 |
52 | if args.plot:
53 | # _, (axpos, axdist, axvel) = plt.subplots(num='Plot', figsize=(7, 10), nrows=3,
54 | # gridspec_kw={'height_ratios': [2, 1, 1]})
55 | _, axpos = plt.subplots(num='Plot', figsize=(7, 7))
56 | _, (axdist, axvel, axmet) = plt.subplots(num='State', nrows=3, figsize=(7, 7))
57 |
58 | # Compute metrics only when plotting them
59 | if args.plot_metrics:
60 | # _, axmet = plt.subplots(num='Metrics', figsize=(7, 7))
61 | data.ord, data.con, data.union, data.aspect = [], [], [], []
62 |
63 | positions = random_positions(nagents, args.spawn, args)
64 | velocities = np.zeros_like(positions)
65 |
66 | if args.parallel_agents:
67 | pool = get_silent_pool(args.jobs)
68 | warnings.warn('Cannot generate reproducible results with multiprocessing')
69 |
70 | # Timestep zero: add initial conditions to data
71 | t = 0.0
72 | data.time.append(t)
73 | data.pos.append(positions.copy())
74 | data.vel.append(velocities.copy())
75 | data.dist.append(geometry.distance_matrix(positions))
76 | data.vis.append(visibility_graph(positions, args.radius))
77 |
78 | # Compute metrics for zeroth timestep
79 | if args.plot_metrics:
80 | compute_metrics(data)
81 |
82 | for k in itertools.count(start=1, step=1):
83 |
84 | t += dt # Integrate time
85 |
86 | if args.num_timesteps is not None and k >= args.num_timesteps:
87 | break
88 |
89 | if args.progress:
90 | pbar.update(1)
91 |
92 | # List to hold precomputed distance and visibility data
93 | vis_list, dist_list = [], []
94 |
95 | # Compute updates, either in parallel or sequentially
96 | if args.parallel_agents:
97 | args_list = [(i, positions, func, args) for i in range(nagents)]
98 | updates = pool.starmap_async(update_single, args_list).get(9999999)
99 | else:
100 | updates = [update_single(i, positions, func, args) for i in range(nagents)]
101 |
102 | for i in range(nagents):
103 |
104 | cmd, cache = updates[i]
105 |
106 | # Compute new position from velocity command
107 | pos = positions[i] + cmd * dt
108 |
109 | # Save position and velocity
110 | positions[i] = pos
111 | velocities[i] = cmd
112 |
113 | # Save precomputed items
114 | vis_list.append(cache.vis.copy())
115 | dist_list.append(cache.dist.copy())
116 |
117 | # Save data
118 | if k % args.save_every == 0:
119 | data.time.append(t)
120 | data.pos.append(positions.copy())
121 | data.vel.append(velocities.copy())
122 |
123 | if not args.no_save_precomputed or args.plot:
124 |
125 |                 # Save visibility in any case
126 | vmat = [np.insert(vs, i, False) for i, vs in enumerate(vis_list)]
127 | data.vis.append(np.array(vmat))
128 |
129 | # Save distance *only* when plotting (can easily be re-computed)
130 | # Note: memory requirements blow up if this if-statement is not there
131 | if args.plot:
132 | dmat = [np.insert(ds, i, 0.0) for i, ds in enumerate(dist_list)]
133 | data.dist.append(np.array(dmat))
134 |
135 | if args.plot_metrics:
136 | compute_metrics(data)
137 |
138 | # Plotting
139 | if args.plot or args.plot_metrics:
140 | last_step = (args.num_timesteps is not None and k + 1 == args.num_timesteps)
141 | if k % args.plot_every == 0 or last_step:
142 | if args.plot:
143 | plot.plot_all(axpos, data, args)
144 | # plot_agents(axpos, data, args)
145 | plot_distances(axdist, data, args)
146 | plot_speed(axvel, data, args)
147 | # plot.plot_clutter(axpos, clutter_list)
148 | if args.plot_metrics:
149 | plot_metrics(axmet, data)
150 |
151 | # Wait for keypress if blocking, otherwise wait for dt and continue
152 | if args.plot_blocking:
153 | while plt.waitforbuttonpress() is False: # false -> mouse press
154 | pass # do nothing until true -> key press
155 | else:
156 | plt.pause(args.delta_time)
157 |
158 | if args.plot:
159 | plt.show()
160 |
161 | del data.dist # Save memory. Distance matrix can easily be re-computed...
162 |
163 |     # Delete precomputed data if we're not saving it anyway...
164 | if args.no_save_precomputed:
165 | del data.vis
166 |
167 | return data
168 |
169 |
170 | def compute_metrics(data):
171 |     # Append metrics computed from the most recent saved timestep
172 |     data.con.append(metrics.connectivity(data.vis[-1]))
173 | data.union.append(metrics.union(data.vis[-1]))
174 | data.ord.append(metrics.order(data.vel[-1]))
175 | data.aspect.append(metrics.aspect_ratio(data.pos[-1]))
176 |
177 |
178 | def plot_distances(ax, data, args):
179 | ax.clear()
180 | plot.plot_distances(ax, data.dist, data.time, radius=args.radius)
181 | ax.set(xlabel='time [s]', ylabel='distance [m]')
182 | ax.set(xlim=(0, data.time[-1]), ylim=(0, None))
183 | ax.legend(loc='lower right')
184 | ax.grid(True)
185 |
186 |
187 | def plot_speed(ax, data, args):
188 | ax.clear()
189 | plot.plot_speed(ax, data.vel, data.time)
190 | ax.set(xlabel='time [s]', ylabel='speed [m/s]')
191 | ax.set(xlim=(0, data.time[-1]), ylim=(0, None))
192 | ax.legend(loc='upper right')
193 | ax.grid(True)
194 |
195 |
196 | def plot_metrics(ax, data):
197 | ax.clear()
198 |
199 | # Plot order
200 | plot.plot_metric(ax, data.ord, data.time, name='order',
201 | color='tab:green')
202 |
203 | # Plot connectivity
204 | plot.plot_metric(ax, data.con, data.time, name='connectivity',
205 | color='tab:blue')
206 |
207 | # Plot union
208 | plot.plot_metric(ax, data.union, data.time, name='union',
209 | color='tab:orange')
210 |
211 | # Plot aspect
212 | plot.plot_metric(ax, data.aspect, data.time, name='aspect ratio',
213 | color='tab:cyan')
214 |
215 |     ax.set(xlabel='time [s]', ylabel='metric [-]')
216 | ax.set(xlim=(0, data.time[-1]), ylim=(0, None))
217 | ax.legend(loc='lower right')
218 | ax.grid(True)
219 |
220 |
221 | def main():
222 |
223 | args = parse_vmodel_args()
224 |
225 | # Load arguments from file (overwrites CLI arguments!)
226 | if args.file != '':
227 | file_dict = yaml.load(open(args.file, 'r'), Loader=yaml.FullLoader)
228 | args.__dict__.update(file_dict)
229 |
230 | # Define convenience print function (only prints in verbose mode)
231 | def vprint(*pargs, **kwargs):
232 | if args.verbose:
233 | print(f'[{SCRIPT}]', *pargs, **kwargs)
234 |     eprint = lambda *pargs, **kwargs: print(f'[{SCRIPT}]', *pargs, file=sys.stderr, **kwargs)
235 |
236 | # Only perception radius or perception angle can be defined
237 | if args.perception_radius > 0 and args.perception_angle > 0:
238 | eprint('Define either perception radius or angle, not both')
239 | sys.exit(-1)
240 |
241 | if args.migration_point and args.migration_dir:
242 | eprint('Define either migration point or migration direction, not both')
243 | sys.exit(-1)
244 |
245 | # Save current git hash (for repeatable experiments)
246 | try:
247 | import git
248 | repo = git.Repo(os.path.realpath(__file__), search_parent_directories=True)
249 | args.git_hash = repo.head.object.hexsha
250 | args.git_branch = repo.active_branch.name
251 | except ImportError:
252 | eprint('Install gitpython to save current git branch and hash')
253 |         sys.exit(-1)
254 |
255 | # Sample and save random seed for repeatable experiments
256 | if args.seed is None:
257 | # Sample positive 31 bit integers
258 | args.seed = np.random.randint(2 ** 31 - 1, size=args.num_runs)
259 | elif isinstance(args.seed, (int, float)):
260 | # Use the same seed for each run
261 | args.seed = np.full(args.num_runs, args.seed)
262 | elif isinstance(args.seed, list):
263 | # If we pass multiple seed values, we are trying to run the same experiment again
264 | if len(args.seed) != args.num_runs:
265 | eprint('If providing list of seeds, provide one for each run')
266 |             sys.exit(-1)
267 | args.seed = np.array(args.seed)
268 |
269 | vprint(f'Using random seed: {args.seed.tolist()}')
270 |
271 | # Check if we have enough memory available
272 | mem = psutil.virtual_memory()
273 | num_bytes_float64, num_states = 8, 2 # position and velocity
274 | num_per_step = args.num_runs * args.num_agents * args.num_dims / args.save_every
275 | num_bytes_per_step = num_per_step * num_bytes_float64 * num_states
276 | if args.num_timesteps is not None:
277 | mem_req = num_bytes_per_step * args.num_timesteps
278 | vprint(f'RAM required: {hr(mem_req)} (Used: {hr(mem.used)}/{hr(mem.total)})')
279 | if mem_req > mem.available:
280 | eprint('Script will consume more memory than is available. Exiting.')
281 |             sys.exit(-1)
282 | else:
283 | vprint(f'RAM required per timestep: {hr(num_bytes_per_step)}')
284 |
285 | # Derived variables
286 | args.radius_collision = 2 * args.radius
287 | args.radius_safety = 2 * args.radius_collision
288 |
289 | if args.algorithm == 'reynolds':
290 |         # For Reynolds flocking, infer the separation gain from the reference distance
291 | if args.ref_distance is not None:
292 | args.cohesion_gain = 1.0
293 | args.separation_gain = reynolds.infer_separation_gain3(args.ref_distance)
294 | vprint(f'Setting separation gain to {args.separation_gain:.2f}')
295 | func = functools.partial(reynolds.flock, cohesion_gain=args.cohesion_gain,
296 | separation_gain=args.separation_gain)
297 | elif args.algorithm == 'olfati':
298 | # For Olfati algorithm, set perception radius to 1.2 * dist
299 | if args.perception_radius is None:
300 | args.perception_radius = 1.2 * args.ref_distance
301 | vprint(f'Setting perception radius to {args.perception_radius:.2f}')
302 | func = functools.partial(olfati_saber_simplified.flock,
303 | distance=args.ref_distance,
304 | perception_radius=args.perception_radius)
305 |
306 | if args.spawn_distance is None:
307 | args.spawn_distance = args.ref_distance
308 |
309 | # Calculate desired radius of arena to keep agent number density constant
310 | args.mean_area_per_agent = np.pi * args.spawn_distance ** 2
311 | args.radius_arena = np.sqrt(args.num_agents * args.mean_area_per_agent / np.pi)
312 |
313 | # Agent density
314 | area_arena = np.pi * args.radius_arena ** 2
315 | agent_number_density = args.num_agents / area_arena
316 | vprint(f'Agent number density: {agent_number_density:.2f} agents/m^2')
317 |
318 | # Migration
319 | if len(args.migration_dir) > 0:
320 | direction = np.array(args.migration_dir)
321 | args.migration_dir = direction / np.linalg.norm(direction)
322 |
323 | args.radius_migration_point = 1.0 # only relevant for visual migration
324 |
325 | if args.plot and (args.save_every > args.plot_every):
326 | eprint('Cannot plot more often than saving. Consider lowering --save-every')
327 |         sys.exit(-1)
328 |
329 | # No need for parallelizing runs if there is just one
330 | args.no_parallel_runs = True if args.num_runs == 1 else args.no_parallel_runs
331 |
332 | # Time run to obtain real-time factor
333 | time_start = time.time()
334 |
335 | if args.no_parallel_runs:
336 | datas = [run(r, args, func) for r in range(args.num_runs)]
337 | else:
338 | pool = get_silent_pool(args.jobs)
339 | args_list = [(r, args, func) for r in range(args.num_runs)]
340 |         datas = pool.starmap_async(run, args_list).get(9999999)  # huge timeout so Ctrl+C still works; don't judge!
341 |
342 | duration = time.time() - time_start
343 | vprint(f'Total duration: {duration:.2f} seconds')
344 |
345 | if args.dry_run:
346 | vprint('Dry run. Exiting without writing data.')
347 | sys.exit(0)
348 |
349 | # Create dataset from multiple runs
350 | ds = create_dataset(datas, args)
351 |
352 | # Generate filename: date_args.ext
353 | fname = generate_filename(args)
354 |
355 | # Save dataset and config file
356 | vprint(f'Saving {args.format} file: {fname}')
357 | save_dataset(ds, fname, args)
358 |
359 |
360 | if __name__ == '__main__':
361 | try:
362 | main()
363 | except KeyboardInterrupt:
364 | sys.exit(0)
365 |
--------------------------------------------------------------------------------
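The memory check in the script above estimates how large the saved dataset will be before simulating anything. A worked example of the same arithmetic, with made-up settings (the variable names mirror the script):

    # Hypothetical run settings, for illustration only
    num_runs, num_agents, num_dims = 10, 100, 2
    num_timesteps, save_every = 5000, 10
    num_bytes_float64, num_states = 8, 2  # float64 position and velocity
    num_per_step = num_runs * num_agents * num_dims / save_every        # 200 values
    num_bytes_per_step = num_per_step * num_bytes_float64 * num_states  # 3200 bytes
    mem_req = num_bytes_per_step * num_timesteps                        # 16 MB in total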
/setup.py:
--------------------------------------------------------------------------------
1 | from pkg_resources import parse_requirements
2 | from setuptools import setup
3 |
4 | long_description = open('README.md').read()
5 | install_requires = [str(r) for r in parse_requirements(open('requirements.txt'))]
6 |
7 | setup(
8 | name='vmodel',
9 | version='0.0.1',
10 | author='Fabian Schilling',
11 |     author_email='fabian@schilli.ng',
12 |     packages=['vmodel', 'vmodel.flocking', 'vmodel.util'],
13 | license='LICENSE',
14 | scripts=['scripts/vmodel', 'scripts/vmetric', 'scripts/vmerge'],
15 | description='Visual model',
16 | long_description=long_description,
17 | long_description_content_type='text/markdown',
18 | install_requires=install_requires,
19 | )
20 |
--------------------------------------------------------------------------------
/test/test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """Run tests from this directory as follows: python3 -m unittest -v test"""
3 |
4 | import unittest
5 |
6 | import numpy as np
7 | from vmodel.geometry import is_angle_in_interval, tangent_points_to_circle
8 | from vmodel.visibility import visibility_set, is_occluding
9 |
10 |
11 | class Test(unittest.TestCase):
12 |
13 | def test_tangent_points_to_circle_quadrant_1(self):
14 |
15 | center = np.array([3., 2.])
16 | radius = 0.5
17 | pt1_true = np.array([3.2169780165, 1.5495329753])
18 | pt2_true = np.array([2.6676373681, 2.3735439478])
19 |
20 | pt1, pt2 = tangent_points_to_circle(center, radius)
21 |
22 | self.assertTrue(np.allclose(pt1, pt1_true))
23 | self.assertTrue(np.allclose(pt2, pt2_true))
24 |
25 | def test_tangent_points_to_circle_quadrant_2(self):
26 |
27 | center = np.array([-4., 2.])
28 | radius = 1.
29 | pt1_true = np.array([-3.3641101056, 2.7717797887])
30 | pt2_true = np.array([-4.2358898944, 1.0282202113])
31 |
32 | pt1, pt2 = tangent_points_to_circle(center, radius)
33 |
34 | self.assertTrue(np.allclose(pt1, pt1_true))
35 | self.assertTrue(np.allclose(pt2, pt2_true))
36 |
37 | def test_tangent_points_to_circle_angles(self):
38 |
39 | center = np.array([6., 0.])
40 | radius = 1.
41 | angle1_true, angle2_true = -9.5940682269, 9.5940682269
42 |
43 | pt1, pt2 = tangent_points_to_circle(center, radius)
44 |
45 | angle1 = np.rad2deg(np.arctan2(pt1[1], pt1[0]))
46 | angle2 = np.rad2deg(np.arctan2(pt2[1], pt2[0]))
47 |
48 | self.assertAlmostEqual(angle1, angle1_true)
49 | self.assertAlmostEqual(angle2, angle2_true)
50 |
51 | def test_tangent_points_to_circle_within_circle(self):
52 | center = np.array([1., 1.])
53 | radius = 2.
54 | self.assertIsNone(tangent_points_to_circle(center, radius))
55 |
56 | def test_tangent_points_to_circle_on_circle(self):
57 | center = np.array([1., 0.])
58 | radius = 1.
59 | pt1_true = pt2_true = np.array([0., 0.])
60 | pt1, pt2 = tangent_points_to_circle(center, radius)
61 | self.assertTrue(np.allclose(pt1, pt1_true))
62 | self.assertTrue(np.allclose(pt2, pt2_true))
63 |
64 | def test_is_occluding_full_occlusion(self):
65 | center1, center2 = np.array([6., 2.]), np.array([9., 3.])
66 | radius1, radius2 = 1., 1.
67 | self.assertTrue(is_occluding(center1, radius1, center2, radius2))
68 |
69 | def test_is_occluding_distance_larger(self):
70 | center1, center2 = np.array([6., 5.]), np.array([6., 2.])
71 | radius1, radius2 = 1., 1.
72 | self.assertFalse(is_occluding(center1, radius1, center2, radius2))
73 |
74 | def test_is_occluding_distance_larger_radius_closer(self):
75 | center1, center2 = np.array([6., 5.]), np.array([6., 2.])
76 | radius1, radius2 = 4., 1.
77 | self.assertTrue(is_occluding(center1, radius1, center2, radius2))
78 |
79 | def test_is_occluding_no_occlusion(self):
80 | center1, center2 = np.array([2., 3.]), np.array([6., 2.])
81 | radius1, radius2 = 1., 1.
82 | self.assertFalse(is_occluding(center1, radius1, center2, radius2))
83 |
84 | def test_is_occluding_no_occlusion_quadrants(self):
85 | center1, center2 = np.array([6., -2.]), np.array([6., 2.])
86 | radius1, radius2 = 1., 1.
87 | self.assertFalse(is_occluding(center1, radius1, center2, radius2))
88 |
89 | def test_is_occluding_same_circle(self):
90 | center1, center2 = np.array([6., 0.]), np.array([6., 0.])
91 | radius1, radius2 = 1., 1.
92 | self.assertTrue(is_occluding(center1, radius1, center2, radius2))
93 |
94 | def test_is_occluding_smaller_within(self):
95 | center1, center2 = np.array([6., 0.]), np.array([6., 0.])
96 | radius1, radius2 = 0.5, 1.
97 | self.assertFalse(is_occluding(center1, radius1, center2, radius2))
98 |
99 | def test_is_occluding_smaller_point_within_both_circles(self):
100 | center1, center2 = np.array([1., 0.]), np.array([2., 0.])
101 | radius1, radius2 = 2., 3.
102 | self.assertTrue(is_occluding(center1, radius1, center2, radius2))
103 |
104 | def test_is_occluding_further_away_but_larger_quadrant(self):
105 | center1, center2 = np.array([7., -1.]), np.array([6., 2.])
106 | radius1, radius2 = 4., 1.
107 | self.assertTrue(is_occluding(center1, radius1, center2, radius2))
108 |
109 | def test_is_angle_in_interval_inside_normal(self):
110 | angle = np.deg2rad(45)
111 | interval = np.array((np.deg2rad(0), np.deg2rad(90)))
112 | self.assertTrue(is_angle_in_interval(angle, interval))
113 |
114 | def test_is_angle_in_interval_inside_quadrant(self):
115 | angle = np.deg2rad(300)
116 | interval = np.array((np.deg2rad(270), np.deg2rad(45)))
117 | self.assertTrue(is_angle_in_interval(angle, interval))
118 |
119 | def test_is_angle_in_interval_outside_normal(self):
120 | angle = np.deg2rad(45)
121 | interval = np.array((np.deg2rad(90), np.deg2rad(180)))
122 | self.assertFalse(is_angle_in_interval(angle, interval))
123 |
124 | def test_is_angle_in_interval_outside_quadrant(self):
125 | angle = np.deg2rad(45)
126 | interval = np.array((np.deg2rad(340), np.deg2rad(25)))
127 | self.assertFalse(is_angle_in_interval(angle, interval))
128 |
129 | def test_compute_occlusions_random1(self):
130 | radius = 0.5
131 | occluded_agents_true = np.array([3, 5, 7]) # 1-indexed!
132 | positions = np.array([[2.83778229, 3.0783252],
133 | [-1.46653437, 1.80453313],
134 | [-2.80929834, 1.67320948],
135 | [-2.38409982, 0.6235275],
136 | [-2.91558909, 0.22018174],
137 | [2.37974831, -3.56279092],
138 | [-1.98530291, 1.58483259],
139 | [3.81785359, 0.99041497],
140 | [0.12622196, -1.98584117]])
141 | occluded = ~visibility_set(positions, radius)
142 | occluded_agents = np.array([i + 1 for i in range(len(positions)) if occluded[i]])
143 | self.assertTrue(np.all(occluded_agents == occluded_agents_true))
144 |
145 | def test_compute_occlusions_random2(self):
146 | radius = 0.5
147 | occluded_agents_true = np.array([2, 5, 6]) # 1-indexed!
148 | positions = np.array([[-2.75114029, -2.06400953],
149 | [-2.04081727, 2.16765629],
150 | [3.66583955, -1.15494228],
151 | [2.49819317, -3.82918581],
152 | [-2.99967231, 3.72956997],
153 | [-1.82957993, 3.58261861],
154 | [-3.85311192, 1.22281318],
155 | [2.09105496, 3.36389395],
156 | [-1.04757504, 1.94455198]])
157 | occluded = ~visibility_set(positions, radius)
158 | occluded_agents = np.array([i + 1 for i in range(len(positions)) if occluded[i]])
159 | self.assertTrue(np.all(occluded_agents == occluded_agents_true))
160 |
161 |
162 | def main():
163 | unittest.main()
164 |
165 |
166 | if __name__ == '__main__':
167 | main()
168 |
--------------------------------------------------------------------------------
/vmodel/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = '0.0.1'
2 |
--------------------------------------------------------------------------------
/vmodel/core.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import numpy as np
4 |
5 | from vmodel.geometry import subtended_angle_circle, voronoi_neighbors
6 | from vmodel.math import filter_by_angle, limit_norm, wrap_angle
7 | from vmodel.random import random_uniform_within_circle
8 | from vmodel.visibility import visibility_set
9 |
10 |
11 | # Uncomment the @profile decorator and run to perform line-by-line profiling:
12 | # kernprof -l -v vmodel ...
13 | # @profile
14 | def update_single(i, positions, func, args):
15 | """Update a single agent
16 | Args:
17 | i: index of focal agent (zero-indexed)
18 | positions: absolute positions of agents
19 | func: flocking algorithm function to apply
20 | args: parsed command-line arguments
21 | Returns:
22 | command: velocity command for focal agent
23 | cache: holds precomputed values
24 | """
25 |
26 | cache = argparse.Namespace()
27 |
28 | # Shorthands for arguments
29 | ndim = args.num_dims
30 |
31 | # Get relative positions of others
32 | pos_self = positions[i]
33 | pos_others = np.delete(positions, i, axis=0)
34 | pos = pos_others - pos_self
35 | # pos_waypoint = args.pos_waypoint - pos_self
36 |
37 | # Keep track of original agent indices
38 | idx = np.arange(len(pos), dtype=int)
39 |
40 | # Radii of agents
41 | rad = np.full(len(pos), args.radius)
42 |
43 | # Compute relative distances
44 | dist = np.linalg.norm(pos, axis=1)
45 | cache.dist = dist.copy()
46 | # cache.dist = np.insert(dist, i, 0.0)
47 |
48 | # Optionally add false positive detections
49 | if args.num_clutter > 0.0:
50 |         # Sample a Poisson-distributed number of false positive detections
51 | num_clutter = np.random.poisson(args.num_clutter)
52 |
53 | if num_clutter > 0:
54 | low, high = args.radius_safety, args.perception_radius
55 | pos_clutter = random_uniform_within_circle(low, high,
56 | size=(num_clutter, ndim))
57 | pos_clutter = np.array(pos_clutter).reshape(-1, ndim)
58 |
59 | # while len(clutter_list) < num_clutter:
60 | # low, high = args.radius_arena, args.radius_arena + args.perception_radius
61 | # clutter = sample_fn(low=low, high=high)
62 | # clutter_list.append(clutter)
63 |
64 |             # # Randomly choose clutters from list
65 | # choice = np.random.choice(np.arange(len(clutter_list)), num_clutter)
66 | # pos_clutter = np.array(clutter_list)[choice].reshape(-1, ndim)
67 |
68 | # Add clutter to rest of detections
69 | dist_clutter = np.linalg.norm(pos_clutter, axis=1)
70 | pos = np.concatenate([pos, pos_clutter])
71 | dist = np.concatenate([dist, dist_clutter])
72 | idx = np.arange(len(pos), dtype=int)
73 | rad = np.concatenate([rad, np.full(len(pos_clutter), args.radius)])
74 |
75 | # If using visual migration, check if the waypoint is visible (as with agents)
76 | # waypoint_visible = True
77 | # if args.visual_migration:
78 | # idx_waypoint = idx[-1] + 1
79 | # idx = np.append(idx, idx_waypoint)
80 | # rad = np.append(rad, args.radius_waypoint)
81 | # dist = np.append(dist, np.linalg.norm(pos_waypoint))
82 | # pos = np.append(pos, [pos_waypoint], axis=0)
83 |
84 | # Filter out agents by metric distance
85 | if args.perception_radius > 0:
86 | mask = dist < args.perception_radius
87 | pos, dist, idx, rad = pos[mask], dist[mask], idx[mask], rad[mask]
88 |
89 |     # Filter out agents whose subtended angle is below the perception threshold
90 | if args.perception_angle > 0:
91 | angles_rad = subtended_angle_circle(dist, rad)
92 | mask = angles_rad > np.deg2rad(args.perception_angle)
93 | pos, dist, idx, rad = pos[mask], dist[mask], idx[mask], rad[mask]
94 |
95 | # Filter out occluded agents
96 | if args.filter_occluded:
97 | mask = visibility_set(pos, rad, dist)
98 | pos, dist, idx = pos[mask], dist[mask], idx[mask]
99 |
100 | # Filter out waypoint (if visible)
101 | # if args.visual_migration:
102 | # waypoint_visible = idx_waypoint in idx
103 | # if waypoint_visible:
104 | # mask = idx != idx_waypoint
105 | # pos, dist, idx = pos[mask], dist[mask], idx[mask]
106 |
107 | # Filter out agents by topological distance
108 | if args.max_agents > 0:
109 | indices = dist.argsort()[:args.max_agents]
110 | pos, dist, idx = pos[indices], dist[indices], idx[indices]
111 |
112 | if args.topo_angle > 0:
113 | angle = np.deg2rad(args.topo_angle)
114 | mask = filter_by_angle(pos, angle)
115 | pos, dist, idx = pos[mask], dist[mask], idx[mask]
116 |
117 | if args.filter_voronoi:
118 | posme = np.insert(pos, 0, np.zeros(ndim), axis=0)
119 | try:
120 | indices = np.array(voronoi_neighbors(posme)[0]) - 1
121 | except Exception:
122 | pass # in case there are not enough points, do nothing
123 | else:
124 | pos, dist, idx = pos[indices], dist[indices], idx[indices]
125 |
126 | # Save visibility data (after applying perception filtering)
127 | visibility = np.zeros(len(pos_others), dtype=bool)
128 | visibility[idx[idx < len(pos_others)]] = True
129 | # cache.vis = np.insert(visibility, i, False)
130 | cache.vis = visibility.copy()
131 |
132 | # Optionally add noise to range and bearing
133 |     # The noise is added *after* the visual filtering: adding it earlier could create
134 |     # spurious occlusions that would not occur visually
135 | if args.range_std > 0 or args.bearing_std > 0:
136 | xs, ys = pos.T.copy()
137 | distance = dist.copy()
138 | bearing = np.arctan2(ys, xs) # [-pi, +pi]
139 | noise_distance, noise_bearing = np.random.normal(size=(ndim, len(xs)))
140 | noise_distance *= args.range_std
141 | noise_bearing *= np.deg2rad(args.bearing_std)
142 | distance += (noise_distance * distance) # distance noise is distance-dependent!
143 | bearing += noise_bearing
144 | bearing = wrap_angle(bearing) # wrap to [-pi, +pi]
145 | xs, ys = distance * np.cos(bearing), distance * np.sin(bearing)
146 | pos = np.array([xs, ys]).T
147 | dist = np.linalg.norm(pos, axis=1)
148 |
149 | # Optionally discard agents with false negative prob
150 | if args.false_negative_prob:
151 | keep_prob, size = 1 - args.false_negative_prob, len(pos)
152 | mask = np.random.binomial(n=1, p=keep_prob, size=size).astype(bool)
153 | pos, dist = pos[mask], dist[mask]
154 |
155 | # Compute command from interactions (if any relative positions)
156 | command_interaction = np.zeros(ndim)
157 |
158 | if len(pos) > 0:
159 | command_interaction = func(pos, dist)
160 |
161 | # Compute migration command
162 | command_migration = np.zeros(ndim)
163 |
164 | # Migrate to migration point
165 | if len(args.migration_point) > 0:
166 | pos_waypoint = (args.migration_point - pos_self)
167 | command_migration = limit_norm(pos_waypoint, args.migration_gain)
168 |
169 | # Follow general migration direction
170 | if len(args.migration_dir) > 0:
171 | command_migration = limit_norm(args.migration_dir, args.migration_gain)
172 |
173 | # Compute final command
174 | command = command_interaction + command_migration
175 |
176 | # Limit speed
177 | command = limit_norm(command, args.max_speed)
178 |
179 | return command, cache
180 |
--------------------------------------------------------------------------------
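update_single() above is the per-agent perception-and-control step used by the simulator. A minimal sketch of calling it for one synchronous timestep, assuming the package is installed; the parameter values in the Namespace are illustrative guesses, not the defaults of the actual vmodel command-line interface:

    import argparse
    import functools

    import numpy as np

    from vmodel.core import update_single
    from vmodel.flocking import reynolds

    args = argparse.Namespace(
        num_dims=2, radius=0.25, radius_safety=1.0, num_clutter=0,
        perception_radius=10.0, perception_angle=0, filter_occluded=True,
        max_agents=0, topo_angle=0, filter_voronoi=False,
        range_std=0.0, bearing_std=0.0, false_negative_prob=0.0,
        migration_point=[], migration_dir=[], migration_gain=1.0, max_speed=1.0)

    func = functools.partial(reynolds.flock, separation_gain=1.0, cohesion_gain=1.0)
    positions = np.random.uniform(-3, 3, size=(10, args.num_dims))

    # One synchronous update: every agent computes its command from the same positions
    commands = [update_single(i, positions, func, args)[0] for i in range(len(positions))]
    print(np.array(commands).shape)  # (10, 2): one velocity command per agent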
/vmodel/dataset.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | from datetime import datetime
3 |
4 | import numpy as np
5 | import pandas as pd
6 | import xarray as xr
7 | import yaml
8 |
9 | from vmodel.util.util import clean_attrs
10 |
11 |
12 | def generate_filename(args):
13 |
14 | # Construct output file name
15 | time_str = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
16 | fnamedict = {
17 | 'agents': args.num_agents,
18 | 'runs': args.num_runs,
19 | 'times': args.num_timesteps,
20 | 'dist': args.ref_distance,
21 | 'perc': args.perception_radius,
22 | 'topo': args.max_agents,
23 | 'rngstd': args.range_std,
24 | }
25 | formatexts = {'netcdf': 'nc', 'pickle': 'pkl'}
26 | args_str = '_'.join(f'{k}_{v}' for k, v in fnamedict.items())
27 | return f'{time_str}_{args_str}.states.{formatexts[args.format]}'
28 |
29 |
30 | def create_dataset(datas, args):
31 |
32 | ds = xr.Dataset()
33 |
34 | # Clean up attrs dict to be compatible with YAML and NETCDF
35 | ds.attrs = clean_attrs(vars(args))
36 |
37 | time = np.array(datas[0].time)
38 | pos = np.array([d.pos for d in datas])
39 | vel = np.array([d.vel for d in datas])
40 |
41 | coord_run = np.arange(args.num_runs, dtype=int) + 1
42 | coord_time = pd.to_timedelta(time, unit='s')
43 | coord_agent = np.arange(args.num_agents, dtype=int) + 1
44 | coord_space = np.array(['x', 'y'])
45 |
46 | coords_rtas = {
47 | 'run': coord_run,
48 | 'time': coord_time,
49 | 'agent': coord_agent,
50 | 'space': coord_space
51 | }
52 |
53 | dapos = xr.DataArray(pos, dims=coords_rtas.keys(), coords=coords_rtas)
54 | dapos.attrs['units'] = 'meters'
55 | dapos.attrs['long_name'] = 'position'
56 | ds['position'] = dapos
57 |
58 | davel = xr.DataArray(vel, dims=coords_rtas.keys(), coords=coords_rtas)
59 | davel.attrs['units'] = 'meters/second'
60 | davel.attrs['long_name'] = 'velocity'
61 | ds['velocity'] = davel
62 |
63 | ds = ds.transpose('run', 'agent', 'space', 'time')
64 |
65 | # Return only state (position and velocity)
66 | if args.no_save_precomputed:
67 | return ds
68 |
69 | coords_rtaa = {
70 | 'run': coord_run,
71 | 'time': coord_time,
72 | 'agent': coord_agent,
73 | 'agent2': coord_agent
74 | }
75 |
76 | vis = np.array([d.vis for d in datas])
77 | davis = xr.DataArray(vis, dims=coords_rtaa.keys(), coords=coords_rtaa)
78 | davis.attrs['units'] = 'boolean'
79 | davis.attrs['long_name'] = 'visibility'
80 | ds['visibility'] = davis
81 |
82 |     # Transpose to match data generated from Gazebo
83 | ds = ds.transpose('run', 'agent', 'agent2', 'space', 'time')
84 |
85 | return ds
86 |
87 |
88 | def save_dataset(ds, fname, args):
89 |
90 | if args.format == 'pickle':
91 | with open(fname, 'wb') as f:
92 | pickle.dump(ds, f, protocol=pickle.HIGHEST_PROTOCOL)
93 | elif args.format == 'netcdf':
94 | comp = dict(zlib=True, complevel=5)
95 | encoding = None if args.no_compress else {v: comp for v in ds.data_vars}
96 | ds.to_netcdf(fname, encoding=encoding)
97 |
98 | with open(f'{fname}.yaml', 'w') as f:
99 | yaml.dump(ds.attrs, f)
100 |
--------------------------------------------------------------------------------
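Datasets written by save_dataset() in netcdf format can be reloaded directly with xarray; the simulation arguments are available as attributes. A small sketch with a placeholder filename (real files are named by generate_filename() above):

    import xarray as xr

    ds = xr.open_dataset('example.states.nc')       # placeholder filename
    print(ds.attrs)                                 # simulation arguments
    trajectory = ds.position.sel(run=1, agent=1)    # (space, time) for agent 1
    speed = (ds.velocity ** 2).sum('space') ** 0.5  # per-agent speed over time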
/vmodel/flocking/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lis-epfl/vmodel/d8860087b94ffc3582f187523864550ecf135b24/vmodel/flocking/__init__.py
--------------------------------------------------------------------------------
/vmodel/flocking/gregoire.py:
--------------------------------------------------------------------------------
1 | """
2 | Moving and staying together without a leader
3 | Source:
4 | """
5 | import numpy as np
6 |
7 |
8 | def flock(positions: np.ndarray, distance: float, perception_radius: float, d_c: float):
9 | d_e, d_a = distance, perception_radius
10 | distances = np.linalg.norm(positions, axis=1)
11 | units = positions / distances[:, np.newaxis]
12 | commands = []
13 | for i in range(len(positions)):
14 | command = units[i] * potential(distances[i], d_c, d_e, d_a)
15 | commands.append(command)
16 | return np.sum(commands, axis=0)
17 |
18 |
19 | def potential(r, r_c, r_e, r_a):
20 | """Potential (Eq. 4)
21 | Args:
22 | r: distance between agent i and j
23 | r_c: hard-core repulsion distance
24 | r_e: equilibrium (preferred) distance
25 | r_a: maximum interaction range
26 | Returns:
27 | potential: attractive/repulsive potential
28 | """
29 | if r < r_c:
30 | return -1 # should be -inf
31 | elif r < r_a:
32 | return 0.25 * (r - r_e) / (r_a - r_c)
33 | else:
34 | return 1
35 |
--------------------------------------------------------------------------------
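A quick look at the three regimes of potential() above (hard-core repulsion below d_c, a linear restoring term up to d_a, constant attraction beyond), with illustrative distances:

    from vmodel.flocking.gregoire import potential

    d_c, d_e, d_a = 0.5, 1.0, 2.0          # hard-core, equilibrium, interaction range
    print(potential(0.25, d_c, d_e, d_a))  # -1: hard-core repulsion (stands in for -inf)
    print(potential(1.0, d_c, d_e, d_a))   # 0.0: no interaction at the equilibrium distance
    print(potential(1.6, d_c, d_e, d_a))   # 0.1: weak attraction toward the neighbour
    print(potential(3.0, d_c, d_e, d_a))   # 1: constant attraction beyond the interaction range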
/vmodel/flocking/leonard.py:
--------------------------------------------------------------------------------
1 | """
2 | Virtual Leaders, Artificial Potentials and Coordinated Control of Groups
3 | Paper:
4 | """
5 | import numpy as np
6 |
7 |
8 | def flock(positions: np.ndarray, distance: float, perception_radius: float, a=1):
9 | ds = np.linalg.norm(positions, axis=1)
10 | us = positions / ds[:, np.newaxis]
11 | d_0, d_1 = distance, perception_radius
12 | return np.sum([action(ds[i], d_0, d_1, a) * us[i] for i in range(len(ds))], axis=0)
13 |
14 |
15 | def action(r_ij, d_0, d_1, a=1.0) -> float:
16 | return a * (- (d_0 / r_ij ** 2) + (1 / r_ij)) if r_ij < d_1 else 0.0
17 |
18 |
19 | def potential(r_ij, d_0, d_1, alpha=1.0):
20 | if r_ij < d_1:
21 | return alpha * (np.log(r_ij) + (d_0 / r_ij))
22 | else:
23 | return alpha * (np.log(d_1) + (d_0 / d_1))
24 |
--------------------------------------------------------------------------------
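The action() term above vanishes exactly at the reference distance d_0 and is cut off beyond d_1, which is what flock() relies on. A quick check with illustrative values:

    from vmodel.flocking.leonard import action

    d_0, d_1 = 1.0, 3.0
    print(action(0.5, d_0, d_1))  # -2.0: repulsion below the reference distance
    print(action(1.0, d_0, d_1))  # 0.0: equilibrium at r_ij == d_0
    print(action(2.0, d_0, d_1))  # 0.25: attraction up to the cutoff d_1
    print(action(4.0, d_0, d_1))  # 0.0: no interaction beyond d_1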
/vmodel/flocking/olfati_saber.py:
--------------------------------------------------------------------------------
1 | """
2 | Olfati-Saber algorithm (Flocking for Multi-Agent Dynamic Systems: Algorithms and Theory)
3 | Source:
4 | """
5 | import numpy as np
6 | from numba import njit
7 | from scipy import integrate
8 |
9 |
10 | def flock(positions: np.ndarray, distance: float, perception_radius: float,
11 | a=1.0, b=5.0, eps=0.1, h=0.2):
12 | """Olfati saber algorithm (only gradient based term) (Eq. 23, first term)
13 | Args:
14 | positions: relative positions of other agents
15 | distance: desired inter-agent distance
16 | perception_radius: cutoff at perception radius
17 | eps: parameter of the sigma-norm
18 | h: parameter of the bump function
19 | """
20 | d, r = distance, perception_radius
21 | assert d > 0
22 | assert r >= d
23 | assert eps > 0
24 | assert h >= 0 and h <= 1
25 | return np.sum([gradient_based_term(p, d, r, a, b, eps, h) for p in positions], axis=0)
26 |
27 |
28 | def gradient_based_term(x, d, r, a, b, eps, h):
29 | """Gradient-based term (Eq. 23, first term) """
30 | # n = sigma_norm_grad(x) # This does not seem to work
31 | n = x / np.linalg.norm(x) # TODO: check why this works better than the above!
32 | return phi_alpha(sigma_norm(x, eps), d, r, a, b, eps, h) * n
33 |
34 |
35 | @njit
36 | def sigma_norm(z, eps=0.1):
37 | """Sigma-norm (Eq. 8)"""
38 | return (1 / eps) * (np.sqrt(1 + eps * np.linalg.norm(z) ** 2) - 1)
39 |
40 |
41 | @njit
42 | def sigma_norm_scalar(z, eps=0.1):
43 | """Scalar version of the sigma-norm (otherwise Numba complains about norm of float)"""
44 | return (1 / eps) * (np.sqrt(1 + eps * z ** 2) - 1)
45 |
46 |
47 | @njit
48 | def sigma_norm_grad(z, eps=0.1):
49 | """Gradient of sigma-norm (Eq. 9)"""
50 | return z / np.sqrt(1 + eps * np.linalg.norm(z) ** 2)
51 |
52 |
53 | @njit
54 | def sigma_norm_grad_scalar(z, eps=0.1):
55 | """Scalar version of the sigma-norm gradient"""
56 | return z / np.sqrt(1 + eps * z ** 2)
57 |
58 |
59 | @njit
60 | def phi_alpha(z, d, r, a, b, eps=0.1, h=0.2):
61 | """Action function (Eq. 15, first line)"""
62 | d_alpha = sigma_norm_scalar(d, eps)
63 | r_alpha = sigma_norm_scalar(r, eps)
64 | return rho_h(z / r_alpha, h) * phi(z - d_alpha, a, b)
65 |
66 |
67 | @njit
68 | def rho_h(z, h=0.2):
69 | """Bump function that varies smoothly from 1 to 0 over the (0, 1) interval (Eq. 10)
70 | Args:
71 | z (float): Relative distance
72 | h (float): Offset from which rho_h starts decreasing
73 | Returns:
74 | (float): value that varies smoothly on the interval (1 to 0)
75 | """
76 | if z < h:
77 | return 1
78 | elif z < 1:
79 | return 0.5 * (1 + np.cos(np.pi * ((z - h) / (1 - h))))
80 | else:
81 | return 0
82 |
83 |
84 | @njit
85 | def phi(z, a, b):
86 | """Action function helper (Eq. 15, second line)
87 | Args:
88 | z (float): relative distance
89 | a (float): a parameter (a > 0)
90 | b (float): b parameter (b >= a)
91 | """
92 | c = np.abs(a - b) / np.sqrt(4 * a * b)
93 | return 0.5 * ((a + b) * sigma_1(z + c) + (a - b))
94 |
95 |
96 | @njit
97 | def sigma_1(z):
98 | """Action function helper (Eq. 15, text below)
99 | Args:
100 | z: relative distance
101 | """
102 | return z / np.sqrt(1 + z ** 2)
103 |
104 |
105 | def psi_alpha(z, d, r, a, b, e, h):
106 | """Pairwise attractive/repulsive potential (Eq. 16)"""
107 | d_alpha = sigma_norm_scalar(d, e)
108 | return integrate.quad(lambda z: phi_alpha(z, d, r, a, b, e, h), d_alpha, z)[0]
109 |
--------------------------------------------------------------------------------
/vmodel/flocking/olfati_saber_simplified.py:
--------------------------------------------------------------------------------
1 | """
2 | Olfati-Saber algorithm (Flocking for Multi-Agent Dynamic Systems: Algorithms and Theory)
3 | Source:
4 | This version is simplified in the sense that it does not use the sigma norm.
5 | """
6 | import numpy as np
7 | from numba import njit
8 | from scipy import integrate
9 |
10 |
11 | def flock(positions: np.ndarray, distance: float, perception_radius: float,
12 | a=5.0, b=5.0, eps=0.1, h=0.2):
13 | """Olfati saber algorithm (only gradient based term) (Eq. 23, first term)
14 | Args:
15 | positions: relative positions of other agents
16 | distance: desired inter-agent distance
17 | perception_radius: cutoff at perception radius
18 | h: parameter of the bump function
19 | """
20 | d, r = distance, perception_radius
21 | assert d > 0
22 | assert r >= d
23 | assert eps > 0
24 | assert h >= 0 and h <= 1
25 | ds = np.linalg.norm(positions, axis=1)
26 | us = positions / ds[:, np.newaxis]
27 | terms = [phi_alpha(z, d, r, a, b, h) * n for z, n in zip(ds, us)]
28 | return np.sum(terms, axis=0)
29 |
30 |
31 | @njit
32 | def phi_alpha(z, d, r, a, b, h=0.2):
33 | """Action function (Eq. 15, first line)"""
34 | return rho_h(z / r, h) * phi(z - d, a, b)
35 |
36 |
37 | @njit
38 | def rho_h(z, h=0.2):
39 | """Bump function that varies smoothly from 1 to 0 over the (0, 1) interval (Eq. 10)
40 | Args:
41 | z (float): Relative distance
42 | h (float): Offset from which rho_h starts decreasing
43 | Returns:
44 | (float): value that varies smoothly on the interval (1 to 0)
45 | """
46 | if z < h:
47 | return 1
48 | elif z < 1:
49 | return 0.5 * (1 + np.cos(np.pi * ((z - h) / (1 - h))))
50 | else:
51 | return 0
52 |
53 |
54 | @njit
55 | def phi(z, a, b):
56 | """Action function helper (Eq. 15, second line)
57 | Args:
58 | z (float): relative distance
59 | a (float): a parameter (a > 0)
60 | b (float): b parameter (b >= a)
61 | """
62 | c = np.abs(a - b) / np.sqrt(4 * a * b)
63 | return 0.5 * ((a + b) * sigma_1(z + c) + (a - b))
64 |
65 |
66 | @njit
67 | def sigma_1(z):
68 | """Action function helper (Eq. 15, text below)
69 | Args:
70 | z: relative distance
71 | """
72 | return z / np.sqrt(1 + z ** 2)
73 |
74 |
75 | def psi_alpha(z, d, r, a, b, h):
76 | """Pairwise attractive/repulsive potential (Eq. 16)"""
77 | return integrate.quad(lambda z: phi_alpha(z, d, r, a, b, h), d, z)[0]
78 |
--------------------------------------------------------------------------------
/vmodel/flocking/olfati_saber_unified.py:
--------------------------------------------------------------------------------
1 | """
2 | Flocking with obstacle avoidance: cooperation with limited communication in mobile networks
3 | Conference paper (CDC):
4 | Technical report:
5 | """
6 | import numpy as np
7 | from numba import njit
8 |
9 |
10 | def flock(positions: np.ndarray, distance: float, perception_radius: float,
11 | a=1.0, b=5.0):
12 | """Olfati saber algorithm (only gradient based term) (Eq. 23, first term)
13 | Args:
14 | positions: Relative positions of other agents
15 | distance: Desired inter-agent distance
16 | perception_radius: Cutoff at perception radius
17 | """
18 | d, r = distance, perception_radius
19 | assert d > 0
20 | assert r > d
21 | assert a > 0
22 | assert b > a
23 | ds = np.linalg.norm(positions, axis=1)
24 | us = positions / ds[..., np.newaxis]
25 | return np.sum([phi(ds[i] - d, d, r, a, b) * us[i] for i in range(len(us))], axis=0)
26 |
27 |
28 | @njit
29 | def phi(z, d, r, a, b, delta=0.2):
30 | x = (z + d)
31 | term1 = rho_grad(x, r, delta=delta) * psi(z, a, b)
32 | term2 = rho(x, r, delta=delta) * psi_grad(z, a, b)
33 | return term1 + term2
34 |
35 |
36 | @njit
37 | def psi_hat(z, d, r, a, b, delta=0.0):
38 | return rho((z + d), r, delta) * psi(z, a, b)
39 |
40 |
41 | @njit
42 | def psi(z: float, a: float, b: float) -> float:
43 | """Penalty function with uniformly bounded derivative (Eq. 20)
44 | Args:
45 | z: Relative distance
46 | a: Cohesion strength
47 | b: Separation strength
48 | """
49 | c = np.abs(a - b) / (2 * np.sqrt(a * b))
50 | return ((a + b) / 2) * (np.sqrt(1 + (z + c) ** 2) - np.sqrt(1 + c ** 2)) + ((a - b) / 2) * z
51 |
52 |
53 | @njit
54 | def psi_grad(z: float, a: float, b: float) -> float:
55 | """Derivative of the penalty function is an asymmetric sigmoidal function (Eq. 21)
56 | Args:
57 | z: Relative distance
58 | a: Cohesion strength
59 | b: Separation strength
60 | """
61 | c = np.abs(a - b) / (2 * np.sqrt(a * b))
62 | return ((a + b) / 2) * ((z + c) / np.sqrt(1 + (z + c) ** 2)) + ((a - b) / 2)
63 |
64 |
65 | @njit
66 | def rho(z: float, r: float = 1.0, delta: float = 0.0) -> float:
67 |     """Influence map that varies smoothly between 0 and 1 (Eq. 4)
68 | Args:
69 | z: Relative distance
70 | r: perception radius
71 | delta: Cutoff after which the function starts decreasing
72 | """
73 | if z < delta * r:
74 | return 1
75 | elif z < r:
76 | return 0.5 * (1 + np.cos(np.pi * (((z / r) - delta) / (1 - delta))))
77 | else:
78 | return 0
79 |
80 |
81 | @njit
82 | def rho_grad(z: float, r: float = 1.0, delta: float = 0.0) -> float:
83 | if z < delta * r:
84 | return 0
85 | elif z < r:
86 | return (1 / r) * -0.5 * np.pi * np.sin(np.pi * (((z / r) - delta) / (1 - delta))) / (1 - delta)
87 | else:
88 | return 0
89 |
--------------------------------------------------------------------------------
/vmodel/flocking/reynolds.py:
--------------------------------------------------------------------------------
1 | """
2 | Reynolds flocking algorithm (aka Boids)
3 | Source:
4 | """
5 | import numpy as np
6 |
7 |
8 | def flock(positions: np.ndarray, distances: np.ndarray = None, separation_gain=1.0,
9 | cohesion_gain=1.0) -> np.ndarray:
10 | """Computes Reynolds command (separation + cohesion)
11 | Args:
12 | positions: relative positions of agents (N x D)
13 | distances: precomputed relative distances (N x 1) (optional)
14 | separation_gain: separation gain
15 | cohesion_gain: cohesion gain
16 | Returns:
17 | command: Reynolds command (D)
18 | """
19 |     # Compute separation command (inversely proportional to distance)
20 | distances = np.linalg.norm(positions, axis=1) if distances is None else distances
21 | dist_inv = positions / distances[:, np.newaxis] ** 2
22 | separation = -separation_gain * dist_inv.sum(axis=0)
23 |
24 | # Compute cohesion command (proportional to distance)
25 | cohesion = cohesion_gain * positions.mean(axis=0)
26 |
27 | return separation + cohesion
28 |
29 |
30 | def infer_separation_gain3(distance: float) -> float:
31 | """Compute separation gain from desired distance
32 | Args:
33 | distance: Desired minimum inter-agent distance
34 | Returns:
35 | separation_gain: Separation gain to produce (at least) the desired distance
36 |     Note: This gain yields the analytical equilibrium distance of the Reynolds equations for N = 3
37 | """
38 | return distance ** 2 / 2
39 |
--------------------------------------------------------------------------------
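infer_separation_gain3() returns the separation gain for which an equilateral triangle with side length `distance` is an equilibrium of the separation and cohesion terms (with unit cohesion gain). A quick numerical check:

    import numpy as np
    from vmodel.flocking import reynolds

    d = 1.5
    gain = reynolds.infer_separation_gain3(d)  # d ** 2 / 2
    # Two neighbours seen from the focal agent, forming an equilateral triangle of side d
    positions = np.array([[d, 0.0], [d / 2, d * np.sqrt(3) / 2]])
    command = reynolds.flock(positions, separation_gain=gain, cohesion_gain=1.0)
    print(np.allclose(command, 0.0))  # True: separation and cohesion cancel exactly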
/vmodel/geometry.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 | from itertools import combinations
3 |
4 | import numpy as np
5 | from numba import njit
6 | from scipy.spatial import ConvexHull, Delaunay
7 | from scipy.spatial import distance_matrix as compute_distance_matrix
8 |
9 |
10 | def distance_matrix(positions: np.ndarray):
11 | return compute_distance_matrix(positions, positions)
12 |
13 |
14 | def convex_hull(positions: np.ndarray):
15 | """Convenience function for convex hull
16 | Args:
17 | positions (agent x space): Positions
18 | Return:
19 |         hull (boolean mask): True for positions belonging to the convex hull
20 | """
21 | hull = np.zeros(len(positions), dtype=np.bool_)
22 | indices = ConvexHull(positions).vertices
23 | hull[indices] = True
24 | return hull
25 |
26 |
27 | def voronoi_neighbors(positions: np.ndarray) -> list:
28 | """Compute a list of adjacent neighbors in a Voronoi sense
29 | Args:
30 | positions (agent x space): positions
31 | Return:
32 | voronoi: agent-indexed list of neighbor index list
33 | """
34 | tri = Delaunay(positions)
35 | neighbors = defaultdict(set)
36 | for p in tri.vertices:
37 | for i, j in combinations(p, 2):
38 | neighbors[i].add(j)
39 | neighbors[j].add(i)
40 | return [list(neighbors[k]) for k in range(len(positions))]
41 |
42 |
43 | def subtended_angle_circle(distance, radius):
44 | """Find subtended angle of a circle with given distance and radius
45 | Args:
46 | distance: Relative distance to the center
47 | radius: Radius of the circle
48 | Return:
49 | subtended_angle: Subtended angle [rad]
50 | """
51 | radius_unit = radius / distance # project radius onto unit circle
52 | subtended_angle = np.arcsin(radius_unit) * 2.
53 | return subtended_angle
54 |
55 |
56 | def distance_for_subtended_angle(angle, radius):
57 |     """Find distance to a circle with a given radius that produces a given subtended angle
58 | Args:
59 | angle: Subtended angle [rad]
60 | radius: radius of the circle
61 | Return:
62 | distance: distance to center of the circle
63 | """
64 | return radius / np.sin(angle / 2.)
65 |
66 |
67 | @njit
68 | def is_angle_in_interval(angle, interval):
69 | """Check if angle is in the interval
70 | Args:
71 | angle (float): Angle [rad]
72 | interval tuple(float, float): Angle interval (low, high) [rad]
73 | Returns:
74 | True, if the angle is in the given interval
75 | """
76 | lo, hi = interval
77 | if lo < hi:
78 | return angle >= lo and angle <= hi # normal case
79 | else:
80 |         return angle >= lo or angle <= hi  # interval spans across quadrants 1 and 4
81 |
82 |
83 | @njit
84 | def tangent_points_to_circle(center, radius):
85 | """Computes the tangent intersection points of a circle (as seen from the origin)
86 |
87 | Args:
88 | center (x, y): Target point (center of circle)
89 |         radius (float): Radius of circle
90 |
91 | Returns:
92 | points ((x, y), (x, y)): Tangent points, or None if no such points exist
93 | Source: https://en.wikipedia.org/wiki/Tangent_lines_to_circles#With_analytic_geometry
94 | """
95 | x, y = center
96 | d2 = x ** 2 + y ** 2
97 | r = radius
98 | r2 = radius ** 2
99 | if d2 < r2:
100 |         return None  # no tangent points exist if the origin lies inside the circle
101 | r2_d2 = r2 / d2
102 | first_x = r2_d2 * x
103 | first_y = r2_d2 * y
104 | r_d2_sqrt_d2_r2 = r / d2 * np.sqrt(d2 - r2)
105 | second_x = r_d2_sqrt_d2_r2 * -y
106 | second_y = r_d2_sqrt_d2_r2 * x
107 | pt1_x = first_x + second_x
108 | pt1_y = first_y + second_y
109 | pt2_x = first_x - second_x
110 | pt2_y = first_y - second_y
111 | return (x - pt1_x, y - pt1_y), (x - pt2_x, y - pt2_y)
112 |
113 |
114 | def lattice2d_rect(shape, basis):
115 | """Generate rectangular lattice with shape and basis vectors
116 | Args:
117 | shape (tuple): (width, height)
118 | basis (list): list of basis vectors v1 and v2
119 | Source:
120 | """
121 | # Get the lower limit on the cell size.
122 | dx_cell = max(abs(basis[0][0]), abs(basis[1][0]))
123 | dy_cell = max(abs(basis[0][1]), abs(basis[1][1]))
124 |     # Get an overestimate of how many cells across and up.
125 | nx = shape[0] // dx_cell
126 | ny = shape[1] // dy_cell
127 | # Generate a square lattice, with too many points.
128 | # Here I generate a factor of 4 more points than I need, which ensures
129 | # coverage for highly sheared lattices. If your lattice is not highly
130 |     # sheared, then you can generate fewer points.
131 | x_sq = np.arange(-nx, nx, dtype=float)
132 |     y_sq = np.arange(-ny, ny, dtype=float)
133 | x_sq.shape = x_sq.shape + (1,)
134 | y_sq.shape = (1,) + y_sq.shape
135 | # Now shear the whole thing using the lattice vectors
136 | x_lattice = basis[0][0] * x_sq + basis[1][0] * y_sq
137 | y_lattice = basis[0][1] * x_sq + basis[1][1] * y_sq
138 | # Trim to fit in box.
139 | mask = ((x_lattice < shape[0] / 2.0) & (x_lattice > -shape[0] / 2.0))
140 | mask = mask & ((y_lattice < shape[1] / 2.0) & (y_lattice > -shape[1] / 2.0))
141 | x_lattice = x_lattice[mask]
142 | y_lattice = y_lattice[mask]
143 | # Make output compatible with original version.
144 | out = np.empty((len(x_lattice), 2), dtype=float)
145 | out[:, 0] = x_lattice
146 | out[:, 1] = y_lattice
147 | return out
148 |
149 |
150 | def lattice2d_circ(radius, basis):
151 | """Generate circular lattice with radius and basis vectors
152 | Args:
153 | radius (float): radius of the lattice
154 | basis (list): list of basis vectors v1 and v2
155 | """
156 | shape = (radius * 2, radius * 2)
157 | points = lattice2d_rect(shape, basis)
158 | distances = np.linalg.norm(points, axis=1)
159 | mask = distances < radius
160 | points = points[mask]
161 | return points
162 |
--------------------------------------------------------------------------------
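subtended_angle_circle() and distance_for_subtended_angle() are inverses of each other, which is how the perception-angle filter in core.py maps an angular threshold to a distance. A short round trip with illustrative numbers:

    import numpy as np
    from vmodel.geometry import distance_for_subtended_angle, subtended_angle_circle

    distance, radius = 4.0, 0.25
    angle = subtended_angle_circle(distance, radius)
    print(np.rad2deg(angle))                            # ~7.2 deg: apparent size of a 0.25 m agent at 4 m
    print(distance_for_subtended_angle(angle, radius))  # ~4.0: recovers the original distance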
/vmodel/liveplot.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | from scipy.spatial import Voronoi
4 |
5 | from vmodel import geometry as vgeom
6 | from vmodel import plot
7 | from vmodel.util import color as vcolor
8 |
9 |
10 | def plot_all(ax, data, args, focal=0):
11 |
12 | # Plot setup
13 | ax.clear()
14 | ax.grid(True, zorder=-1, linestyle='dotted')
15 | ax.set(xlabel=r'$x$ position [$m$]', ylabel=r'$y$ position [$m$]')
16 | # ax.set_aspect('equal', 'datalim')
17 | ax.set_aspect('equal')
18 | ax.locator_params(axis='x', nbins=5)
19 | ax.locator_params(axis='y', nbins=5)
20 |
21 | # Plot agents and visibility
22 | if args.perception_radius > 0:
23 | plot_nselect_metric(ax, data.pos[-1], args.perception_radius, focal=focal)
24 | if args.filter_occluded:
25 | plot_nselect_visual(ax, data.pos[-1], radius=args.radius, focal=focal)
26 | if args.max_agents > 0:
27 | plot_nselect_topo(ax, data.pos[-1], data.vis[-1], focal=focal)
28 | if args.filter_voronoi:
29 | plot_nselect_voronoi(ax, data.pos[-1], focal=focal)
30 | plot_agents(ax, data.pos, data.vel, data.vis, radius=args.radius, tail_length=20)
31 |
32 | # Plot waypoint
33 | # plot.plot_circle(ax, args.pos_waypoint, color='tab:orange',
34 | # radius=args.radius_waypoint, zorder=-10)
35 | # Plot arena
36 | # plot.plot_circle(ax, (0, 0), radius=args.radius_arena, color='coral',
37 | # fill=False, zorder=0, ls='--', alpha=0.5)
38 | # plot.plot_lattice(ax, data.pos[-1], data.dist[-1], zorder=-1)
39 |
40 |
41 | def plot_nselect_metric(ax, positions, perception_radius, focal=0):
42 | x, y = positions[focal]
43 | perc_circle = plt.Circle((x, y), radius=perception_radius, fill=False, ls='-',
44 | lw=0.25, ec=vcolor.grey, zorder=100)
45 | ax.add_patch(perc_circle)
46 | perc_radius = plt.Circle((x, y), radius=perception_radius, color='white',
47 | zorder=-1)
48 | ax.add_patch(perc_radius)
49 | ax.set(facecolor=vcolor.background)
50 | k = 1.1
51 | radius = k * perception_radius
52 | xlim = (x - radius, x + radius)
53 | ylim = (y - radius, y + radius)
54 | ax.set(xlim=xlim, ylim=ylim)
55 |
56 |
57 | def plot_nselect_visual(ax, positions, radius=0.25, focal=0):
58 |
59 | pos_self = positions[focal]
60 | for a in range(len(positions)):
61 |
62 | # Don't draw shadows for focal agent
63 | if a == focal:
64 | continue
65 |
66 | rel = positions[a] - pos_self
67 | p1, p2 = vgeom.tangent_points_to_circle(rel, radius)
68 | p1, p2 = np.array(p1), np.array(p2)
69 | u1, u2 = p1 / np.linalg.norm(p1), p2 / np.linalg.norm(p2)
70 | scale = 100
71 | ps1, ps2 = u1 * scale, u2 * scale
72 | poly = np.array([p1, ps1, ps2, p2]) + pos_self
73 | polygon = plt.Polygon(poly, color=vcolor.shadow, zorder=-1)
74 | ax.add_patch(polygon)
75 |
76 |
77 | def plot_nselect_topo(ax, positions, visibility, focal=0):
78 | pos_self = positions[focal]
79 | x, y = pos_self
80 | vis = visibility[focal]
81 | for i in range(len(positions)):
82 | isvisible = vis[i]
83 | isfocal = (i == focal)
84 |
85 | # Don't draw connecting line to focal or invisible agent
86 | if isfocal or not isvisible:
87 | continue
88 |
89 | xt, yt = positions[i]
90 | ax.plot([x, xt], [y, yt], color=vcolor.grey, lw=1)
91 |
92 |
93 | def plot_nselect_voronoi(ax, positions, color_regions=False, focal=0):
94 | vor = Voronoi(positions)
95 | neighbors = np.array(vgeom.voronoi_neighbors(positions)[0]) - 1
96 | plot.voronoi_plot_2d(vor, ax=ax, show_vertices=False, point_size=0, line_alpha=0.7,
97 | line_colors=vcolor.grey, line_width=0.25, line_style='-')
98 |
99 | if not color_regions:
100 | return
101 |
102 | # Color all non neighbor regions light grey
103 | for index, r in enumerate(vor.point_region):
104 | region = vor.regions[r]
105 | if index - 1 in neighbors or index == 0:
106 | continue
107 | if -1 not in region:
108 | polygon = [vor.vertices[i] for i in region]
109 | ax.fill(*zip(*polygon), color=vcolor.lightgrey)
110 |
111 |
112 | def plot_agents(ax, positions, velocities=None, visibility=None, radius=0.25,
113 | focal_agent=0, show_identity=False, tail_length=0):
114 | """Plot agents
115 | Args:
116 | positions (ndarray): Position array (T x N x D)
117 |         velocities (ndarray): Velocity array (T x N x D)
118 | visibility (ndarray): Visibility matrix (T x N x N)
119 | radius (float): Agent radius
120 | show_identity (bool): If true, show the agent identity
121 | """
122 | post, velt, vist = positions[-1], velocities[-1], visibility[-1]
123 |
124 | pos_self = post[focal_agent]
125 | x, y = pos_self
126 |
127 | postail = np.array(positions[-tail_length:])
128 |
129 | for a in range(len(post)):
130 |
131 | color = vcolor.visible
132 | x, y = post[a]
133 |
134 | isfocal = a == focal_agent
135 | isvisible = vist[focal_agent][a]
136 |
137 | if isfocal:
138 | color = vcolor.focal
139 |
140 | # Plot visibility
141 | if visibility is not None:
142 | if not isvisible and not isfocal:
143 | color = vcolor.invisible
144 |
145 | # Plot positions
146 | plot_circle(ax, (x, y), radius=radius, color=color, zorder=10)
147 |
148 | if show_identity:
149 | ax.text(x, y, s=f'{a + 1}', ha='center', va='center')
150 |
151 | alpha = 0.3
152 |
153 | # Plot tails
154 | if tail_length > 0:
155 | xs, ys = postail[:, a].T
156 | ax.plot(xs, ys, color=color, lw=1, alpha=alpha)
157 |
158 | # Plot velocities
159 | if velocities is not None:
160 | width = radius / 32
161 | head_width = radius / 2
162 | head_length = radius / 2
163 | vel, pos = velt[a], post[a]
164 | # Draw velocity vectors outside of agent radius!
165 | speed = np.linalg.norm(vel)
166 | dir = vel / (1e-9 + speed)
167 | x, y = pos + dir * radius
168 | scaled = np.maximum(0, speed - radius) / speed # scale speed by rad
169 | dx, dy = dir * scaled
170 | ax.arrow(x, y, dx, dy, width=width, head_width=head_width,
171 | length_includes_head=False, head_length=head_length,
172 | zorder=10, edgecolor=color, facecolor=color, alpha=alpha)
173 |
174 |
175 | def plot_circle(ax, pos, **kwargs):
176 | x, y = pos
177 | ax.plot(x, y) # only for lims calculation
178 | ax.add_patch(plt.Circle((x, y), **kwargs))
179 |
180 |
181 | def plot_clutter(ax, clutters):
182 | if len(clutters) == 0:
183 | return
184 | xs, ys = np.array(clutters).reshape(-1, 2).T
185 | ax.scatter(xs, ys, marker='x')
186 |
187 |
188 | def plot_lattice(ax, positions, distances, distance_setpoint=None, epsilon=None, **kwargs):
189 | """Plot quasi-lattice
190 | Args:
191 | positions: (ndarray): Positions (N x D)
192 | distances: (ndarray): Distance matrix (N x N)
193 | distance_setpoint (float): Desired reference distance, take mean if not given
194 | epsilon (float): Max deviation from distance setpoint, use 1/3 mean if not given
195 | """
196 | # Use mean nearest neighbor distance as reference distance
197 | dmat = np.array(distances)
198 | dmat[dmat == 0.0] = float('inf')
199 | if distance_setpoint is None:
200 | distance_setpoint = dmat.min(axis=0).mean()
201 | if epsilon is None:
202 | epsilon = distance_setpoint / 3
203 | a, b = distance_setpoint - epsilon, distance_setpoint + epsilon
204 | for i in range(len(positions)):
205 | indices = np.arange(len(positions))
206 | dist = dmat[i]
207 | js = indices[(dist > a) & (dist < b)]
208 | for j in js:
209 | x1, y1 = positions[i]
210 | x2, y2 = positions[j]
211 | xs, ys = [x1, x2], [y1, y2]
212 | ax.plot(xs, ys, color='silver', **kwargs)
213 |
214 |
215 | def plot_distances(ax, distances, time, radius=0.25):
216 | """Plot distances
217 | Args:
218 | distances (ndarray): Distance matrices (K x N x N)
219 | time (ndarray): Time array
220 | radius (float): Agent radius [m]
221 | """
222 | dmats = np.array(distances)
223 | dmats[dmats == 0.0] = float('inf')
224 | min_dist = dmats.min(axis=-1)
225 | xs, xerr = min_dist.mean(axis=-1), min_dist.std(axis=-1)
226 | ax.plot(time, xs, color='tab:blue', label=f'mean & std ({xs[-1]:.2f} m)')
227 | ax.fill_between(time, xs - xerr, xs + xerr, alpha=0.25)
228 | xs = min_dist.min(axis=-1)
229 | ax.plot(time, xs, color='tab:red', label=f'minimum ({xs[-1]:.2f} m)')
230 | xs = np.ones(len(time)) * radius * 2
231 | ax.plot(time, xs, color='tab:red', ls='--', alpha=0.5, label='collision distance')
232 |
233 |
234 | def plot_speed(ax, velocities, time):
235 | """Plot speed
236 | Args:
237 | velocities: velocity matrix (K x N x D)
238 | time: time array
239 | """
240 | vels = np.array(velocities)
241 | speeds = np.linalg.norm(vels, axis=-1) # K x N
242 | xs, xerr = speeds.mean(axis=-1), speeds.std(axis=-1) # K
243 | ax.plot(time, xs, color='tab:blue', label=f'mean & std ({xs[-1]:.2f} m/s)')
244 | ax.fill_between(time, xs - xerr, xs + xerr, alpha=0.25)
245 |
246 |
247 | def plot_metric(ax, metrics, time, name='metric', **kwargs):
248 | """
249 | Args:
250 | metrics: Array of length K containing metrics
251 | time: Time array
252 | """
253 | ax.plot(time, metrics, label=f'{name} ({metrics[-1]:.2f})', **kwargs)
254 |
--------------------------------------------------------------------------------
/vmodel/math.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numba import njit
3 |
4 |
5 | def limit_norm(x, limit):
6 |     """Limits the norm of an n-dimensional vector (scales it in place if the limit is exceeded)
7 | Args:
8 | x (ndarray): Vector
9 | limit (float): Limit norm
10 | Returns:
11 | x (ndarray): Vector with norm limited to limit
12 | """
13 | norm = np.linalg.norm(x)
14 | if norm > limit:
15 | x /= norm # make x unit vector
16 | x *= limit # scale x by limit
17 | return x
18 |
19 |
20 | @njit
21 | def cartesian2polar(x, y):
22 | """Converts cartesian to polar coordinates
23 |
24 | Args:
25 | x (float): x-coordinate
26 | y (float): y-coordinate
27 |
28 | Returns:
29 | distance (float): range/distance
30 | bearing (float): bearing angle [rad]
31 | """
32 | distance = np.sqrt(x ** 2 + y ** 2)
33 | bearing = np.arctan2(y, x)
34 | return distance, bearing
35 |
36 |
37 | @njit
38 | def polar2cartesian(distance, bearing):
39 | """Converts polar to cartesian coordinates
40 |
41 | Args:
42 | distance (float): range/distance
43 | bearing (float): bearing angle [rad]
44 |
45 | Returns:
46 | x (float): x-coordinate
47 | y (float): y-coordinate
48 | """
49 | x = distance * np.cos(bearing)
50 | y = distance * np.sin(bearing)
51 | return x, y
52 |
53 |
54 | @njit
55 | def wrap_angle(angle):
56 | """Wraps angle around to remain in range [-pi, pi]
57 |
58 | Args:
59 | angle (float): Angle [rad]
60 |
61 | Returns:
62 | wrapped_angle (float): Angle in [-pi, pi] [rad]
63 | """
64 | return (angle + np.pi) % (2 * np.pi) - np.pi
65 |
66 |
67 | def grid_positions(n: int, distance: float = 1, ndim: int = 2) -> np.ndarray:
68 |     """Generate positions on an n-dimensional grid
69 | Args:
70 | n: number of positions to generate
71 | distance: distance between positions
72 | ndim: number of dimensions
73 | Returns:
74 | positions: (n x ndim) positions
75 | """
76 | assert n > 0
77 | assert ndim >= 1
78 | assert distance > 0
79 |
80 | # Compute side length of hypercube
81 | sidelen = int(np.ceil(np.sqrt(n)))
82 |
83 | # Get indices of each dimension
84 | indices = tuple(np.arange(sidelen) for _ in range(ndim))
85 |
86 |     # Create n-d coordinate array from indices
87 | positions = np.array(np.meshgrid(*indices), dtype=float).T.reshape(-1, ndim)
88 |
89 | # Remove superfluous positions (if any)
90 | positions = positions[:n]
91 |
92 | # Center positions at zero point
93 | positions -= ((sidelen - 1) / 2)
94 |
95 | # Scale positions by distance
96 | positions *= distance
97 |
98 | return positions
99 |
100 |
101 | def filter_by_angle(positions, angle):
102 | """Filter positions by spatially balanced topological distance
103 | Args:
104 | positions: relative positions
105 | angle: minimum angle between relative positions
106 | Return:
107 | mask: boolean mask of selected positions
108 | Source:
109 | """
110 | pos = positions.copy()
111 | N = len(pos)
112 | if N <= 1:
113 | return np.zeros(N, dtype=bool)
114 | dist = np.linalg.norm(pos, axis=1)
115 | idx = dist.argsort()
116 | idx_orig = idx.argsort()
117 | pos, dist = pos[idx], dist[idx]
118 | unit = pos / dist[:, np.newaxis]
119 | deleted = set()
120 | for i in range(N):
121 | for j in range(i + 1, N):
122 | angle_between = np.arccos(np.dot(unit[i], unit[j]))
123 | if angle_between < angle:
124 | deleted.add(j)
125 | indices = np.array(list(set(range(N)) - deleted), dtype=int)
126 | mask = np.zeros(N, dtype=bool)
127 | mask[indices] = True
128 | return mask[idx_orig]
129 |
--------------------------------------------------------------------------------
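A few quick examples of the helpers above (note that limit_norm scales its argument in place):

    import numpy as np
    from vmodel.math import grid_positions, limit_norm, wrap_angle

    print(grid_positions(4, distance=2.0))
    # [[-1. -1.]
    #  [-1.  1.]
    #  [ 1. -1.]
    #  [ 1.  1.]]
    print(limit_norm(np.array([3.0, 4.0]), 1.0))  # [0.6 0.8]: rescaled to unit norm
    print(wrap_angle(np.deg2rad(190)))            # ~-2.97 rad: 190 deg wrapped to -170 deg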
/vmodel/metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.sparse.csgraph import connected_components
3 | from scipy.spatial import ConvexHull
4 | from sklearn.decomposition import PCA
5 |
6 |
7 | def connectivity(adjacency: np.ndarray) -> float:
8 | """Compute algebraic connectivity from adjacency matrix
9 | Args:
10 | adjacency: adjacency matrix (N x N)
11 | Returns:
12 | connectivity: second smallest eigenvalue of the Laplacian adjusted for graph size
13 | Note: The algebraic connectivity is only well-defined for undirected graphs!
14 | """
15 | adj = adjacency.copy()
16 | if (adj != adj.T).any():
17 | adj[adj != adj.T] = True
18 | degree = np.diag(adj.sum(axis=1))
19 | laplacian = degree - adj
20 | eigvals = np.linalg.eigvalsh(laplacian)
21 | connectivity = np.sort(eigvals)[1] / len(eigvals)
22 | return np.maximum(connectivity, 0.0)
23 |
24 |
25 | def aspect_ratio(pos: np.ndarray, use_pca=False) -> float:
26 | """Computes the aspect ratio of the smallest bounding rectangle
27 | Args:
28 |         pos: positions (N x D)
29 |     Return:
30 | ratio: aspect ratio of the positions
31 | """
32 | pos = PCA().fit_transform(pos) if use_pca else pos
33 | xs, ys = pos.T
34 | width, height = (xs.max() - xs.min()), (ys.max() - ys.min())
35 | return width / height
36 |
37 |
38 | def area_convex_hull(pos: np.ndarray) -> float:
39 | """Computes the area of the agent positions based on their convex hull
40 | Args:
41 | pos: absolute positions (N x D)
42 | Return:
43 | area: area of the convex hull
44 |     Note: The meaning of area/volume of the ConvexHull object is dimension-dependent
45 | See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.html
46 | """
47 | # Note: When input points are 2-dim, hull.volume is the area of the convex hull!
48 | ndim = pos.shape[1]
49 | hull = ConvexHull(pos)
50 | return hull.volume if ndim == 2 else hull.area
51 |
52 |
53 | def agent_density(pos: np.ndarray) -> float:
54 | """Computes the agent density
55 | Args:
56 | pos: absolute positions (N x D)
57 | Return:
58 | density: agent density
59 | """
60 | N = pos.shape[0]
61 | return N / area_convex_hull(pos)
62 |
63 |
64 | def order(vel: np.ndarray) -> float:
65 | """Compute order parameter from velocity matrix
66 | Args:
67 | vel: velocity matrix (N x D)
68 | Returns:
69 | order: velocity correlation
70 | """
71 | N, _ = vel.shape
72 | speed = np.linalg.norm(vel, axis=1, keepdims=True) # N x 1
73 | speed_prod = speed.dot(speed.T) # N x N
74 | mask = (speed_prod != 0) # avoid division by zero!
75 | dot_prod = vel.dot(vel.T) # N x N
76 | np.fill_diagonal(dot_prod, 0) # i != j
77 | return (dot_prod[mask] / speed_prod[mask]).sum() / (N * (N - 1))
78 |
79 |
80 | def distance_matrix(pos: np.ndarray) -> np.ndarray:
81 | """Compute distance matrix
82 | Args:
83 | pos: positions (N x D)
84 | Return:
85 | dmat: distance matrix (N x N)
86 | """
87 | return np.sqrt(np.sum((pos[:, np.newaxis, :] - pos[np.newaxis, :, :]) ** 2, axis=-1))
88 |
89 |
90 | def union(adj: np.ndarray) -> float:
91 | """Compute the union metric
92 | Args:
93 | adj: adjacency matrix (N x N)
94 | Returns:
95 | union: union metric
96 | """
97 | N = len(adj)
98 | ncomps = connected_components(adj, return_labels=False)
99 | return 1 - ((ncomps - 1) / (N - 1))
100 |
--------------------------------------------------------------------------------
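Two sanity checks for the metrics above: perfectly aligned velocities give an order parameter of 1, and a fully connected adjacency matrix gives a union metric of 1:

    import numpy as np
    from vmodel.metrics import order, union

    vel = np.tile([1.0, 0.0], (5, 1))  # five agents moving in the same direction
    print(order(vel))                  # 1.0: velocities are perfectly correlated

    adj = np.ones((5, 5), dtype=bool)  # every agent is adjacent to every other agent
    print(union(adj))                  # 1.0: a single connected component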
/vmodel/random.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.special import gammainc
3 |
4 | from vmodel.math import grid_positions
5 |
6 |
7 | def random_positions(num, method, args):
8 |
9 | if method == 'poisson':
10 | # Sample initial positions using spherical poisson disk sampling
11 | return poisson_disk_spherical(args.radius_arena, args.spawn_distance,
12 | num_samples=num, method='volume',
13 | candidate='random')
14 | elif method == 'uniform':
15 | def sample_fn(low=0, high=args.radius_arena):
16 | return np.array(random_uniform_within_circle(low=low, high=high))
17 | # Sample random non-overlapping initial positions and velocities
18 | return poisson_disk_dart_throw(num, sample_fn, distance=args.spawn_distance)
19 | elif method == 'grid':
20 | # Sample positions on a grid with noise
21 | positions = grid_positions(num, args.spawn_distance, args.num_dims)
22 | positions += np.random.normal(scale=0.1, size=positions.shape)
23 | return positions
24 | else:
25 | raise ValueError(f'Unrecognized method: {args.spawn}')
26 |
27 |
28 | # Uniform sampling in a hyperspere
29 | # Based on Matlab implementation by Roger Stafford
30 | # Can be optimized for Bridson algorithm by excluding all points within the r/2 sphere
31 | def hypersphere_volume_sample(center, radius, k=1):
32 | ndim = center.size
33 | x = np.random.normal(size=(k, ndim))
34 | ssq = np.sum(x**2, axis=1)
35 | fr = radius * gammainc(ndim / 2, ssq / 2) ** (1 / ndim) / np.sqrt(ssq)
36 | frtiled = np.tile(fr.reshape(k, 1), (1, ndim))
37 | p = center + np.multiply(x, frtiled)
38 | return p
39 |
40 |
41 | # Uniform sampling on the sphere's surface
42 | def hypersphere_surface_sample(center, radius, k=1):
43 | ndim = center.size
44 | vec = np.random.standard_normal(size=(k, ndim))
45 | vec /= np.linalg.norm(vec, axis=1)[:, None]
46 | p = center + np.multiply(vec, radius)
47 | return p
48 |
49 |
50 | def random_uniform_within_circle(low=0, high=1, size=None):
51 |     """Sample a random point uniformly within an annulus between radii low and high
52 | Args:
53 | low (float): Radius of inner circle
54 | high (float): Radius of outer circle
55 | Returns:
56 | x (float): x-coordinate
57 | y (float): y-coordinate
58 |
59 | """
60 | distance = np.sqrt(np.random.uniform(low ** 2, high ** 2, size=size))
61 | bearing = np.random.uniform(0, 2 * np.pi, size=size)
62 | x = distance * np.cos(bearing)
63 | y = distance * np.sin(bearing)
64 | return x, y
65 |
66 |
67 | def poisson_disk_square2d(radius, distance, num_samples):
68 | dims = (radius, radius)
69 | positions = poisson_disk(dims, distance, num_samples, method='surface')
70 | positions -= positions.mean(axis=0) # center positions around origin
71 | return positions
72 |
73 |
74 | def poisson_disk_spherical(radius: float, distance: float, num_samples=None, k: int = 30,
75 | method='volume', candidate='random', ndim=2):
76 | """Fast spherical Poisson disk sampling in arbitrary dimensions
77 | Args:
78 |         radius: radius of the spherical sampling region
79 | distance: minimum distance between samples
80 | num_samples: maximum number of samples to draw
81 | k: limit of sample to choose before rejection
82 | method: sample points on hypersphere `volume` or `surface`
83 | candidate: whether the next candidate point is the `first` or chosen at `random`
84 | ndim: number of dimensions
85 | Source:
86 | Inspired by:
87 | """
88 |
89 | dims = np.array([radius * 2 for _ in range(ndim)], dtype=float)
90 |
91 | # Two distinct sampling methods
92 |     # volume: sample candidates within a ball of radius 2 * distance around the active point
93 |     # surface: sample candidates on the hypersphere of radius distance (+ eps) around the active point
94 | # Source:
95 | if method == 'volume':
96 | sample_fn = hypersphere_volume_sample
97 | sample_factor = 2
98 | elif method == 'surface':
99 | sample_fn = hypersphere_surface_sample
100 | eps = 0.001 # add epsilon to avoid rejection
101 | sample_factor = 1 + eps
102 | else:
103 | raise ValueError(f'Unrecognized method: {method}')
104 |
105 | # Two distinct methods of choosing the next candidate
106 | # random: select next candidate randomly, may lead to very non-circular shapes
107 | # first: select the first candidate, leads to more circular shapes
108 | # can be seen as breadth-first sampling instead of random sampling
109 | if candidate not in ['random', 'first']:
110 | raise ValueError(f'Unrecognized candidate method: {candidate}')
111 |
112 | def in_limits(p):
113 | dist = np.linalg.norm(p - np.full(ndim, radius))
114 | return np.all(dist < radius)
115 |
116 |     # Check whether any existing sample lies closer than `distance` to candidate "p"
117 | def in_neighborhood(p, n=2):
118 | indices = (p / cellsize).astype(int)
119 | indmin = np.maximum(indices - n, np.zeros(ndim, dtype=int))
120 | indmax = np.minimum(indices + n + 1, gridsize)
121 |
122 |         # Reject immediately if the candidate's own grid cell is already occupied
123 | if not np.isnan(P[tuple(indices)][0]):
124 | return True
125 | a = []
126 | for i in range(ndim):
127 | a.append(slice(indmin[i], indmax[i]))
128 | with np.errstate(invalid='ignore'):
129 | if np.any(np.sum(np.square(p - P[tuple(a)]), axis=ndim) < distance_squared):
130 | return True
131 |
132 | def add_point(p):
133 | points.append(p)
134 | indices = (p / cellsize).astype(int)
135 | P[tuple(indices)] = p
136 |
137 | # Cell size bounded by r/sqrt(n), so each grid cell will contain at most one sample
138 | cellsize = distance / np.sqrt(ndim)
139 | gridsize = (np.ceil(dims / cellsize)).astype(int)
140 |
141 | # Squared distance because we'll compare squared distance
142 | distance_squared = distance * distance
143 |
144 | # n-dimensional background grid for storing samples and accelerating spatial searches
145 | P = np.empty(np.append(gridsize, ndim), dtype=float)
146 | P.fill(np.nan) # initialize empty cells with nan
147 |
148 | points = []
149 | # p = np.random.uniform(np.zeros(ndim), dims)
150 | p = np.zeros(ndim) + radius
151 | add_point(p)
152 | num_points = 1
153 | keepsampling = True
154 | while keepsampling and len(points) > 0:
155 | # Selecting 0th sample is breadth-first sampling, random otherwise
156 | i = np.random.randint(len(points)) if candidate == 'random' else 0
157 | p = points[i]
158 | del points[i]
159 | Q = sample_fn(np.array(p), distance * sample_factor, k)
160 | for q in Q:
161 | if in_limits(q) and not in_neighborhood(q):
162 | add_point(q)
163 | if num_samples is not None:
164 | num_points += 1
165 | if num_points >= num_samples:
166 | keepsampling = False # max nsamples reached
167 | break
168 |
169 | # Extract all non-nan samples
170 | samples = P[~np.isnan(P).any(axis=ndim)]
171 |
172 | # Subtract radius to center samples at the origin
173 | samples -= np.full((1, ndim), radius)
174 |
175 | return samples
176 |
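A hedged usage sketch for `poisson_disk_spherical`: sample up to 50 positions inside a circular arena of radius 5 with a minimum spacing of 1, and verify the spacing on the result.

```python
# Hedged usage sketch: blue-noise positions in a circular arena, centered at the origin.
import numpy as np
from scipy.spatial import distance_matrix
from vmodel.random import poisson_disk_spherical

np.random.seed(0)
pts = poisson_disk_spherical(radius=5.0, distance=1.0, num_samples=50)
dmat = distance_matrix(pts, pts)
np.fill_diagonal(dmat, np.inf)
print(pts.shape, dmat.min() >= 1.0)  # up to 50 samples, minimum spacing respected
```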
177 |
178 | def poisson_disk(dims: tuple, distance: float, num_samples=None, k: int = 30,
179 | method='volume'):
180 | """Fast Poisson disk sampling in arbitrary dimensions
181 | Args:
182 | dims: sizes of the dimensions
183 | distance: minimum distance between samples
184 | num_samples: maximum number of samples to draw
185 |         k: number of candidate samples to draw around each point before rejection
186 | method: sample points on hypersphere `volume` or `surface`
187 | Source:
188 | Inspired by:
189 | """
190 |
191 | dims = np.array(dims, dtype=float)
192 | ndim = dims.size
193 |
194 | # Two distinct sampling methods
195 |     # volume: sample candidates within a ball of radius 2 * distance around the active point
196 |     # surface: sample candidates on the hypersphere of radius distance (+ eps) around the active point
197 | # Source:
198 | if method == 'volume':
199 | sample_fn = hypersphere_volume_sample
200 | sample_factor = 2
201 | elif method == 'surface':
202 | sample_fn = hypersphere_surface_sample
203 | eps = 0.001 # add epsilon to avoid rejection
204 | sample_factor = 1 + eps
205 | else:
206 | raise ValueError(f'Unrecognized method: {method}')
207 |
208 | def in_limits(p):
209 | return np.all(np.zeros(ndim) <= p) and np.all(p < dims)
210 |
211 |     # Check whether any existing sample lies closer than `distance` to candidate "p"
212 | def in_neighborhood(p, n=2):
213 | indices = (p / cellsize).astype(int)
214 | indmin = np.maximum(indices - n, np.zeros(ndim, dtype=int))
215 | indmax = np.minimum(indices + n + 1, gridsize)
216 |
217 |         # Reject immediately if the candidate's own grid cell is already occupied
218 | if not np.isnan(P[tuple(indices)][0]):
219 | return True
220 | a = []
221 | for i in range(ndim):
222 | a.append(slice(indmin[i], indmax[i]))
223 | with np.errstate(invalid='ignore'):
224 | if np.any(np.sum(np.square(p - P[tuple(a)]), axis=ndim) < distance_squared):
225 | return True
226 |
227 | def add_point(p):
228 | points.append(p)
229 | indices = (p / cellsize).astype(int)
230 | P[tuple(indices)] = p
231 |
232 | # Cell size bounded by r/sqrt(n), so each grid cell will contain at most one sample
233 | cellsize = distance / np.sqrt(ndim)
234 | gridsize = (np.ceil(dims / cellsize)).astype(int)
235 |
236 | # Squared distance because we'll compare squared distance
237 | distance_squared = distance * distance
238 |
239 | # n-dimensional background grid for storing samples and accelerating spatial searches
240 | P = np.empty(np.append(gridsize, ndim), dtype=float)
241 | P.fill(np.nan) # initialize empty cells with nan
242 |
243 | points = []
244 | add_point(np.random.uniform(np.zeros(ndim), dims))
245 | num_points = 1
246 | keepsampling = True
247 | while keepsampling and len(points) > 0:
248 | i = np.random.randint(len(points))
249 | p = points[i]
250 | del points[i]
251 | Q = sample_fn(np.array(p), distance * sample_factor, k)
252 | for q in Q:
253 | if in_limits(q) and not in_neighborhood(q):
254 | add_point(q)
255 | if num_samples is not None:
256 | num_points += 1
257 | if num_points >= num_samples:
258 | keepsampling = False # max nsamples reached
259 | break
260 | return P[~np.isnan(P).any(axis=ndim)]
261 |
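The rectangular variant works the same way; a hedged sketch with the `surface` candidate method, which places candidates just above the minimum spacing:

```python
# Hedged sketch: fill a 10 x 4 rectangle with blue-noise samples spaced >= 0.5 apart.
import numpy as np
from vmodel.random import poisson_disk

np.random.seed(1)
pts = poisson_disk((10.0, 4.0), distance=0.5, method='surface')
print(pts.shape)  # (M, 2), with all pairwise distances >= 0.5
```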
262 |
263 | def poisson_disk_dart_throw(n, sample_fn, distance):
264 | """Generate n random non-overlapping positions with a minimum distance.
265 |
266 | Args:
267 | n: Number of positions to generate
268 | sample_fn: Function that samples a random position
269 | distance: Minimum distance between positions
270 |
271 | Returns:
272 | positions (ndarray): Positions (n x ndim)
273 | """
274 |
275 | # First position is always valid
276 | positions = []
277 | positions.append(sample_fn())
278 |
279 |     # Give up after sampling 1000x as many candidates as positions desired
280 | maxiter = n * 1000
281 |
282 | iterations = 1
283 | while len(positions) < n:
284 |
285 | position = sample_fn()
286 |
287 | # Compute distance between sampled position and existing ones
288 | distances = np.linalg.norm(np.array(positions) - position, axis=1)
289 |
290 | # If the position is too close to any other, sample again
291 | if distances.min() > distance:
292 | positions.append(position)
293 |
294 | iterations += 1
295 | if iterations > maxiter:
296 | raise RuntimeError(f'Maximum number of iterations reached: {maxiter}')
297 |
298 | return np.array(positions)
299 |
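Dart throwing is simpler but rejection-based, so it is best suited to low densities; a hedged sketch with a custom sampler over a square domain:

```python
# Hedged sketch: dart throwing with a user-supplied sampler over a 4 x 4 square.
import numpy as np
from vmodel.random import poisson_disk_dart_throw

np.random.seed(2)
sample_square = lambda: np.random.uniform(-2.0, 2.0, size=2)
pts = poisson_disk_dart_throw(20, sample_square, distance=0.5)
print(pts.shape)  # (20, 2), all pairwise distances > 0.5
```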
--------------------------------------------------------------------------------
/vmodel/util/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | def sort_dict(d):
3 | return dict(sorted(d.items()))
4 |
--------------------------------------------------------------------------------
/vmodel/util/args.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 |
5 | def parse_vmodel_args() -> argparse.Namespace:
6 |
7 | def formatter_class(prog):
8 | return argparse.ArgumentDefaultsHelpFormatter(prog,
9 | max_help_position=52,
10 | width=90)
11 |
12 | parser = argparse.ArgumentParser(description='vmodel',
13 | formatter_class=formatter_class)
14 |
15 | # Script arguments
16 | formats = ['netcdf', 'pickle']
17 | parser.add_argument('-v', '--verbose', action='store_true', default=False,
18 | help='verbose output')
19 | parser.add_argument('-f', '--file', type=str, default='',
20 | help='read parameters from YAML file')
21 | parser.add_argument('-p', '--plot', action='store_true', default=False,
22 | help='show live plots')
23 | parser.add_argument('--plot-metrics', action='store_true', default=False,
24 | help='show live plots of metrics')
25 | parser.add_argument('--plot-every', type=int, default=10, metavar='K',
26 | help='plot every k timesteps')
27 | parser.add_argument('--plot-blocking', action='store_true', default=False,
28 | help='wait for key press to resume plotting')
29 | parser.add_argument('-j', '--jobs', type=int, default=os.cpu_count(),
30 | metavar='J', help='number of parallel jobs')
31 | parser.add_argument('-n', '--dry-run', action='store_true', default=False,
32 | help='dry run, do not save data')
33 | parser.add_argument('-P', '--progress', action='store_true', default=False,
34 | help='show progress bar')
35 | parser.add_argument('--save-every', type=int, default=10, metavar='K',
36 | help='save data only every k timesteps')
37 | parser.add_argument('--parallel-agents', action='store_true', default=False,
38 | help='process every agent in parallel')
39 | parser.add_argument('--no-parallel-runs', action='store_true',
40 | default=False, help='do not process runs in parallel')
41 | parser.add_argument('--no-save-precomputed', action='store_true',
42 | default=False,
43 |                         help='do not save precomputed variables (saves memory)')
44 | parser.add_argument('--format', choices=formats, default='netcdf',
45 | help='format for saved dataset')
46 | parser.add_argument('--no-compress', action='store_true', default=False,
47 | help='do not compress datasets')
48 |
49 | # Experimental arguments
50 | algorithms = ['reynolds', 'olfati']
51 | spawns = ['poisson', 'uniform', 'grid']
52 | experiment = parser.add_argument_group('experiment arguments')
53 | experiment.add_argument('--num-agents', type=int, default=10, metavar='N',
54 | help='number of agents')
55 | experiment.add_argument('--num-runs', type=int, default=1, metavar='N',
56 | help='number of runs')
57 | experiment.add_argument('--num-dims', type=int, default=2, metavar='N',
58 | help='number of dimensions')
59 | experiment.add_argument('--delta-time', type=float, default=0.1,
60 | metavar='SEC',
61 | help='time delta between timesteps [s]')
62 | experiment.add_argument('--num-timesteps', type=int, default=None,
63 | metavar='K',
64 | help='number of timesteps for experiment')
65 | experiment.add_argument('--algorithm', choices=algorithms,
66 | default='reynolds', help='flocking algorithm')
67 | experiment.add_argument('--spawn', choices=spawns, default='uniform',
68 | help='spawn method')
69 | experiment.add_argument('--spawn-distance', type=float, default=None,
70 | metavar='F', help='spawn distance')
71 | experiment.add_argument('--seed', type=int, default=None,
72 | help='set seed for repeatability')
73 |
74 | # Perception arguments
75 | perception = parser.add_argument_group('perception arguments')
76 | perception.add_argument('--radius', type=float, default=0.25, metavar='F',
77 | help='radius of an agent [m]')
78 | perception.add_argument('--perception-radius', type=float, default=0.0,
79 | metavar='F', help='perception radius of agents [m]')
80 | perception.add_argument('--perception-angle', type=float, default=0.0,
81 | metavar='DEG',
82 | help='angle below which objects are invisible [deg]')
83 | perception.add_argument('--filter-occluded', action='store_true',
84 | default=False,
85 | help='if true, filter out occluded agents')
86 | perception.add_argument('--filter-voronoi', action='store_true',
87 | default=False,
88 | help='if true, filter out non-voronoi neighbors')
89 | perception.add_argument('--max-agents', type=int, default=0, metavar='N',
90 | help='maximum number of closest agents to consider')
91 | perception.add_argument('--false-negative-prob', type=float, default=0.0,
92 | metavar='P',
93 | help='false negative probability [prob]')
94 | perception.add_argument('--num-clutter', type=float, default=0.0,
95 | metavar='K',
96 | help='avg number of clutter returns per timestep')
97 | perception.add_argument('--range-std', type=float, default=0.0,
98 | metavar='STD',
99 | help='distance-scaled range std dev [m]')
100 | perception.add_argument('--bearing-std', type=float, default=0.0,
101 | metavar='STD',
102 | help='bearing std dev [deg]')
103 | perception.add_argument('--visual-migration', action='store_true',
104 | default=False,
105 | help='if true, subject waypoint to occlusion')
106 | perception.add_argument('--topo-angle', type=float, default=0.0,
107 | metavar='DEG',
108 | help='minimum angle between closest agents [deg]')
109 |
110 | # Control arguments
111 | control = parser.add_argument_group('control arguments')
112 | control.add_argument('--ref-distance', type=float, default=1,
113 | metavar='F', help='desired inter-agent distance [m]')
114 | control.add_argument('--migration-gain', type=float, default=0.5,
115 | metavar='K', help='migration gain [m/s]')
116 | control.add_argument('--migration-point', nargs=2, type=float, default=[],
117 | metavar=('X', 'Y'), help='migration point (x, y)')
118 | control.add_argument('--migration-dir', nargs=2, type=float, default=[],
119 | metavar=('X', 'Y'), help='migration direction (x, y)')
120 | control.add_argument('--max-speed', type=float, default=1.0, metavar='F',
121 |                          help='maximum speed [m/s]')
122 |
123 | # Need to parse known args when launching from ROS node
124 | # Reason: __name and __log are not specified above
125 | args, _ = parser.parse_known_args()
126 |
127 | return args
128 |
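Since the parser finishes with `parse_known_args`, extra flags (such as the `__name`/`__log` arguments injected by ROS) are ignored instead of raising an error. A hedged sketch exercising it without a real command line:

```python
# Hedged sketch: drive the parser by overriding sys.argv; unknown flags are ignored.
import sys
from vmodel.util.args import parse_vmodel_args

sys.argv = ['vmodel', '--num-agents', '20', '--spawn', 'grid', '--unknown-flag']
args = parse_vmodel_args()
print(args.num_agents, args.spawn)  # 20 grid
```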
--------------------------------------------------------------------------------
/vmodel/util/color.py:
--------------------------------------------------------------------------------
1 | """Globally defined colors for consistency in plots
2 | XKCD color reference:
3 | Matplotlib list of named colors:
4 | """
5 |
6 | red = focal = 'tab:red'
7 | blue = visible = 'tab:blue'
8 | grey = occluded = invisible = 'xkcd:grey'
9 | lightgrey = shadow = background = 'gainsboro'
10 | darkblue = appeared = 'xkcd:dark blue'
11 | darkgrey = disappeared = 'xkcd:dark grey'
12 | white = 'white'
13 |
14 | # For results plots
15 | metric = 'tab:grey'
16 | topological = 'tab:brown'
17 | myopic = 'tab:red'
18 | visual = 'tab:blue'
19 | quadcopter = 'tab:purple'
20 | voronoi = pointmass = 'tab:green'
21 |
--------------------------------------------------------------------------------
/vmodel/util/mpl.py:
--------------------------------------------------------------------------------
1 |
2 | # Text and columnwidth for IEEE Access paper
3 | TEXTWIDTH = 7.015580 # [inches]
4 | COLUMNWIDTH = 3.370466 # [inches]
5 |
--------------------------------------------------------------------------------
/vmodel/util/multiprocessing.py:
--------------------------------------------------------------------------------
1 | import signal
2 | from multiprocessing import Pool
3 |
4 |
5 | def get_silent_pool(processes=None):
6 | sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
7 | pool = Pool(processes)
8 | signal.signal(signal.SIGINT, sigint_handler)
9 | return pool
10 |
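Worker processes inherit the signal disposition in place when the pool is created (with the fork start method), so temporarily ignoring SIGINT keeps Ctrl+C out of the workers and lets the parent decide how to shut the pool down. A hedged usage sketch:

```python
# Hedged usage sketch: workers ignore SIGINT; the parent handles interruption.
from vmodel.util.multiprocessing import get_silent_pool

def square(x):
    return x * x

if __name__ == '__main__':
    pool = get_silent_pool(processes=2)
    try:
        print(pool.map(square, range(5)))  # [0, 1, 4, 9, 16]
    finally:
        pool.terminate()
        pool.join()
```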
--------------------------------------------------------------------------------
/vmodel/util/numpy.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def toindices(x: np.ndarray) -> np.ndarray:
5 | """Transform mask array to array of indices:
6 | Example:
7 | [False, True, True, False] -> [1, 2]
8 | """
9 | return np.where(x)[0]
10 |
--------------------------------------------------------------------------------
/vmodel/util/util.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def human_readable(num, binary=True, precision=1, suffix=''):
5 | """Return human-readable version of bytes
6 | Args:
7 | num: number of bytes
8 | binary: if True, use base 1024, else use base 1000
9 | precision: precision of floating point number
10 | suffix: for instance 'b' or 'bytes'
11 | Inspired by:
12 | """
13 | units = ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
14 | # units = [f'{u}i' for u in units] if binary else units
15 | base = 1024 if binary else 1000
16 | for unit in units:
17 | if abs(num) < base:
18 | return f'{num:.{precision}f}{unit}{suffix}'
19 | num /= base
20 | yotta = 'Yi' if binary else 'Y'
21 | return f'{num:.{precision}f}{yotta}{suffix}'
22 |
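Quick hedged examples of the byte formatting, contrasting the binary (1024) and decimal (1000) bases:

```python
# Sketch: the same byte count under binary and decimal bases.
from vmodel.util.util import human_readable

print(human_readable(3_000_000, binary=True, suffix='B'))   # 2.9MB
print(human_readable(3_000_000, binary=False, suffix='B'))  # 3.0MB
```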
23 |
24 | def clean_attrs(attrs):
25 | for k, v in attrs.items():
26 | if v is None:
27 | attrs[k] = 0 # required for NETCDF
28 | elif type(v) == bool:
29 | attrs[k] = int(v) # required for NETCDF
30 | elif isinstance(v, (np.ndarray, np.generic)):
31 | attrs[k] = v.tolist() # required for YAML
32 | return attrs
33 |
--------------------------------------------------------------------------------
/vmodel/util/xarray.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import xarray as xr
3 |
4 | from vmodel import geometry as vgeom
5 | from vmodel import metrics as vmetr
6 | from vmodel import visibility as vvisi
7 |
8 |
9 | def metric_ufunc(da: xr.DataArray, ufunc, dims):
10 | dims = list(dims)
11 | name = ufunc.__name__
12 | return xr.apply_ufunc(ufunc, da, input_core_dims=[dims], vectorize=True,
13 |                           output_dtypes=[float],
14 | dask='parallelized').rename(name)
15 |
16 |
17 | def nndist(da: xr.DataArray) -> xr.DataArray:
18 |
19 | def compute_nndist(x):
20 | dmat = vmetr.distance_matrix(x)
21 | np.fill_diagonal(dmat, float('inf')) # otherwise min(dmat) = 0 for dist. to self
22 | nndist = dmat.min(axis=0)
23 | return nndist
24 |
25 | input_dims = [['agent', 'space']]
26 | output_dims = [['agent']]
27 | return xr.apply_ufunc(compute_nndist, da, input_core_dims=input_dims,
28 | output_core_dims=output_dims, vectorize=True,
29 | dask='parallelized').rename('nndist')
30 |
31 |
32 | def convex_hull(da: xr.DataArray) -> xr.DataArray:
33 | result = xr.apply_ufunc(vgeom.convex_hull, da, input_core_dims=[['agent', 'space']],
34 | output_core_dims=[['agent']], vectorize=True)
35 | result = result.transpose(..., 'agent', 'time')
36 | result.name = 'hull'
37 | return result
38 |
39 |
40 | def norm(da: xr.DataArray, dims, ord=None) -> xr.DataArray:
41 | kwargs = {'ord': ord, 'axis': -1}
42 | name = f'{da.name}_norm'
43 | return xr.apply_ufunc(np.linalg.norm, da, input_core_dims=[[dims]], kwargs=kwargs,
44 | dask='parallelized').rename(name)
45 |
46 |
47 | def connectivity(da: xr.DataArray) -> xr.DataArray:
48 | result = xr.apply_ufunc(vmetr.connectivity, da,
49 | input_core_dims=[['agent', 'agent2']], vectorize=True)
50 | result.name = 'connectivity'
51 | return result
52 |
53 |
54 | def distance_matrix(da: xr.DataArray, fill_diag=True) -> xr.DataArray:
55 | from scipy.spatial import distance_matrix
56 |
57 | dm = lambda x: distance_matrix(x, x)
58 |
59 | dmat = xr.apply_ufunc(dm, da, input_core_dims=[['agent', 'space']],
60 | output_core_dims=[['agent', 'agent2']], vectorize=True)
61 | dmat.name = 'distance'
62 | if fill_diag:
63 | dmat = dmat.where(dmat != 0, float('inf'))
64 | return dmat
65 |
66 |
67 | def visibility(da: xr.DataArray) -> xr.DataArray:
68 |
69 | def compute_visibility(pos, rad=float(da.radius)):
70 | return vvisi.visibility_graph(pos, rad)
71 |
72 | input_dims = [['agent', 'space']]
73 | output_dims = [['agent', 'agent2']]
74 | output_sizes = {'agent2': len(da.agent)}
75 | return xr.apply_ufunc(compute_visibility, da,
76 | input_core_dims=input_dims,
77 | output_core_dims=output_dims,
78 |                           output_dtypes=[bool],
79 | dask_gufunc_kwargs={'output_sizes': output_sizes},
80 | vectorize=True, dask='parallelized')
81 |
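All of these wrappers follow the same `xr.apply_ufunc` pattern: declare the dimensions the inner NumPy function consumes (`input_core_dims`) and produces (`output_core_dims`), and let xarray broadcast over the rest (time, run, ...). A self-contained, hedged sketch of that pattern on synthetic positions (mirroring `nndist` above, but not calling the package):

```python
# Hedged sketch of the apply_ufunc pattern: nearest-neighbour distance per (time, agent),
# computed from synthetic positions with dims (time, agent, space).
import numpy as np
import xarray as xr
from scipy.spatial import distance_matrix

def nearest_neighbor_distance(x):
    dmat = distance_matrix(x, x)
    np.fill_diagonal(dmat, np.inf)  # ignore the zero distance to self
    return dmat.min(axis=0)

pos = xr.DataArray(np.random.rand(100, 10, 2), dims=['time', 'agent', 'space'])
nnd = xr.apply_ufunc(nearest_neighbor_distance, pos,
                     input_core_dims=[['agent', 'space']],
                     output_core_dims=[['agent']], vectorize=True)
print(nnd.dims, nnd.shape)  # ('time', 'agent') (100, 10)
```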
--------------------------------------------------------------------------------
/vmodel/visibility.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numba import njit
3 |
4 | from vmodel.metrics import distance_matrix
5 |
6 |
7 | def visibility_set(relative: np.ndarray, radius: float, distances: np.ndarray = None):
8 |     """Compute which relative positions are visible, i.e. not occluded by others
9 | Args:
10 | relative: relative positions
11 | radius: radius of agents
12 | Returns:
13 | visibility_set: boolean mask of visible relative positions
14 | """
15 | distances = np.linalg.norm(relative, axis=1) if distances is None else distances
16 | units = relative / distances[:, np.newaxis] # project onto unit circle
17 | radii = np.full(len(relative), radius) if type(radius) is float else radius
18 | scaled = radii / distances # shrink radii depending on distance
19 | rs = scaled[np.newaxis, :] + scaled[:, np.newaxis] # add pairwise radii
20 | dmat = distance_matrix(units) # pairwise distances between points on unit circle
21 | overlapmat = dmat < rs # check if overlap...
22 | closermat = distances[np.newaxis, :] < distances[:, np.newaxis] # ... and distance
23 | occmat = overlapmat & closermat
24 | occluded = occmat.sum(axis=1, dtype=bool) # all values > 0 will be True!
25 | return ~occluded
26 |
27 |
28 | def visibility_graph(positions: np.ndarray, radius: float):
29 | """Computes complete visibility graph (each agent to all others)
30 | Args:
31 | positions: absolute positions
32 | radius: radius of agents
33 | Returns:
34 | adj: boolean adjacency matrix of visibility graph (generally directed!)
35 | """
36 | N = len(positions)
37 | adj = np.zeros((N, N), dtype=bool)
38 | for a in range(N):
39 | pos_self = positions[a]
40 | pos_others = np.delete(positions, a, axis=0)
41 | pos_rel = pos_others - pos_self
42 | vis_others = visibility_set(pos_rel, radius)
43 | adj[a] = np.insert(vis_others, a, False) # cannot see self, i.e. zero diag
44 | return adj
45 |
46 |
47 | @njit
48 | def is_occluding(p1: np.ndarray, r1: float, p2: np.ndarray, r2: float):
49 | """Check whether p1 occludes p2 with radius"""
50 | d1, d2 = np.linalg.norm(p1), np.linalg.norm(p2) # compute distances
51 | u1, u2 = p1 / d1, p2 / d2 # project to unit circle
52 | rs1, rs2 = r1 / d1, r2 / d2 # scale radii by distance
53 | d = np.linalg.norm(u1 - u2) # compute distance between projected points
54 | return d < rs1 + rs2 and (d1 - r1) <= (d2 - r2)
55 |
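The occlusion test projects every neighbour onto the unit circle around the focal agent and scales its radius by 1/distance; a neighbour is hidden when a closer agent's projected disk overlaps its own. A hedged sketch on a hand-picked configuration:

```python
# Hedged sketch: three agents on a line; seen from the first, the middle agent
# occludes the far one, so only the middle one is visible.
import numpy as np
from vmodel.visibility import visibility_graph, visibility_set

relative = np.array([[1.0, 0.0], [3.0, 0.0]])  # two neighbours straight ahead
print(visibility_set(relative, radius=0.25))   # [ True False]

positions = np.array([[0.0, 0.0], [1.0, 0.0], [3.0, 0.0]])
print(visibility_graph(positions, radius=0.25)[0])  # agent 0 sees agent 1 but not agent 2
```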
--------------------------------------------------------------------------------