├── .github ├── dependabot.yml └── workflows │ ├── ci.yml │ └── release.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── RELEASE.md ├── release_checklist.md ├── rustfmt.toml ├── src ├── analysis.rs ├── arguments.rs ├── atoms.rs ├── bin │ └── bca.rs ├── errors.rs ├── grid.rs ├── io.rs ├── io │ ├── cube.rs │ ├── output.rs │ ├── reader.rs │ └── vasp.rs ├── lib.rs ├── methods.rs ├── progress.rs ├── utils.rs ├── voronoi.rs └── voxel_map.rs └── tests ├── cube.rs ├── cube └── anatase.cube ├── vasp.rs └── vasp ├── CHGCAR_ncl ├── CHGCAR_no_spin ├── CHGCAR_spin ├── CHG_ncl ├── CHG_no_spin └── CHG_spin /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | open-pull-requests-limit: 10 8 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: 4 | pull_request: 5 | 6 | env: 7 | RUST_BACKTRACE: 1 8 | 9 | jobs: 10 | test: 11 | name: Test 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | - name: Test Stable 16 | run: cargo test --verbose 17 | - name: Install beta target 18 | run: rustup update && rustup toolchain install beta 19 | - name: Test beta 20 | run: cargo +beta test --verbose 21 | - name: Install nightly target 22 | run: rustup toolchain install nightly 23 | - name: Test nightly 24 | run: cargo +nightly test --verbose 25 | format: 26 | name: Format 27 | runs-on: ubuntu-latest 28 | steps: 29 | - uses: actions/checkout@v4 30 | - name: Check formatting 31 | run: cargo fmt --all -- --check 32 | clippy: 33 | name: Clippy 34 | runs-on: ubuntu-latest 35 | steps: 36 | - uses: actions/checkout@v4 37 | - name: Check linting 38 | run: cargo clippy -- -D warnings 39 | msrv: 40 | name: 
MSRV 41 | runs-on: ubuntu-latest 42 | steps: 43 | - uses: actions/checkout@v4 44 | - name: Install MSRV target 45 | run: rustup default 1.85.0 46 | - name: Check MSRV 47 | run: cargo check 48 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | push: 5 | branches: 6 | - '!master' 7 | tags: 8 | - 'v*' 9 | - '!v*-*' 10 | 11 | env: 12 | RUST_BACKTRACE: 1 13 | 14 | jobs: 15 | build-linux: 16 | name: Build (linux) 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | - name: Install target 21 | run: rustup update && rustup target add x86_64-unknown-linux-musl 22 | - name: Build 23 | run: cargo build --verbose --release --target x86_64-unknown-linux-musl 24 | - name: Upload action asset 25 | uses: actions/upload-artifact@v3 26 | with: 27 | name: bca-x86_64-linux 28 | path: target/x86_64-unknown-linux-musl/release/bca 29 | build-windows: 30 | name: Build (windows) 31 | runs-on: windows-latest 32 | steps: 33 | - uses: actions/checkout@v4 34 | - name: Build 35 | run: cargo build --verbose --release 36 | - name: Upload action asset 37 | uses: actions/upload-artifact@v3 38 | with: 39 | name: bca-x86_64-windows.exe 40 | path: target/release/bca.exe 41 | build-macos: 42 | name: Build (macos) 43 | runs-on: macos-latest 44 | steps: 45 | - uses: actions/checkout@v4 46 | - name: Install target 47 | run: rustup update && rustup target add aarch64-apple-darwin 48 | - name: Build Intel 49 | run: cargo build --verbose --release 50 | - name: Build Apple Silicon 51 | run: cargo build --verbose --release --target aarch64-apple-darwin 52 | - name: Upload action asset Intel 53 | uses: actions/upload-artifact@v3 54 | with: 55 | name: bca-x86_64-macos 56 | path: target/release/bca 57 | - name: Upload action asset Apple Silicon 58 | uses: actions/upload-artifact@v3 59 | with: 60 | name:
bca-aarch64-macos 61 | path: target/release/bca 62 | release: 63 | name: Create GitHub Release 64 | runs-on: ubuntu-latest 65 | needs: [build-linux, build-macos, build-windows] 66 | steps: 67 | - name: Checkout 68 | uses: actions/checkout@v4 69 | - name: Get Tag 70 | id: get-release-version 71 | run: echo "version=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT 72 | - name: Download Linux Artifacts 73 | uses: actions/download-artifact@v3 74 | with: 75 | name: bca-x86_64-linux 76 | - name: Download Windows Artifacts 77 | uses: actions/download-artifact@v3 78 | with: 79 | name: bca-x86_64-windows.exe 80 | - name: Download Intel Mac Artifacts 81 | uses: actions/download-artifact@v3 82 | with: 83 | name: bca-x86_64-macos 84 | - name: Download Apple Silicon Artifacts 85 | uses: actions/download-artifact@v3 86 | with: 87 | name: bca-aarch64-macos 88 | - name: Create GitHub release 89 | uses: softprops/action-gh-release@v1 90 | with: 91 | token: ${{ secrets.GITHUB_TOKEN }} 92 | tag_name: ${{ steps.get-release-version.outputs.version }} 93 | body_path: RELEASE.md 94 | files: | 95 | bca-x86_64-linux 96 | bca-x86_64-windows.exe 97 | bca-x86_64-macos 98 | bca-aarch64-macos 99 | publish: 100 | name: Publish to crates.io 101 | runs-on: ubuntu-latest 102 | needs: release 103 | steps: 104 | - uses: actions/checkout@v4 105 | - name: Publish crate 106 | run: cargo publish --verbose --token ${{ secrets.CARGO_PUBLISH_TOKEN }} 107 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # These are backup files generated by rustfmt 6 | **/*.rs.bk 7 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## v0.5.0 2 | ### Features 3 | - Added critical 
point handling. 4 | - Atoms can now be bonded to images of themselves. 5 | ### Changes 6 | - Voronoi shift has been changed to include the passing of periodic boundaries, a no check version has been added to perform as previously. 7 | ## v0.4.8 8 | ### Changes 9 | - Opt -> Arg in arguments. 10 | - added a parse_filetype function. 11 | ## v0.4.7 12 | ### Small things 13 | - Took VoxelMap out of the box in bca. 14 | ## v0.4.6 15 | ### Features 16 | - Added a method for calculating the Laplacian at a point. 17 | - Added way to calculate the error in the partitioning from the Laplacian. 18 | ### Changes 19 | - Voronoi now stores the volume of the Voronoi cell. 20 | - Removed clap as a dependency. 21 | - Changed the flags for file type to -f --file_type from -t --type. 22 | - Changed the short flag for threads to -t from -J. 23 | - Removed indicatif and atomic-counter as dependencies. 24 | - Removed regex from dependencies. 25 | - Progress bars are now created inside functions and whether they are shown is optional. 26 | - Added a new silent flag: -x --silent. 27 | ## v0.4.5 28 | ### Changes 29 | - Removed the need of passing a density to the calculate_bader_volume_radius function. 30 | - Changed the name of calculate_bader_volume_radius to calculate_bader_volumes_and_radii. 31 | - Changed AtomVoxelMap to VoxelMap as there are no longer two VoxelMap variants. 32 | - removed the VoxelMap trait. 33 | - Changed the name of VoxelMap.boundary_iter() to VoxelMap.weight_iter(). 34 | - Added VoxelMap.maxima_len() and VoxelMap.weight_len(). 35 | - Removed anyhow for the error management. 36 | - Changed the return of invert_lattice to Option as there is only one way it can fail. 37 | ## v0.4.4 38 | ### Bug fix 39 | - Removed index deletion in maxima finding. 40 | ## v0.4.3 41 | ### Changes 42 | - Removed the option to run at higher verbosities, will instead throw an error if maxima is far from atom.
43 | - Added flag to pass to bca to control the distance at which the maxima distance error is thrown. 44 | - Now runs with 1E-6 vacuum tolerance as default. 45 | - Reduced memory usage of VoxelMap. 46 | - Split out sum_bader_densities to calculate_bader_density and calculate_bader_volume_radius 47 | ## v0.4.2 48 | ### Changes 49 | - Updated dependencies and fixed the breaking changes associated with them. 50 | - Bumped the minimum rust version required. 51 | ## v0.4.1 52 | ### Changes 53 | - Moving to allow python bindings by separating functions and making structs more streamlined. 54 | - Switch the entire analysis section to functions rather than a struct. 55 | - Threaded charge summing, assigning maxima to atoms and the new maxima finding function. 56 | - Lots of moving around of functions and changing outcomes, i.e., to_cartesian now returns cartesian coordinates. 57 | - Updated to Clap v3. 58 | - Created a VoxelMap Trait. 59 | ### Features 60 | - Added a nearest neighbour function. 61 | ## v0.4.0 62 | ### Changes 63 | - VoxelMap now handles the running of the bader calculation, using VoxelMap::calc(). 64 | ## v0.3.2 65 | ### Features 66 | - Writing of the charge density is now supported. 67 | ### Changes 68 | - Changed how the maxima and weights are stored for the boundary voxels. 69 | - Memory optimisations ([issue: #30](https://github.com/adam-kerrigan/bader-rs/issues/30)). 70 | ## v0.3.1 71 | ### Changes 72 | - Progress bars changed to not show by default, stops drawing of default bar. 73 | - Functions that create a progress bar (assign_atom, charge_sum) now take one as an argument. 74 | - Moved analysis functions (assign_atom, charge_sum, atoms_charge_sum) to own structure. 75 | - Simplified VoxelMap to remove the weight index map. Weight indices are stored as negative numbers. 76 | - Removed Rayon ([issue: #25](https://github.com/kerrigoon/bader-rs/issues/25)). 77 | - Moved writing output to own module in anticipation of removing prettytable-rs.
78 | - Dropped ongrid and neargrid, weight method fast and superior. 79 | - github username change so updated all the links. 80 | - Density now no longer contains the density and so has been renamed Grid. 81 | - Crossbeam is used for threading scopes. 82 | - Changed program name from bader to bca (Bader charge analysis). 83 | - Removed Prettytables. 84 | ### Bug Fixes 85 | - Overflow error when vacuum tolerance is so high that all charge is vacuum. 86 | ## v0.3.0 - (Yanked) 87 | ## v0.2.3 88 | ### Changes 89 | - Set up new target for releases that doesn't require GLibC. 90 | - Removed parking_lot::Mutex from the main program. 91 | - Set up Zenodo 92 | ### Bug Fixes 93 | - Corrected the total of the Assigning to Atoms progress bar. 94 | ## v0.2.2 95 | ### Bug Fixes 96 | - Fixed SegFault at high thread count by pre-allocating weight_map ([issue: #19](https://github.com/kerrigoon/bader-rs/issues/19)). 97 | ### Feature Changes 98 | - Added a cap of 12 to the amount of threads distributed over by default. 99 | ## v0.2.1 100 | ### Bug Fixes 101 | - Added a lock to maxima_get() in VoxelMap and made a maxima_non_blocking_get(), unsure if this would ever be a problem due to the lock on index.pop() but better safe than sorry. 102 | ### Documentation Changes 103 | - More of the crate documented 104 | - Documentation tests added for all partitioning methods and for using weight_store in VoxelMap. 105 | ## v0.2.0 106 | ### New Features 107 | - Added spin flag for allowing cubes to have spin and density output. 108 | - Complete revamp weight method, now very fast and scales well. 109 | ### Removed Features 110 | - No longer able to apply weighting of boundaries to all methods, just the weight method. 111 | ### UI Changes 112 | - '--weight, -w' now controls the weight tolerance allowing extremely small contributions to be discarded. 113 | - '--spin, -s' has been added for spin output on cube files.
114 | ### Library Changes 115 | - I/O now has a trait for standardising implementation of new file types. 116 | - I/O modules now contribute to the formating of ACF and BCF files (maybe units in future). 117 | - VoxelMap now controls the population and processing of the voxel maps. 118 | - Custom Lock introduced to speed up threading. 119 | ## v0.1.1 120 | ### Bug Fixes 121 | - Volume weighting logic error ([issue: #5](https://github.com/kerrigoon/bader-rs/issues/5)). 122 | ### Cosmetic Changes 123 | - Standardised the method description in help information. 124 | - Changed the BCF.dat file to include atom number. 125 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 3 | version = 4 4 | 5 | [[package]] 6 | name = "bader" 7 | version = "0.4.8" 8 | dependencies = [ 9 | "crossbeam-utils", 10 | "rustc-hash", 11 | ] 12 | 13 | [[package]] 14 | name = "crossbeam-utils" 15 | version = "0.8.21" 16 | source = "registry+https://github.com/rust-lang/crates.io-index" 17 | checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" 18 | 19 | [[package]] 20 | name = "rustc-hash" 21 | version = "2.1.1" 22 | source = "registry+https://github.com/rust-lang/crates.io-index" 23 | checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" 24 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bader" 3 | version = "0.4.8" 4 | authors = ["Adam Kerrigan "] 5 | edition = "2024" 6 | rust-version = "1.85.0" 7 | description = "Multi-threaded Bader Charge Analysis" 8 | license = "MIT" 9 | readme = "README.md" 10 | repository = "https://github.com/adam-kerrigan/bader-rs" 11 | categories = 
["command-line-utilities", "science"] 12 | keywords = ["Analysis", "Materials_Science", "Physics", "Chemistry", "Multi-Threading"] 13 | 14 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 15 | 16 | [dependencies] 17 | crossbeam-utils = "0.8.12" 18 | rustc-hash = "2.1.0" 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 kerrigoon 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # bader-rs (0.4.8) 2 | ![build](https://github.com/kerrigoon/bader-rs/workflows/build/badge.svg?branch=master) 3 | [![Latest Version](https://img.shields.io/crates/v/bader.svg)](https://crates.io/crates/bader) 4 | [![Documentation](https://docs.rs/bader/badge.svg)](https://docs.rs/bader/) 5 | [![DOI](https://zenodo.org/badge/292534636.svg)](https://zenodo.org/badge/latestdoi/292534636) 6 | [![MSRV: rustc 1.85.0+](https://img.shields.io/badge/MSRV-rustc_1.85.0+-lightgray.svg)](https://blog.rust-lang.org/2025/02/20/Rust-1.85.0/) 7 | 8 | An incredibly fast, multi-threaded, Bader charge partitioning tool. Based on methods presented in [Yu Min and Trinkle Dallas R. 2011 J. Che.m Phys. 134 064111] with adaptations for multi-threading and increased speed. 9 | ## Installation 10 | ### Pre-built Binary 11 | There are pre-built 64bit binaries for Linux, Mac and Windows provided with the source code for the latest [release]. 12 | ### Cargo 13 | If these binaries don't cover your OS the easiest way to install is via [cargo]. 14 | ```sh 15 | $ cargo install bader 16 | ``` 17 | ### From Source 18 | To check out the latest features not in the binaries yet you can compile from source. To do this run the following, which will create the ./target/release/bca executable. 19 | ```sh 20 | $ git clone https://github.com/adam-kerrigan/bader-rs 21 | $ cd bader-rs 22 | $ cargo build --verbose --release 23 | ``` 24 | From here you can either move or link the binary to a folder in your path. 25 | ```sh 26 | $ mv ./target/release/bca ~/bin 27 | ``` 28 | ### Minimum Supported Rust Version (MSRV) 29 | This crate is guaranteed to compile on stable Rust 1.85.0 and up. 30 | ## Usage 31 | The program takes a charge density file as input and performs Bader analysis of the data.
Currently it supports density in [VASP] or [cube] formats. It is recommended to run VASP calculations with [LAECHG] = .TRUE. to print the core density and self-consistent valence density. These can then be passed as reference files to the program using the -r, --reference flag where they will be summed. 32 | ```sh 33 | $ bca CHGCAR -r AECCAR0 -r AECCAR2 34 | ``` 35 | VASP charge density files containing spin densities will output the partitioned spin also. To achieve this for cube files requires using the --spin flag to pass a second file to treat as the spin density. 36 | ```sh 37 | $ bca charge-density.cube -s spin-density.cube 38 | ``` 39 | For a detailed list of usage options run 40 | ```sh 41 | $ bca --help 42 | ``` 43 | ## Output 44 | The program outputs the Atomic Charge File (ACF.dat) which contains the charge (and spin) information for each atom. 45 | ## License 46 | MIT 47 | 48 | [//]: # (These are reference links used in the body of this note and get stripped out when the markdown processor does its job. There is no need to format nicely because it shouldn't be seen. Thanks SO - http://stackoverflow.com/questions/4823468/store-comments-in-markdown-syntax) 49 | 50 | [release]: 51 | [VASP]: 52 | [cube]: 53 | [LAECHG]: 54 | [Yu Min and Trinkle Dallas R. 2011 J. Che.m Phys. 134 064111]: 55 | [cargo]: 56 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | ## v0.4.8 2 | ### Changes 3 | - Opt -> Arg in arguments 4 | - added a parse_filetype function.
5 | 6 | small release to check workflows 7 | -------------------------------------------------------------------------------- /release_checklist.md: -------------------------------------------------------------------------------- 1 | [] Create v{VERSIONNUMBER}-release branch 2 | [] Update the version number in Cargo.toml README.md CHANGELOG.md src/lib.rs 3 | [] Copy the latest CHANGELOG to the RELEASE.md 4 | [] Commit the changes and push to branch 5 | [] Create tag for the version and push to tag 6 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | reorder_imports = true 2 | max_width = 80 3 | -------------------------------------------------------------------------------- /src/analysis.rs: -------------------------------------------------------------------------------- 1 | use crate::atoms::Atoms; 2 | use crate::grid::Grid; 3 | use crate::methods::{CriticalPoint, CriticalPointKind, laplacian}; 4 | use crate::progress::{Bar, HiddenBar, ProgressBar}; 5 | use crate::utils::{cross, dot, norm, subtract, vdot}; 6 | use crate::voxel_map::{Voxel, VoxelMap}; 7 | use crossbeam_utils::thread; 8 | use rustc_hash::{FxHashMap, FxHashSet}; 9 | 10 | /// Sums the densities of each Bader volume. 11 | /// 12 | /// #Example: 13 | /// ``` 14 | /// use bader::analysis::calculate_bader_density; 15 | /// use bader::atoms::{Atoms, Lattice}; 16 | /// use bader::grid::Grid; 17 | /// use bader::voxel_map::VoxelMap; 18 | /// 19 | /// // Initialise Atoms and VoxelMap structs as well as a density to sum.
20 | /// let lattice = 21 | /// Lattice::new([[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]]); 22 | /// let atoms = Atoms::new( 23 | /// lattice, 24 | /// vec![[0.0, 0.0, 0.0], [1.5, 1.5, 1.5]], 25 | /// String::from(""), 26 | /// ); 27 | /// let grid = Grid::new( 28 | /// [10, 10, 10], 29 | /// [[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]], 30 | /// [0.0, 0.0, 0.0], 31 | /// ); 32 | /// // each atom gets 500 voxels all of value 1 33 | /// let mut voxel_map = (0..1000).map(|i| i / 500).collect::>(); 34 | /// // add some vacuum meaning atom 2 has 499 voxels 35 | /// voxel_map[600] = -1; 36 | /// // add a weighted voxel meaning atom 1 now has 499.7 voxels and atom 2 has 499.3 37 | /// voxel_map[400] = -2; 38 | /// let weight_map: Vec> = vec![vec![0.7, 1.3].into(); 1]; 39 | /// let voxel_map = VoxelMap::new(voxel_map, weight_map, grid); 40 | /// let density = vec![1.0; 1000]; 41 | /// 42 | /// let summed_density = 43 | /// calculate_bader_density(&density, &voxel_map, &atoms, 1, false); 44 | /// let volume = voxel_map.grid_get().voxel_lattice.volume; 45 | /// assert_eq!( 46 | /// summed_density, 47 | /// vec![ 48 | /// 499.7 * volume, 49 | /// 499.3 * volume, 50 | /// volume 51 | /// ] 52 | /// .into() 53 | /// ); 54 | /// ``` 55 | pub fn calculate_bader_density( 56 | density: &[f64], 57 | voxel_map: &VoxelMap, 58 | atoms: &Atoms, 59 | threads: usize, 60 | visible_bar: bool, 61 | ) -> Box<[f64]> { 62 | let progress_bar: Box = match visible_bar { 63 | false => Box::new(HiddenBar {}), 64 | true => Box::new(Bar::new( 65 | density.len(), 66 | String::from("Summing Bader Density"), 67 | )), 68 | }; 69 | let pbar = &progress_bar; 70 | let mut bader_density = vec![0.0; atoms.positions.len() + 1]; 71 | let vm = &voxel_map; 72 | // Calculate the size of the vector to be passed to each thread. 
73 | let chunk_size = 74 | (density.len() / threads) + (density.len() % threads).min(1); 75 | thread::scope(|s| { 76 | let spawned_threads = voxel_map 77 | .maxima_chunks(chunk_size) 78 | .enumerate() 79 | .map(|(index, chunk)| { 80 | s.spawn(move |_| { 81 | let mut bd = vec![0.0; atoms.positions.len() + 1]; 82 | chunk.iter().enumerate().for_each( 83 | |(voxel_index, maxima)| { 84 | let p = index * chunk.len() + voxel_index; 85 | match vm.maxima_to_voxel(*maxima) { 86 | Voxel::Maxima(m) => { 87 | let (m, _) = 88 | voxel_map.grid.decode_maxima(m); 89 | bd[m] += density[p]; 90 | } 91 | Voxel::Boundary(weights) => { 92 | for weight in weights.iter() { 93 | let m = *weight as usize; 94 | let w = weight - (m as f64); 95 | let (m, _) = 96 | voxel_map.grid.decode_maxima(m); 97 | bd[m] += w * density[p]; 98 | } 99 | } 100 | Voxel::Vacuum => { 101 | bd[atoms.positions.len()] += density[p] 102 | } 103 | }; 104 | pbar.tick(); 105 | }, 106 | ); 107 | bd 108 | }) 109 | }) 110 | .collect::>(); 111 | // Join each thread and collect the results. 112 | // If one thread terminates before the other this is not operated on first. 113 | // Either use the sorted index to remove vacuum from the summation or 114 | // find a way to operate on finshed threads first (ideally both). 115 | for thread in spawned_threads { 116 | if let Ok(tmp_bd) = thread.join() { 117 | bader_density.iter_mut().zip(tmp_bd.into_iter()).for_each( 118 | |(a, b)| { 119 | *a += b; 120 | }, 121 | ); 122 | } else { 123 | panic!("Unable to join thread in sum_bader_densities.") 124 | }; 125 | } 126 | }) 127 | .unwrap(); 128 | // The final result needs to be converted to a charge rather than a density. 129 | bader_density.iter_mut().for_each(|a| { 130 | *a *= voxel_map.grid_get().voxel_lattice.volume; 131 | }); 132 | bader_density.into() 133 | } 134 | 135 | /// Calculates the volume and radius of each Bader atom. 
136 | /// 137 | /// #Example: 138 | /// ``` 139 | /// use bader::analysis::calculate_bader_volumes_and_radii; 140 | /// use bader::atoms::{Atoms, Lattice}; 141 | /// use bader::grid::Grid; 142 | /// use bader::voxel_map::VoxelMap; 143 | /// 144 | /// // Intialise Atoms and VoxelMap structs as well as a density to sum. 145 | /// let lattice = 146 | /// Lattice::new([[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]]); 147 | /// let atoms = Atoms::new( 148 | /// lattice, 149 | /// vec![[0.0, 0.0, 0.0], [1.5, 1.5, 1.5]], 150 | /// String::from(""), 151 | /// ); 152 | /// let grid = Grid::new( 153 | /// [10, 10, 10], 154 | /// [[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]], 155 | /// [0.0, 0.0, 0.0], 156 | /// ); 157 | /// // each atom gets 500 voxels all of value 1 158 | /// let mut voxel_map = (0..1000).map(|i| i / 500).collect::>(); 159 | /// // add some vacuum meaning atom 2 has 499 voxels 160 | /// voxel_map[600] = -1; 161 | /// // add a weighted voxel meaning atom 1 now has 499.7 voxels and atom 2 has 499.3 162 | /// // this is the only factor in determining the radius; 2 * (a + b + c) for atom 1 163 | /// // and 3 * (a + b + c) for atom 2. 
164 | /// voxel_map[222] = -2; 165 | /// let weight_map: Vec> = vec![vec![0.7, 1.3].into(); 1]; 166 | /// let voxel_map = VoxelMap::new(voxel_map, weight_map, grid); 167 | /// 168 | /// let (volumes, radii) = calculate_bader_volumes_and_radii(&voxel_map, &atoms, 1, false); 169 | /// let volume = voxel_map.grid_get().voxel_lattice.volume; 170 | /// let a_b_c = (0.3_f64.powi(2) + 0.3_f64.powi(2) + 0.3_f64.powi(2)).powf(0.5); 171 | /// assert_eq!( 172 | /// volumes, 173 | /// vec![ 174 | /// 499.7 * volume, 175 | /// 499.3 * volume, 176 | /// volume 177 | /// ] 178 | /// .into() 179 | /// ); 180 | /// assert!(radii[0] - (2.0 * a_b_c) <= f64::EPSILON); 181 | /// assert!(radii[1] - (3.0 * a_b_c) <= f64::EPSILON); 182 | /// ``` 183 | pub fn calculate_bader_volumes_and_radii( 184 | voxel_map: &VoxelMap, 185 | atoms: &Atoms, 186 | threads: usize, 187 | visible_bar: bool, 188 | ) -> (Box<[f64]>, Box<[f64]>) { 189 | let progress_bar: Box = match visible_bar { 190 | false => Box::new(HiddenBar {}), 191 | true => Box::new(Bar::new( 192 | voxel_map.maxima_len(), 193 | String::from("Calculating Volumes"), 194 | )), 195 | }; 196 | let pbar = &progress_bar; 197 | let mut bader_radius = vec![f64::INFINITY; atoms.positions.len()]; 198 | let mut bader_volume = vec![0.0; atoms.positions.len() + 1]; 199 | let vm = &voxel_map; 200 | // Calculate the size of the vector to be passed to each thread. 
201 | let chunk_size = (voxel_map.maxima_len() / threads) 202 | + (voxel_map.maxima_len() % threads).min(1); 203 | thread::scope(|s| { 204 | let spawned_threads = voxel_map 205 | .maxima_chunks(chunk_size) 206 | .enumerate() 207 | .map(|(index, chunk)| { 208 | s.spawn(move |_| { 209 | let mut br = vec![f64::INFINITY; atoms.positions.len()]; 210 | let mut bv = vec![0.0; atoms.positions.len() + 1]; 211 | chunk.iter().enumerate().for_each(|(voxel_index, maxima)| { 212 | let p = index * chunk.len() + voxel_index; 213 | match vm.maxima_to_voxel(*maxima) { 214 | Voxel::Boundary(weights) => { 215 | for weight in weights.iter() { 216 | let m = *weight as usize; 217 | let w = weight - (m as f64); 218 | let (m, _) = 219 | voxel_map.grid.decode_maxima(m); 220 | bv[m] += w; 221 | let atom_number = vm.maxima_to_atom(m); 222 | let p_c = vm.grid.to_cartesian(p as isize); 223 | let p_lll_c = atoms.lattice.cartesian_to_reduced(p_c); 224 | let atom = atoms.reduced_positions[atom_number]; 225 | br[atom_number] = atoms.lattice.minimum_distance(p_lll_c, atom, Some(br[atom_number])); 226 | } 227 | } 228 | Voxel::Maxima(m) => { 229 | let (m, _) = 230 | voxel_map.grid.decode_maxima(m); 231 | bv[m] += 1.0; 232 | } 233 | Voxel::Vacuum => { 234 | bv[atoms.positions.len()] += 1.0; 235 | } 236 | }; 237 | pbar.tick(); 238 | }); 239 | (bv, br) 240 | }) 241 | }) 242 | .collect::>(); 243 | // Join each thread and collect the results. 244 | // If one thread terminates before the other this is not operated on first. 245 | // Either use the sorted index to remove vacuum from the summation or 246 | // find a way to operate on finshed threads first (ideally both). 
247 | for thread in spawned_threads { 248 | if let Ok((tmp_bv, tmp_br)) = thread.join() { 249 | bader_volume 250 | .iter_mut() 251 | .zip(tmp_bv.into_iter()) 252 | .for_each(|(a, b)| { 253 | *a += b; 254 | }); 255 | bader_radius 256 | .iter_mut() 257 | .zip(tmp_br.into_iter()) 258 | .for_each(|(a, b)| { 259 | *a = a.min(b); 260 | }); 261 | } else { 262 | panic!("Unable to join thread in calculate_bader_volumes_and_radii.") 263 | }; 264 | } 265 | }) 266 | .unwrap(); 267 | // The distance isn't square rooted in the calcation of distance to save time. 268 | // As we need to filter out the infinite distances (atoms with no assigned maxima) 269 | // we can square root here also. 270 | bader_volume.iter_mut().for_each(|a| { 271 | *a *= voxel_map.grid_get().voxel_lattice.volume; 272 | }); 273 | bader_radius.iter_mut().for_each(|d| { 274 | match (*d).partial_cmp(&f64::INFINITY) { 275 | Some(std::cmp::Ordering::Less) => *d = d.powf(0.5), 276 | _ => *d = 0.0, 277 | } 278 | }); 279 | (bader_volume.into(), bader_radius.into()) 280 | } 281 | 282 | /// calcuate the error associated with each atom from the Laplacian 283 | pub fn calculate_bader_error( 284 | density: &[f64], 285 | voxel_map: &VoxelMap, 286 | atoms: &Atoms, 287 | threads: usize, 288 | visible_bar: bool, 289 | ) -> Box<[f64]> { 290 | let progress_bar: Box = match visible_bar { 291 | false => Box::new(HiddenBar {}), 292 | true => Box::new(Bar::new( 293 | voxel_map.maxima_len(), 294 | String::from("Calculating Errors"), 295 | )), 296 | }; 297 | let pbar = &progress_bar; 298 | let mut bader_error = vec![0.0; atoms.positions.len() + 1]; 299 | let vm = &voxel_map; 300 | // Calculate the size of the vector to be passed to each thread. 
301 | let chunk_size = 302 | (density.len() / threads) + (density.len() % threads).min(1); 303 | thread::scope(|s| { 304 | let spawned_threads = voxel_map 305 | .maxima_chunks(chunk_size) 306 | .enumerate() 307 | .map(|(index, chunk)| { 308 | s.spawn(move |_| { 309 | let mut bd = vec![0.0; atoms.positions.len() + 1]; 310 | chunk.iter().enumerate().for_each( 311 | |(voxel_index, maxima)| { 312 | let p = index * chunk.len() + voxel_index; 313 | let lapl = laplacian(p, density, &vm.grid); 314 | match vm.maxima_to_voxel(*maxima) { 315 | Voxel::Maxima(m) => { 316 | let (m, _) = 317 | voxel_map.grid.decode_maxima(m); 318 | bd[m] += lapl; 319 | } 320 | Voxel::Boundary(weights) => { 321 | for weight in weights.iter() { 322 | let m = *weight as usize; 323 | let w = weight - (m as f64); 324 | let (m, _) = 325 | voxel_map.grid.decode_maxima(m); 326 | bd[m] += w * lapl; 327 | } 328 | } 329 | Voxel::Vacuum => { 330 | bd[atoms.positions.len()] += lapl 331 | } 332 | }; 333 | pbar.tick(); 334 | }, 335 | ); 336 | bd 337 | }) 338 | }) 339 | .collect::>(); 340 | // Join each thread and collect the results. 341 | // If one thread terminates before the other this is not operated on first. 342 | // Either use the sorted index to remove vacuum from the summation or 343 | // find a way to operate on finshed threads first (ideally both). 
344 | for thread in spawned_threads { 345 | if let Ok(tmp_bd) = thread.join() { 346 | bader_error.iter_mut().zip(tmp_bd.into_iter()).for_each( 347 | |(a, b)| { 348 | *a += b; 349 | }, 350 | ); 351 | } else { 352 | panic!("Unable to join thread in sum_bader_densities.") 353 | }; 354 | } 355 | }) 356 | .unwrap(); 357 | bader_error.into() 358 | } 359 | 360 | pub fn nuclei_ordering( 361 | nuclei: Vec, 362 | density: &[f64], 363 | atom_len: usize, 364 | visible_bar: bool, 365 | ) -> Vec { 366 | let progress_bar: Box = match visible_bar { 367 | false => Box::new(HiddenBar {}), 368 | true => Box::new(Bar::new( 369 | nuclei.len(), 370 | String::from("Pruning Nucleus Critical Points"), 371 | )), 372 | }; 373 | let pbar = &progress_bar; 374 | // Find all the nuclei with the same atom number and group them based on which is largest 375 | // charge density. All maxima will be in image (0, 0, 0). 376 | // We need to order the nuclei by atom so that we can get the position of them by knowing the 377 | // atom number. 
378 | let mut ordered_nuclei = 379 | vec![ 380 | CriticalPoint::new(0, CriticalPointKind::Blank, Box::new([])); 381 | atom_len 382 | ]; 383 | nuclei.iter().for_each(|cp| { 384 | let p = cp.position; 385 | let rho = density[p as usize]; 386 | let atom_num = cp.atoms[0]; 387 | if let CriticalPointKind::Blank = ordered_nuclei[atom_num].kind { 388 | ordered_nuclei[atom_num] = 389 | CriticalPoint::new(cp.position, cp.kind, cp.atoms.clone()); 390 | } else if rho > density[ordered_nuclei[atom_num].position as usize] { 391 | ordered_nuclei[atom_num] = 392 | CriticalPoint::new(cp.position, cp.kind, cp.atoms.clone()); 393 | } 394 | pbar.tick(); 395 | }); 396 | ordered_nuclei 397 | } 398 | 399 | pub fn bond_pruning( 400 | bonds: &[CriticalPoint], 401 | density: &[f64], 402 | grid: &Grid, 403 | visible_bar: bool, 404 | ) -> Vec { 405 | let progress_bar: Box = match visible_bar { 406 | false => Box::new(HiddenBar {}), 407 | true => Box::new(Bar::new( 408 | bonds.len(), 409 | String::from("Pruning Bond Critical Points"), 410 | )), 411 | }; 412 | let pbar = &progress_bar; 413 | bonds 414 | .iter() 415 | .filter_map(|cp| { 416 | pbar.tick(); 417 | let mut origin_flag = false; 418 | cp.atoms.iter().for_each(|a| { 419 | let (_, image) = grid.decode_maxima(*a); 420 | if image[0].abs() + image[1].abs() + image[2].abs() == 0 { 421 | origin_flag = true; 422 | } 423 | }); 424 | let rho = density[cp.position as usize]; 425 | let atom_num = match origin_flag { 426 | true => FxHashSet::from_iter(vec![cp.atoms.to_vec()]), 427 | false => cp 428 | .atoms 429 | .iter() 430 | .map(|a| { 431 | let (_, image) = grid.decode_maxima(*a); 432 | cp.atoms 433 | .iter() 434 | .map(|a| { 435 | let (a, i) = grid.decode_maxima(*a); 436 | grid.encode_maxima( 437 | a, 438 | i.into_iter() 439 | .zip(image.iter()) 440 | .map(|(i, ii)| i - *ii) 441 | .collect::>() 442 | .try_into() 443 | .unwrap(), 444 | ) 445 | }) 446 | .collect::>() 447 | }) 448 | .collect(), 449 | }; 450 | for cp_t in bonds.iter() { 451 | let 
pt = cp_t.position; 452 | let mut origin_flag_t = false; 453 | cp_t.atoms.iter().for_each(|a| { 454 | let (_, image) = grid.decode_maxima(*a); 455 | if image[0].abs() + image[1].abs() + image[2].abs() == 0 { 456 | origin_flag_t = true; 457 | } 458 | }); 459 | let atom_num_t = match origin_flag_t { 460 | true => FxHashSet::from_iter(vec![cp_t.atoms.to_vec()]), 461 | false => FxHashSet::from_iter(cp_t.atoms.iter().map(|a| { 462 | let (_, image) = grid.decode_maxima(*a); 463 | cp_t.atoms 464 | .iter() 465 | .map(|a| { 466 | let (a, i) = grid.decode_maxima(*a); 467 | grid.encode_maxima( 468 | a, 469 | i.into_iter() 470 | .zip(image.iter()) 471 | .map(|(i, ii)| i - *ii) 472 | .collect::>() 473 | .try_into() 474 | .unwrap(), 475 | ) 476 | }) 477 | .collect::>() 478 | })), 479 | }; 480 | for an in atom_num.iter() { 481 | let an = FxHashSet::from_iter(an); 482 | for ant in atom_num_t.iter() { 483 | let ant = FxHashSet::from_iter(ant); 484 | if an == ant && rho < density[pt as usize] { 485 | return None; 486 | } 487 | } 488 | } 489 | } 490 | Some(CriticalPoint::new(cp.position, cp.kind, cp.atoms.clone())) 491 | }) 492 | .collect() 493 | } 494 | 495 | pub fn ring_pruning( 496 | rings: &[CriticalPoint], 497 | ordered_nuclei: &[CriticalPoint], 498 | density: &[f64], 499 | atoms: &Atoms, 500 | grid: &Grid, 501 | visible_bar: bool, 502 | ) -> Vec { 503 | let progress_bar: Box = match visible_bar { 504 | false => Box::new(HiddenBar {}), 505 | true => Box::new(Bar::new( 506 | rings.len(), 507 | String::from("Pruning Ring Critical Points"), 508 | )), 509 | }; 510 | let pbar = &progress_bar; 511 | let mut pbc_rings = vec![]; 512 | let rings = rings 513 | .iter() 514 | .filter_map(|cp| { 515 | let mut folded_atom_nums = 516 | Vec::::with_capacity(cp.atoms.len()); 517 | let mut atom_images = Vec::<[i8; 3]>::with_capacity(cp.atoms.len()); 518 | let mut origin_flag = false; 519 | let positions = cp.atoms[..3] 520 | .iter() 521 | .map(|a| { 522 | let (atom_num, image) = 
grid.decode_maxima(*a); 523 | folded_atom_nums.push(atom_num); 524 | atom_images.push(image); 525 | if image[0].abs() + image[1].abs() + image[2].abs() == 0 { 526 | origin_flag = true; 527 | } 528 | let image_shift = dot( 529 | image 530 | .into_iter() 531 | .map(|i| i as f64) 532 | .collect::>() 533 | .try_into() 534 | .unwrap(), 535 | atoms.lattice.to_cartesian, 536 | ); 537 | grid.to_cartesian(ordered_nuclei[atom_num].position) 538 | .iter() 539 | .zip(image_shift) 540 | .map(|(p, s)| *p + s) 541 | .collect::>() 542 | .try_into() 543 | .unwrap() 544 | }) 545 | .collect::>(); 546 | // if has 3 atoms they must form a plane 547 | if cp.atoms.len() > 3 { 548 | // create a vector normal to the plane span by the first 3 atoms 549 | let vec_1 = subtract(positions[1], positions[0]); 550 | let vec_2 = subtract(positions[2], positions[0]); 551 | let plane = cross(vec_1, vec_2); 552 | let plane_normal = norm(plane); 553 | let plane: [f64; 3] = plane 554 | .into_iter() 555 | .map(|f| f / plane_normal) 556 | .collect::>() 557 | .try_into() 558 | .unwrap(); 559 | // check if every other atom falls on that plane 560 | for a in cp.atoms[3..].iter() { 561 | let (atom_num, image) = grid.decode_maxima(*a); 562 | if image[0].abs() + image[1].abs() + image[2].abs() == 0 { 563 | origin_flag = true; 564 | } 565 | let image_shift = dot( 566 | image 567 | .into_iter() 568 | .map(|i| i as f64) 569 | .collect::>() 570 | .try_into() 571 | .unwrap(), 572 | atoms.lattice.to_cartesian, 573 | ); 574 | let position = grid 575 | .to_cartesian(ordered_nuclei[atom_num].position) 576 | .iter() 577 | .zip(image_shift.into_iter()) 578 | .map(|(p, s)| *p + s) 579 | .collect::>() 580 | .try_into() 581 | .unwrap(); 582 | let vec_2 = subtract(position, positions[0]); 583 | let plane_t = cross(vec_1, vec_2); 584 | let plane_normal = norm(plane_t); 585 | let plane_t: [f64; 3] = plane_t 586 | .into_iter() 587 | .map(|f| f / plane_normal) 588 | .collect::>() 589 | .try_into() 590 | .unwrap(); 591 | // TODO: 
make this a tolerance currently 5.73 degrees 592 | if vdot(plane, plane_t).abs() < 0.995 { 593 | pbar.tick(); 594 | return None; 595 | //TODO: remove this, its is for testing 596 | } else { 597 | folded_atom_nums.push(atom_num); 598 | atom_images.push(image); 599 | } 600 | } 601 | } 602 | // filter down all the same rings into the one with the highest charge density 603 | let p = cp.position; 604 | let rho = density[p as usize]; 605 | let atom_nums = FxHashSet::from_iter(cp.atoms.iter()); 606 | // check if the ring is the same as others and get rid if it has lower density 607 | for cp_t in rings.iter() { 608 | let pt = cp_t.position; 609 | let atom_num_t = FxHashSet::from_iter(cp_t.atoms.iter()); 610 | if atom_nums == atom_num_t && rho < density[pt as usize] { 611 | pbar.tick(); 612 | return None; 613 | } 614 | } 615 | // the origin flag is still false if none of the atoms are within the periodic bounds. 616 | // need to add these to a seperate list so they can be wrapped in and checked 617 | if !origin_flag { 618 | // these are all the unique collections of atoms that are shifted by the images 619 | let pbc_atoms = 620 | FxHashSet::from_iter(cp.atoms.iter().map(|a| { 621 | let (_, image) = grid.decode_maxima(*a); 622 | cp.atoms 623 | .iter() 624 | .map(|a| { 625 | let (pbc_a, pbc_image) = grid.decode_maxima(*a); 626 | grid.encode_maxima( 627 | pbc_a, 628 | pbc_image 629 | .into_iter() 630 | .zip(image.iter()) 631 | .map(|(pbc_i, i)| pbc_i - i) 632 | .collect::>() 633 | .try_into() 634 | .unwrap(), 635 | ) 636 | }) 637 | .collect::>() 638 | })); 639 | pbc_rings.push(( 640 | CriticalPoint::new(cp.position, cp.kind, cp.atoms.clone()), 641 | pbc_atoms, 642 | )); 643 | None 644 | } else { 645 | Some(CriticalPoint::new(cp.position, cp.kind, cp.atoms.clone())) 646 | } 647 | }) 648 | .collect::>(); 649 | // now we prune the out of origin atoms 650 | let mut pbc_rings = pbc_rings 651 | .iter() 652 | .enumerate() 653 | .filter_map(|(i, (cp, pbc_atoms))| { 654 | // we want 
to check every pbc ring against all of its pbc reflections 655 | let p = cp.position; 656 | let rho = density[p as usize]; 657 | // first we compare against all of the other non origin rings and remove if it has 658 | // lower density 659 | for (ii, (cp_t, pbc_t)) in pbc_rings.iter().enumerate() { 660 | if i != ii { 661 | let pt = cp_t.position; 662 | for atom_nums in pbc_atoms.iter() { 663 | let atom_nums = FxHashSet::from_iter(atom_nums); 664 | for atom_nums_t in pbc_t.iter() { 665 | let atom_nums_t = FxHashSet::from_iter(atom_nums_t); 666 | if atom_nums == atom_nums_t { 667 | if rho < density[pt as usize] { 668 | pbar.tick(); 669 | return None; 670 | } 671 | } else if atom_nums.is_subset(&atom_nums_t) { 672 | pbar.tick(); 673 | return None; 674 | } 675 | } 676 | } 677 | } 678 | } 679 | // then compare against the other rings. this is mor complicated, if they are the same 680 | // remove if the density is lower, else we need to add this one and later remove the 681 | // one that has lower density 682 | for cp_t in rings.iter() { 683 | let pt = cp_t.position; 684 | for atom_nums in pbc_atoms.iter() { 685 | let atom_nums = FxHashSet::from_iter(atom_nums); 686 | let atom_nums_t = FxHashSet::from_iter(cp_t.atoms.iter()); 687 | // if they have the exact same atoms check the density 688 | if atom_nums == atom_nums_t { 689 | if rho < density[pt as usize] { 690 | pbar.tick(); 691 | return None; 692 | } else { 693 | return Some(CriticalPoint::new( 694 | cp.position, 695 | cp.kind, 696 | cp_t.atoms.clone(), 697 | )); 698 | } 699 | // if its a subset remove it 700 | } else if atom_nums.is_subset(&atom_nums_t) { 701 | pbar.tick(); 702 | return None; 703 | } 704 | } 705 | } 706 | Some(CriticalPoint::new(cp.position, cp.kind, cp.atoms.clone())) 707 | }) 708 | .collect::>(); 709 | pbc_rings.extend(rings); 710 | pbc_rings 711 | .iter() 712 | .filter_map(|cp| { 713 | pbar.tick(); 714 | let atom_num = FxHashSet::from_iter(cp.atoms.iter()); 715 | let rho = density[cp.position as 
usize]; 716 | // filter down all the same rings into the one with the highest charge density 717 | for cp_t in pbc_rings.iter() { 718 | let pt = cp_t.position; 719 | let atom_num_t = FxHashSet::from_iter(cp_t.atoms.iter()); 720 | if atom_num == atom_num_t { 721 | if rho < density[pt as usize] { 722 | return None; 723 | } 724 | } else if atom_num.is_subset(&atom_num_t) { 725 | return None; 726 | } 727 | } 728 | Some(CriticalPoint::new(cp.position, cp.kind, cp.atoms.clone())) 729 | }) 730 | .collect() 731 | } 732 | 733 | pub fn cage_pruning( 734 | cages: &[CriticalPoint], 735 | ordered_nuclei: &[CriticalPoint], 736 | density: &[f64], 737 | atoms: &Atoms, 738 | grid: &Grid, 739 | visible_bar: bool, 740 | ) -> Vec { 741 | let progress_bar: Box = match visible_bar { 742 | false => Box::new(HiddenBar {}), 743 | true => Box::new(Bar::new( 744 | cages.len(), 745 | String::from("Pruning Cage Critical Points"), 746 | )), 747 | }; 748 | let pbar = &progress_bar; 749 | cages 750 | .iter() 751 | .filter_map(|cp| { 752 | pbar.tick(); 753 | if cp.atoms.len() < 4 { 754 | // impossible to have a cage with 3 atoms 755 | return None; 756 | } else { 757 | // form a plane with the first 3 atoms 758 | let positions = cp.atoms[..3] 759 | .iter() 760 | .map(|a| { 761 | let (atom_num, image) = grid.decode_maxima(*a); 762 | let image_shift = dot( 763 | image 764 | .into_iter() 765 | .map(|i| i as f64) 766 | .collect::>() 767 | .try_into() 768 | .unwrap(), 769 | atoms.lattice.to_cartesian, 770 | ); 771 | // get the position of the nuclei and apply the image shift 772 | grid.to_cartesian(ordered_nuclei[atom_num].position) 773 | .iter() 774 | .zip(image_shift) 775 | .map(|(p, s)| *p + s) 776 | .collect::>() 777 | .try_into() 778 | .unwrap() 779 | }) 780 | .collect::>(); 781 | let vec_1 = subtract(positions[1], positions[0]); 782 | let vec_2 = subtract(positions[2], positions[0]); 783 | let plane = cross(vec_1, vec_2); 784 | let plane_normal = norm(plane); 785 | let plane: [f64; 3] = plane 786 
| .into_iter() 787 | .map(|f| f / plane_normal) 788 | .collect::>() 789 | .try_into() 790 | .unwrap(); 791 | let mut flag = true; 792 | // check every other atom against this plane, at least one should not be in the 793 | // plane 794 | for a in cp.atoms[3..].iter() { 795 | let (atom_num, image) = grid.decode_maxima(*a); 796 | let image_shift = dot( 797 | image 798 | .into_iter() 799 | .map(|i| i as f64) 800 | .collect::>() 801 | .try_into() 802 | .unwrap(), 803 | atoms.lattice.to_cartesian, 804 | ); 805 | let position = grid 806 | .to_cartesian(ordered_nuclei[atom_num].position) 807 | .iter() 808 | .zip(image_shift.into_iter()) 809 | .map(|(p, s)| *p + s) 810 | .collect::>() 811 | .try_into() 812 | .unwrap(); 813 | let vec_2 = subtract(position, positions[0]); 814 | let plane_t = cross(vec_1, vec_2); 815 | let plane_normal = norm(plane_t); 816 | let plane_t: [f64; 3] = plane_t 817 | .into_iter() 818 | .map(|f| f / plane_normal) 819 | .collect::>() 820 | .try_into() 821 | .unwrap(); 822 | // TODO: make this a tolerance currently 5.73 degrees 823 | if vdot(plane, plane_t).abs() < 0.995 { 824 | flag = false; 825 | } 826 | } 827 | // if the flag is still true then all were in the same plane and the cage is a 828 | // density fluctuation 829 | if flag { 830 | return None; 831 | } 832 | } 833 | let p = cp.position; 834 | let rho = density[p as usize]; 835 | let atom_num = FxHashSet::from_iter(cp.atoms.iter()); 836 | // now we check against the other cages, note there is no check on folded atoms 837 | for cp_t in cages { 838 | let pt = cp_t.position; 839 | let atom_num_t = FxHashSet::from_iter(cp_t.atoms.iter()); 840 | if atom_num_t == atom_num && rho > density[pt as usize] { 841 | return None; 842 | } 843 | } 844 | Some(CriticalPoint::new(cp.position, cp.kind, cp.atoms.clone())) 845 | }) 846 | .collect() 847 | } 848 | 849 | /// Calculates the Laplacian at each saddle point. 
This is currently basic analysis, atoms images 850 | /// are associated by distance not gradient paths and ring points are just being ignored. 851 | pub fn calculate_bond_strengths( 852 | saddles: &[isize], 853 | density: &[f64], 854 | atoms: &Atoms, 855 | voxel_map: &VoxelMap, 856 | visible_bar: bool, 857 | ) -> Vec> { 858 | let progress_bar: Box = match visible_bar { 859 | false => Box::new(HiddenBar {}), 860 | true => Box::new(Bar::new( 861 | voxel_map.maxima_len(), 862 | String::from("Calculating Bond Strength"), 863 | )), 864 | }; 865 | let pbar = &progress_bar; 866 | let lat = &atoms.lattice; 867 | let mut bonds = vec![ 868 | FxHashMap::<(usize, usize), f64>::default(); 869 | atoms.positions.len() 870 | ]; 871 | saddles.iter().for_each(|p| { 872 | let lapl = laplacian(*p as usize, density, voxel_map.grid_get()); 873 | let p_lll = lat.cartesian_to_reduced(voxel_map.grid.to_cartesian(*p)); 874 | // We know that the saddle point is a weight not a maximum. 875 | let weights = voxel_map.maxima_to_weight(voxel_map.maxima_get(*p)); 876 | // If the weight is two atoms it's a bond > 2 is a ring? 
877 | if let std::cmp::Ordering::Equal = weights.len().cmp(&2) { 878 | let atom_nums = weights 879 | .iter() 880 | .map(|w| { 881 | let n = *w as usize; 882 | let atom = atoms.reduced_positions[n]; 883 | let mut min_distance = f64::INFINITY; 884 | let mut atom_image = 0; 885 | for (i, atom_shift) in 886 | lat.reduced_cartesian_shift_matrix.iter().enumerate() 887 | { 888 | let distance = { 889 | (p_lll[0] - (atom[0] + atom_shift[0])).powi(2) 890 | + (p_lll[1] - (atom[1] + atom_shift[1])).powi(2) 891 | + (p_lll[2] - (atom[2] + atom_shift[2])).powi(2) 892 | }; 893 | if distance < min_distance { 894 | min_distance = distance; 895 | atom_image = i; 896 | } 897 | } 898 | (atom_image, n) 899 | }) 900 | .collect::>(); 901 | // compare images 902 | // crossing (out[0] * 9 + out[1] * 3 + out[2] + 13) as usize 903 | let (image_1, atom_1) = atom_nums[0]; 904 | let (image_2, atom_2) = atom_nums[1]; 905 | let x1 = (image_1 / 9) as isize - 1; 906 | let y1 = (image_1 / 3).rem_euclid(3) as isize - 1; 907 | let z1 = image_1.rem_euclid(3) as isize - 1; 908 | let x2 = (image_2 / 9) as isize - 1; 909 | let y2 = (image_2 / 3).rem_euclid(3) as isize - 1; 910 | let z2 = image_2.rem_euclid(3) as isize - 1; 911 | let image_1_adjust = 912 | ((x1 - x2) * 9 + (y1 - y2) * 3 + (z1 - z2) + 13) as usize; 913 | let image_2_adjust = 914 | ((x2 - x1) * 9 + (y2 - y1) * 3 + (z2 - z1) + 13) as usize; 915 | let bonds_1 = 916 | bonds[atom_1].entry((atom_2, image_2_adjust)).or_insert(0.0); 917 | if let std::cmp::Ordering::Greater = 918 | lapl.abs().partial_cmp(&bonds_1.abs()).unwrap() 919 | { 920 | *bonds_1 = lapl; 921 | let bonds_2 = bonds[atom_2] 922 | .entry((atom_1, image_1_adjust)) 923 | .or_insert(0.0); 924 | *bonds_2 = lapl; 925 | }; 926 | } 927 | pbar.tick(); 928 | }); 929 | bonds.iter_mut().for_each(|b| b.shrink_to_fit()); 930 | bonds 931 | } 932 | -------------------------------------------------------------------------------- /src/atoms.rs: 
-------------------------------------------------------------------------------- 1 | use crate::utils; 2 | use std::cmp::Ordering::Less; 3 | 4 | /// struct for containing the information about the atoms. 5 | pub struct Atoms { 6 | /// The lattice of the structure. 7 | pub lattice: Lattice, 8 | /// The positions of the atoms in cartesian coordinates. 9 | pub positions: Vec<[f64; 3]>, 10 | /// Text representation from the input file. 11 | pub text: String, 12 | /// The positions of the atoms in the LLL-reduced basis. 13 | pub reduced_positions: Vec<[f64; 3]>, 14 | } 15 | 16 | impl Atoms { 17 | /// Initialises the structure. 18 | pub fn new( 19 | lattice: Lattice, 20 | positions: Vec<[f64; 3]>, 21 | text: String, 22 | ) -> Self { 23 | let reduced_positions = positions 24 | .iter() 25 | .map(|p| lattice.cartesian_to_reduced(*p)) 26 | .collect::>(); 27 | Self { 28 | lattice, 29 | positions, 30 | text, 31 | reduced_positions, 32 | } 33 | } 34 | } 35 | 36 | /// Lattice - structure for containing information on the cell 37 | /// 38 | ///
 39 | /// shift matrix ordering:
 40 | ///     0 -> (-1,-1,-1)   7 -> (-1, 1, 0)  14 -> (0, 0, 1)  21 -> (1, 0,-1)
 41 | ///     1 -> (-1,-1, 0)   8 -> (-1, 1, 1)  15 -> (0, 1,-1)  22 -> (1, 0, 0)
 42 | ///     2 -> (-1,-1, 1)   9 -> (0,-1,-1)   16 -> (0, 1, 0)  23 -> (1, 0, 1)
 43 | ///     3 -> (-1, 0,-1)  10 -> (0,-1, 0)   17 -> (0, 1, 1)  24 -> (1, 1,-1)
 44 | ///     4 -> (-1, 0, 0)  11 -> (0,-1, 1)   18 -> (1,-1,-1)  25 -> (1, 1, 0)
 45 | ///     5 -> (-1, 0, 1)  12 -> (0, 0,-1)   19 -> (1,-1, 0)  26 -> (1, 1, 1)
 46 | ///     6 -> (-1, 1,-1)  13 -> (0, 0, 0)   20 -> (1,-1, 1)
 47 | /// 
48 | pub struct Lattice { 49 | /// The cartesian vectors for every combination of lattice vector. 50 | pub cartesian_shift_matrix: [[f64; 3]; 27], 51 | /// Transformation matrix for converting to fractional coordinates. 52 | pub to_fractional: [[f64; 3]; 3], 53 | /// Transformation matrix for converting to cartesian coordinates. 54 | pub to_cartesian: [[f64; 3]; 3], 55 | /// The cartesian vectors for every combination of reduced lattice vector. 56 | pub reduced_cartesian_shift_matrix: [[f64; 3]; 27], 57 | /// The conversion of the reduced shift matrix to the individual steps in the 58 | /// [`crate::grid::Grid`] 59 | pub reduced_grid_shift_matrix: Vec>, 60 | /// Transformation matrix for converting to fractional coordinates. 61 | pub reduced_to_fractional: [[f64; 3]; 3], 62 | /// Transformation matrix for converting to cartesian coordinates. 63 | pub reduced_to_cartesian: [[f64; 3]; 3], 64 | /// Volume of the lattice. 65 | pub volume: f64, 66 | } 67 | 68 | impl Lattice { 69 | /// Initialises the structure. Builds all the fields of the lattice structure 70 | /// from a 2d vector in the form: 71 | /// 72 | ///
 73 |     /// [
 74 |     ///     [ax, ay, az],
 75 |     ///     [bx, by, bz],
 76 |     ///     [cx, cy, cz],
 77 |     /// ]
 78 |     /// 
79 | pub fn new(lattice: [[f64; 3]; 3]) -> Self { 80 | let cartesian_shift_matrix = 81 | Lattice::create_cartesian_shift_matrix(&lattice); 82 | let to_fractional = match utils::invert_lattice(&lattice) { 83 | Some(inv) => inv, 84 | None => panic!("Supplied lattice does not span 3d space."), 85 | }; 86 | let reduced_lattice = lll_lattice(lattice); 87 | let reduced_cartesian_shift_matrix = 88 | Lattice::create_cartesian_shift_matrix(&reduced_lattice); 89 | let reduced_to_fractional = 90 | match utils::invert_lattice(&reduced_lattice) { 91 | Some(inv) => inv, 92 | None => panic!("Supplied lattice does not span 3d space."), 93 | }; 94 | let reduced_grid_shift_matrix = Lattice::create_grid_shift_matrix( 95 | &reduced_cartesian_shift_matrix, 96 | &reduced_to_fractional, 97 | ); 98 | let volume = 99 | utils::vdot(lattice[0], utils::cross(lattice[1], lattice[2])).abs(); 100 | let to_cartesian = lattice; 101 | let reduced_to_cartesian = reduced_lattice; 102 | Self { 103 | cartesian_shift_matrix, 104 | to_fractional, 105 | to_cartesian, 106 | reduced_cartesian_shift_matrix, 107 | reduced_grid_shift_matrix, 108 | reduced_to_fractional, 109 | reduced_to_cartesian, 110 | volume, 111 | } 112 | } 113 | 114 | /// Turn fractional coordinates into Cartesian coordinates in the reduced basis. 115 | pub fn fractional_to_reduced(&self, p: [f64; 3]) -> [f64; 3] { 116 | self.cartesian_to_reduced(utils::dot(p, self.to_cartesian)) 117 | } 118 | 119 | /// Map Cartesian coordinates into the reduced basis. 120 | pub fn cartesian_to_reduced(&self, p: [f64; 3]) -> [f64; 3] { 121 | let pn = utils::dot(p, self.reduced_to_fractional) 122 | .iter() 123 | .map(|p| p - p.floor()) 124 | .collect::>() 125 | .try_into() 126 | .unwrap(); 127 | utils::dot(pn, self.reduced_to_cartesian) 128 | } 129 | 130 | // Calculates the minimum distance between two points, with an optional upper bound. 
131 | pub fn minimum_distance( 132 | &self, 133 | a: [f64; 3], 134 | b: [f64; 3], 135 | min_dist: Option, 136 | ) -> f64 { 137 | let mut min_dist = min_dist.unwrap_or(f64::INFINITY); 138 | for periodic_shift in self.reduced_cartesian_shift_matrix.iter() { 139 | let distance = { 140 | (a[0] - (b[0] + periodic_shift[0])).powi(2) 141 | + (a[1] - (b[1] + periodic_shift[1])).powi(2) 142 | + (a[2] - (b[2] + periodic_shift[2])).powi(2) 143 | }; 144 | if distance < min_dist { 145 | min_dist = distance; 146 | } 147 | } 148 | min_dist 149 | } 150 | 151 | pub fn closest_image(&self, a: [f64; 3], b: [f64; 3]) -> [f64; 3] { 152 | let mut min_dist = f64::INFINITY; 153 | let mut position = [0.0; 3]; 154 | for periodic_shift in self.reduced_cartesian_shift_matrix.iter() { 155 | let image_position = b 156 | .iter() 157 | .zip(periodic_shift) 158 | .map(|(f, image)| *f + *image) 159 | .collect::>(); 160 | let distance = a 161 | .iter() 162 | .zip(&image_position) 163 | .fold(0.0, |acc, (f, p)| acc + (f - p).powi(2)); 164 | if distance < min_dist { 165 | min_dist = distance; 166 | position = image_position.try_into().unwrap(); 167 | } 168 | } 169 | position 170 | } 171 | 172 | /// Create the shift matrix from the lattice supplied. 
173 | fn create_cartesian_shift_matrix( 174 | lattice: &[[f64; 3]; 3], 175 | ) -> [[f64; 3]; 27] { 176 | let x = lattice[0]; 177 | let y = lattice[1]; 178 | let z = lattice[2]; 179 | [ 180 | [ 181 | -x[0] - y[0] - z[0], 182 | -x[1] - y[1] - z[1], 183 | -x[2] - y[2] - z[2], 184 | ], 185 | [-x[0] - y[0], -x[1] - y[1], -x[2] - y[2]], 186 | [ 187 | -x[0] - y[0] + z[0], 188 | -x[1] - y[1] + z[1], 189 | -x[2] - y[2] + z[2], 190 | ], 191 | [-x[0] - z[0], -x[1] - z[1], -x[2] - z[2]], 192 | [-x[0], -x[1], -x[2]], 193 | [-x[0] + z[0], -x[1] + z[1], -x[2] + z[2]], 194 | [ 195 | -x[0] + y[0] - z[0], 196 | -x[1] + y[1] - z[1], 197 | -x[2] + y[2] - z[2], 198 | ], 199 | [-x[0] + y[0], -x[1] + y[1], -x[2] + y[2]], 200 | [ 201 | -x[0] + y[0] + z[0], 202 | -x[1] + y[1] + z[1], 203 | -x[2] + y[2] + z[2], 204 | ], 205 | [-y[0] - z[0], -y[1] - z[1], -y[2] - z[2]], 206 | [-y[0], -y[1], -y[2]], 207 | [-y[0] + z[0], -y[1] + z[1], -y[2] + z[2]], 208 | [-z[0], -z[1], -z[2]], 209 | [0.0, 0.0, 0.0], 210 | [z[0], z[1], z[2]], 211 | [y[0] - z[0], y[1] - z[1], y[2] - z[2]], 212 | [y[0], y[1], y[2]], 213 | [y[0] + z[0], y[1] + z[1], y[2] + z[2]], 214 | [x[0] - y[0] - z[0], x[1] - y[1] - z[1], x[2] - y[2] - z[2]], 215 | [x[0] - y[0], x[1] - y[1], x[2] - y[2]], 216 | [x[0] - y[0] + z[0], x[1] - y[1] + z[1], x[2] - y[2] + z[2]], 217 | [x[0] - z[0], x[1] - z[1], x[2] - z[2]], 218 | [x[0], x[1], x[2]], 219 | [x[0] + z[0], x[1] + z[1], x[2] + z[2]], 220 | [x[0] + y[0] - z[0], x[1] + y[1] - z[1], x[2] + y[2] - z[2]], 221 | [x[0] + y[0], x[1] + y[1], x[2] + y[2]], 222 | [x[0] + y[0] + z[0], x[1] + y[1] + z[1], x[2] + y[2] + z[2]], 223 | ] 224 | } 225 | 226 | /// Turn the shift matrix into a vector of all the required steps in the [`crate::grid::Grid`] 227 | /// required to move by the vector. 
228 | fn create_grid_shift_matrix( 229 | shift_matrix: &[[f64; 3]; 27], 230 | to_fractional: &[[f64; 3]; 3], 231 | ) -> Vec> { 232 | shift_matrix 233 | .iter() 234 | .map(|c_shift| { 235 | let shift = utils::idot(*c_shift, *to_fractional); 236 | // how many times are we going to have to reduce the vector 237 | let max = shift.iter().map(|x| x.abs()).max().unwrap(); 238 | (0..max) 239 | .map(|i| { 240 | let out = shift 241 | .iter() 242 | .map(|s| { 243 | // if the value is 0 or below we have 244 | // finshed reducing this axis 245 | if let Less = (s.abs() - i).cmp(&1) { 246 | 0 247 | // if it is 1 or above then we need to 248 | // add a 1 with the same sign as the value 249 | } else { 250 | s.signum() 251 | } 252 | }) 253 | .collect::>(); 254 | (out[0] * 9 + out[1] * 3 + out[2] + 13) as usize 255 | }) 256 | .collect() 257 | }) 258 | .collect() 259 | } 260 | } 261 | 262 | /// Calculates the lll reduction of a lattice. 263 | pub fn lll_lattice(lattice: [[f64; 3]; 3]) -> [[f64; 3]; 3] { 264 | let delta = 0.75; 265 | let mut a = lattice; 266 | let (mut b, mut mu) = gram_schmidt(&a); 267 | let mut i = 1usize; 268 | while i <= 2 { 269 | for j in (0..i).rev() { 270 | match mu[i][j] { 271 | q if q.abs() <= 0.5 => (), 272 | q => { 273 | for k in 0..3 { 274 | a[i][k] -= q.round() * a[j][k]; 275 | } 276 | let (b_temp, mu_temp) = gram_schmidt(&a); 277 | b = b_temp; 278 | mu = mu_temp; 279 | } 280 | } 281 | } 282 | if utils::vdot(b[i], b[i]) 283 | >= (delta - mu[i][i - 1].powi(2)) * utils::vdot(b[i - 1], b[i - 1]) 284 | { 285 | i += 1; 286 | } else { 287 | for j in 0..3 { 288 | b[0][0] = a[i][j]; 289 | a[i][j] = a[i - 1][j]; 290 | a[i - 1][j] = b[0][0]; 291 | } 292 | let (b_temp, mu_temp) = gram_schmidt(&a); 293 | b = b_temp; 294 | mu = mu_temp; 295 | i = 1usize.max(i - 1); 296 | } 297 | } 298 | a 299 | } 300 | 301 | /// Calculates the Gram-Schmidt co-effecients for the lll-reduction. 
302 | fn gram_schmidt(v: &[[f64; 3]; 3]) -> ([[f64; 3]; 3], [[f64; 3]; 3]) { 303 | let mut u = [[0f64; 3]; 3]; 304 | let mut mu = [[0f64; 3]; 3]; 305 | u[0] = [v[0][0], v[0][1], v[0][2]]; 306 | mu[1][0] = utils::vdot(v[1], u[0]) / utils::vdot(u[0], u[0]); 307 | for i in 0..3 { 308 | u[1][i] = v[1][i] - (mu[1][0] * u[0][i]); 309 | } 310 | mu[2][0] = utils::vdot(v[2], u[0]) / utils::vdot(u[0], u[0]); 311 | mu[2][1] = utils::vdot(v[2], u[1]) / utils::vdot(u[1], u[1]); 312 | for i in 0..3 { 313 | u[2][i] = v[2][i] - (mu[2][0] * u[0][i]) - (mu[2][1] * u[1][i]); 314 | } 315 | (u, mu) 316 | } 317 | 318 | #[cfg(test)] 319 | mod tests { 320 | use super::*; 321 | 322 | #[test] 323 | fn atoms_new() { 324 | let positions = vec![[0.; 3]]; 325 | let lattice = Lattice::new([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]); 326 | let text = String::new(); 327 | let atoms = Atoms::new(lattice, positions, text); 328 | let positions = vec![[0.; 3]]; 329 | let lattice = Lattice::new([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]); 330 | let text = String::new(); 331 | assert_eq!(atoms.lattice.to_cartesian, lattice.to_cartesian); 332 | assert_eq!(atoms.positions, positions); 333 | assert_eq!(atoms.text, text); 334 | } 335 | 336 | #[test] 337 | #[should_panic] 338 | fn lattice_new_non_invert() { 339 | let _ = Lattice::new([[1., 0., 0.], [1., 0., 0.], [0., 0., 2.]]); 340 | } 341 | } 342 | -------------------------------------------------------------------------------- /src/bin/bca.rs: -------------------------------------------------------------------------------- 1 | use bader::analysis::{ 2 | bond_pruning, cage_pruning, calculate_bader_density, calculate_bader_error, 3 | calculate_bader_volumes_and_radii, nuclei_ordering, ring_pruning, 4 | }; 5 | use bader::arguments::App; 6 | use bader::errors::ArgumentError; 7 | use bader::io::{self, FileFormat, FileType, WriteType}; 8 | use bader::methods::{maxima_finder, minima_finder, weight}; 9 | use bader::utils::vacuum_index; 10 | use 
bader::voxel_map::{BlockingVoxelMap, VoxelMap}; 11 | 12 | fn main() { 13 | // argument parsing 14 | let app = App::new(); 15 | let env_args = std::env::args().collect::>(); 16 | let args = 17 | match app.parse_args(env_args.iter().map(|s| s.as_str()).collect()) { 18 | Ok(a) => a, 19 | Err(e) => match e { 20 | ArgumentError::ShortHelp(_) 21 | | ArgumentError::LongHelp(_) 22 | | ArgumentError::NoFile(_) => { 23 | println!("{}", e); 24 | return; 25 | } 26 | _ => panic!("{}", e), 27 | }, 28 | }; 29 | // print the splash 30 | if !args.silent { 31 | let version = env!("CARGO_PKG_VERSION"); 32 | let description = env!("CARGO_PKG_DESCRIPTION"); 33 | println!("{}: v{}", description, version); 34 | println!("Running on {} threads.", args.threads); 35 | } 36 | // read the input files into a densities vector and a Grid struct 37 | let file_type: Box = match args.file_type { 38 | FileType::Vasp => Box::new(io::vasp::Vasp {}), 39 | FileType::Cube => Box::new(io::cube::Cube {}), 40 | }; 41 | let (densities, rho, atoms, grid, voxel_origin) = file_type.init(&args); 42 | let reference = if rho.is_empty() { &densities[0] } else { &rho }; 43 | let voxel_map = 44 | BlockingVoxelMap::new(grid, atoms.lattice.to_cartesian, voxel_origin); 45 | // create the index list which will tell us in which order to evaluate the 46 | // voxels 47 | let mut index: Vec = (0..voxel_map.grid.size.total).collect(); 48 | index.sort_unstable_by(|a, b| { 49 | reference[*b].partial_cmp(&reference[*a]).unwrap() 50 | }); 51 | // remove from the indices any voxel that is below the vacuum limit 52 | let vacuum_i = match vacuum_index(reference, &index, args.vacuum_tolerance) 53 | { 54 | Ok(i) => i, 55 | Err(e) => panic!("{}", e), 56 | }; 57 | index.truncate(vacuum_i); 58 | // find the maxima in the system and store them whilst removing them from 59 | // the index list 60 | let nuclei = match maxima_finder( 61 | &index, 62 | reference, 63 | &voxel_map, 64 | &args.maximum_distance, 65 | &atoms, 66 | args.threads, 67 
| !args.silent, 68 | ) { 69 | Ok(v) => v, 70 | Err(e) => panic!( 71 | "\nBader maximum at {:#?}\n is too far away from nearest atom: {} with a distance of {} Ang.", 72 | file_type.coordinate_format(e.maximum), 73 | e.atom + 1, 74 | e.distance, 75 | ), 76 | }; 77 | // input the maxima as atoms into the voxel map 78 | nuclei.iter().for_each(|maximum| { 79 | voxel_map.maxima_store(maximum.position, maximum.atoms[0] as isize); 80 | }); 81 | let n_bader_maxima = nuclei.len(); 82 | let nuclei = 83 | nuclei_ordering(nuclei, reference, atoms.positions.len(), !args.silent); 84 | // calculate the weights leave the critical points for now 85 | let (bonds, rings) = weight( 86 | reference, 87 | &voxel_map, 88 | &index, 89 | args.weight_tolerance, 90 | !args.silent, 91 | args.threads, 92 | ); 93 | // convert into a VoxelMap as the map is filled and no longer needs to block 94 | let voxel_map = VoxelMap::from_blocking_voxel_map(voxel_map); 95 | // Find the minima 96 | let cages = minima_finder( 97 | &index, 98 | reference, 99 | &voxel_map, 100 | args.threads, 101 | !args.silent, 102 | ); 103 | // sum the densities and then write the charge partition files 104 | let (atoms_volume, atoms_radius) = calculate_bader_volumes_and_radii( 105 | &voxel_map, 106 | &atoms, 107 | args.threads, 108 | !args.silent, 109 | ); 110 | let mut atoms_density = 111 | vec![vec![0.0; densities.len()]; atoms_volume.len()]; 112 | densities.iter().enumerate().for_each(|(i, density)| { 113 | atoms_density 114 | .iter_mut() 115 | .zip( 116 | calculate_bader_density( 117 | density, 118 | &voxel_map, 119 | &atoms, 120 | args.threads, 121 | !args.silent, 122 | ) 123 | .iter(), 124 | ) 125 | .for_each(|(ad, bd)| ad[i] += bd); 126 | }); 127 | let atoms_error = calculate_bader_error( 128 | reference, 129 | &voxel_map, 130 | &atoms, 131 | args.threads, 132 | !args.silent, 133 | ); 134 | let bonds = 135 | bond_pruning(&bonds, reference, voxel_map.grid_get(), !args.silent); 136 | let rings = ring_pruning( 137 | 
&rings, 138 | &nuclei, 139 | reference, 140 | &atoms, 141 | voxel_map.grid_get(), 142 | !args.silent, 143 | ); 144 | let cages = cage_pruning( 145 | &cages, 146 | &nuclei, 147 | reference, 148 | &atoms, 149 | voxel_map.grid_get(), 150 | !args.silent, 151 | ); 152 | println!( 153 | "{} {} {} {}", 154 | nuclei.len(), 155 | bonds.len(), 156 | rings.len(), 157 | cages.len() 158 | ); 159 | /* 160 | critical_points.0.iter().for_each(|cp| { 161 | let [x, y, z] = voxel_map.grid.to_3d(cp.position); 162 | let x = x as f64 / voxel_map.grid.size.x as f64; 163 | let y = y as f64 / voxel_map.grid.size.y as f64; 164 | let z = z as f64 / voxel_map.grid.size.z as f64; 165 | let (x, y, z) = file_type.coordinate_format([x, y, z]); 166 | println!("{} {} {} {:?}", x, y, z, cp.atoms); 167 | }); 168 | critical_points.1.iter().for_each(|cp| { 169 | let [x, y, z] = voxel_map.grid.to_3d(cp.position); 170 | let x = x as f64 / voxel_map.grid.size.x as f64; 171 | let y = y as f64 / voxel_map.grid.size.y as f64; 172 | let z = z as f64 / voxel_map.grid.size.z as f64; 173 | let (x, y, z) = file_type.coordinate_format([x, y, z]); 174 | println!("{} {} {} {:?}", x, y, z, cp.atoms); 175 | }); 176 | critical_points.2.iter().for_each(|cp| { 177 | let [x, y, z] = voxel_map.grid.to_3d(cp.position); 178 | let x = x as f64 / voxel_map.grid.size.x as f64; 179 | let y = y as f64 / voxel_map.grid.size.y as f64; 180 | let z = z as f64 / voxel_map.grid.size.z as f64; 181 | let (x, y, z) = file_type.coordinate_format([x, y, z]); 182 | println!("{} {} {} {:?}", x, y, z, cp.atoms); 183 | }); 184 | critical_points.3.iter().for_each(|cp| { 185 | let [x, y, z] = voxel_map.grid.to_3d(cp.position); 186 | let x = x as f64 / voxel_map.grid.size.x as f64; 187 | let y = y as f64 / voxel_map.grid.size.y as f64; 188 | let z = z as f64 / voxel_map.grid.size.z as f64; 189 | let (x, y, z) = file_type.coordinate_format([x, y, z]); 190 | println!("{} {} {} {:?}", x, y, z, cp.atoms); 191 | }); 192 | */ 193 | // prepare the 
positions for writing out 194 | let positions = atoms 195 | .positions 196 | .iter() 197 | .map(|coords| file_type.coordinate_format(*coords)) 198 | .collect(); 199 | // generate the output file 200 | let mut atoms_charge_file = io::output::partitions_file( 201 | positions, 202 | &atoms_density, 203 | &atoms_volume, 204 | &atoms_radius, 205 | &atoms_error, 206 | ); 207 | atoms_charge_file.push_str(&format!( 208 | "\n Bader Maxima: {}\n Boundary Voxels: {}\n Total Voxels: {}", 209 | n_bader_maxima, 210 | voxel_map.weight_len(), 211 | reference.len() 212 | )); 213 | // check that the write was successfull 214 | if io::output::write(atoms_charge_file, String::from("ACF.dat")).is_err() { 215 | panic!("Error in writing ACF.dat") 216 | } 217 | // let bonds_file = io::output::bonds_file(&bonds); 218 | // if io::output::write(bonds_file, String::from("BF.dat")).is_err() { 219 | // panic!("Error in writing BF.dat") 220 | // } 221 | // Prepare to write any densities that have been requested. 222 | let filename = match densities.len().cmp(&2) { 223 | std::cmp::Ordering::Less => vec![String::from("charge")], 224 | std::cmp::Ordering::Equal => { 225 | vec![String::from("charge"), String::from("spin")] 226 | } 227 | std::cmp::Ordering::Greater => vec![ 228 | String::from("charge"), 229 | String::from("spin_x"), 230 | String::from("spin_y"), 231 | String::from("spin_z"), 232 | ], 233 | }; 234 | // create a map that has an optional weight for each voxel and store that 235 | // with an id for each volume that is to be outputted. Save this as a lazy 236 | // iterator as to save memory? 
237 | let write_map: Box>)>> = { 238 | if let WriteType::Atom(a) = args.output { 239 | let atom_iter = if a.is_empty() { 240 | (0..atoms.positions.len() as isize).collect() 241 | } else { 242 | a 243 | }; 244 | Box::new(atom_iter.into_iter().map(|atom_number| { 245 | let map = voxel_map.volume_map(atom_number); 246 | (atom_number, map) 247 | })) 248 | } else { 249 | Box::new(Vec::with_capacity(0).into_iter()) 250 | } 251 | }; 252 | write_map.for_each(|(id, weight_map)| { 253 | densities.iter().zip(&filename).for_each(|(rho, flnm)| { 254 | if file_type 255 | .write( 256 | &atoms, 257 | weight_map 258 | .iter() 259 | .zip(rho) 260 | .map(|(weight, charge)| weight.map(|w| w * charge)) 261 | .collect(), 262 | format!("{}_{}", id + 1, flnm), 263 | !args.silent, 264 | ) 265 | .is_err() 266 | { 267 | panic!("Error in writing {}", flnm) 268 | } 269 | }); 270 | }); 271 | } 272 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | use crate::arguments::App; 2 | use std::fmt::{Debug, Display}; 3 | 4 | /// An error for not being able to assign Bader maxima to atoms 5 | pub struct MaximaError { 6 | /// The position of the maxima 7 | pub maximum: [f64; 3], 8 | /// The index of the atom 9 | pub atom: usize, 10 | /// the distance between maximum and atom 11 | pub distance: f64, 12 | } 13 | 14 | /// Display should never be called explicitly as the Bader maximum needs to be formatted by the 15 | /// file type 16 | impl Display for MaximaError { 17 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 18 | write!(f, "") 19 | } 20 | } 21 | 22 | /// Debug should never be called explicitly as the Bader maximum needs to be formatted by the 23 | /// file type 24 | impl Debug for MaximaError { 25 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 26 | write!(f, "") 27 | } 28 | } 29 | 30 | /// An error for the vacuum tolerance 
being heigher than the highest density value 31 | pub struct VacuumError { 32 | pub vacuum_tolerance: f64, 33 | pub density: f64, 34 | } 35 | 36 | impl Display for VacuumError { 37 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 38 | write!( 39 | f, 40 | "Vacuum tolerance ({}) is higher than maximum value of density ({}).", 41 | self.vacuum_tolerance, self.density 42 | ) 43 | } 44 | } 45 | 46 | impl Debug for VacuumError { 47 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 48 | write!(f, "{}", self) 49 | } 50 | } 51 | 52 | /// Error for reading of Arguments. 53 | pub enum ArgumentError<'a> { 54 | /// Passed an argument that isn't a flag. 55 | NotFlag(String), 56 | /// Passed a value that isn't parsable. 57 | /// Unparsable(flag, value, type) 58 | Unparsable(String, String, String), 59 | /// Didn't pass a value. 60 | /// NoValue(flag) 61 | NoValue(String), 62 | /// Passed too many values. 63 | /// TooManyValues(flag, max, supplied) 64 | TooManyValues(String, usize, usize), 65 | /// Passed an unvalid value for the flag, ie. filetype. 66 | /// NotValidValue(flag, value) 67 | NotValidValue(String, String), 68 | /// Missing a dependant flag. 69 | /// MissingDependant(flag, dependancy) 70 | MissingDependant(String, String), 71 | /// Passed a flag for a different filetype than the one given. 72 | /// WrongFileType(flag, filetype) 73 | WrongFileType(String, String), 74 | /// No file given. 75 | NoFile(&'a App), 76 | /// Asked for help with -h. 77 | ShortHelp(&'a App), 78 | /// Asked for help with --help. 
79 | LongHelp(&'a App), 80 | } 81 | 82 | impl Display for ArgumentError<'_> { 83 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 84 | match self { 85 | Self::NotFlag(flag) => { 86 | write!(f, "The flag: {} does not exist", flag) 87 | } 88 | Self::Unparsable(flag, value, typ) => write!( 89 | f, 90 | "The supplied value \"{}\" for the option \"{}\" is unparsable as a {}.", 91 | value, flag, typ 92 | ), 93 | Self::NoValue(flag) => write!( 94 | f, 95 | "The option \"{}\" requires a value to be supplied.", 96 | flag 97 | ), 98 | Self::TooManyValues(flag, max, supplied) => write!( 99 | f, 100 | "Option \"{}\" was supplied {} times, the maximum allowed is \"{}\".", 101 | flag, supplied, max 102 | ), 103 | Self::NotValidValue(flag, value) => write!( 104 | f, 105 | "The value \"{}\" is not valid input for the option \"{}\".", 106 | value, flag 107 | ), 108 | Self::MissingDependant(flag, dependant) => write!( 109 | f, 110 | "The option \"{}\" requires the option \"{}\" to also be set.", 111 | flag, dependant 112 | ), 113 | Self::WrongFileType(flag, file_type) => write!( 114 | f, 115 | "The option \"{}\" cannot be set for the file type \"{}\".", 116 | flag, file_type 117 | ), 118 | Self::NoFile(app) => write!(f, "No file supplied.\n\n{}", app), 119 | Self::ShortHelp(app) => write!(f, "{}", app), 120 | Self::LongHelp(app) => write!(f, "{:?}", app), 121 | } 122 | } 123 | } 124 | 125 | impl Debug for ArgumentError<'_> { 126 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 127 | write!(f, "{}", self) 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /src/io.rs: -------------------------------------------------------------------------------- 1 | use crate::arguments::{Args, Reference}; 2 | use crate::atoms::Atoms; 3 | 4 | /// File I/O for the gaussian cube format. 5 | pub mod cube; 6 | /// Write analysis files. 7 | pub mod output; 8 | /// Custom BufReader. 
9 | pub mod reader; 10 | /// File I/O for the VASP file format. 11 | pub mod vasp; 12 | 13 | /// Indicates the available file types of the density file. 14 | pub enum FileType { 15 | /// CHGCAR, CHG and PARCHG. 16 | Vasp, 17 | /// Guassian, CP2K etc. 18 | Cube, 19 | } 20 | 21 | /// What type of density to write. 22 | pub enum WriteType { 23 | /// Write a Bader Atom. 24 | Atom(Vec), 25 | /// Don't write anything. 26 | None, 27 | } 28 | 29 | /// Turn a float into fortran "scientific" notation (leading digit is zero). 30 | pub struct FortranFormat { 31 | /// The float to convert to a string. Wrapped in an option as we need to log 32 | /// so `0f64` should be stored as None. 33 | float: Option, 34 | /// A value to multiply the float by before printing, eg. a volume. 35 | mult: f64, 36 | } 37 | 38 | impl std::fmt::Display for FortranFormat { 39 | /// Format the structure into a fortran style exponential. 40 | fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { 41 | let prec = formatter.precision().unwrap_or(6); 42 | match self.float { 43 | None => { 44 | write!(formatter, " 0.{:0 { 47 | let float = f * self.mult; 48 | let exponant = float.log10() as i32 + 1; 49 | let decimals = float.abs() * 10f64.powi(prec as i32 - exponant); 50 | let decimals = decimals.round() as usize; 51 | if float.is_sign_negative() { 52 | write!( 53 | formatter, 54 | "-0.{:0>)>; 76 | /// Return type of the init function in FileFormat. 77 | type InitReturn = (Vec>, Vec, Atoms, [usize; 3], [f64; 3]); 78 | 79 | /// FileFormat trait. Used for handling input from a file. 80 | pub trait FileFormat { 81 | /// Returns the parts required to build [`Grid`](crate::grid::Grid) and [`Atoms`] structures. 82 | /// 83 | /// * `args`: [`Args`] parsed from the command line. 
84 | fn init(&self, args: &Args) -> InitReturn { 85 | let (voxel_origin, grid, atoms, mut densities) = 86 | match self.read(args.file.clone()) { 87 | Ok(x) => x, 88 | Err(e) => panic!("Error: Problem reading file.\n{}", e), 89 | }; 90 | if let Some(x) = args.spin.clone() { 91 | match densities.len() { 92 | 1 => { 93 | let (_, g, _, d) = match self.read(x.clone()) { 94 | Ok(r) => r, 95 | Err(e) => panic!("{}", e), 96 | }; 97 | if 1 != d.len() { 98 | panic!( 99 | "Number of densities in original file is not 1. 100 | Ambiguous how to handle spin density when {} contains {} densities.", 101 | x, 102 | d.len() 103 | ); 104 | } 105 | assert_eq!( 106 | g, grid, 107 | "Error: Spin density has different grid size." 108 | ); 109 | densities.push(d[0].clone()); 110 | } 111 | x => panic!( 112 | "Number of densities in original file is not 1. 113 | Ambiguous how to handle new spin when {} already has {} spin densities.", 114 | args.file, 115 | x - 1 116 | ), 117 | } 118 | } 119 | let rho = match args.reference.clone() { 120 | Reference::None => Vec::with_capacity(0), 121 | Reference::One(f) => { 122 | let (_, g, _, densities) = match self.read(f) { 123 | Ok(r) => r, 124 | Err(e) => panic!("{}", e), 125 | }; 126 | assert_eq!( 127 | g, grid, 128 | "Error: Reference density has different grid size." 129 | ); 130 | densities[0].clone() 131 | } 132 | Reference::Two(f1, f2) => { 133 | let (_, g, _, densities) = match self.read(f1) { 134 | Ok(r) => r, 135 | Err(e) => panic!("{}", e), 136 | }; 137 | assert_eq!( 138 | g, grid, 139 | "Error: Reference density has different grid size." 140 | ); 141 | let (_, g2, _, densities2) = match self.read(f2) { 142 | Ok(r) => r, 143 | Err(e) => panic!("{}", e), 144 | }; 145 | 146 | assert_eq!( 147 | g2, grid, 148 | "Error: Reference density has different grid size." 
149 | ); 150 | densities[0] 151 | .iter() 152 | .zip(&densities2[0]) 153 | .map(|(a, b)| a + b) 154 | .collect::>() 155 | } 156 | }; 157 | (densities, rho, atoms, grid, voxel_origin) 158 | } 159 | 160 | /// Reads the file into a [`ReadFunction`] containing the information 161 | /// needed from the file to build a [`Grid`](crate::grid::Grid). 162 | /// 163 | /// * `filename`: The name of the file to read. 164 | fn read(&self, filename: String) -> ReadFunction; 165 | 166 | /// Reads the non-density section of the file into an [`Atoms`] object. 167 | /// 168 | /// * `atom_text`: The full string of non-density information from the 169 | /// density file. 170 | fn to_atoms(&self, atom_text: String) -> Atoms; 171 | 172 | /// Writes a specific density, data, to tile in the correct format. 173 | /// 174 | /// * `atoms`: The associated &[`Atoms`] object for the density file. 175 | /// * `data`: The density to write to file wrapped in options with None representing 0. 176 | /// * `filename`: Where to save the file, minus any suffix as this should 177 | /// be applied in the function. 178 | /// * `pbar`: A progress bar for monitoring the write. 179 | fn write( 180 | &self, 181 | atoms: &Atoms, 182 | data: Vec>, 183 | filename: String, 184 | visible_pbar: bool, 185 | ) -> std::io::Result<()>; 186 | 187 | /// How the format the positions of maxima and atoms 188 | /// 189 | /// * `coords`: The 3d representation of the position. 
190 | fn coordinate_format(&self, coords: [f64; 3]) -> (String, String, String); 191 | } 192 | -------------------------------------------------------------------------------- /src/io/cube.rs: -------------------------------------------------------------------------------- 1 | use crate::atoms::{Atoms, Lattice}; 2 | use crate::io::reader::BufReader; 3 | use crate::io::{FileFormat, FortranFormat, ReadFunction}; 4 | use crate::progress::{Bar, HiddenBar, ProgressBar}; 5 | use crate::utils; 6 | use std::fs::File; 7 | use std::io::{BufWriter, Read, Write}; 8 | 9 | /// Convert from chemists. 10 | const LENGTH_UNITS: f64 = 0.52917721067; 11 | /// Convert from chemists. 12 | const VOLUME_UNITS: f64 = LENGTH_UNITS * LENGTH_UNITS * LENGTH_UNITS; 13 | 14 | /// Structure for reading/writing a cube file. 15 | pub struct Cube {} 16 | 17 | impl FileFormat for Cube { 18 | /// reads a cube file from filename. 19 | fn read(&self, filename: String) -> ReadFunction { 20 | // the voxel origin in cube files is (0.5, 0.5, 0.5) 21 | let mut voxel_origin = [0.5f64; 3]; 22 | // find the start and end points of the density as well as the total file size 23 | let (start, grid_pts) = { 24 | // open the file in a buffer reader 25 | let mut reader = BufReader::open(filename.clone())?; 26 | 27 | let mut buffer = String::new(); 28 | let mut pos = 0; 29 | // first two lines are comments 30 | for _ in 0..2 { 31 | let size = match reader.read_line(&mut buffer) { 32 | Some(line) => { 33 | let (_, size) = line?; 34 | size 35 | } 36 | None => 0, 37 | }; 38 | pos += size; 39 | } 40 | // lets start trying to match 41 | let natoms = match reader.read_line(&mut buffer) { 42 | Some(line) => { 43 | let (text, size) = line?; 44 | pos += size; 45 | let split = text 46 | .split_whitespace() 47 | .map(|x| x.parse::()) 48 | .collect::>>( 49 | ); 50 | if split.len() == 5 && split[4] != Ok(1.) { 51 | panic!( 52 | "Error(Unsuppoerted): Multiple values per voxel." 
53 | ); 54 | } 55 | let natoms = match split[0] { 56 | Ok(x) => x as isize, 57 | Err(_) => panic!( 58 | "Error: Cannot read {} as cube file.", 59 | filename 60 | ), 61 | }; 62 | for i in 0..3 { 63 | voxel_origin[i] += match split[i + 1] { 64 | Ok(x) => x, 65 | Err(_) => { 66 | panic!( 67 | "Error: Cannot read {} as cube file.", 68 | filename 69 | ) 70 | } 71 | }; 72 | } 73 | natoms 74 | } 75 | None => panic!("Error: Cannot read {} as cube file.", filename), 76 | }; 77 | if natoms < 0 { 78 | panic!("Error(Unsuppoerted): Multiple values per voxel."); 79 | } 80 | let mut grid_pts = [0usize; 3]; 81 | for gp in &mut grid_pts { 82 | *gp = match reader.read_line(&mut buffer) { 83 | Some(line) => { 84 | let (text, size) = line?; 85 | pos += size; 86 | match text.split_whitespace().next() { 87 | Some(x) => match x.parse::() { 88 | Ok(x) => x, 89 | Err(_) => panic!( 90 | "Error: Cannot read {} as cube file.", 91 | filename 92 | ), 93 | }, 94 | None => { 95 | panic!( 96 | "Error: Cannot read {} as cube file.", 97 | filename 98 | ) 99 | } 100 | } 101 | } 102 | None => { 103 | panic!("Error: Cannot read {} as cube file.", filename) 104 | } 105 | } 106 | } 107 | for _ in 0..natoms.abs() { 108 | match reader.read_line(&mut buffer) { 109 | Some(line) => { 110 | let (_, size) = line?; 111 | pos += size; 112 | } 113 | None => { 114 | panic!("Error: Cannot read {} as cube file.", filename) 115 | } 116 | } 117 | } 118 | (pos, grid_pts) 119 | }; 120 | // Now we know where everything is so let's work out what to do 121 | // Start by making vector of start and end points of the densities 122 | let mut file = File::open(filename)?; 123 | let total = file.metadata()?.len(); 124 | // assign Vectos with the capacity of what it is to hold 125 | let mut xyz_b = Vec::with_capacity(start); 126 | let mut density_b = Vec::with_capacity(total as usize - start); 127 | // read the xyz information into xyz_b 128 | let _ = ::by_ref(&mut file) 129 | .take(start as u64) 130 | .read_to_end(&mut xyz_b)?; 
131 | // read the total charge density into density_b 132 | let _ = ::by_ref(&mut file) 133 | .take(total - start as u64) 134 | .read_to_end(&mut density_b)?; 135 | // convert the bytes we have read into a String and an Atoms struct 136 | let xyz = String::from_utf8(xyz_b).unwrap(); 137 | let atoms = self.to_atoms(xyz); 138 | // convert out of Bohr 139 | let density = String::from_utf8(density_b) 140 | .unwrap() 141 | .split_whitespace() 142 | .map(|x| x.parse::().unwrap() / VOLUME_UNITS) 143 | .collect::>(); 144 | Ok((voxel_origin, grid_pts, atoms, vec![density])) 145 | } 146 | 147 | /// Read atoms information from file header. 148 | fn to_atoms(&self, atoms_text: String) -> Atoms { 149 | let mut lines = atoms_text.lines(); 150 | // skip the 2 comment lines + voxel info and then read the lattice information 151 | let _ = lines.next(); 152 | let _ = lines.next(); 153 | let _ = lines.next(); 154 | let mut a = { 155 | lines 156 | .next() 157 | .unwrap() 158 | .to_string() 159 | .split_whitespace() 160 | .map(|x| x.parse::().unwrap()) 161 | .collect::>() 162 | }; 163 | // density[z, y, x] so lets swap the c and a 164 | let mut b = { 165 | lines 166 | .next() 167 | .unwrap() 168 | .to_string() 169 | .split_whitespace() 170 | .map(|x| x.parse::().unwrap()) 171 | .collect::>() 172 | }; 173 | let mut c = { 174 | lines 175 | .next() 176 | .unwrap() 177 | .to_string() 178 | .split_whitespace() 179 | .map(|x| x.parse::().unwrap()) 180 | .collect::>() 181 | }; 182 | for i in 1..4 { 183 | c[i] *= c[0] * LENGTH_UNITS; 184 | b[i] *= b[0] * LENGTH_UNITS; 185 | a[i] *= a[0] * LENGTH_UNITS; 186 | } 187 | let lattice = Lattice::new([ 188 | [a[1], a[2], a[3]], 189 | [b[1], b[2], b[3]], 190 | [c[1], c[2], c[3]], 191 | ]); 192 | let mut positions: Vec<[f64; 3]> = vec![]; 193 | // make the positions fractional and swap c and a 194 | for line in lines { 195 | let pos = line 196 | .split_whitespace() 197 | .map(|x| x.parse::().unwrap() * LENGTH_UNITS) 198 | .collect::>(); 199 | let 
pos_frac = 200 | utils::dot([pos[2], pos[3], pos[4]], lattice.to_fractional) 201 | .iter() 202 | .map(|x| x - x.floor()) 203 | .collect::>(); 204 | let pos_cart = { 205 | utils::dot( 206 | [pos_frac[0], pos_frac[1], pos_frac[2]], 207 | lattice.to_cartesian, 208 | ) 209 | }; 210 | positions.push(pos_cart); 211 | } 212 | Atoms::new(lattice, positions, atoms_text) 213 | } 214 | 215 | /// Write a cube file from a vector of options where None will be written as 216 | /// zero. 217 | fn write( 218 | &self, 219 | atoms: &Atoms, 220 | data: Vec>, 221 | filename: String, 222 | visible_pbar: bool, 223 | ) -> std::io::Result<()> { 224 | let filename = format!("{}.cube", filename); 225 | let mut buffer = BufWriter::new(File::create(filename.clone())?); 226 | let length = data.len() / 6 + (data.len() % 6 != 0) as usize; 227 | let pbar: Box = match visible_pbar { 228 | true => Box::new(Bar::new( 229 | length, 230 | format!("Writing file {}:", filename), 231 | )), 232 | false => Box::new(HiddenBar {}), 233 | }; 234 | buffer.write_all(atoms.text.as_bytes())?; 235 | data.chunks(6).for_each(|line| { 236 | if let Err(e) = line.iter().try_for_each(|f| { 237 | write!( 238 | buffer, 239 | " {:.5}", 240 | FortranFormat { 241 | float: *f, 242 | mult: VOLUME_UNITS 243 | } 244 | ) 245 | }) { 246 | panic!("Error occured during write: {}", e) 247 | }; 248 | if let Err(e) = writeln!(buffer) { 249 | panic!("Error occured during write: {}", e) 250 | }; 251 | pbar.tick(); 252 | }); 253 | Ok(()) 254 | } 255 | 256 | /// Coordinate format for dealing with fortran indexing (doesn't affect cube). 
257 | fn coordinate_format(&self, coords: [f64; 3]) -> (String, String, String) { 258 | let x = format!("{:.6}", coords[0]); 259 | let y = format!("{:.6}", coords[1]); 260 | let z = format!("{:.6}", coords[2]); 261 | (x, y, z) 262 | } 263 | } 264 | -------------------------------------------------------------------------------- /src/io/output.rs: -------------------------------------------------------------------------------- 1 | use rustc_hash::FxHashMap; 2 | use std::fs::File; 3 | use std::io::Write; 4 | 5 | /// Create the partitioned charge files using an optional atom map to decide the format 6 | pub fn partitions_file( 7 | positions: Vec<(String, String, String)>, 8 | partitioned_density: &[Vec], 9 | partitioned_volume: &[f64], 10 | radius: &[f64], 11 | errors: &[f64], 12 | ) -> String { 13 | // calculate the total density for each density supplied 14 | let total_density: Vec = partitioned_density.iter().fold( 15 | vec![0.0; partitioned_density[0].len()], 16 | |mut sum, d| { 17 | sum.iter_mut().zip(d).for_each(|(tpd, pd)| *tpd += pd); 18 | sum 19 | }, 20 | ); 21 | // the last value is is the vacuum and it has definitely been added 22 | let vacuum_density = partitioned_density.last().unwrap(); 23 | let total_partitioned_density = total_density 24 | .iter() 25 | .zip(vacuum_density) 26 | .map(|(td, vd)| td - vd) 27 | .collect::>(); 28 | // the volume is the same for all densities 29 | let total_volume: f64 = partitioned_volume.iter().sum(); 30 | // the last value is is the vacuum and it has definitely been added 31 | let vacuum_volume = *partitioned_volume.last().unwrap(); 32 | let total_partitioned_volume = total_volume - vacuum_volume; 33 | let mut table = Table::new(partitioned_density[0].len()); 34 | let mut index = 1; 35 | positions 36 | .into_iter() 37 | .zip(partitioned_density) 38 | .zip(partitioned_volume) 39 | .zip(radius) 40 | .zip(errors) 41 | .for_each(|((((coord, density), volume), radius), error)| { 42 | table.add_row(index, coord, density, 
*volume, *radius, *error); 43 | index += 1; 44 | }); 45 | table.get_string( 46 | vacuum_density, 47 | vacuum_volume, 48 | &total_partitioned_density, 49 | total_partitioned_volume, 50 | ) 51 | } 52 | 53 | pub fn bonds_file(bonds: &[FxHashMap<(usize, usize), f64>]) -> String { 54 | let mut text = String::with_capacity( 55 | 58 * (bonds.iter().map(|m| m.len()).sum::() + bonds.len() + 1) 56 | + bonds.len() * 2 57 | - 1, 58 | ); 59 | text.push_str(" | Atom Number | Image | Strength"); 60 | let len = text.len() + 2; 61 | for (atom_num, bond) in bonds.iter().enumerate() { 62 | text.push_str( 63 | format!("\n-Atom: {:-2} {:>2} {:>2}", x, y, z); 79 | text.push_str( 80 | format!("|{:^width$}", position, width = 10).as_str(), 81 | ); 82 | text.push_str( 83 | format!("|{:>width$.6}", strength, width = 11).as_str(), 84 | ); 85 | } 86 | } 87 | text.shrink_to_fit(); 88 | text 89 | } 90 | 91 | /// Enum of available tables. 92 | pub enum TableType { 93 | /// Table for the ACF file. 94 | AtomsCharge, 95 | /// Table for the BCF file. 96 | BaderCharge, 97 | } 98 | 99 | /// Structure that contains and builds the table. 100 | struct Table { 101 | /// How wide each column is. 102 | column_width: Vec, 103 | /// The number of charge and spin densities. 104 | density_num: usize, 105 | /// The rows of the table as a vector of strings. 106 | rows: Vec>, 107 | } 108 | 109 | impl Table { 110 | /// Creates a new structure and sets the minimum widths of each. 
111 | fn new(density_num: usize) -> Self { 112 | let rows = vec![Vec::with_capacity(0)]; 113 | let mut column_width = Vec::with_capacity(7 + density_num); 114 | column_width.push(1); 115 | column_width.push(1); 116 | column_width.push(1); 117 | column_width.push(1); 118 | column_width.push(6); 119 | match density_num.cmp(&2) { 120 | std::cmp::Ordering::Greater => { 121 | column_width.push(6); 122 | column_width.push(6); 123 | column_width.push(6); 124 | } 125 | std::cmp::Ordering::Equal => column_width.push(6), 126 | std::cmp::Ordering::Less => (), 127 | }; 128 | column_width.push(6); 129 | column_width.push(8); 130 | column_width.push(6); 131 | Self { 132 | column_width, 133 | density_num, 134 | rows, 135 | } 136 | } 137 | 138 | /// Adds a row the table. 139 | fn add_row( 140 | &mut self, 141 | index: usize, 142 | p: (String, String, String), 143 | density: &[f64], 144 | volume: f64, 145 | distance: f64, 146 | error: f64, 147 | ) { 148 | let mut row: Vec = Vec::with_capacity(6 + self.density_num); 149 | row.push(format!("{}", index)); 150 | row.push(p.0); 151 | row.push(p.1); 152 | row.push(p.2); 153 | density.iter().for_each(|d| row.push(format!("{:.6}", d))); 154 | row.push(format!("{:.6}", volume)); 155 | row.push(format!("{:.6}", distance)); 156 | row.push(format!("{:.6}", error)); 157 | for (i, col) in row.iter().enumerate() { 158 | self.column_width[i] = self.column_width[i].max(col.len()); 159 | } 160 | self.rows.push(row); 161 | } 162 | 163 | /// Creates and formats the footer. 
164 | fn format_footer( 165 | &self, 166 | vacuum_density: &[f64], 167 | vacuum_volume: f64, 168 | partitioned_density: &[f64], 169 | partitioned_volume: f64, 170 | ) -> String { 171 | let mut separator = self.format_separator(); 172 | let footer = match self.density_num.cmp(&2) { 173 | std::cmp::Ordering::Less => format!( 174 | "\n Vacuum Charge: {:>18.4}\n Vacuum Volume: {:>18.4}\n Partitioned Charge: {:>13.4}\n Partitioned Volume: {:>13.4}", 175 | vacuum_density[0], 176 | vacuum_volume, 177 | partitioned_density[0], 178 | partitioned_volume, 179 | ), 180 | std::cmp::Ordering::Equal => format!( 181 | "\n Vacuum Charge: {:>18.4}\n Vacuum Spin: {:>20.4}\n Vacuum Volume: {:>18.4}\n Partitioned Charge: {:>13.4}\n Partitioned Spin: {:>15.4}\n Partitioned Volume: {:>13.4}", 182 | vacuum_density[0], 183 | vacuum_density[1], 184 | vacuum_volume, 185 | partitioned_density[0], 186 | partitioned_density[1], 187 | partitioned_volume, 188 | ), 189 | std::cmp::Ordering::Greater => format!( 190 | "\n Vacuum Charge: {:>18.4}\n Vacuum Spin X: {:>18.4}\n Vacuum Spin Y: {:>18.4}\n Vacuum Spin Z: {:>18.4}\n Vacuum Volume: {:>18.4}\n Partitioned Charge: {:>13.4}\n Partitioned Spin X: {:>13.4}\n Partitioned Spin Y: {:>13.4}\n Partitioned Spin Z: {:>13.4}\n Partitioned Volume: {:>13.4}", 191 | vacuum_density[0], 192 | vacuum_density[1], 193 | vacuum_density[2], 194 | vacuum_density[3], 195 | vacuum_volume, 196 | partitioned_density[0], 197 | partitioned_density[1], 198 | partitioned_density[2], 199 | partitioned_density[3], 200 | partitioned_volume, 201 | ), 202 | }; 203 | separator.push_str(&footer); 204 | separator 205 | } 206 | 207 | /// Creates and formats the header. 
208 | fn format_header(&self) -> String { 209 | let mut header = String::new(); 210 | let mut iter = self.column_width.iter(); 211 | header.push_str(&format!( 212 | " {:^width$} |", 213 | "#", 214 | width = iter.next().unwrap() 215 | )); 216 | header.push_str(&format!( 217 | " {:^width$} |", 218 | "X", 219 | width = iter.next().unwrap() 220 | )); 221 | header.push_str(&format!( 222 | " {:^width$} |", 223 | "Y", 224 | width = iter.next().unwrap() 225 | )); 226 | header.push_str(&format!( 227 | " {:^width$} |", 228 | "Z", 229 | width = iter.next().unwrap() 230 | )); 231 | header.push_str(&format!( 232 | " {:^width$} |", 233 | "Charge", 234 | width = iter.next().unwrap() 235 | )); 236 | match self.density_num.cmp(&2) { 237 | std::cmp::Ordering::Equal => { 238 | header.push_str(&format!( 239 | " {:^width$} |", 240 | "Spin", 241 | width = iter.next().unwrap() 242 | )); 243 | } 244 | std::cmp::Ordering::Greater => { 245 | header.push_str(&format!( 246 | " {:^width$} |", 247 | "Spin X", 248 | width = iter.next().unwrap() 249 | )); 250 | header.push_str(&format!( 251 | " {:^width$} |", 252 | "Spin Y", 253 | width = iter.next().unwrap() 254 | )); 255 | header.push_str(&format!( 256 | " {:^width$} |", 257 | "Spin Z", 258 | width = iter.next().unwrap() 259 | )); 260 | } 261 | std::cmp::Ordering::Less => (), 262 | } 263 | header.push_str(&format!( 264 | " {:^width$} |", 265 | "Volume", 266 | width = iter.next().unwrap() 267 | )); 268 | header.push_str(&format!( 269 | " {:^width$} |", 270 | "Distance", 271 | width = iter.next().unwrap() 272 | )); 273 | header.push_str(&format!( 274 | " {:^width$}\n", 275 | "Error", 276 | width = iter.next().unwrap() 277 | )); 278 | header 279 | } 280 | 281 | /// Creates and formats a separator. 
282 | fn format_separator(&self) -> String { 283 | let mut separator = String::new(); 284 | self.column_width.iter().for_each(|w| { 285 | separator.push_str(&format!("-{:-^width$}-+", "-", width = w)); 286 | }); 287 | separator.pop(); 288 | separator.pop(); 289 | separator 290 | } 291 | 292 | /// Creates a String representation of the Table. 293 | fn get_string( 294 | self, 295 | vacuum_density: &[f64], 296 | vacuum_volume: f64, 297 | partitioned_density: &[f64], 298 | partitioned_volume: f64, 299 | ) -> String { 300 | let mut table = String::new(); 301 | table.push_str(&self.format_header()); 302 | self.rows.iter().for_each(|r| { 303 | if r.is_empty() { 304 | table.push_str(&self.format_separator()); 305 | } else { 306 | let mut row = String::new(); 307 | r.iter().zip(&self.column_width).for_each(|(s, w)| { 308 | row.push_str(&format!(" {:>width$} |", s, width = w)) 309 | }); 310 | row.pop(); 311 | table.push_str(&row); 312 | } 313 | table.push('\n'); 314 | }); 315 | table.push_str(&self.format_footer( 316 | vacuum_density, 317 | vacuum_volume, 318 | partitioned_density, 319 | partitioned_volume, 320 | )); 321 | table 322 | } 323 | } 324 | 325 | /// Write a string to filename. Creates a new file regardless of what exists. 
326 | pub fn write(string: String, filename: String) -> std::io::Result<()> { 327 | let mut bader_file = File::create(filename)?; 328 | bader_file.write_all(string.as_bytes())?; 329 | Ok(()) 330 | } 331 | -------------------------------------------------------------------------------- /src/io/reader.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::io::{BufRead, BufReader as Reader, Result}; 3 | 4 | /// Read a file into a mutable buffer 5 | pub struct BufReader { 6 | reader: Reader, 7 | } 8 | 9 | impl BufReader { 10 | /// Opens the file from the path into a reader 11 | pub fn open(path: impl AsRef) -> Result { 12 | let file = File::open(path)?; 13 | let reader = Reader::new(file); 14 | 15 | Ok(Self { reader }) 16 | } 17 | 18 | /// Reads a line from the buffer reader to mutable string 19 | pub fn read_line<'buf>( 20 | &mut self, 21 | buffer: &'buf mut String, 22 | ) -> Option> { 23 | buffer.clear(); 24 | 25 | self.reader 26 | .read_line(buffer) 27 | .map(|u| if u == 0 { None } else { Some((buffer, u)) }) 28 | .transpose() 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/io/vasp.rs: -------------------------------------------------------------------------------- 1 | use crate::atoms::{Atoms, Lattice}; 2 | use crate::io::reader::BufReader; 3 | use crate::io::{FileFormat, FortranFormat, ReadFunction}; 4 | use crate::progress::{Bar, HiddenBar, ProgressBar}; 5 | use crate::utils; 6 | use std::fs::File; 7 | use std::io::{BufWriter, Read, Seek, SeekFrom, Write}; 8 | 9 | /// The coordinate system. 10 | enum Coord { 11 | /// Fractional coordinates. 12 | Fractional, 13 | /// Cartesian coordinates. 14 | Cartesian, 15 | } 16 | 17 | /// The VASP file format for reading/writing CHG, PARCHG and CHGCARs. 18 | pub struct Vasp {} 19 | 20 | impl FileFormat for Vasp { 21 | /// Read a VASP density. 
22 | fn read(&self, filename: String) -> ReadFunction { 23 | // the voxel origin in VASP is (0, 0, 0) 24 | let voxel_origin = [0f64; 3]; 25 | // find the start and end points of the density as well as the total file size 26 | let (grid, aug, total) = { 27 | // open the file in a buffer reader 28 | let mut reader = BufReader::open(filename.clone())?; 29 | 30 | let mut buffer = String::new(); 31 | let mut grid: Vec<[usize; 2]> = vec![]; 32 | let mut aug: Vec = vec![]; 33 | let mut pos = 0; 34 | // the first 7 lines are useless to us 35 | for _ in 0..8 { 36 | let size = match reader.read_line(&mut buffer) { 37 | Some(line) => { 38 | let (_, size) = line?; 39 | size 40 | } 41 | None => 0, 42 | }; 43 | pos += size; 44 | } 45 | // lets find the start 46 | while let Some(line) = reader.read_line(&mut buffer) { 47 | let (text, size) = line?; 48 | pos += size; 49 | // empty line before the grid spacing 50 | if text.trim().is_empty() { 51 | break; 52 | } 53 | } 54 | // this line is the grid spacing 55 | let mut buffer = String::new(); 56 | let grid_spacing = reader 57 | .read_line(&mut buffer) 58 | .map(|line| { 59 | let (text, size) = line.unwrap(); 60 | let start = pos; 61 | pos += size; 62 | let end = pos; 63 | grid.push([start, end]); 64 | text 65 | }) 66 | .unwrap(); 67 | // lets fast forward a bit 68 | let grid_length = grid_spacing 69 | .split_whitespace() 70 | .fold(1, |acc, val| val.parse::().unwrap() * acc); 71 | let mut buffer = String::new(); 72 | let per_row = reader 73 | .read_line(&mut buffer) 74 | .map(|line| { 75 | let (text, size) = line.unwrap(); 76 | pos += size; 77 | text 78 | }) 79 | .unwrap() 80 | .split_whitespace() 81 | .count(); 82 | let grid_length = if grid_length.rem_euclid(per_row) == 0 { 83 | grid_length / per_row 84 | } else { 85 | grid_length / per_row + 1 86 | }; 87 | let mut buffer = String::new(); 88 | for _ in 1..grid_length { 89 | pos += reader.read_line(&mut buffer).unwrap().unwrap().1; 90 | } 91 | // lets start trying to match 92 | let 
mut buffer = String::new(); 93 | while let Some(line) = reader.read_line(&mut buffer) { 94 | let (text, size) = line?; 95 | if text == grid_spacing { 96 | let start = pos; 97 | pos += size; 98 | let end = pos; 99 | grid.push([start, end]); 100 | } else if text.starts_with("aug") && aug.len() < grid.len() { 101 | aug.push(pos); 102 | pos += size; 103 | } else { 104 | pos += size; 105 | } 106 | } 107 | (grid, aug, pos) 108 | }; 109 | // Now we know where everything is so let's work out what to do 110 | // Start by making vector of start and end points of the densities 111 | let mut start = Vec::with_capacity(4); 112 | let mut stop = Vec::with_capacity(4); 113 | for (i, start_stop) in grid.iter().enumerate() { 114 | start.push(start_stop[1]); 115 | let s = if !aug.is_empty() { 116 | aug[i * aug.len() / grid.len()] 117 | } else if grid.len() > (i + 1) { 118 | grid[i + 1][0] 119 | } else { 120 | total 121 | }; 122 | stop.push(s); 123 | } 124 | let mut file = File::open(filename)?; 125 | // assign Vectos with the capacity of what it is to hold 126 | let mut poscar_b = Vec::with_capacity(grid[0][0]); 127 | let mut grid_pts_b = Vec::with_capacity(grid[0][1] - grid[0][0]); 128 | let mut density_b = Vec::with_capacity(stop[0] - start[0]); 129 | // there could be a maximum of 4 densities 1 total and then 1 or 3 spin 130 | let mut density: Vec> = Vec::with_capacity(4); 131 | // read the poscar information poscar_b 132 | let _ = ::by_ref(&mut file) 133 | .take(grid[0][1] as u64) 134 | .read_to_end(&mut poscar_b)?; 135 | file.seek(SeekFrom::Current(grid[0][0] as i64 - grid[0][1] as i64))?; 136 | // read the grid line into grid_pts_b 137 | let _ = ::by_ref(&mut file) 138 | .take((grid[0][1] - grid[0][0]) as u64) 139 | .read_to_end(&mut grid_pts_b)?; 140 | // read the total charge density into density_b 141 | let _ = ::by_ref(&mut file) 142 | .take((stop[0] - start[0]) as u64) 143 | .read_to_end(&mut density_b)?; 144 | // convert the bytes we have read into a String and an Atoms 
struct 145 | let poscar = String::from_utf8(poscar_b).unwrap(); 146 | let grid_vec: Vec = { 147 | String::from_utf8(grid_pts_b) 148 | .unwrap() 149 | .split_whitespace() 150 | .map(|x| x.parse::().unwrap()) 151 | .collect() 152 | }; 153 | let atoms = self.to_atoms(poscar); 154 | // convert out of VASP's strange units 155 | density.push( 156 | String::from_utf8(density_b) 157 | .unwrap() 158 | .split_whitespace() 159 | .map(|x| x.parse::().unwrap() / atoms.lattice.volume) 160 | .collect(), 161 | ); 162 | for i in 1..start.len() { 163 | let mut spin_b = Vec::with_capacity(stop[i] - start[i]); 164 | let _ = file.seek(SeekFrom::Start(start[i] as u64)); 165 | let _ = ::by_ref(&mut file) 166 | .take((stop[i] - start[i]) as u64) 167 | .read_to_end(&mut spin_b)?; 168 | density.push( 169 | String::from_utf8(spin_b) 170 | .unwrap() 171 | .split_whitespace() 172 | .map(|x| x.parse::().unwrap() / atoms.lattice.volume) 173 | .collect(), 174 | ); 175 | } 176 | // flip the grid points as VASP outputs density[z, y, x] 177 | let grid_pts: [usize; 3] = [grid_vec[2], grid_vec[1], grid_vec[0]]; 178 | Ok((voxel_origin, grid_pts, atoms, density)) 179 | } 180 | 181 | /// Read atom information. 
182 | fn to_atoms(&self, atoms_text: String) -> Atoms { 183 | // create regex for matching the (C|K)artesian | Direct line 184 | // the last match is the one we want so we don't match carbon or the comment line 185 | let mut lines = atoms_text.lines(); 186 | // skip the comment line and then read the lattice information 187 | let _ = lines.next(); 188 | let mut scale = { 189 | lines 190 | .next() 191 | .unwrap() 192 | .to_string() 193 | .split_whitespace() 194 | .map(|x| x.parse::().unwrap()) 195 | .collect::>() 196 | }; 197 | // density[z, y, x] so lets swap the c and a 198 | let mut c = { 199 | lines 200 | .next() 201 | .unwrap() 202 | .to_string() 203 | .split_whitespace() 204 | .map(|x| x.parse::().unwrap()) 205 | .collect::>() 206 | }; 207 | let mut b = { 208 | lines 209 | .next() 210 | .unwrap() 211 | .to_string() 212 | .split_whitespace() 213 | .map(|x| x.parse::().unwrap()) 214 | .collect::>() 215 | }; 216 | let mut a = { 217 | lines 218 | .next() 219 | .unwrap() 220 | .to_string() 221 | .split_whitespace() 222 | .map(|x| x.parse::().unwrap()) 223 | .collect::>() 224 | }; 225 | let volume = { 226 | (c[0] * (a[1] * b[2] - a[2] * b[1]) 227 | + c[1] * (a[2] * b[0] - a[0] * b[2]) 228 | + c[2] * (a[0] * b[1] - a[1] * b[0])) 229 | .abs() 230 | }; 231 | // the scale can be negative and this means that it is the volume of the cell 232 | // it can also be 3 values which is a multiplier for each lattice 233 | if scale.len() == 1 { 234 | if scale[0] < 0f64 { 235 | scale[0] /= -volume; 236 | } 237 | scale.push(scale[0]); 238 | scale.push(scale[0]); 239 | } 240 | for i in 0..3 { 241 | c[i] *= scale[2 - i]; 242 | b[i] *= scale[2 - i]; 243 | a[i] *= scale[2 - i]; 244 | } 245 | let lattice = Lattice::new([ 246 | [a[2], a[1], a[0]], 247 | [b[2], b[1], b[0]], 248 | [c[2], c[1], c[0]], 249 | ]); 250 | // now lets find out what type of file we are dealing with 251 | let dubious = lines.next().unwrap().split_whitespace(); 252 | let total_atoms = match dubious 253 | .clone() 254 
| .fold(String::new(), |acc, val| format!("{}{}", acc, val)) 255 | .parse::() 256 | .is_ok() 257 | { 258 | true => dubious, 259 | false => lines.next().unwrap().split_whitespace(), 260 | } 261 | .fold(0, |acc, val| acc + val.parse::().unwrap()); 262 | let mut dubious = lines.next().unwrap().trim_start().to_lowercase(); 263 | if dubious.starts_with('s') { 264 | dubious = lines.next().unwrap().trim_start().to_lowercase(); 265 | } 266 | let coord = if dubious.starts_with('d') { 267 | Coord::Fractional 268 | } else { 269 | Coord::Cartesian 270 | }; 271 | let pos: Vec<[f64; 3]> = (0..total_atoms) 272 | .map(|_| { 273 | lines 274 | .next() 275 | .unwrap() 276 | .split_whitespace() 277 | .take(3) 278 | .map(|f| f.parse::().unwrap()) 279 | .collect::>() 280 | .try_into() // we only take 3 so safe 281 | .unwrap() 282 | }) 283 | .collect(); 284 | // make the positions fractional and swap c and a 285 | let positions = match coord { 286 | Coord::Fractional => pos 287 | .into_iter() 288 | .map(|p| { 289 | utils::dot( 290 | [ 291 | p[2] - p[2].floor(), 292 | p[1] - p[1].floor(), 293 | p[0] - p[0].floor(), 294 | ], 295 | lattice.to_cartesian, 296 | ) 297 | }) 298 | .collect(), 299 | Coord::Cartesian => pos 300 | .into_iter() 301 | .map(|p| { 302 | let p = 303 | utils::dot([p[2], p[1], p[0]], lattice.to_fractional); 304 | utils::dot( 305 | [ 306 | p[0] - p[0].floor(), 307 | p[1] - p[1].floor(), 308 | p[2] - p[2].floor(), 309 | ], 310 | lattice.to_cartesian, 311 | ) 312 | }) 313 | .collect(), 314 | }; 315 | Atoms::new(lattice, positions, atoms_text) 316 | } 317 | 318 | /// Write a CHGCAR from a vector of options where None will be written as zero. 
319 | fn write( 320 | &self, 321 | atoms: &Atoms, 322 | data: Vec>, 323 | filename: String, 324 | visible_pbar: bool, 325 | ) -> std::io::Result<()> { 326 | let filename = format!("{}_CHGCAR", filename); 327 | let mut buffer = BufWriter::new(File::create(filename.clone())?); 328 | let length = data.len() / 5 + (data.len() % 5 != 0) as usize; 329 | let pbar: Box = match visible_pbar { 330 | true => Box::new(Bar::new( 331 | length, 332 | format!("Writing file {}:", filename), 333 | )), 334 | false => Box::new(HiddenBar {}), 335 | }; 336 | buffer.write_all(atoms.text.as_bytes())?; 337 | data.chunks(5).for_each(|line| { 338 | if let Err(e) = line.iter().try_for_each(|f| { 339 | write!( 340 | buffer, 341 | " {:.11}", 342 | FortranFormat { 343 | float: *f, 344 | mult: atoms.lattice.volume 345 | } 346 | ) 347 | }) { 348 | panic!("Error occured during write: {}", e) 349 | }; 350 | if let Err(e) = writeln!(buffer) { 351 | panic!("Error occured during write: {}", e) 352 | }; 353 | pbar.tick(); 354 | }); 355 | Ok(()) 356 | } 357 | 358 | /// Deals with fortran indexing. 359 | fn coordinate_format(&self, coords: [f64; 3]) -> (String, String, String) { 360 | let z = format!("{:.6}", coords[0]); 361 | let y = format!("{:.6}", coords[1]); 362 | let x = format!("{:.6}", coords[2]); 363 | (x, y, z) 364 | } 365 | } 366 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! An incredibly fast, multi-threaded, Bader charge partitioning binary and 2 | //! library. Based on methods presented in 3 | //! [Yu Min and Trinkle Dallas R. 2011 J. Che.m Phys. 134 064111] and with 4 | //! adaptions for multi-threading. 5 | //! 6 | //! ### Supported Platforms 7 | //! - Linux 8 | //! - Os X 9 | //! - Windows 10 | //! 11 | //! ## Installing the binary 12 | //! ### Cargo 13 | //! ```sh 14 | //! $ cargo install bader 15 | //! ``` 16 | //! ### From Source 17 | //! 
To check out the lastest features not in the binaries yet you can compile 18 | //! from source. To do this run the following, which will create the 19 | //! ./target/release/bca executable. 20 | //! ```sh 21 | //! $ git clone https://github.com/adam-kerrigan/bader-rs 22 | //! $ cd bader-rs 23 | //! $ cargo build --verbose --release 24 | //! ``` 25 | //! From here you can either move or link the binary to folder in your path. 26 | //! ```sh 27 | //! $ mv ./target/release/bca ~/bin 28 | //! ``` 29 | //! 30 | //! ## Using the library 31 | //! Add the following to your Cargo.toml: 32 | //! `bader = "0.4.8"` 33 | //! 34 | //! ### Minimum Supported Rust Version (MSRV) 35 | //! This crate is guaranteed to compile on stable Rust 1.85.0 and up. 36 | //! ## Usage 37 | //! The program takes a charge density file as input and performs Bader analysis 38 | //! of the data. Currently it supports density in [VASP] or [cube] formats. It 39 | //! is recommended to run VASP calculations with [LAECHG] = .TRUE. to print the 40 | //! core density and self-consistent valence density. These can then be passed 41 | //! as reference files to the program using the -r, --reference flag where they 42 | //! will be summed. 43 | //! ```sh 44 | //! $ bca CHGCAR -r AECCAR0 -r AECCAR2 45 | //! ``` 46 | //! VASP charge density files containing spin densities will output the the 47 | //! partitioned spin also. To achieve this for cube files requires using the 48 | //! --spin flag to pass a second file to treat as the spin density. 49 | //! ```sh 50 | //! $ bca charge-density.cube -s spin-density.cube 51 | //! ``` 52 | //! For a detailed list of usage options run 53 | //! ```sh 54 | //! $ bca --help 55 | //! ``` 56 | //! ## Output 57 | //! The program outputs two files, ACF.dat & BCF.dat. The Atomic Charge File 58 | //! (ACF.dat) contians the charge (and spin) information for each atom and the 59 | //! Bader Charge File (BCF.dat) contains the information about each Bader volume. 60 | //! 
The BCF file also includes the atom number in the number column formatted as 61 | //! 'atom number: bader volume'. 62 | //! ## License 63 | //! MIT 64 | //! 65 | //! [//]: # (These are reference links used in the body of this note and get stripped out when the markdown processor does its job. There is no need to format nicely because it shouldn't be seen. Thanks SO - http://stackoverflow.com/questions/4823468/store-comments-in-markdown-syntax) 66 | //! 67 | //! [VASP]: 68 | //! [cube]: 69 | //! [LAECHG]: 70 | //! [Yu Min and Trinkle Dallas R. 2011 J. Che.m Phys. 134 064111]: 71 | //! [cargo]: 72 | 73 | /// Performs analysis of the [VoxelMap](voxel_map::VoxelMap) to find the partitioned charge, 74 | /// assigned atom and other relevant properties. 75 | pub mod analysis; 76 | /// For parsing command-line arguments. 77 | pub mod arguments; 78 | /// Contains [Atoms](atoms::Atoms) for storing the relevant data on the atoms 79 | /// in the calculation. Also contains [Lattice](atoms::Lattice) and 80 | /// for storing information about the cell in which the density is stored. 81 | pub mod atoms; 82 | /// Provides custom errors types. 83 | pub mod errors; 84 | /// Contains [Grid](grid::Grid) for managing the movement around the grid on 85 | /// which the density is stored. 86 | pub mod grid; 87 | /// Handles the File I/O for both the density file and result files. 88 | /// Provides a [FileFormat](io::FileFormat) trait to be implemented by modules designed to 89 | /// cover a specific file format of a density file. 90 | pub mod io; 91 | /// Contains the methods for partioning the density, finding maxima and calculating the 92 | /// Laplacian for voxel based grids. 93 | pub mod methods; 94 | /// Provides a [visible](progress::Bar) and [hidden](progress::HiddenBar) implementation of the 95 | /// trait [ProgressBar](progress::ProgressBar). 96 | pub mod progress; 97 | /// Misc functions mainly for vector and matrix manipulation. 
98 | pub mod utils; 99 | /// Calculates the Voronoi vectors, and their alpha values for the weight method, 100 | /// for lattices. Also useful for periodic minimum distances. 101 | pub mod voronoi; 102 | /// Provides the [BlockingVoxelMap](voxel_map::BlockingVoxelMap) and [VoxelMap](voxel_map::VoxelMap) 103 | /// for storing the maxima and weights of partioned voxels. 104 | pub mod voxel_map; 105 | -------------------------------------------------------------------------------- /src/methods.rs: -------------------------------------------------------------------------------- 1 | use crate::atoms::Atoms; 2 | use crate::errors::MaximaError; 3 | use crate::grid::Grid; 4 | use crate::progress::{Bar, HiddenBar, ProgressBar}; 5 | use crate::voxel_map::{BlockingVoxelMap, VoxelMap}; 6 | use crossbeam_utils::thread; 7 | use rustc_hash::FxHashMap; 8 | use std::sync::Arc; 9 | use std::sync::atomic::AtomicUsize; 10 | 11 | /// Result of a Weight step. 12 | /// 13 | /// TODO: turn this into an actual result type? 14 | pub enum WeightResult { 15 | /// Length of the Box dictates the type of Critical Point, 1 -> Maxima, 2 -> Saddle, 16 | /// 3+ -> Saddle or minima. Critical Points with >=2 will be on boundaries. 17 | Critical(Box<[f64]>), 18 | /// Entirely assigned to a single Bader atom. 19 | Interior(usize), 20 | /// Meeting point at the edge of 2 or more Bader atoms. 
// pub enum WeightResult (continued)
    Boundary(Box<[f64]>),
    /// Maximum
    Maximum,
}

/// A critical point of the density: its flat index, its kind and the atoms
/// whose Bader volumes meet there.
#[derive(Clone)]
pub struct CriticalPoint {
    pub position: isize,
    pub kind: CriticalPointKind,
    pub atoms: Box<[usize]>,
}

impl CriticalPoint {
    pub fn new(
        position: isize,
        kind: CriticalPointKind,
        atoms: Box<[usize]>,
    ) -> Self {
        CriticalPoint {
            position,
            kind,
            atoms,
        }
    }
}

#[derive(Eq, Ord, PartialEq, PartialOrd, Debug, Clone, Copy)]
pub enum CriticalPointKind {
    Nuclei,
    Bond,
    Ring,
    Cage,
    Blank,
}

/// Steps in the density grid, from point p, following the gradient.
///
/// This should be called from [`weight()`].
///
/// Note: This function will deadlock if the points above it have no associated
/// maxima in [`VoxelMap.voxel_map`].
///
/// * `p`: The point from which to step.
/// * `density`: The reference [`Grid`].
/// * `voxel_map`: An [`Arc`] wrapped [`BlockingVoxelMap`] for tracking the maxima.
/// * `weight_tolerance`: Minimum percentage value to consider the weight significant.
///
/// ### Returns:
/// [`WeightResult`]: The type of point `p` is Critical, Interior or Boundary and
/// the relevant data for each type.
///
/// # Examples
/// ```
/// use bader::methods::{weight_step, WeightResult};
/// use bader::voxel_map::BlockingVoxelMap as VoxelMap;
///
/// // Initialise the reference density, setting index 34 to 0. for easy maths.
/// let density = (0..64)
///     .map(|rho| if rho != 34 { rho as f64 } else { 0. })
///     .collect::<Vec<f64>>();
/// let voxel_map = VoxelMap::new(
///     [4, 4, 4],
///     [[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]],
///     [0.0, 0.0, 0.0],
/// );
/// // The highest gradient between point, p = 33, and it's neighbours, with
/// // periodic boundary conditions, is with point p = 61.
///
/// // to avoid deadlock let's store maxima for all the values above us and
/// // store as either 61 or 62 to make the current point a boundary.
/// for (i, p) in [37, 45, 49].iter().enumerate() {
///     voxel_map.maxima_store(*p, 62 - (i as isize) % 2);
/// }
/// let mut weight: Vec<f64> = match weight_step(33, &density, &voxel_map, 1E-8) {
///     WeightResult::Critical(weights) => weights
///         .iter()
///         .map(|f| {
///             let maxima = *f as usize;
///             let weight = f - maxima as f64;
///             let (decoded_maxima, _) = voxel_map.grid.decode_maxima(maxima);
///             weight + decoded_maxima as f64
///         })
///         .collect(),
///     _ => panic!("None Weight"),
/// };
/// weight.sort_by(|a, b| a.partial_cmp(b).unwrap());
/// assert_eq!(weight, vec![61.375, 62.625])
/// ```
pub fn weight_step(
    p: isize,
    density: &[f64],
    voxel_map: &BlockingVoxelMap,
    weight_tolerance: f64,
) -> WeightResult {
    let control = density[p as usize];
    let grid = &voxel_map.grid;
    let mut t_sum = 0.;
    // accumulated flux towards each (possibly image-encoded) maximum
    let mut weights = FxHashMap::<usize, f64>::default();
    let mut weight_count = 0;
    // collect the shifts and distances and iterate over them.
    grid.voronoi_shifts(p)
        .into_iter()
        .for_each(|((pt, image), alpha)| {
            let charge_diff = density[pt as usize] - control;
            // density differences of zero should be ignored to avoid division by
            // zero errors.
            if charge_diff > 0. {
                // calculate the gradient and add any weights to the HashMap.
                let rho = charge_diff * alpha;
                let maxima = voxel_map.maxima_get(pt);
                match maxima.cmp(&-1) {
                    // feeds into already weighted voxel therefore not a saddle point
                    std::cmp::Ordering::Less => {
                        let point_weights = voxel_map.weight_get(maxima);
                        weight_count = point_weights.len().max(weight_count);
                        for maxima_weight in point_weights.iter() {
                            // weights are stored as maximum + fractional weight
                            let mut maxima = *maxima_weight as usize;
                            let w = maxima_weight - maxima as f64;
                            if image[0].abs() + image[1].abs() + image[2].abs()
                                != 0
                            {
                                maxima = grid.encode_maxima(maxima, image);
                            }
                            let weight = weights.entry(maxima).or_insert(0.);
                            *weight += w * rho;
                        }
                    }
                    // interior point
                    std::cmp::Ordering::Greater => {
                        let mut maxima = maxima as usize;
                        if image[0].abs() + image[1].abs() + image[2].abs() != 0
                        {
                            maxima = grid.encode_maxima(maxima, image);
                        }
                        let weight = weights.entry(maxima).or_insert(0.);
                        *weight += rho;
                    }
                    // going into vacuum (this should be impossible)
                    std::cmp::Ordering::Equal => (),
                }
                t_sum += rho;
            }
        });
    match weights.len().cmp(&1) {
        // more than one weight is a boundary or saddle (if the weight is weighty enough)
        std::cmp::Ordering::Greater => {
            let mut total = 0.;
            // remove weights below the tolerance
            let weights = weights
                .into_iter()
                .filter_map(|(maxima, weight)| {
                    let weight = weight / t_sum;
                    if weight > weight_tolerance {
                        total += weight;
                        Some((maxima, weight))
                    } else {
                        None
                    }
                })
                .collect::<Vec<(usize, f64)>>();
            // still more than one weight then readjust the weights so that they sum to 1
            if let std::cmp::Ordering::Greater = weights.len().cmp(&1) {
                let weights = weights
                    .iter()
                    .map(|(maxima, w)| *maxima as f64 + w / total)
                    .collect::<Box<[f64]>>();
                // check if new maxima has joined the weights -> Critical Point (saddle/ring/cage)
                if weights.len() > weight_count {
                    WeightResult::Critical(weights)
                } else {
                    WeightResult::Boundary(weights)
                }
            } else {
                WeightResult::Interior(weights[0].0)
            }
        }
        // only feeds one atom means interior voxel
        std::cmp::Ordering::Equal => {
            WeightResult::Interior(*weights.keys().next().unwrap())
        }
        // no flux out means maximum
        std::cmp::Ordering::Less => WeightResult::Maximum,
    }
}

/// Assigns a maxima to the points within index.
///
/// Note: This function will deadlock if the points above it have no associated
/// maxima in [`VoxelMap.voxel_map`]. As such make sure index is sorted.
pub fn weight(
    density: &[f64],
    voxel_map: &BlockingVoxelMap,
    index: &[usize],
    weight_tolerance: f64,
    visible_bar: bool,
    threads: usize,
) -> (Vec<CriticalPoint>, Vec<CriticalPoint>) {
    let counter = Arc::new(AtomicUsize::new(0));
    // (bond points, ring points)
    let mut critical_points = (vec![], vec![]);
    let pbar: Box<dyn ProgressBar> = match visible_bar {
        false => Box::new(HiddenBar {}),
        true => {
            Box::new(Bar::new(index.len(), String::from("Bader Partitioning")))
        }
    };
    thread::scope(|s| {
        // Assign the remaining voxels to Bader maxima
        let th = (0..threads)
            .map(|_| {
                s.spawn(|_| {
                    let mut c_ps = (vec![], vec![]);
                    loop {
                        // claim the next unprocessed index atomically
                        let p = {
                            let i = counter.fetch_add(
                                1,
                                std::sync::atomic::Ordering::Relaxed,
                            );
                            if i >= index.len() {
                                break;
                            };
                            index[i] as isize
                        };
                        match weight_step(
                            p,
                            density,
                            voxel_map,
                            weight_tolerance,
                        ) {
                            WeightResult::Maximum => {}
                            WeightResult::Interior(maxima) => {
                                voxel_map.maxima_store(p, maxima as isize);
                            }
                            WeightResult::Boundary(weights) => {
                                let i = {
                                    let mut weight = voxel_map.lock();
                                    let i = weight.len();
                                    (*weight).push(weights);
                                    i
                                };
                                voxel_map.weight_store(p, i);
                            }
                            WeightResult::Critical(weights) => {
                                // length = 1 is a maxima and doesn't need storing.
                                let (i, atoms) = {
                                    let mut weight = voxel_map.lock();
                                    let i = weight.len();
                                    let atoms: Vec<usize> = weights
                                        .iter()
                                        .map(|w| *w as usize)
                                        .collect();
                                    (*weight).push(weights);
                                    (i, atoms)
                                };
                                voxel_map.weight_store(p, i);
                                if atoms.len() < 3 {
                                    c_ps.0.push(CriticalPoint::new(
                                        p,
                                        CriticalPointKind::Bond,
                                        atoms.into(),
                                    ));
                                } else {
                                    c_ps.1.push(CriticalPoint::new(
                                        p,
                                        CriticalPointKind::Ring,
                                        atoms.into(),
                                    ));
                                }
                            }
                        }
                        pbar.tick();
                    }
                    c_ps
                })
            })
            .collect::<Vec<_>>();
        for thread in th {
            if let Ok(c_ps) = thread.join() {
                critical_points.0.extend(c_ps.0);
                critical_points.1.extend(c_ps.1);
            }
        }
    })
    .unwrap();
    {
        let mut weights = voxel_map.lock();
        weights.shrink_to_fit();
    }
    critical_points.0.shrink_to_fit();
    critical_points.1.shrink_to_fit();
    critical_points
}

/// Find the maxima within the charge density
pub fn maxima_finder(
    index: &[usize],
    density: &[f64],
    voxel_map: &BlockingVoxelMap,
    maximum_distance: &f64,
    atoms: &Atoms,
    threads: usize,
    visible_bar: bool,
) -> Result<Vec<CriticalPoint>, MaximaError> {
    let grid = &voxel_map.grid;
    let mut bader_maxima = Vec::<CriticalPoint>::new();
    let pbar: Box<dyn ProgressBar> = match visible_bar {
        false => Box::new(HiddenBar {}),
        true => Box::new(Bar::new(index.len(), String::from("Maxima Finding"))),
    };
    let index_len = index.len();
    let chunk_size = (index_len / threads) + (index_len % threads).min(1);
    thread::scope(|s| {
        // Identify all the maxima
        let th = index
            .chunks(chunk_size)
            .map(|chunk| {
                s.spawn(|_| {
                    chunk
                        .iter()
                        .filter_map(|p| {
                            // we have to tick first due to early return
                            pbar.tick();
                            let rho = density[*p];
                            for (pt, _) in voxel_map
                                .grid
                                .voronoi_shifts_nocheck(*p as isize)
                            {
                                if density[pt as usize] > rho {
                                    return None;
                                }
                            }
                            // if we made it this far we have a maxima;
                            // assign it to the nearest atom
                            Some(
                                assign_maximum(
                                    *p as isize,
                                    atoms,
                                    grid,
                                    maximum_distance,
                                )
                                .map(|atom| {
                                    CriticalPoint::new(
                                        *p as isize,
                                        CriticalPointKind::Nuclei,
                                        Box::new([atom]),
                                    )
                                }),
                            )
                        })
                        .collect::<Result<Vec<CriticalPoint>, MaximaError>>()
                })
            })
            .collect::<Vec<_>>();
        for thread in th {
            if let Ok(maxima_list) = thread.join() {
                match maxima_list {
                    Ok(bm) => bader_maxima.extend(bm),
                    Err(e) => return Err(e),
                }
            } else {
                panic!("Failed to join thread in maxima finder.")
            };
        }
        Ok(())
    })
    .unwrap()?; // There is no panic option in the threads that isn't covered
    bader_maxima.shrink_to_fit();
    Ok(bader_maxima)
}

/// Find minima in the charge density
pub fn minima_finder(
    index: &[usize],
    density: &[f64],
    voxel_map: &VoxelMap,
    threads: usize,
    visible_bar: bool,
) -> Vec<CriticalPoint> {
    let mut bader_minima = Vec::<CriticalPoint>::new();
    let pbar: Box<dyn ProgressBar> = match visible_bar {
        false => Box::new(HiddenBar {}),
        true => Box::new(Bar::new(index.len(), String::from("Minima Finding"))),
    };
    let index_len = index.len();
    let chunk_size = (index_len / threads) + (index_len % threads).min(1);
    thread::scope(|s| {
        // Identify all the minima
        let th = index
            .chunks(chunk_size)
            .map(|chunk| {
                s.spawn(|_| {
                    chunk
                        .iter()
                        .filter_map(|p| {
                            // we have to tick first due to early return
                            pbar.tick();
                            let rho = density[*p];
                            for (pt, _) in voxel_map
                                .grid
                                .voronoi_shifts_nocheck(*p as isize)
                            {
                                if density[pt as usize] < rho {
                                    return None;
                                }
                            }
                            // if we made it this far we have a minimum
                            // TODO: This needs to check if the cage is actually a boundary and if
                            // not complain that the weight tolerance is too high
                            Some(CriticalPoint::new(
                                *p as isize,
                                CriticalPointKind::Cage,
                                voxel_map
                                    .maxima_to_weight(
                                        voxel_map.maxima_get(*p as isize),
                                    )
                                    .iter()
                                    .map(|f| *f as usize)
                                    .collect(),
                            ))
                        })
                        .collect::<Vec<CriticalPoint>>()
                })
            })
            .collect::<Vec<_>>();
        for thread in th {
            if let Ok(minima_list) = thread.join() {
                bader_minima.extend(minima_list);
            } else {
                panic!("Failed to join thread in minima finder.")
            };
        }
    })
    .unwrap(); // There is no panic option in the threads that isn't covered
    bader_minima.shrink_to_fit();
    bader_minima
}

/// Assign the Bader maxima to the nearest atom.
///
/// # Example
/// ```
/// use bader::atoms::{Atoms, Lattice};
/// use bader::grid::Grid;
/// use bader::methods::assign_maximum;
///
/// // Initialise Atoms and Grid structs as well as a list of maxima
/// let lattice =
///     Lattice::new([[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]]);
/// // Place atoms at 0 and 555 in the grid
/// let atoms = Atoms::new(
///     lattice,
///     vec![[0.0, 0.0, 0.0], [1.5, 1.5, 1.5]],
///     String::from(""),
/// );
/// let grid = Grid::new(
///     [10, 10, 10],
///     [[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]],
///     [0.0, 0.0, 0.0],
/// );
///
/// // Run with default maxima distance tolerance
/// let maximum_distance = 0.1;
/// let atom_list = assign_maximum(555, &atoms, &grid, &maximum_distance);
/// assert!(atom_list.is_ok());
/// assert_eq!(atom_list.unwrap(), 1);
///
/// // If the maxima is too far away we get an error.
490 | /// let atom_list = assign_maximum(554, &atoms, &grid, &maximum_distance); 491 | /// assert!(atom_list.is_err()); 492 | /// ``` 493 | pub fn assign_maximum( 494 | maximum: isize, 495 | atoms: &Atoms, 496 | grid: &Grid, 497 | maximum_distance: &f64, 498 | ) -> Result { 499 | // convert the point first to cartesian, then to the reduced basis 500 | let m_cartesian = grid.to_cartesian(maximum); 501 | let m_reduced_cartesian = atoms.lattice.cartesian_to_reduced(m_cartesian); 502 | let mut atom_num = 0; 503 | let mut min_distance = f64::INFINITY; 504 | // go through each atom in the reduced basis and shift in each 505 | // reduced direction, save the atom with the shortest distance 506 | for (i, atom) in atoms.reduced_positions.iter().enumerate() { 507 | for atom_shift in atoms.lattice.reduced_cartesian_shift_matrix.iter() { 508 | let distance = { 509 | (m_reduced_cartesian[0] - (atom[0] + atom_shift[0])).powi(2) 510 | + (m_reduced_cartesian[1] - (atom[1] + atom_shift[1])) 511 | .powi(2) 512 | + (m_reduced_cartesian[2] - (atom[2] + atom_shift[2])) 513 | .powi(2) 514 | }; 515 | if distance < min_distance { 516 | min_distance = distance; 517 | atom_num = i; 518 | } 519 | } 520 | } 521 | if min_distance.powf(0.5) > *maximum_distance { 522 | Err(MaximaError { 523 | maximum: m_cartesian, 524 | atom: atom_num, 525 | distance: min_distance.powf(0.5), 526 | }) 527 | } else { 528 | Ok(atom_num) 529 | } 530 | } 531 | 532 | /// Calculate the Laplacian of the density at a point in the grid 533 | pub fn laplacian(p: usize, density: &[f64], grid: &Grid) -> f64 { 534 | let rho = density[p]; 535 | grid.voronoi_shifts_nocheck(p as isize) 536 | .iter() 537 | .fold(0.0, |acc, (pt, alpha)| { 538 | acc + alpha * (density[*pt as usize] - rho) 539 | }) 540 | / grid.voronoi.volume 541 | } 542 | -------------------------------------------------------------------------------- /src/progress.rs: -------------------------------------------------------------------------------- 1 | use 
std::sync::Arc; 2 | use std::sync::atomic::AtomicUsize; 3 | use std::thread; 4 | use std::time::Duration; 5 | 6 | pub trait ProgressBar: Send + Sync { 7 | fn tick(&self); 8 | } 9 | 10 | pub struct Bar { 11 | counter: Arc, 12 | } 13 | 14 | impl Bar { 15 | pub fn new(length: usize, text: String) -> Self { 16 | let counter = Arc::new(AtomicUsize::new(0)); 17 | let thread_counter = counter.clone(); 18 | thread::spawn(move || { 19 | while Arc::strong_count(&thread_counter) > 1 { 20 | let count = thread_counter 21 | .fetch_min(length, std::sync::atomic::Ordering::Relaxed); 22 | if let std::cmp::Ordering::Less = count.cmp(&length) { 23 | let progress = (count * 40) / length; 24 | let done = format!("{:={}]", text, done, remain); 28 | } else { 29 | break; 30 | } 31 | thread::sleep(Duration::from_millis(100)); 32 | } 33 | eprint!("\r{: [f64; 3] { 5 | [ 6 | a[1] * b[2] - a[2] * b[1], 7 | a[2] * b[0] - a[0] * b[2], 8 | a[0] * b[1] - a[1] * b[0], 9 | ] 10 | } 11 | 12 | /// subtract two [f64;3] vectors 13 | pub fn subtract(a: [f64; 3], b: [f64; 3]) -> [f64; 3] { 14 | [a[0] - b[0], a[1] - b[1], a[2] - b[2]] 15 | } 16 | 17 | /// compute the dot product between a vector and a matrix 18 | pub fn dot(v: [f64; 3], m: [[f64; 3]; 3]) -> [f64; 3] { 19 | (0..3) 20 | .map(|i| (v[0] * m[0][i] + v[1] * m[1][i] + v[2] * m[2][i])) 21 | .collect::>() 22 | .try_into() 23 | .unwrap() // safe to unwrap as is size 3 24 | } 25 | 26 | /// compute the integer dot product between a vector and a matrix 27 | pub fn idot(v: [f64; 3], m: [[f64; 3]; 3]) -> [isize; 3] { 28 | (0..3) 29 | .map(|i| { 30 | (v[0] * m[0][i] + v[1] * m[1][i] + v[2] * m[2][i]).round() as isize 31 | }) 32 | .collect::>() 33 | .try_into() 34 | .unwrap() // safe to unwrap as is size 3 35 | } 36 | 37 | /// compute the dot product between two vectors 38 | pub fn vdot(a: [f64; 3], b: [f64; 3]) -> f64 { 39 | a.iter().zip(b.iter()).map(|(a, b)| a * b).sum() 40 | } 41 | 42 | /// compute the norm of a vector 43 | pub fn norm(a: [f64; 3]) -> 
f64 { 44 | a.iter().map(|a| a.powi(2)).sum::().powf(0.5) 45 | } 46 | 47 | /// compute the sum of two vectors 48 | pub fn vsum(a: [f64; 3], b: [f64; 3]) -> [f64; 3] { 49 | [a[0] + b[0], a[1] + b[1], a[2] + b[2]] 50 | } 51 | 52 | /// compute M.T * M 53 | pub fn transpose_square(m: [[f64; 3]; 3]) -> [[f64; 3]; 3] { 54 | [ 55 | [ 56 | vdot([m[0][0], m[1][0], m[2][0]], [m[0][0], m[1][0], m[2][0]]), 57 | vdot([m[0][0], m[1][0], m[2][0]], [m[0][1], m[1][1], m[2][1]]), 58 | vdot([m[0][0], m[1][0], m[2][0]], [m[0][2], m[1][2], m[2][2]]), 59 | ], 60 | [ 61 | vdot([m[0][1], m[1][1], m[2][1]], [m[0][0], m[1][0], m[2][0]]), 62 | vdot([m[0][1], m[1][1], m[2][1]], [m[0][1], m[1][1], m[2][1]]), 63 | vdot([m[0][1], m[1][1], m[2][1]], [m[0][2], m[1][2], m[2][2]]), 64 | ], 65 | [ 66 | vdot([m[0][2], m[1][2], m[2][2]], [m[0][0], m[1][0], m[2][0]]), 67 | vdot([m[0][2], m[1][2], m[2][2]], [m[0][1], m[1][1], m[2][1]]), 68 | vdot([m[0][2], m[1][2], m[2][2]], [m[0][2], m[1][2], m[2][2]]), 69 | ], 70 | ] 71 | } 72 | 73 | /// calculates the inverse of a 3x3 lattice if it is invertible 74 | pub fn invert_lattice(lattice: &[[f64; 3]; 3]) -> Option<[[f64; 3]; 3]> { 75 | let minor00 = lattice[1][1] * lattice[2][2] - lattice[1][2] * lattice[2][1]; 76 | let minor01 = lattice[1][0] * lattice[2][2] - lattice[1][2] * lattice[2][0]; 77 | let minor02 = lattice[1][0] * lattice[2][1] - lattice[1][1] * lattice[2][0]; 78 | let determinant = lattice[0][0] * minor00 - lattice[0][1] * minor01 79 | + lattice[0][2] * minor02; 80 | // a determinant of zero is not invertible 81 | if determinant.abs() <= f64::EPSILON { 82 | None 83 | } else { 84 | Some([ 85 | [ 86 | minor00 / determinant, 87 | (lattice[0][2] * lattice[2][1] - lattice[2][2] * lattice[0][1]) 88 | / determinant, 89 | (lattice[0][1] * lattice[1][2] - lattice[1][1] * lattice[0][2]) 90 | / determinant, 91 | ], 92 | [ 93 | -minor01 / determinant, 94 | (lattice[0][0] * lattice[2][2] - lattice[2][0] * lattice[0][2]) 95 | / determinant, 96 | (lattice[0][2] 
* lattice[1][0] - lattice[1][2] * lattice[0][0]) 97 | / determinant, 98 | ], 99 | [ 100 | minor02 / determinant, 101 | (lattice[0][1] * lattice[2][0] - lattice[2][1] * lattice[0][0]) 102 | / determinant, 103 | (lattice[0][0] * lattice[1][1] - lattice[1][0] * lattice[0][1]) 104 | / determinant, 105 | ], 106 | ]) 107 | } 108 | } 109 | 110 | /// returns the first index that is not vacuum from a sorted index list 111 | pub fn vacuum_index( 112 | density: &[f64], 113 | index: &[usize], 114 | tolerance: Option, 115 | ) -> Result { 116 | match tolerance { 117 | Some(tol) => { 118 | for (i, p) in index.iter().rev().enumerate() { 119 | if density[*p] > tol { 120 | return Ok(index.len() - i); 121 | } 122 | } 123 | Err(VacuumError { 124 | vacuum_tolerance: tol, 125 | density: density[index[0]], 126 | }) 127 | } 128 | None => Ok(index.len()), 129 | } 130 | } 131 | 132 | #[cfg(test)] 133 | mod tests { 134 | use super::*; 135 | 136 | #[test] 137 | fn utils_dot() { 138 | assert_eq!( 139 | dot([0.5, 0.5, 0.5], [[1., 0., 0.], [1., 2., 0.], [2., 2., 4.]]), 140 | [2., 2., 2.] 141 | ) 142 | } 143 | 144 | #[test] 145 | fn utils_vdot() { 146 | assert_eq!(vdot([1., 2., 3.], [1., 2., 3.]), 14.) 147 | } 148 | 149 | #[test] 150 | fn utils_norm() { 151 | assert_eq!(norm([3., 4., 12.]), 13.) 
152 | } 153 | 154 | #[test] 155 | fn utils_transpose_square() { 156 | let matrix = [[3., 0., 0.], [2.5, 2., 0.], [0., 0., 5.]]; 157 | let t_squared = [[15.25, 5., 0.], [5., 4., 0.], [0., 0., 25.]]; 158 | assert_eq!(transpose_square(matrix), t_squared) 159 | } 160 | 161 | #[test] 162 | fn utils_vacuum_index_some_high() { 163 | let data = (0..60).map(|x| x as f64).collect::>(); 164 | let index = (0..60).rev().collect::>(); 165 | assert!(vacuum_index(&data, &index, Some(100.)).is_err()) 166 | } 167 | 168 | #[test] 169 | fn utils_vacuum_index_some_low() { 170 | let data = (0..60).map(|x| x as f64).collect::>(); 171 | let index = (0..60).rev().collect::>(); 172 | let i = vacuum_index(&data, &index, Some(-1.)).unwrap(); 173 | assert_eq!(i, 60) 174 | } 175 | 176 | #[test] 177 | fn utils_vacuum_index_some() { 178 | let data = (0..60).map(|x| x as f64).collect::>(); 179 | let index = (0..60).rev().collect::>(); 180 | let i = vacuum_index(&data, &index, Some(10.)).unwrap(); 181 | assert_eq!(i, 49) 182 | } 183 | 184 | #[test] 185 | fn utils_vacuum_index_none() { 186 | let data = (0..60).map(|x| x as f64).collect::>(); 187 | let index = (0..60).rev().collect::>(); 188 | let i = vacuum_index(&data, &index, None).unwrap(); 189 | assert_eq!(i, 60) 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /src/voronoi.rs: -------------------------------------------------------------------------------- 1 | use crate::atoms::Lattice; 2 | use crate::utils::{cross, invert_lattice, vdot}; 3 | 4 | /// Holds information on the Voronoi vectors. 5 | pub struct Voronoi { 6 | /// Voronoi vectors as indices in a shift matrix. 7 | pub vectors: Vec>, 8 | /// The alphas associated with each Voronoi vector. 9 | /// alphas are used to multiply the charge difference by to calculate flux. 10 | pub alphas: Vec, 11 | /// The volume of the Voronoi cell. 
12 | pub volume: f64, 13 | } 14 | 15 | impl Voronoi { 16 | /// Generates a Voronoi struct from a [`Lattice`]. 17 | pub fn new(lattice: &Lattice) -> Self { 18 | let (vectors, alphas, volume) = Voronoi::voronoi_vectors(lattice); 19 | Self { 20 | vectors, 21 | alphas, 22 | volume, 23 | } 24 | } 25 | 26 | /// Calculates the Voronoi vectors and their alphas from a reduced basis. 27 | fn voronoi_vectors(lattice: &Lattice) -> (Vec>, Vec, f64) { 28 | // allocate the storage for voronoi vectors and flux coefficients 29 | let mut vectors = Vec::>::with_capacity(14); 30 | let mut alphas = Vec::::with_capacity(14); 31 | // allocate the vertex storage and vector/matrix for calculating them 32 | // allocate the plane vectors for each voronoi vector 33 | // find the vertices for each plane by solving 3-way intersections between the plane and 34 | // every other plane pair and then checking if it falls within the Voronoi volume 35 | let volume = lattice 36 | .reduced_cartesian_shift_matrix 37 | .iter() 38 | .take(13) // the first 13 vectors are symmetrically equivalent to the last 13 39 | .enumerate() 40 | .filter_map(|(vec_i, n)| { 41 | let n_mag = vdot(*n, *n) * 0.5; 42 | let mut vertices = lattice 43 | .reduced_cartesian_shift_matrix 44 | .iter() 45 | .enumerate() 46 | .filter_map(|(neigh_i, r1)| { 47 | let r1_mag = vdot(*r1, *r1) * 0.5; 48 | let vertices = lattice 49 | .reduced_cartesian_shift_matrix 50 | .iter() 51 | .skip(neigh_i + 1) 52 | .filter_map(|r2| { 53 | let r2_mag = vdot(*r2, *r2) * 0.5; 54 | // If not invertable then no crossing point 55 | if let Some(vector_inv) = 56 | invert_lattice(&[*n, *r1, *r2]) 57 | { 58 | let mut vertex = [0f64; 3]; 59 | for i in 0..3 { 60 | vertex[i] = vdot( 61 | [n_mag, r1_mag, r2_mag], 62 | vector_inv[i], 63 | ) 64 | } 65 | // Check that the vertex isn't on the orginal vector which would make all 66 | // vertices apart from this one outside the Voronoi volume 67 | let vertex_mag = vdot(vertex, vertex); 68 | if (vertex_mag - 0.5 * 
n_mag).abs() 69 | < f64::EPSILON 70 | { 71 | return None; 72 | } 73 | // is this vertex inside the Voronoi volume, project along every lll vector 74 | // and compare to vector * vector / 2 if higher then outside Voronoi volume 75 | for s in lattice 76 | .reduced_cartesian_shift_matrix 77 | .iter() 78 | { 79 | let ss2 = 0.5 * vdot(*s, *s); 80 | if vdot(vertex, *s) > ss2 + f64::EPSILON 81 | { 82 | return None; 83 | } 84 | } 85 | Some(vertex) 86 | } else { 87 | None 88 | } 89 | }) 90 | .collect::>(); 91 | if vertices.is_empty() { 92 | None 93 | } else { 94 | Some(vertices) 95 | } 96 | }) 97 | .flatten() 98 | .collect::>(); 99 | // if the current vector does not form the Voronoi bounding planes then skip 100 | if vertices.is_empty() { 101 | return None; 102 | } 103 | // order the vertices by projecting on to an orthogonal basis that is itself orthogonal 104 | // to n for calculating the area 105 | let mut rx = vertices[0]; 106 | let r_coeff = vdot(rx, *n) / vdot(*n, *n); 107 | for (element, n) in rx.iter_mut().zip(n) { 108 | *element -= n * r_coeff; 109 | } 110 | let r_coeff = vdot(rx, rx).powf(-0.5); 111 | for element in rx.iter_mut() { 112 | *element *= r_coeff; 113 | } 114 | let mut ry = cross(*n, rx); 115 | let r_coeff = vdot(ry, ry).powf(-0.5); 116 | for element in ry.iter_mut() { 117 | *element *= r_coeff; 118 | } 119 | vertices.sort_unstable_by({ 120 | |a, b| { 121 | let c = vdot(*a, ry).atan2(vdot(*a, rx)); 122 | let d = vdot(*b, ry).atan2(vdot(*b, rx)); 123 | c.partial_cmp(&d).unwrap() 124 | } 125 | }); 126 | vertices.push(vertices[0]); 127 | // calculate the area of the facet and divide by the length 128 | // first calculate the volume of the tetrahedron of v[i], v[i+1] & n/2. This is the 129 | // scalar tripple product of the vectors divided by 6. 
The volume is for the Laplacian 130 | // calculation later 131 | let wedge_volume = vertices 132 | .windows(2) 133 | .fold(0f64, |sum, w| sum + vdot(w[0], cross(w[1], *n))) 134 | / 12f64; 135 | if wedge_volume.abs() < f64::EPSILON { 136 | return None; 137 | } 138 | // now we need to turn the volume into an area and divide by |n| 139 | // V = Ah/3 where h = n/2 140 | let alpha = 6f64 * wedge_volume / vdot(*n, *n); 141 | vectors.push(lattice.reduced_grid_shift_matrix[vec_i].clone()); 142 | alphas.push(alpha); 143 | vectors.push( 144 | lattice.reduced_grid_shift_matrix[26 - vec_i].clone(), 145 | ); 146 | alphas.push(alpha); 147 | Some(wedge_volume * 2f64) 148 | }) 149 | .sum(); 150 | vectors.shrink_to_fit(); 151 | alphas.shrink_to_fit(); 152 | (vectors, alphas, volume) 153 | } 154 | } 155 | 156 | #[cfg(test)] 157 | mod test { 158 | use super::*; 159 | #[test] 160 | fn test_voronoi_vectors() { 161 | let lattice = 162 | Lattice::new([[1.0, 0., 0.], [0.707, 0.707, 0.], [0., 0., 1.]]); 163 | let voronoi = Voronoi::new(&lattice); 164 | let vecs = vec![ 165 | vec![4], 166 | vec![22], 167 | vec![7], 168 | vec![19], 169 | vec![10], 170 | vec![16], 171 | vec![12], 172 | vec![14], 173 | ]; 174 | assert_eq!(vecs, voronoi.vectors) 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/voxel_map.rs: -------------------------------------------------------------------------------- 1 | use crate::grid::Grid; 2 | use rustc_hash::FxHashSet; 3 | use std::cell::UnsafeCell; 4 | use std::ops::{Deref, DerefMut}; 5 | use std::sync::atomic::{AtomicBool, AtomicIsize, Ordering}; 6 | 7 | /// Describes the state of the voxel. 8 | pub enum Voxel<'a> { 9 | /// Contians the position of the voxel's maxima. 10 | Maxima(usize), 11 | /// Contians a vector of the maxima the current voxel contributes to and 12 | /// their weights. 13 | Boundary(&'a [f64]), 14 | /// A voxel beneath the vacuum tolerance and not contributing to any maxima. 
15 | Vacuum, 16 | } 17 | 18 | /// A lock guard for write access to [`VoxelMap.weight_map`]. 19 | pub struct Lock<'a> { 20 | data: &'a BlockingVoxelMap, 21 | } 22 | 23 | unsafe impl Sync for Lock<'_> {} 24 | 25 | /// Deref only exposes the weight_map field of a [`VoxelMap`]. 26 | impl Deref for Lock<'_> { 27 | type Target = Vec>; 28 | fn deref(&self) -> &Vec> { 29 | unsafe { &*self.data.weight_map.get() } 30 | } 31 | } 32 | 33 | /// DerefMut only exposes the weight_map field of a [`VoxelMap`]. 34 | impl DerefMut for Lock<'_> { 35 | fn deref_mut(&mut self) -> &mut Vec> { 36 | unsafe { &mut *self.data.weight_map.get() } 37 | } 38 | } 39 | 40 | /// Make sure to free the lock when the struct is dropped. 41 | impl Drop for Lock<'_> { 42 | fn drop(&mut self) { 43 | self.data.lock.store(false, Ordering::SeqCst); 44 | } 45 | } 46 | 47 | /// A structure for building and processing the map between voxel and maxima. 48 | /// Bader maxima are stored in the voxel_map whilst the contributing weights are 49 | /// stored in the weight_map. The weight_map is only written to once by each 50 | /// point and so once a value has been written it is safe to read by any thread. 51 | /// To check it has been written to `weight_get` monitors the state of corresponding 52 | /// voxel_map value. Writing to the map is acheived by acquiring the lock, noting 53 | /// the length of the weight_map, pushing the weight vector for voxel p to the 54 | /// weight_map, droping the write lock and then storing the index of the inserted 55 | /// vector using `weight_store`. 
56 | /// 57 | /// # Examples 58 | /// ``` 59 | /// use bader::voxel_map::BlockingVoxelMap; 60 | /// 61 | /// for p in 0..1isize { 62 | /// let voxel_map = BlockingVoxelMap::new( 63 | /// [2, 5, 2], 64 | /// [[2.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 2.0]], 65 | /// [0.0, 0.0, 0.0], 66 | /// ); 67 | /// let i = { 68 | /// let mut weight = voxel_map.lock(); 69 | /// (*weight).push(Vec::with_capacity(0).into()); 70 | /// weight.len() - 1 71 | /// }; 72 | /// voxel_map.weight_store(p, i) 73 | /// } 74 | /// ``` 75 | pub struct BlockingVoxelMap { 76 | weight_map: UnsafeCell>>, 77 | voxel_map: Vec, 78 | pub grid: Grid, 79 | lock: AtomicBool, 80 | } 81 | 82 | unsafe impl Sync for BlockingVoxelMap {} 83 | 84 | impl BlockingVoxelMap { 85 | /// Initialises a [`BlockingVoxelMap`] and the [`Grid`] that will faciliate movemoment around the 86 | /// map. 87 | pub fn new( 88 | grid: [usize; 3], 89 | lattice: [[f64; 3]; 3], 90 | voxel_origin: [f64; 3], 91 | ) -> Self { 92 | let grid = Grid::new(grid, lattice, voxel_origin); 93 | let size = grid.size.total; 94 | // For mapping the the voxels 95 | let weight_map = 96 | UnsafeCell::new(Vec::>::with_capacity(size)); 97 | let mut voxel_map = Vec::with_capacity(size); 98 | voxel_map.resize_with(size, || AtomicIsize::new(-1)); 99 | let lock = AtomicBool::new(false); 100 | // For post processing 101 | Self { 102 | weight_map, 103 | voxel_map, 104 | grid, 105 | lock, 106 | } 107 | } 108 | 109 | /// Retrieves the state of the voxel, p. This will lock until p has been stored 110 | /// in the VoxelMap and then return either a `Voxel::Maxima` or `Voxel::Weight`. 111 | /// Calling this on a voxel, p, that is below the vacuum_tolerance will deadlock 112 | /// as a voxel is considered stored once voxel_map\[p\] > -1. 
113 | pub fn weight_get(&self, i: isize) -> &[f64] { 114 | let i = -2 - i; 115 | &(unsafe { &*self.weight_map.get() })[i as usize] 116 | } 117 | 118 | /// Atomic loading of voxel, p, from voxel_map blocks if maxima == -1 119 | pub fn maxima_get(&self, p: isize) -> isize { 120 | loop { 121 | match self.voxel_map[p as usize].load(Ordering::Relaxed) { 122 | -1 => (), 123 | x => break x, 124 | } 125 | } 126 | } 127 | 128 | /// Check if a maxima is stored 129 | pub fn maxima_check(&self, p: isize) -> Option { 130 | match self.voxel_map[p as usize].load(Ordering::Relaxed) { 131 | -1 => None, 132 | x => Some(x), 133 | } 134 | } 135 | 136 | /// Stores the maxima of voxel, p, in the voxel_map. 137 | pub fn maxima_store(&self, p: isize, maxima: isize) { 138 | self.voxel_map[p as usize].store(maxima, Ordering::Relaxed); 139 | } 140 | 141 | /// Stores the index of p's weight contributions in weight_map into the 142 | /// weight_index. 143 | pub fn weight_store(&self, p: isize, i: usize) { 144 | self.maxima_store(p, -2 - (i as isize)); 145 | } 146 | 147 | /// Locks the structure for write access unlock occurs when the returned 148 | /// Lock is dropped. 149 | pub fn lock(&self) -> Lock { 150 | while self.lock.swap(true, Ordering::SeqCst) {} 151 | Lock { data: self } 152 | } 153 | 154 | /// Extract the voxel map data. 155 | pub fn into_inner(self) -> (Vec, Vec>, Grid) { 156 | ( 157 | self.voxel_map.into_iter().map(|x| x.into_inner()).collect(), 158 | self.weight_map.into_inner(), 159 | self.grid, 160 | ) 161 | } 162 | } 163 | 164 | /// A VoxelMap for if the maxima stored are atomic indices. 165 | pub struct VoxelMap { 166 | /// The vector mapping the voxel to a maxima. 167 | pub voxel_map: Vec, 168 | /// The vector containing the weights for boundary voxels. 169 | pub weight_map: Vec>, 170 | /// The Grid used to navigate the VoxelMap. 
171 | pub grid: Grid, 172 | } 173 | 174 | impl VoxelMap { 175 | /// Create a new [`VoxelMap`] 176 | pub fn new( 177 | voxel_map: Vec, 178 | weight_map: Vec>, 179 | grid: Grid, 180 | ) -> Self { 181 | Self { 182 | voxel_map, 183 | weight_map, 184 | grid, 185 | } 186 | } 187 | 188 | /// Create a new [`VoxelMap`] from a [`BlockingVoxelMap`]. 189 | pub fn from_blocking_voxel_map(voxel_map: BlockingVoxelMap) -> Self { 190 | let (voxel_map, weight_map, grid) = voxel_map.into_inner(); 191 | Self::new(voxel_map, weight_map, grid) 192 | } 193 | 194 | /// Produce an Iter over the boundary voxels. 195 | pub fn weight_iter(&self) -> std::slice::Iter<'_, Box<[f64]>> { 196 | self.weight_map.iter() 197 | } 198 | 199 | /// Get the length of the weight_map. 200 | pub fn weight_len(&self) -> usize { 201 | self.weight_map.len() 202 | } 203 | 204 | /// Get a refernce to the grid used by the VoxelMap. 205 | pub fn grid_get(&self) -> &Grid { 206 | &self.grid 207 | } 208 | 209 | /// Returns the atom associated with the point. 210 | pub fn maxima_to_atom(&self, maxima: usize) -> usize { 211 | maxima 212 | } 213 | 214 | /// Retrieval of the state of the voxel, p. 215 | pub fn maxima_to_voxel(&self, maxima: isize) -> Voxel { 216 | match maxima.cmp(&-1) { 217 | std::cmp::Ordering::Equal => Voxel::Vacuum, 218 | std::cmp::Ordering::Greater => Voxel::Maxima(maxima as usize), 219 | std::cmp::Ordering::Less => { 220 | Voxel::Boundary(self.maxima_to_weight(maxima)) 221 | } 222 | } 223 | } 224 | 225 | /// Return a reference to the weights from the given maxima, Note: maxima here must be < -1. 226 | pub fn maxima_to_weight(&self, maxima: isize) -> &[f64] { 227 | &self.weight_map[(-2 - maxima) as usize] 228 | } 229 | 230 | /// Return an Iter over the maxima stored in the VoxelMap. 231 | pub fn maxima_iter(&self) -> std::slice::Iter<'_, isize> { 232 | self.voxel_map.iter() 233 | } 234 | 235 | /// Get the length of the voxel_map. 
236 | pub fn maxima_len(&self) -> usize { 237 | self.voxel_map.len() 238 | } 239 | 240 | /// Return a Chunk over the maxima stored in the VoxelMap. 241 | pub fn maxima_chunks( 242 | &self, 243 | chunk_size: usize, 244 | ) -> std::slice::Chunks<'_, isize> { 245 | self.voxel_map.chunks(chunk_size) 246 | } 247 | 248 | /// Retrieval of the state of the voxel, p. 249 | pub fn voxel_get(&self, p: isize) -> Voxel { 250 | self.maxima_to_voxel(self.maxima_get(p)) 251 | } 252 | 253 | /// Return the stored maxima at point p. 254 | pub fn maxima_get(&self, p: isize) -> isize { 255 | self.voxel_map[p as usize] 256 | } 257 | 258 | /// Produce a mask for a specific volume number. 259 | pub fn volume_map(&self, volume_number: isize) -> Vec> { 260 | self.maxima_iter() 261 | .map(|maxima| { 262 | if *maxima == volume_number { 263 | Some(1.0) 264 | } else if *maxima < -1 { 265 | let mut w = None; 266 | for weight in self.maxima_to_weight(*maxima).iter() { 267 | let m = *weight as isize; 268 | if m == volume_number { 269 | w = Some(weight - m as f64); 270 | break; 271 | } 272 | } 273 | w 274 | } else { 275 | None 276 | } 277 | }) 278 | .collect() 279 | } 280 | /// Produce a mask for a collection volume numbers. 
281 | pub fn multi_volume_map( 282 | &self, 283 | volume_numbers: &FxHashSet, 284 | ) -> Vec> { 285 | self.maxima_iter() 286 | .map(|maxima| { 287 | if volume_numbers.contains(maxima) { 288 | Some(1.0) 289 | } else if *maxima < -1 { 290 | let mut w = 0.0; 291 | for weight in self.maxima_to_weight(*maxima).iter() { 292 | let m = *weight as isize; 293 | if volume_numbers.contains(&m) { 294 | w += weight - m as f64; 295 | } 296 | } 297 | Some(w) 298 | } else { 299 | None 300 | } 301 | }) 302 | .collect() 303 | } 304 | } 305 | -------------------------------------------------------------------------------- /tests/cube.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use bader::io::FileFormat; 4 | use bader::io::cube::Cube; 5 | 6 | const LENGTH_UNITS: f64 = 0.52917721067; 7 | const VOLUME_UNITS: f64 = LENGTH_UNITS * LENGTH_UNITS * LENGTH_UNITS; 8 | 9 | #[test] 10 | fn cube_read() { 11 | let filename = String::from("tests/cube/anatase.cube"); 12 | let cube = Cube {}; 13 | let (voxel_origin, grid, atoms, densities) = match cube.read(filename) { 14 | Ok(r) => r, 15 | Err(e) => panic!("{}", e), 16 | }; 17 | assert_eq!(voxel_origin, [0.5; 3]); 18 | assert_eq!(grid, [96, 96, 180]); 19 | assert_eq!(atoms.positions.len(), 576); 20 | assert_eq!(densities[0][0], 0.13387E-02 / VOLUME_UNITS); 21 | assert_eq!(densities[0][1658879], 0.11782E+01 / VOLUME_UNITS); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /tests/vasp.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use bader::io::FileFormat; 4 | use bader::io::vasp::Vasp; 5 | 6 | #[test] 7 | fn vasp_read_no_spin() { 8 | let filename = String::from("tests/vasp/CHGCAR_no_spin"); 9 | let vasp = Vasp {}; 10 | let (voxel_origin, grid, atoms, densities) = match vasp.read(filename) { 11 | Ok(r) => r, 12 | Err(e) => panic!("{}", e), 13 | }; 14 
| assert_eq!(voxel_origin, [0.; 3]); 15 | assert_eq!(grid, [32, 32, 32]); 16 | assert_eq!(atoms.positions, vec![[0., 0., 0.]]); 17 | assert_eq!(densities[0][0], 0.15246059033E+03 / atoms.lattice.volume); 18 | assert_eq!( 19 | densities[0][32767], 20 | 0.13036296982E+03 / atoms.lattice.volume 21 | ); 22 | } 23 | 24 | #[test] 25 | fn vasp_read_no_spin_chg() { 26 | let filename = String::from("tests/vasp/CHG_no_spin"); 27 | let vasp = Vasp {}; 28 | let (voxel_origin, grid, atoms, densities) = match vasp.read(filename) { 29 | Ok(r) => r, 30 | Err(e) => panic!("{}", e), 31 | }; 32 | assert_eq!(voxel_origin, [0.; 3]); 33 | assert_eq!(grid, [32, 32, 32]); 34 | assert_eq!(atoms.positions, vec![[0., 0., 0.]]); 35 | assert_eq!(densities[0][0], 152.46 / atoms.lattice.volume); 36 | assert_eq!(densities[0][32767], 130.36 / atoms.lattice.volume); 37 | } 38 | 39 | #[test] 40 | fn vasp_read_spin() { 41 | let filename = String::from("tests/vasp/CHGCAR_spin"); 42 | let vasp = Vasp {}; 43 | let (voxel_origin, grid, atoms, densities) = match vasp.read(filename) { 44 | Ok(r) => r, 45 | Err(e) => panic!("{}", e), 46 | }; 47 | assert_eq!(voxel_origin, [0.; 3]); 48 | assert_eq!(grid, [32, 32, 32]); 49 | assert_eq!(atoms.positions, vec![[0., 0., 0.]]); 50 | assert_eq!(densities[0][0], 0.15245934681E+03 / atoms.lattice.volume); 51 | assert_eq!( 52 | densities[0][32767], 53 | 0.13036192086E+03 / atoms.lattice.volume 54 | ); 55 | assert_eq!(densities[1][0], -0.10283642961E-07 / atoms.lattice.volume); 56 | assert_eq!( 57 | densities[1][32767], 58 | -0.87468511150E-08 / atoms.lattice.volume 59 | ); 60 | } 61 | 62 | #[test] 63 | fn vasp_read_spin_chg() { 64 | let filename = String::from("tests/vasp/CHG_spin"); 65 | let vasp = Vasp {}; 66 | let (voxel_origin, grid, atoms, densities) = match vasp.read(filename) { 67 | Ok(r) => r, 68 | Err(e) => panic!("{}", e), 69 | }; 70 | assert_eq!(voxel_origin, [0.; 3]); 71 | assert_eq!(grid, [32, 32, 32]); 72 | assert_eq!(atoms.positions, vec![[0., 0., 0.]]); 
73 | assert_eq!(densities[0][0], 152.46 / atoms.lattice.volume); 74 | assert_eq!(densities[0][32767], 130.36 / atoms.lattice.volume); 75 | assert_eq!(densities[1][0], -0.10284E-07 / atoms.lattice.volume); 76 | assert_eq!(densities[1][32767], -0.87469E-08 / atoms.lattice.volume); 77 | } 78 | 79 | #[test] 80 | fn vasp_read_ncl() { 81 | let filename = String::from("tests/vasp/CHGCAR_ncl"); 82 | let vasp = Vasp {}; 83 | let (voxel_origin, grid, atoms, densities) = match vasp.read(filename) { 84 | Ok(r) => r, 85 | Err(e) => panic!("{}", e), 86 | }; 87 | assert_eq!(voxel_origin, [0.; 3]); 88 | assert_eq!(grid, [32, 32, 32]); 89 | assert_eq!(atoms.positions, vec![[0., 0., 0.]]); 90 | assert_eq!(densities[0][0], 0.15229118148E+03 / atoms.lattice.volume); 91 | assert_eq!( 92 | densities[0][32767], 93 | 0.13021559741E+03 / atoms.lattice.volume 94 | ); 95 | assert_eq!(densities[1][0], -0.50501186231E-02 / atoms.lattice.volume); 96 | assert_eq!( 97 | densities[1][32767], 98 | -0.56304248048E-02 / atoms.lattice.volume 99 | ); 100 | assert_eq!(densities[2][0], -0.89074011765E-03 / atoms.lattice.volume); 101 | assert_eq!( 102 | densities[2][32767], 103 | -0.95861710945E-03 / atoms.lattice.volume 104 | ); 105 | assert_eq!(densities[3][0], 0.16139598297E+02 / atoms.lattice.volume); 106 | assert_eq!( 107 | densities[3][32767], 108 | 0.13834498321E+02 / atoms.lattice.volume 109 | ); 110 | } 111 | 112 | #[test] 113 | fn vasp_read_ncl_chg() { 114 | let filename = String::from("tests/vasp/CHG_ncl"); 115 | let vasp = Vasp {}; 116 | let (voxel_origin, grid, atoms, densities) = match vasp.read(filename) { 117 | Ok(r) => r, 118 | Err(e) => panic!("{}", e), 119 | }; 120 | assert_eq!(voxel_origin, [0.; 3]); 121 | assert_eq!(grid, [32, 32, 32]); 122 | assert_eq!(atoms.positions, vec![[0., 0., 0.]]); 123 | assert_eq!(densities[0][0], 152.29 / atoms.lattice.volume); 124 | assert_eq!(densities[0][32767], 130.22 / atoms.lattice.volume); 125 | assert_eq!(densities[1][0], -0.50501E-02 / 
atoms.lattice.volume); 126 | assert_eq!(densities[1][32767], -0.56304E-02 / atoms.lattice.volume); 127 | assert_eq!(densities[2][0], -0.89074E-03 / atoms.lattice.volume); 128 | assert_eq!(densities[2][32767], -0.95862E-03 / atoms.lattice.volume); 129 | assert_eq!(densities[3][0], 16.140 / atoms.lattice.volume); 130 | assert_eq!(densities[3][32767], 13.834 / atoms.lattice.volume); 131 | } 132 | } 133 | --------------------------------------------------------------------------------