├── .github └── workflows │ └── ci.yml ├── .gitignore ├── LICENSE ├── README.md ├── TODO ├── bin ├── cif2any.py ├── cif2sgroup.py ├── cut-cp2k.sh ├── cut-cpmd.sh ├── matdyn2fqha.py ├── pdb2cif.py ├── plot_dispersion.py └── solvate.sh ├── doc ├── Makefile ├── README.md ├── generate-doc.sh ├── print-fextension-signatures.py └── source │ ├── _static │ ├── crossval_pr_gauss.png │ ├── interpol_2d.png │ ├── logo.png │ ├── overfit_reg.png │ ├── random_Si.png │ ├── rbf_2d_surface.png │ ├── rbfs.png │ └── smooth_1d.png │ ├── _templates │ └── autosummary │ │ └── class.rst │ ├── conf.py │ ├── index.rst │ └── written │ ├── background │ ├── ase.rst │ ├── coord_trans.rst │ ├── index.rst │ ├── param_study.rst │ ├── parsing.rst │ ├── phonon_dos.rst │ ├── pwscf.rst │ ├── qha.md │ └── rbf.rst │ ├── cp2k_restart.rst │ ├── dispersion_example.rst │ ├── features.rst │ ├── index.rst │ ├── install.rst │ ├── refs.rst │ └── tutorial.rst ├── examples ├── benchmarks │ ├── dist_speed_struct.py │ ├── dist_speed_traj.py │ └── distmat_speed.py ├── dispersion │ ├── clean.sh │ ├── dispersion.py │ ├── pw.out │ └── q2r.fc.gz ├── enthalpy.py ├── fft_padding.py ├── filter_example.py ├── lammps │ └── md_nvt_npt │ │ ├── AlN.tersoff │ │ ├── README.rst │ │ ├── clean.sh │ │ └── run.py ├── lorentz.py ├── nd_matmul.py ├── parameter_study │ ├── 10local_lammps_ev │ │ ├── 10input.py │ │ ├── 20parse.py │ │ ├── 30gather.py │ │ ├── 40eval.sh │ │ ├── 50eval.py │ │ ├── README │ │ ├── calc.templ │ │ │ ├── job.local │ │ │ ├── lmp.in │ │ │ ├── lmp.struct │ │ │ └── lmp.struct.symbols │ │ ├── clean.sh │ │ └── potentials │ │ │ └── AlN.tersoff │ ├── 20cluster_pwscf_convergence │ │ ├── 10input.py │ │ ├── 20parse.py │ │ ├── 30gather.py │ │ ├── 40eval.sh │ │ ├── 50eval.py │ │ ├── README │ │ ├── calc.templ │ │ │ ├── job.pbs.theo │ │ │ └── pw.in │ │ └── clean.sh │ ├── 30two_hosts_extend │ │ ├── 10input.py │ │ ├── README │ │ ├── calc.templ │ │ │ ├── input.in │ │ │ ├── job.host0 │ │ │ └── job.host1 │ │ └── clean.sh │ └── README ├── phonon_dos │ └── pdos_methods.py ├── print_struct_traj_api.py ├── rbf │ ├── compare_pwtools-rbf-krr_sklearn-gp.py │ ├── crossval_convergence.py │ ├── crossval_map_p_r.py │ ├── overfit.py │ ├── plot_rbfs.py │ └── surface.py ├── rpdf │ ├── compare_vmd │ │ ├── pw.in │ │ ├── pw.out.gz │ │ ├── rpdf_pw_data.py │ │ └── rpdf_random.py │ └── rpdf_aln.py ├── vinet_deriv.wxm └── vmd │ ├── nice_bonds.tcl │ └── snap.tcl ├── requirements.txt ├── requirements_doc.txt ├── requirements_optional.txt ├── requirements_test.txt ├── setup.py ├── src ├── _ext_src │ ├── Makefile │ ├── dcd.f90 │ └── flib.f90 └── pwtools │ ├── __init__.py │ ├── arrayio.py │ ├── atomic_data.py │ ├── base.py │ ├── batch.py │ ├── calculators.py │ ├── comb.py │ ├── common.py │ ├── config.py │ ├── constants.py │ ├── crys.py │ ├── dcd.py │ ├── decorators.py │ ├── eos.py │ ├── io.py │ ├── kpath.py │ ├── lammps.py │ ├── mpl.py │ ├── mttk.py │ ├── num.py │ ├── parse.py │ ├── pwscf.py │ ├── pydos.py │ ├── random.py │ ├── rbf │ ├── __init__.py │ ├── core.py │ └── hyperopt.py │ ├── regex.py │ ├── signal.py │ ├── sql.py │ ├── symmetry.py │ ├── test │ ├── testenv.py │ ├── tools.py │ └── utils │ │ ├── __init__.py │ │ ├── gibbs_test_data.py │ │ ├── lammps │ │ ├── 10gen_lammps_test_data.py │ │ ├── 20run.sh │ │ ├── 30pack.sh │ │ └── AlN.tersoff │ │ ├── matdyn_modes.py │ │ ├── rand_container.py │ │ ├── rpdf_ref.py │ │ └── vc_md_cell.py │ ├── thermo.py │ ├── timer.py │ ├── verbose.py │ └── visualize.py └── test ├── README ├── __init__.py ├── check_dependencies.py ├── conftest.py ├── files 
├── angle │ └── rs.cif ├── ase │ ├── pw.scf.out.start.rs-AlN │ └── pw.scf.out.start.wz-AlN ├── calc.templ │ ├── job.host0 │ ├── job.host1 │ └── pw.in ├── cif_cart_struct.cif ├── cif_struct.cif ├── cml_struct.cml ├── cp2k │ ├── cell_opt │ │ └── cell_opt.tgz │ ├── dcd │ │ ├── npt_dcd.tgz │ │ └── npt_xyz.tgz │ ├── md │ │ ├── npt_f_print_low.tgz │ │ └── nvt_print_low.tgz │ └── scf │ │ ├── cp2k.scf.out.print_low.gz │ │ └── cp2k.scf.out.print_medium.gz ├── cpmd │ ├── md_bo_lanczos.tgz │ ├── md_bo_odiis.tgz │ ├── md_bo_odiis_npt.tgz │ ├── md_cp_mttk.tgz │ ├── md_cp_nve.tgz │ ├── md_cp_nvt_nose.tgz │ ├── md_cp_pr.tgz │ └── scf.tgz ├── dyn │ ├── ph.dyn1 │ └── ph.dyn2 ├── dynmat │ ├── dynmat.axsf │ ├── dynmat.in │ ├── dynmat.modes │ ├── dynmat_all.out │ └── dynmat_min.out ├── ev │ ├── EVPAI.OUT.gz │ ├── PARAM.OUT │ ├── PVPAI.OUT.gz │ ├── evdata.txt │ └── min.txt ├── fqha.out.gz ├── gibbs │ ├── 1d │ │ ├── cartman.h5 │ │ └── kenny.h5 │ ├── 2d │ │ ├── cartman.h5 │ │ └── kenny.h5 │ └── 3d-fake-1d │ │ └── deskbot.h5 ├── lammps │ ├── md-npt.tgz │ ├── md-nvt.tgz │ ├── mix_output.tgz │ └── vc-relax.tgz ├── matdyn.freq ├── matdyn.modes ├── pdb_struct.pdb ├── pw.constant_cell.txt ├── pw.md.in ├── pw.md.out.gz ├── pw.md_london.out.gz ├── pw.md_one_atom.out.gz ├── pw.scf.out.gz ├── pw.scf_no_forces_stress.out.gz ├── pw.scf_one_atom.out.gz ├── pw.scf_verbose_london.out.gz ├── pw.vc_md.cell.out ├── pw.vc_relax.in ├── pw.vc_relax.out.gz ├── pw.vc_relax_cell_unit.out.gz ├── pw.vc_relax_coords_fixed.out.gz ├── pw.vc_relax_no_cell_unit.out.gz ├── qe_matdyn_disp │ ├── matdyn.freq.disp.gp.gz │ └── matdyn.freq.disp.gz ├── qe_pseudos │ ├── Al.pbe-n-kjpaw_psl.0.1.UPF.gz │ └── N.pbe-n-kjpaw_psl.0.1.UPF.gz ├── ref_test_pdos │ ├── dd.txt.gz │ ├── dv.txt.gz │ ├── fd.txt.gz │ └── fv.txt.gz ├── rpdf │ ├── aln_ibrav0_sc.cif │ ├── aln_ibrav2_sc.cif │ ├── rand_3d.cell.txt │ ├── rand_3d.coords0.txt │ ├── rand_3d.coords1.txt │ ├── result.hist.aln_ibrav0_sc.txt │ ├── result.hist.aln_ibrav2_sc.txt │ ├── result.hist.rand_3d.txt │ ├── result.num_int.aln_ibrav0_sc.txt │ ├── result.num_int.aln_ibrav2_sc.txt │ ├── result.num_int.rand_3d.txt │ ├── result.rad.aln_ibrav0_sc.txt │ ├── result.rad.aln_ibrav2_sc.txt │ ├── result.rad.rand_3d.txt │ ├── result.rmax_auto.aln_ibrav0_sc.txt │ ├── result.rmax_auto.aln_ibrav2_sc.txt │ └── result.rmax_auto.rand_3d.txt └── si.phdos.gz ├── runtests.sh ├── test_acorr.py ├── test_angle.py ├── test_ase.py ├── test_ase_calculators.py ├── test_backup.py ├── test_batch.py ├── test_celldm_cryst_const.py ├── test_center_on_atom.py ├── test_cif.py ├── test_common.py ├── test_conv_table.py ├── test_coord_trans.py ├── test_cp2k.py ├── test_cpmd_md.py ├── test_cpmd_scf.py ├── test_crys_align_cart.py ├── test_crys_cell_tools.py ├── test_crys_cell_tools_fortran.py ├── test_cut_cpmd.py ├── test_data2d.py ├── test_datand.py ├── test_dcd.py ├── test_deriv.py ├── test_dist.py ├── test_dist_traj.py ├── test_distsq_frac.py ├── test_eos.py ├── test_extend_array.py ├── test_f2py_flib_openmp.py ├── test_fft.py ├── test_file_template.py ├── test_frepr.py ├── test_fromstring.py ├── test_get_cont.py ├── test_gibbs.py ├── test_h5.py ├── test_ibrav.py ├── test_import.py ├── test_interpol.py ├── test_io_txt.py ├── test_is_seq.py ├── test_kpath.py ├── test_lammps.py ├── test_lazyprop.py ├── test_match_mask.py ├── test_mpl.py ├── test_nearest_neighbor.py ├── test_norm_int.py ├── test_num.py ├── test_parameter_study.py ├── test_parser_units.py ├── test_pbc_wrap.py ├── test_pdb.py ├── test_pdos.py ├── test_pdos_coord_trans.py ├── 
test_polyfit.py ├── test_pw_md_out.py ├── test_pw_more_forces.py ├── test_pw_scf_out.py ├── test_pw_vc_md_cell_alat.py ├── test_pw_vc_relax_cell_unit.py ├── test_pw_vc_relax_coords_fixed.py ├── test_pw_vc_relax_out.py ├── test_pwscf.py ├── test_pwscf_read_dyn.py ├── test_pytest_stuff.py ├── test_qha.py ├── test_rand_struct.py ├── test_rbf.py ├── test_rms.py ├── test_rpdf.py ├── test_save_object.py ├── test_scell.py ├── test_signal.py ├── test_spline.py ├── test_sql.py ├── test_sql_column.py ├── test_structure.py ├── test_sum.py ├── test_symmetry.py ├── test_template_replace.py ├── test_test_tools.py ├── test_timer.py ├── test_trajectory.py ├── test_vacf_methods.py ├── test_velocity.py ├── test_vlinspace.py └── test_write_mol.py /.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | dist/ 3 | doc/build 4 | doc/source/generated 5 | doc/source/written/background/pwtools.pydos.*.rst 6 | doc/sphinx-autodoc 7 | *.swp 8 | *.swo 9 | *.pyf 10 | *.pyc 11 | *.so 12 | *.egg-info 13 | *.bak 14 | .eggs/ 15 | __pycache__ 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) Steve Schmerler 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | 3. Neither the name of the copyright holder nor the names of its contributors 15 | may be used to endorse or promote products derived from this software 16 | without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![image](https://zenodo.org/badge/51149109.svg)](https://zenodo.org/badge/latestdoi/51149109) 2 | ![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/elcorto/pwtools/ci.yml?label=tests) 3 | 4 | # About 5 | 6 | `pwtools` is a Python package for pre- and postprocessing of atomistic 7 | calculations, mostly targeted to [Quantum 8 | Espresso](http://www.quantum-espresso.org), [CPMD](http://www.cpmd.org), 9 | [CP2K](http://cp2k.org) and [LAMMPS](http://lammps.org). 
It is almost, 10 | but not quite, entirely unlike [ASE](https://wiki.fysik.dtu.dk/ase), 11 | with some tools extending 12 | [numpy](http://www.numpy.org)/[scipy](http://www.scipy.org). It has a 13 | set of powerful parsers and data types for storing calculation data. See 14 | the [feature 15 | overview](http://elcorto.github.io/pwtools/written/features.html) for 16 | more. 17 | 18 | The [dcd 19 | code](https://github.com/elcorto/pwtools/blob/master/src/pwtools/dcd.py) 20 | is now part of [ASE](https://wiki.fysik.dtu.dk/ase)'s [dcd reader for 21 | CP2K files](https://gitlab.com/ase/ase/blob/master/ase/io/cp2k.py). 22 | [Thanks](https://gitlab.com/ase/ase/merge_requests/1109)! 23 | 24 | # Documentation 25 | 26 | Have a look at [the docs](http://elcorto.github.io/pwtools). Quick start 27 | instructions can be found in [the 28 | tutorial](http://elcorto.github.io/pwtools/written/tutorial.html). Many 29 | examples, besides the ones in the doc strings are in [the 30 | tests](https://github.com/elcorto/pwtools/tree/master/test). 31 | 32 | # Install 33 | 34 | See the [install 35 | docs](http://elcorto.github.io/pwtools/written/install.html). 36 | 37 | # Publications 38 | 39 | `pwtools` was used in these works: 40 | 41 | * [S. Schmerler, J. Kortus, "Ab initio study of AlN: Anisotropic thermal 42 | expansion, phase diagram, and high-temperature rocksalt to wurtzite phase 43 | transition", PRB 89, 064109, 44 | 2014](https://journals.aps.org/prb/abstract/10.1103/PhysRevB.89.064109) 45 | -------------------------------------------------------------------------------- /bin/cif2sgroup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # cif2sgroup.py 4 | # 5 | # Extract information from a .cif file and print an input file for WIEN2k's 6 | # "sgroup" symmetry analysis tool. Note that the Cif reader assumes P1 7 | # symmetry, i.e. symmetry information in the cif file is ignored. Use 8 | # ase.io.read(), which seems to parse symmetry information. 9 | # 10 | # usage:: 11 | # $ cif2sgroup.py foo.cif > foo.sgroup.in 12 | # # Find primitive cell 13 | # $ sgroup -prim [-set-TOL=1e-4] foo.sgroup.in 14 | # 15 | # See ``sgroup -help`` for more options. 16 | # 17 | # Notes 18 | # ----- 19 | # The unit of length of a,b,c is not important (Angstrom, Bohr,...). 20 | 21 | import sys 22 | from pwtools import io 23 | fn = sys.argv[1] 24 | struct = io.read_cif(fn) 25 | print(io.wien_sgroup_input(struct, lat_symbol='P')) 26 | -------------------------------------------------------------------------------- /bin/cut-cpmd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | prog=$(basename $0) 4 | simulate=false 5 | sed_opts="-r -e" 6 | inplace=true 7 | bakname=bak 8 | 9 | usage(){ 10 | cat << EOF 11 | 12 | Truncate CPMD output files from the 's iteration. Which files is 13 | hardcoded: 14 | 15 | CELL 16 | ENERGIES 17 | STRESS 18 | TRAJECTORY 19 | FTRAJECTORY 20 | TRAJEC.xyz 21 | 22 | Files which don't exist will be skipped. 23 | 24 | usage: 25 | ------ 26 | $prog [-hs] 27 | 28 | options: 29 | -------- 30 | -s : simulate 31 | --not-inplace : don't use ``sed -i.$bakname ...``, use if YKWYAD 32 | 33 | example 34 | ------- 35 | Say your calc ran 8423 steps, then was killed. The restart file is written 36 | every 50 steps, so the last was written at 8400. A restart using "RESTART 37 | ACCUMULATORS" and continuing files with "<<< NEW DATA >>>" markers starts at 38 | 8401 and thus repeats 8401..8423. 
You want to delete these 23 steps from the 39 | old CELL, ENERGIES, etc files before the restart, such that they are cleanly 40 | continued. Then use: 41 | 42 | $ $prog /path/to/calc/ 8401 43 | EOF 44 | } 45 | 46 | msg(){ 47 | echo "$prog: $@" 48 | } 49 | 50 | err(){ 51 | echo "$prog: error: $@" 52 | exit 1 53 | } 54 | 55 | cmdline=$(getopt -o hs -l not-inplace -- "$@") 56 | eval set -- "$cmdline" 57 | while [ $# -gt 0 ]; do 58 | case "$1" in 59 | -s) 60 | simulate=true 61 | ;; 62 | --not-inplace) 63 | inplace=false 64 | ;; 65 | -h) 66 | usage 67 | exit 0 68 | ;; 69 | --) 70 | shift 71 | break 72 | ;; 73 | *) 74 | echo "cmdline error" 75 | exit 1 76 | ;; 77 | esac 78 | shift 79 | done 80 | 81 | [ $# -ne 2 ] && err "illegal number of args (need 2)" 82 | dr=$1 83 | [ -d "$dr" ] || err "not a dir: $dr" 84 | step=$2 85 | 86 | while read conf; do 87 | fn=$(echo "$conf" | awk -F '@@' '{print $1}') 88 | sedstr=$(echo "$conf" | awk -F '@@' '{print $2}') 89 | _bakname=$(echo "$conf" | awk -F '@@' '{print $3}') 90 | [ -n "$_bakname" ] && bakname=$_bakname 91 | if $inplace; then 92 | cmd="sed -i.$bakname $sed_opts '$sedstr' $fn" 93 | else 94 | cmd="sed $sed_opts '$sedstr' $fn" 95 | fi 96 | if [ -f $fn ]; then 97 | echo ">> $fn <<" 98 | echo " $cmd" 99 | $simulate || eval $cmd 100 | else 101 | msg "not found: $fn" 102 | fi 103 | done << EOF 104 | $dr/CELL@@/.*CELL PARAMETERS.*Step.*$step.*/,$ d 105 | $dr/ENERGIES@@/^\\\s+$step\\\s.*/,$ d 106 | $dr/STRESS@@/.*TOTAL STRESS.*Step.*$step.*/,$ d 107 | $dr/TRAJECTORY@@/^\\\s+$step\\\s.*/,$ d 108 | $dr/FTRAJECTORY@@/^\\\s+$step\\\s.*/,$ d 109 | $dr/TRAJEC.xyz@@/^\\\s+STEP:\\\s+$step.*/,$ d 110 | $dr/TRAJEC.xyz@@$,$ d@@bak2 111 | EOF 112 | -------------------------------------------------------------------------------- /bin/matdyn2fqha.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Load phonon DOS as calculated by QE's matdyn.x (e.g. matdyn.phdos): 4 | # [f in cm^-1] [dos] 5 | # and int_f dos(f) = 3*natom. 6 | # 7 | # Print a file to stdout which is a suitable phonon dos input for F_QHA.f90 (as 8 | # of QE 4.2). This file *must* be named PHDOS.out . This file has the form 9 | # 10 | # natom 11 | # natom nstep emax de 12 | # 13 | # 14 | # where 15 | # natom : number of atoms in unit cell 16 | # nstep : number of rows in matdyn.phdos 17 | # emax : max. frequency (in cm^-1) 18 | # de : frequency axis spacing 19 | # 20 | # usage: 21 | # $ matdyn2fqha.py matdyn.phdos > PHDOS.out 22 | # $ f90 F_QHA.f90 -o fqha.x 23 | # # make input file "fqha.in" for fqha.x: 24 | # $ cat fqha.in 25 | # PHDOS.out 26 | # fqha.out 27 | # 10,1500,10 28 | # ! phonon dos file w/ special header 29 | # ! file where to write output 30 | # ! Tmin,Tmax,dT 31 | # $ ./fqha.x < fqha.in 32 | # 33 | # Find results in fqha.out . 34 | 35 | import sys 36 | import numpy as np 37 | from scipy.integrate import simpson as simps 38 | 39 | filename = sys.argv[1] 40 | arr = np.loadtxt(filename) 41 | freq = arr[:,0] 42 | dos = arr[:,1] 43 | 44 | integral = simps(dos, x=freq) 45 | 46 | natom = integral / 3. 
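# As noted in the header comment above, the phonon DOS is normalized such that
# its integral over frequency equals 3*natom (three modes per atom), so
# dividing the numerical integral by 3 recovers the number of atoms in the
# unit cell.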
47 | nstep = len(freq) 48 | emax = freq[-1] 49 | de = freq[1] - freq[0] 50 | 51 | sys.stderr.write("""\ 52 | integal: %f 53 | natom: %f 54 | nstep: %i 55 | emax: %f 56 | de: %f 57 | """%(integral, natom, nstep, emax, de)) 58 | 59 | inatom = int(round(natom)) 60 | print("%i\n%i %i %f %f" %(inatom, inatom, nstep, emax, de)) 61 | print(open(filename).read()) 62 | -------------------------------------------------------------------------------- /bin/pdb2cif.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Read badly formatted PDB file (i.e. generated by Gromacs' genbox tool) and 4 | # write to CIF file. Use this if openbabel doesn't do the job properly. It may 5 | # complain that columns 77-78 are empty and reads the atom symbols wrong. 6 | # pwtools.parse.PDBFile is more forgiving. 7 | 8 | import sys 9 | from pwtools import io 10 | 11 | def help(prog): 12 | print(""" 13 | usage 14 | ----- 15 | {} input.pdb [output.cif] 16 | """.format(prog)) 17 | 18 | if __name__ == '__main__': 19 | argv = sys.argv 20 | prog = argv[0] 21 | if (len(argv) not in [2,3]) or (argv[1] in ['-h', '--help']): 22 | help(prog) 23 | sys.exit() 24 | infile = argv[1] 25 | if len(argv) == 3: 26 | outfile = argv[2] 27 | else: 28 | outfile = infile + '.cif' 29 | 30 | io.write_cif(outfile, io.read_pdb(infile)) 31 | -------------------------------------------------------------------------------- /bin/plot_dispersion.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Plot a dispersion generated by QE's matdyn.x. This is a version of QE's 4 | # plotband.x, which produces a nice matplotlib figure :) Reads a k-point - 5 | # frequency file produced by QE's matdyn.x (flfrq in matdyn.x input). 6 | # 7 | # This is only intended as a quick inspection tool. For labeling special 8 | # points, see examples/dispersion/ for a more complete example. 9 | 10 | # usage: 11 | # plot_dispersion.py matdyn.freq 12 | # 13 | # matdyn.x must have been instructed to calculate a phonon dispersion along a 14 | # predefined path in the BZ. e.g. natom=2, nbnd=6, 101 k-points on path 15 | # 16 | # `matdyn.in`:: 17 | # 18 | # &input 19 | # asr='crystal', 20 | # amass(1)=26.981538, 21 | # amass(2)=14.00674, 22 | # flfrc='fc', 23 | # flfrq='matdyn.freq.disp' 24 | # / 25 | # 101 | nks 26 | # 0.000000 0.000000 0.000000 | 27 | # 0.037500 0.037500 0.000000 | List of nks = 101 k-points 28 | # .... | 29 | # 30 | # `matdyn_freq` has the form:: 31 | # 32 | #
33 | # 34 | # 36 | # 2: 58 | warnings.warn('kpath definition file no longer supported') 59 | 60 | # QE 5.x: plot matdyn.freq.gp 61 | # 62 | ##import numpy as np 63 | ##data = np.loadtxt(sys.argv[1]) 64 | ##path_norm = data[:,0] 65 | ##freqs = data[:,1:] 66 | 67 | ks, freqs = read_matdyn_freq(sys.argv[1]) 68 | path_norm = get_path_norm(ks) 69 | 70 | plt.plot(path_norm, freqs, 'k') 71 | plt.show() 72 | 73 | -------------------------------------------------------------------------------- /bin/solvate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | prog=$(basename $0) 4 | logfile=${prog}.log 5 | usage(){ 6 | cat << EOF 7 | Add molecules (-ci option) to a molecule in a box (foo.pdb). If -ci is not 8 | given, then water is used. We use Gromacs's "gmx insert-molecules" to insert. 9 | Also we use pwtools's pdb2cif.py and cif2any.py scripts. 10 | 11 | If you only have foo.cif or so, use openbabel's "babel" tool to convert first: 12 | 13 | $ apt-get install gromacs openbabel # Debian and derivatives 14 | $ babel foo.cif foo.pdb 15 | $ $prog foo.pdb ... 16 | 17 | usage 18 | ----- 19 | $prog foo.pdb [ -ci insert.pdb ] [] 20 | 21 | will produce 22 | foo_gmx.pdb # with water 23 | foo_gmx.cif # with water, cif version 24 | foo_gmx.cif.txt # output from pwtools/bin/cif2any.py 25 | 26 | options 27 | ------- 28 | All options are passed to Gromacs, such as "-nmol" and "-seed". Run 29 | gmx help insert-molecules 30 | for more options. 31 | 32 | example 33 | ------- 34 | $prog foo.pdb -nmol 18 -seed 123 35 | $prog foo.pdb -nmol 18 -seed 123 -ci not_water.pdb 36 | EOF 37 | } 38 | 39 | err(){ 40 | echo "error: $@" 41 | exit 1 42 | } 43 | 44 | [ $# -eq 0 ] && err "no input args" 45 | if echo "$@" | grep -qEe '-h|--help'; then 46 | usage 47 | exit 0 48 | fi 49 | 50 | if echo "$@" | grep -qEe '-ci '; then 51 | default_ci= 52 | else 53 | default_ci="-ci water.pdb" 54 | cat > water.pdb << eof 55 | ATOM 1 O OSP3 1 4.013 0.831 -9.083 1.00 0.00 56 | ATOM 2 1H OSP3 1 4.941 0.844 -8.837 1.00 0.00 57 | ATOM 3 2H OSP3 1 3.750 -0.068 -9.293 1.00 0.00 58 | TER 59 | eof 60 | fi 61 | 62 | rm -f *.log \#* 63 | 64 | name=${1/.pdb/} 65 | shift 66 | start_gmx=${name}.pdb 67 | out_gmx=${name}_gmx.pdb 68 | out_cif=${name}_gmx.cif 69 | 70 | # add -ci molecule 71 | # older Gromacs versions (< 5.x I guess) have a tool called genbox, with 72 | # slighty different options, smth like 73 | # genbox_d -ci water.pdb -cp foo_gmx.pdb -nmol 18 -o foo_gmx.pdb 74 | gmx insert-molecules $@ $default_ci -f $start_gmx -o $out_gmx >> $logfile 2>&1 75 | 76 | pdb2cif.py $out_gmx $out_cif >> $logfile 2>&1 77 | cif2any.py $out_cif > $out_cif.txt 78 | 79 | grep -iE 'error|fatal|illegal' $logfile 80 | grep 'Added.*molecules.*out of.*requested' $logfile 81 | rm -f \#* 82 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SPHINXPROJ = pwtools 9 | SOURCEDIR = source 10 | BUILDDIR = build 11 | 12 | # Put it first so that "make" without argument is like "make help". 
13 | help: 14 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 15 | 16 | .PHONY: help Makefile 17 | 18 | # Catch-all target: route all unknown targets to Sphinx using the new 19 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 20 | %: Makefile 21 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 22 | -------------------------------------------------------------------------------- /doc/README.md: -------------------------------------------------------------------------------- 1 | We use [sphinx-autodoc], please check the docs over there. In particular, 2 | `generate-apidoc.sh` is almost the same as [the script in `sphinx-autodoc`'s 3 | example project][script]. We only changed the exclude regex `sphinx-autodoc ... 4 | -X `. 5 | 6 | [sphinx-autodoc]: https://github.com/elcorto/sphinx-autodoc 7 | [script]: https://github.com/elcorto/sphinx-autodoc/blob/master/example_package/autodoctest/doc/generate-apidoc.sh 8 | -------------------------------------------------------------------------------- /doc/generate-doc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -eu 4 | 5 | err(){ 6 | echo "error: $@" 7 | exit 1 8 | } 9 | 10 | # We assume 11 | # /path/to/package_name 12 | # ├── doc # <-- here 13 | # │   ├── generate-doc.sh 14 | # │   ├── Makefile 15 | # │   └── source 16 | # │   ├── conf.py 17 | # │   ├── _static 18 | # │   └── _templates 19 | # ├── setup.py 20 | # ... 21 | 22 | # /path/to/package_name 23 | package_dir=$(readlink -f ../) 24 | 25 | # package_name 26 | package_name=$(basename $package_dir) 27 | 28 | # /path/to/package_name/source/index.rst 29 | main_index_file=source/index.rst 30 | 31 | ##autodoc_extra_opts=--write-doc 32 | autodoc_extra_opts= 33 | 34 | autodoc=sphinx-autodoc 35 | which $autodoc > /dev/null 2>&1 || err "executable $autodoc not found" 36 | 37 | # ensure a clean generated tree, "make clean" only removes build/ 38 | rm -rf $(find $package_dir -name "*.pyc" -o -name "__pycache__") 39 | rm -rf build/ source/generated/ 40 | 41 | # If main index doesn't exist, generate, else don't touch it, even though 42 | # sphinx-autodoc's -i option creates a backup before overwriting, it 43 | # would still be annoying. Thus use -i once to create an initial 44 | # source/index.rst which can then be tweaked. 45 | [ -f $main_index_file ] || autodoc_extra_opts="$autodoc_extra_opts -i" 46 | 47 | # generate API doc rst files 48 | $autodoc $autodoc_extra_opts -s source -a generated/api \ 49 | -X 'test\.(test_|check_dep.*|utils|testenv|tools|conftest)' $package_name 50 | 51 | make html 52 | -------------------------------------------------------------------------------- /doc/print-fextension-signatures.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Load Fortran extensions and print all function signatures. 5 | """ 6 | 7 | # XXX maybe use that to auto-generate an rst file with API docs for the 8 | # extension functions. 
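# f2py-generated wrapper functions carry their Fortran call signature in the
# __doc__ string, so print_doc_attr() below only needs to walk the module
# namespace and print those docstrings.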
9 | 10 | def print_doc_attr(module): 11 | name = "module = " + module.__name__ 12 | fn = "file = " + module.__file__ 13 | bar = "="*79 14 | print("%s\n%s\n%s\n%s" %(bar, name, fn, bar)) 15 | for key,val in module.__dict__.items(): 16 | if not key.startswith('__') and hasattr(val, '__doc__'): 17 | doc = getattr(val, '__doc__') 18 | print(doc) 19 | 20 | from pwtools import _flib 21 | print_doc_attr(_flib) 22 | from pwtools import _dcd 23 | print_doc_attr(_dcd) 24 | -------------------------------------------------------------------------------- /doc/source/_static/crossval_pr_gauss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/doc/source/_static/crossval_pr_gauss.png -------------------------------------------------------------------------------- /doc/source/_static/interpol_2d.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/doc/source/_static/interpol_2d.png -------------------------------------------------------------------------------- /doc/source/_static/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/doc/source/_static/logo.png -------------------------------------------------------------------------------- /doc/source/_static/overfit_reg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/doc/source/_static/overfit_reg.png -------------------------------------------------------------------------------- /doc/source/_static/random_Si.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/doc/source/_static/random_Si.png -------------------------------------------------------------------------------- /doc/source/_static/rbf_2d_surface.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/doc/source/_static/rbf_2d_surface.png -------------------------------------------------------------------------------- /doc/source/_static/rbfs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/doc/source/_static/rbfs.png -------------------------------------------------------------------------------- /doc/source/_static/smooth_1d.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/doc/source/_static/smooth_1d.png -------------------------------------------------------------------------------- /doc/source/_templates/autosummary/class.rst: -------------------------------------------------------------------------------- 1 | {{ fullname }} 2 | {{ underline }} 3 | 4 | .. currentmodule:: {{ module }} 5 | 6 | .. autoclass:: {{ objname }} 7 | :no-members: 8 | :no-inherited-members: 9 | :no-special-members: 10 | 11 | {% block methods %} 12 | {% if methods %} 13 | 14 | .. 
automethod:: __init__ 15 | 16 | {% if ('__call__' in all_methods) or ('__call__' in inherited_members) %} 17 | 18 | .. automethod:: __call__ 19 | 20 | {% endif %} 21 | 22 | .. rubric:: Methods 23 | 24 | .. autosummary:: 25 | :toctree: 26 | {% for item in all_methods %} 27 | {%- if not item.startswith('_') or item in ['__mul__', '__getitem__', '__len__'] %} 28 | ~{{ name }}.{{ item }} 29 | {%- endif -%} 30 | {%- endfor %} 31 | {% for item in inherited_members %} 32 | {%- if item in ['__mul__', '__getitem__', '__len__'] %} 33 | ~{{ name }}.{{ item }} 34 | {%- endif -%} 35 | {%- endfor %} 36 | {% endif %} 37 | {% endblock %} 38 | 39 | 40 | {% block attributes %} 41 | {% if attributes %} 42 | .. rubric:: Attributes 43 | 44 | .. autosummary:: 45 | :toctree: 46 | {% for item in all_attributes %} 47 | {%- if not item.startswith('_') %} 48 | ~{{ name }}.{{ item }} 49 | {%- endif -%} 50 | {%- endfor %} 51 | {% endif %} 52 | {% endblock %} 53 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | ==================================== 2 | Welcome to the pwtools documentation 3 | ==================================== 4 | 5 | ``pwtools`` is a Python package for pre- and postprocessing of atomistic 6 | calculations, mostly targeted to `Quantum Espresso `_, CPMD_, CP2K_ and 7 | LAMMPS_. It is almost, but not quite, entirely unlike ASE_, with some tools 8 | extending numpy_/scipy_. It has a set of powerful parsers and data types for 9 | storing calculation data. See the :ref:`feature overview ` for more. 10 | 11 | .. To be generated by sphinx-autodoc.py or some other API documentation tool. 12 | If generated/ or parts of it doesn't exist, it will simply be ignored. 13 | 14 | .. toctree:: 15 | :maxdepth: 1 16 | 17 | generated/api/index 18 | 19 | 20 | .. Written docs. 21 | 22 | .. toctree:: 23 | :maxdepth: 2 24 | 25 | written/index 26 | 27 | 28 | .. include:: written/refs.rst 29 | -------------------------------------------------------------------------------- /doc/source/written/background/ase.rst: -------------------------------------------------------------------------------- 1 | Relation to ASE 2 | =============== 3 | 4 | The very nice ASE_ project is in some way related. It also stores atomic 5 | structure data in Python objects for further manipulation. If needed, one can 6 | convert a :class:`~pwtools.crys.Structure` to an ASE Atoms object. The design 7 | goal of ASE is, however, different from pwtools. ASE provides interfaces to a 8 | large pile of ab initio codes ("calculators"). MD and structure optimization in 9 | ASE is coded in Python, using only the calculator's SCF engine in every step to 10 | get energy and forces. This is a very good idea, but only structure 11 | optimization is really developed and tested, as it seems. MD not so much. 12 | Better use a special MD code here. I'm not sure if ASE provides wave function 13 | extrapolation for Born-Oppenheimer MD [*]. Phonon calculations based on density 14 | functional perturbation theory like PWscf/PH or Abinit are not implemented 15 | (maybe in GPAW_?). However, the supercell method can be used with the related 16 | phonopy_ package. The focus of the pwtools package is to be a handy pre- and 17 | postprocessor providing pythonic access to all input and output quantities of 18 | the used ab initio codes. In ASE, the calculator abtracts the backend code's 19 | input away. 
With pwtools, you need to know the input file syntax of your 20 | backend code. Once you know that, you use only template files to set up 21 | calculations. Regarding visualization, ASE has some kind of GUI. We have 22 | :mod:`~pwtools.visualize`, which is best used in an interactive Ipython 23 | session. 24 | 25 | In fact, appart from :mod:`~pwtools.parse`, which implements parsers for ab 26 | initio code output and :mod:`~pwtools.pwscf`, all other parts of the package 27 | are completely independent from any external simulation code's output. 28 | Especially the parameter study tools in :mod:`~pwtools.batch` can be used for 29 | any kind of (computational) study, since only user-supplied template files are 30 | used. 31 | 32 | [*] Last time I checked, I stumbled over a `mailing list thread`_ where they said 33 | that in LCAO mode, the density would be re-used between steps. 34 | 35 | .. _mailing list thread: https://listserv.fysik.dtu.dk/pipermail/gpaw-users/2013-April/002044.html 36 | 37 | .. include:: ../refs.rst 38 | -------------------------------------------------------------------------------- /doc/source/written/background/index.rst: -------------------------------------------------------------------------------- 1 | .. Written documentation. 2 | 3 | =================================== 4 | Background, details, special topics 5 | =================================== 6 | 7 | .. toctree:: 8 | 9 | parsing 10 | phonon_dos 11 | qha 12 | param_study 13 | pwscf 14 | ase 15 | rbf 16 | coord_trans 17 | -------------------------------------------------------------------------------- /doc/source/written/background/param_study.rst: -------------------------------------------------------------------------------- 1 | .. _param_study: 2 | 3 | Parameter studies 4 | ================= 5 | 6 | .. note:: 7 | For a modern version of the tools in :mod:`~pwtools.batch` with more 8 | features, see https://github.com/elcorto/psweep. 9 | 10 | :mod:`~pwtools.batch` has tools for setting up parameter studies: automatic 11 | input generation, database tools (see also :mod:`~pwtools.sql`), ... 12 | 13 | See ``examples/parameter_study`` for examples which you can copy and run. 14 | Here is just one of the input files: 15 | 16 | .. literalinclude:: ../../../../examples/parameter_study/20cluster_pwscf_convergence/10input.py 17 | -------------------------------------------------------------------------------- /doc/source/written/dispersion_example.rst: -------------------------------------------------------------------------------- 1 | .. _dispersion_example: 2 | 3 | Phonon dispersion calculation and plotting 4 | ------------------------------------------ 5 | File: :file:`examples/dispersion/dispersion.py` 6 | 7 | .. literalinclude:: ../../../examples/dispersion/dispersion.py 8 | -------------------------------------------------------------------------------- /doc/source/written/features.rst: -------------------------------------------------------------------------------- 1 | .. _features: 2 | 3 | Features 4 | ======== 5 | 6 | * Container classes for single unit cells (:class:`~pwtools.crys.Structure`) 7 | and structure sequences such as molecular dynamics trajectories, relaxation 8 | runs or NEB paths (:class:`~pwtools.crys.Trajectory`). See 9 | :ref:`parsers_and_containers`. 10 | 11 | * Classes to set up calculations (parameter studies) based on template input 12 | files for any kind of computational backend (:mod:`~pwtools.batch`). See 13 | :ref:`param_study`. 
14 | 15 | * Simple sqlite3 interface with convenience data extraction methods 16 | (:mod:`~pwtools.sql`). 17 | 18 | * Parsing of PWscf (QE_), CPMD_ , CP2K_ and LAMMPS_ 19 | output into Python objects for easy access (:mod:`~pwtools.parse`). See 20 | :ref:`parsers_and_containers`. 21 | 22 | * Structure io: read cif, pdb, write axsf, cif, xyz (:mod:`~pwtools.io`) 23 | 24 | * Pythonic interface to external molecular viewers for interactive use: 25 | xcrysden_, avogadro_, jmol_, VMD_ (:mod:`~pwtools.visualize`). 26 | 27 | * EOS fitting tools (:mod:`~pwtools.eos`) 28 | 29 | * Thermodynamic properties in the quasi-harmonic approximation from phonon 30 | density of states, QHA implementation (:mod:`~pwtools.thermo`). See 31 | :ref:`qha`. 32 | 33 | * MD analysis: radial pair distribution function (own implementation and VMD_ 34 | interface), RMS, RMSD (:mod:`~pwtools.crys`). 35 | 36 | * Velocity autocorrelation function and phonon DOS from MD trajectories 37 | (:mod:`~pwtools.pydos`). See :ref:`pdos_from_vacf`. 38 | 39 | * Unit cell related tools: super cell building, coordinate transformation, 40 | k-grid tools, ... (:mod:`~pwtools.crys`). 41 | 42 | * Thin wrappers for spglib_ functions (:mod:`~pwtools.symmetry`) 43 | 44 | * Functions and classes to extend numpy/scipy, e.g. N-dim polynomial fitting 45 | and a number of convenient 1D classes (polynomial, spline) with a common 46 | API (:mod:`~pwtools.num`). 47 | 48 | * N-dim radial basis function interpolation and fitting 49 | (:mod:`~pwtools.rbf.core`, :mod:`~pwtools.rbf.hyperopt`). See :ref:`rbf`. 50 | 51 | * Basic signal processing / fft related tools (:mod:`~pwtools.signal`) 52 | 53 | * Tools to handle matplotlib plots in scripts (:mod:`~pwtools.mpl`) 54 | 55 | * QE and LAMMPS calculators for ASE (:mod:`~pwtools.calculators`) 56 | 57 | * extensive test suite 58 | 59 | .. include:: refs.rst 60 | -------------------------------------------------------------------------------- /doc/source/written/index.rst: -------------------------------------------------------------------------------- 1 | .. Written documentation. 2 | 3 | .. toctree:: 4 | 5 | features 6 | install 7 | tutorial 8 | background/index 9 | -------------------------------------------------------------------------------- /doc/source/written/refs.rst: -------------------------------------------------------------------------------- 1 | .. _QE: http://www.quantum-espresso.org 2 | .. _CPMD: http://www.cpmd.org 3 | .. _CP2K: http://cp2k.org 4 | .. _LAMMPS: http://lammps.sandia.gov 5 | .. _ASE: https://wiki.fysik.dtu.dk/ase 6 | .. _pycifrw_orig: https://bitbucket.org/jamesrhester/pycifrw 7 | .. _Elk: http://elk.sourceforge.net 8 | .. _VMD: http://www.ks.uiuc.edu/Research/vmd/ 9 | .. _phonopy: http://phonopy.sourceforge.net 10 | .. _tfreq: http://www.timteatro.net/2010/09/29/velocity-autocorrelation-and-vibrational-spectrum-calculation 11 | .. _spglib: https://spglib.github.io/spglib 12 | .. _GPAW: https://wiki.fysik.dtu.dk/gpaw/ 13 | .. _numpy: http://www.numpy.org 14 | .. _scipy: http://www.scipy.org 15 | .. _nose: https://nose.readthedocs.org 16 | .. _h5py: http://www.h5py.org/ 17 | .. _jmol: http://jmol.sourceforge.net/ 18 | .. _avogadro: http://avogadro.cc 19 | .. _xcrysden: http://www.xcrysden.org/ 20 | .. _matplotlib: http://matplotlib.org/ 21 | .. _virtualenvwrapper: https://virtualenvwrapper.readthedocs.io 22 | .. _wiki_uat: https://en.wikipedia.org/wiki/Universal_approximation_theorem 23 | .. _wiki_rbf: http://en.wikipedia.org/wiki/Radial_basis_function_network 24 | .. 
_nr3: http://numerical.recipes/aboutNR3book.html 25 | .. _samplepkg: https://github.com/elcorto/samplepkg 26 | -------------------------------------------------------------------------------- /examples/benchmarks/dist_speed_struct.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Compare speed of distance calculation: numpy vs. fortran. The Fortran version 4 | # is ~10x faster. See also test/test_distsq_frac.py . 5 | 6 | import timeit 7 | 8 | import numpy as np 9 | from pwtools import _flib, crys, timer, num 10 | 11 | 12 | def pydist(arr, cell, pbc=0): 13 | distvecs_frac = arr[:, None, :] - arr[None, ...] 14 | if pbc == 1: 15 | distvecs_frac = crys.min_image_convention(distvecs_frac) 16 | distvecs = np.dot(distvecs_frac, cell) 17 | distsq = (distvecs ** 2.0).sum(axis=2) 18 | return distsq, distvecs, distvecs_frac 19 | 20 | 21 | def fdist(arr, cell, pbc=0): 22 | natoms = arr.shape[0] 23 | distsq = num.fempty((natoms, natoms)) 24 | dummy1 = num.fempty((natoms, natoms, 3)) 25 | dummy2 = num.fempty((natoms, natoms, 3)) 26 | return _flib.distsq_frac(arr, cell, pbc, distsq, dummy1, dummy2) 27 | 28 | 29 | if __name__ == "__main__": 30 | 31 | pbc = 1 32 | 33 | arr = np.random.rand(100, 3) 34 | cell = np.random.rand(3, 3) * 3 35 | 36 | globs = globals() 37 | 38 | statements = [ 39 | "pydist(arr, cell, pbc)", 40 | "fdist(arr, cell, pbc)", 41 | ] 42 | for stmt in statements: 43 | number = 500 44 | times = np.array( 45 | timeit.repeat(stmt, globals=globs, number=number, repeat=5) 46 | ) 47 | print( 48 | f"{number} loops: {times.mean():6.3f} +- {times.std():.4f}: {stmt}" 49 | ) 50 | -------------------------------------------------------------------------------- /examples/benchmarks/dist_speed_traj.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import timeit 4 | 5 | import numpy as np 6 | 7 | from pwtools.crys import Trajectory 8 | from pwtools import crys, num, timer 9 | 10 | rand = np.random.rand 11 | 12 | # example session on 4-core box, _flob compiled w/ OpenMP (make gfortran-omp) 13 | # --------------------------------------------------------------------------- 14 | # 15 | # $ export OMP_NUM_THREADS=1 16 | # --TagTimer--: py_bigmen: time: 3.56550598145 17 | # --TagTimer--: py_loop: time: 0.456802129745 18 | # --TagTimer--: f: time: 0.437112092972 19 | # 20 | # $ export OMP_NUM_THREADS=2 21 | # --TagTimer--: f: time: 0.206064939499 22 | # 23 | # $ export OMP_NUM_THREADS=4 24 | # --TagTimer--: f: time: 0.125560998917 25 | 26 | 27 | def pydist_bigmem(traj, pbc=True): 28 | # Pure numpy version w/ big temp arrays. Also slowest. 29 | # 30 | # (nstep, natoms, natoms, 3) 31 | distvecs_frac = ( 32 | traj.coords_frac[:, :, None, :] - traj.coords_frac[:, None, :, :] 33 | ) 34 | if pbc: 35 | distvecs_frac = crys.min_image_convention(distvecs_frac) 36 | distvecs = np.empty((nstep, natoms, natoms, 3)) 37 | for ii in range(traj.nstep): 38 | distvecs[ii, ...] = np.dot(distvecs_frac[ii, ...], traj.cell[ii, ...]) 39 | # (nstep, natoms, natoms) 40 | dists = np.sqrt((distvecs ** 2.0).sum(axis=-1)) 41 | return dists 42 | 43 | 44 | def pydist_loop(traj, pbc=True): 45 | dists = np.empty((nstep, natoms, natoms)) 46 | for ii, struct in enumerate(traj): 47 | dists[ii, ...] 
= crys.distances(struct, pbc=pbc) 48 | return dists 49 | 50 | 51 | def fdist(traj): 52 | return crys.distances_traj(traj, pbc=True) 53 | 54 | 55 | if __name__ == "__main__": 56 | natoms = 100 57 | nstep = 1000 58 | cell = rand(nstep, 3, 3) 59 | stress = rand(nstep, 3, 3) 60 | forces = rand(nstep, natoms, 3) 61 | coords_frac = rand(nstep, natoms, 3) 62 | symbols = ["H"] * natoms 63 | traj = Trajectory(coords_frac=coords_frac, cell=cell, symbols=symbols) 64 | 65 | ##assert np.allclose(pydist_bigmem(traj), pydist_loop(traj)) 66 | ##print("... ok") 67 | ##assert np.allclose(pydist_loop(traj), fdist(traj)) 68 | ##print("... ok") 69 | 70 | globs = globals() 71 | 72 | statements = [ 73 | "pydist_bigmem(traj)", 74 | "pydist_loop(traj)", 75 | "fdist(traj)", 76 | ] 77 | for stmt in statements: 78 | number = 1 79 | times = np.array( 80 | timeit.repeat(stmt, globals=globs, number=number, repeat=5) 81 | ) 82 | print( 83 | f"{number} loops: {times.mean():6.3f} +- {times.std():.4f}: {stmt}" 84 | ) 85 | -------------------------------------------------------------------------------- /examples/dispersion/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf matdyn.* 4 | -------------------------------------------------------------------------------- /examples/dispersion/q2r.fc.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/examples/dispersion/q2r.fc.gz -------------------------------------------------------------------------------- /examples/enthalpy.py: -------------------------------------------------------------------------------- 1 | """ 2 | Clarify the difference between the enthalpies 3 | H = E + P * V 4 | with 5 | P = P(V) = -dE/dV 6 | and 7 | H = E + Pconst * V 8 | In the latter case, min_V [ E + Pconst * V ] provides V_opt and 9 | P(V_opt) == Pconst. 10 | """ 11 | 12 | import numpy as np 13 | from pwtools import mpl, num 14 | 15 | pl = mpl.Plot() 16 | ax = pl.ax 17 | pl.ax2 = pl.ax.twinx(); 18 | ax2 = pl.ax2 19 | 20 | v=np.linspace(1,5,20) 21 | e=(v-3)**2+1; 22 | p=-2*(v-3); 23 | Pconst = 2.0 24 | ax.plot(v,e, label='E(V)'); 25 | ax.plot([3], [1], 'bo') 26 | 27 | # E at each point + P at each point * V = minimal enthalpy at each point 28 | # (minimal w.r.t. the target pressure at each point), i.e. the real enthalpy 29 | ax.plot(v, e+p*v, label='H=E+P(V)*V'); 30 | 31 | # E at each point + constant P * V -> minimize this H(V) to find optimal 32 | # V and minimal H for *only this* pressure 33 | ax.plot(v, e+Pconst*v, label='H=E+Pconst*V'); 34 | ax.plot([2], [6], 'ro') 35 | 36 | 37 | ax.grid(); 38 | ax2.plot(v, p, 'k', label='P=-dE/dV'); 39 | ax2.hlines(Pconst, *ax2.get_xlim(), color='m', label='Pconst=%g' %Pconst) 40 | ax2.plot([2],[2], 'mo') 41 | 42 | ax.set_ylabel('H') 43 | ax.set_xlabel('V') 44 | ax2.set_ylabel('P') 45 | pl.legend(legaxname='ax', axnames=['ax','ax2'], loc='lower left') 46 | mpl.plt.show() 47 | 48 | -------------------------------------------------------------------------------- /examples/fft_padding.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Sometimes, a signal needs to be zero-padded before an FFT (e.g. when one 4 | # calculates the correlation with FFT). That introduces "leakage" and sinc-like 5 | # ripple pattern due to the cut-off between signal and zero-padding. 
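# (Why the ripple appears: cutting the signal off abruptly at the padding
# boundary is equivalent to multiplying it with a rectangular window, and a
# multiplication in the time domain corresponds to a convolution of the
# spectrum with the rectangle's sinc-shaped transform. Tapering the signal to
# zero first, e.g. with a Welch window, removes that sharp step.)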
6 | 7 | # This example shows how to use a Welch window before zero-padding or smoothing 8 | # the padded signal. The smoothing is done by convolution with a gaussian 9 | # kernel. 10 | # 11 | # The result is that Welch windowing is better than smoothing. Also, smoothing 12 | # needs an adjustable parameter - the kernel's std dev. If that is choosen too 13 | # big, then the smoothing will filter out high frequencies. 14 | 15 | 16 | from math import pi 17 | import numpy as np 18 | from matplotlib import pyplot as plt 19 | from pwtools import signal 20 | from scipy.signal import convolve, gaussian, correlate 21 | from scipy.fftpack import fft 22 | 23 | nn = 200 24 | nadd = 5*nn 25 | t = np.linspace(0.123,0.567,nn) 26 | x = np.sin(2*pi*10*t) + np.cos(2*pi*3*t) + np.sin(2*pi*30*t) 27 | dt = t[1]-t[0] 28 | 29 | pad_x = signal.pad_zeros(x, nadd=nadd) 30 | pad_welch_x = signal.pad_zeros(x*signal.welch(nn), nadd=nadd) 31 | kern = gaussian(M=20,std=2) # width M must be 6..10 x std 32 | smooth_pad_x = convolve(signal.pad_zeros(x,nadd=nadd),kern,'same')/10.0 33 | ##mirr_x = signal.mirror(x) 34 | ##welch_mirr_x = signal.mirror(x)*signal.welch(2*nn-1) 35 | ##pad_welch_mirr_x = signal.pad_zeros(signal.mirror(x)*signal.welch(2*nn-1), 36 | ## nadd=2*nn-1) 37 | 38 | plt.figure() 39 | plt.plot(pad_x, label='pad_x (padded signal)') 40 | plt.plot(pad_welch_x, label='pad_welch_x') 41 | plt.plot(smooth_pad_x,label='smooth_pad_x') 42 | plt.xlabel('time [s]') 43 | plt.xlim(0,300) 44 | plt.legend() 45 | 46 | plt.figure() 47 | f,d = signal.ezfft(x, dt) 48 | plt.plot(f,abs(d), label='x') 49 | f,d = signal.ezfft(pad_x, dt) 50 | plt.plot(f,abs(d), label='pad_x') 51 | f,d = signal.ezfft(pad_welch_x, dt) 52 | plt.plot(f,abs(d), label='pad_welch_x') 53 | f,d = signal.ezfft(smooth_pad_x, dt) 54 | plt.plot(f,abs(d), label='smooth_pad_x') 55 | ##f,d = signal.ezfft(mirr_x, dt) 56 | ##plt.plot(f,abs(d), label='mirr_x') 57 | ##f,d = signal.ezfft(welch_mirr_x, dt) 58 | ##plt.plot(f,abs(d), label='welch_mirr_x') 59 | ##f,d = signal.ezfft(pad_welch_mirr_x, dt) 60 | ##plt.plot(f,abs(d), label='pad_welch_mirr_x') 61 | 62 | plt.xlabel('freq [Hz]') 63 | plt.legend() 64 | plt.xlim(0,50) 65 | plt.show() 66 | -------------------------------------------------------------------------------- /examples/filter_example.py: -------------------------------------------------------------------------------- 1 | # Example for using a digital filter. 2 | # 3 | # Aliasing 4 | # -------- 5 | # Suppose you have a signal with 50 + 80 or 120 Hz and a Nyquist freq of 100. 6 | # The 80 Hz part can be filtered out by using a lowpass with e.g. cutoff=70 or 7 | # a bandpass with cutoff=[10,70] or so. 8 | # 9 | # Because the Nyquist frequency is 100 Hz, the 120 Hz signal is aliased back 10 | # (folded back at 100 Hz) to 80 Hz by sampling the signal and shows then up as 11 | # a peak in FFT. This can be also taken care of by using a filter in time 12 | # domain on the signal before FFT, but make sure that the filter cutoff 13 | # frequencies are such that the aliased peak is excluded (i.e. smaller then 80 14 | # Hz). As such, the aliased 50+120 signal behaves exactly like a 50+80 signal. 15 | # 16 | # Note that in general, you don't know to which frequency aliases have been put 17 | # and just using a bandpass around you desired frequency band won't help. 
The 18 | # only solution in this case is to avoid aliasing in the first place :) 19 | 20 | 21 | import numpy as np 22 | from pwtools import mpl 23 | from scipy.signal import hann 24 | from scipy.fftpack import fft 25 | from pwtools.signal import fftsample, FIRFilter, pad_zeros 26 | pi = np.pi 27 | plt = mpl.plt 28 | 29 | plots = mpl.prepare_plots(['freq', 'filt_pad', 'filt_nopad']) 30 | nyq = 100 # Hz 31 | df = 1.0 # Hz 32 | dt, nstep = fftsample(nyq, df, mode='f') 33 | t = np.linspace(0, 1, int(nstep)) 34 | filt1 = FIRFilter(cutoff=[10,50], nyq=nyq, mode='bandpass', ripple=60, 35 | width=10) 36 | filt2 = FIRFilter(cutoff=[10,50], nyq=nyq, mode='bandpass', ntaps=100, 37 | window='hamming') 38 | plots['freq'].ax.plot(filt1.w, abs(filt1.h), label='filt1') 39 | plots['freq'].ax.plot(filt2.w, abs(filt2.h), label='filt2') 40 | plots['freq'].ax.legend() 41 | 42 | for pad in [True,False]: 43 | x = np.sin(2*pi*20*t) + np.sin(2*pi*80*t) 44 | if pad: 45 | x = pad_zeros(x, nadd=len(x)) 46 | pl = plots['filt_pad'] 47 | else: 48 | pl = plots['filt_nopad'] 49 | f = np.fft.fftfreq(len(x), dt) 50 | sl = slice(0, len(x)//2, None) 51 | win = hann(len(x)) 52 | pl.ax.plot(f[sl], np.abs(fft(x)[sl]), label='fft(x)') 53 | pl.ax.plot(f[sl], np.abs(fft(filt1(x))[sl]), label='fft(filt1(x))') 54 | pl.ax.plot(f[sl], np.abs(fft(filt1(win*x))[sl]), label='fft(filt1(hann*x))') 55 | pl.ax.plot(f[sl], np.abs(fft(filt2(win*x))[sl]), label='fft(filt2(hann*x))') 56 | pl.ax.set_title('zero pad = %s' %pad) 57 | pl.ax.legend() 58 | 59 | plt.show() 60 | -------------------------------------------------------------------------------- /examples/lammps/md_nvt_npt/AlN.tersoff: -------------------------------------------------------------------------------- 1 | Al Al Al 3 0.3168 1.5 0.0748 19.5691 -0.6593 6.0865 1.0949 0.927415 23.0295 2.7 0.1 2.58526 492.675 2 | Al N Al 3 0.3168 1.5 0.0748 19.5691 -0.6593 6.0865 1.0949 0.927415 23.0295 2.7 0.1 2.58526 492.675 3 | Al Al N 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 4 | Al N N 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 5 | N Al Al 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 6 | N N Al 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 7 | N Al N 1 0.7661 0 0.1785 0.2017 0.0452 1 2.0595 2.38431 423.797 2.2 0.2 3.55787 1044.87 8 | N N N 1 0.7661 0 0.1785 0.2017 0.0452 1 2.0595 2.38431 423.797 2.2 0.2 3.55787 1044.87 9 | -------------------------------------------------------------------------------- /examples/lammps/md_nvt_npt/README.rst: -------------------------------------------------------------------------------- 1 | Run a short LAMMPS MD (rock salt AlN solid state system) either NPT or NVT and 2 | plot various quantities parsed from txt and dcd files or calculated from parsed 3 | data. 4 | 5 | This is used to show subtle differences in various output quantities in NPT 6 | runs which should in theory be exactly the same. 7 | 8 | Usage 9 | ----- 10 | ./run.py nvt | npt 11 | 12 | We plot a quantity obtained by 3 different ways: 13 | 14 | txt = parsed from lmp.out.dump, i.e. the printed value directly from lammps 15 | calc = set tr.=None and calculate it by the methods defined in 16 | Trajectory (i.e. 
by calling set_all() -> call get_()) 17 | dcd = parse dcd file with only coords and cryst_const, rest is calculated in 18 | Trajectory 19 | 20 | Here is a detailed overview of what is parsed and what is calculated from 21 | parsed data. 22 | 23 | txt 24 | --- 25 | parsed: (from lmp.out.dump) 26 | coords (xu yu zu) 27 | coords_frac (xsu ysu zsu) 28 | cell (ITEM BOX BOUNDS) 29 | velocity (vx vy vz) 30 | calculated: 31 | cryst_const from cell 32 | 33 | calc (based on txt values above) 34 | ---- 35 | parsed: 36 | -- 37 | calculated: 38 | coords from coords_frac+cell 39 | coords_frac from coords+cell 40 | cryst_const from cell 41 | cell from cryst_const 42 | velocity from coords 43 | 44 | dcd 45 | --- 46 | parsed: (from lmp.out.dcd) 47 | coords 48 | cryst_const 49 | calculated: 50 | cell from cryst_const 51 | coords_frac from coords+cell 52 | velocity from coords 53 | 54 | Results 55 | ------- 56 | Everything is the same for NVT. 57 | 58 | For NPT we find substantial(+) and small(-) differences for some quantities: 59 | 60 | cell : txt = dcd = calc 61 | cryst_const : txt = dcd = calc 62 | coords : txt = dcd != calc + 63 | coords_frac : txt != dcd = calc + 64 | velocity : txt != dcd = calc - 65 | 66 | => coords(txt) = coords(dcd) 67 | => cell(txt) = cell(dcd) 68 | => cryst_const(txt) = cryst_const(dcd) 69 | BUT: 70 | => coords_frac(txt), coords(txt) and cell(txt) don't fit together! Who is 71 | right? Should we rely on coords(txt) or coords_frac(txt)?? 72 | 73 | Pragmatic choice: Use only dcd, which is what we need to use anyway for real 74 | world MD runs. Then we have no choice other than to believe that coords(txt) 75 | is The Truth. We should therefore ignore and never use coords_frac(txt). 76 | -------------------------------------------------------------------------------- /examples/lammps/md_nvt_npt/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm lmp.* log.lammps 4 | -------------------------------------------------------------------------------- /examples/lorentz.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Example for smoothing a signal with a Lorentz kernel. 5 | 6 | We show how to use (a) scipy.signal.convolve, (b) direct sum of Lorentz 7 | functions (convolution by hand) and (c) pwtools.signal.smooth. We also test 8 | various kernel lengths (klen below) and we show the severe edge effects with 9 | normal convolution. 10 | 11 | Tails 12 | ----- 13 | 14 | The problem with Lorentz is that the function has very long tails (never really 15 | goes to zero at both ends) compared to a Gaussian with the same spread 16 | parameter "std". Therefore very wide kernels are needed, as in 100*std or 17 | better, where we get away with 6*std for gaussians. 
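For reference, the kernel shape used in the direct sum below is
lorentz(x) = std / ((x - x0)**2 + std**2). Its tails decay only like 1/x**2,
whereas a Gaussian decays exponentially, which is why the Lorentz kernel must
extend to roughly 100*std before (almost) all of its weight is included.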
18 | 19 | Edge effects 20 | ------------ 21 | 22 | If your to-be-smoothed data is properly zero at both ends, then you may skip 23 | convolution with edge-effect correction (pwtools.signal.smooth) and use direct 24 | convolution (scipy.signal.convolve), but there is really no reason to do so, 25 | except that you don't have pwtools :) 26 | """ 27 | 28 | import numpy as np 29 | from pwtools import mpl 30 | from pwtools.signal import scale, lorentz, smooth 31 | from scipy.signal import convolve 32 | plt = mpl.plt 33 | 34 | npoints = 200 35 | std = 1.0 36 | 37 | for nrand_fac in [0.2, 1.0]: 38 | # random data to be smoothed, with much (nrand_fac=0.2) or no 39 | # (nrand_fac=1.0) zeros at both ends, edge effects are visible for no zeros 40 | # at the ends 41 | plt.figure() 42 | y = np.zeros(npoints) 43 | x = np.arange(len(y)) 44 | nrand = int(npoints*nrand_fac) 45 | # even nrand 46 | if nrand % 2 == 1: 47 | nrand += 1 48 | y[npoints//2-nrand//2:npoints//2+nrand//2] = np.random.rand(nrand) + 2.0 49 | 50 | # Sum of Lorentz functions at data points. This is the same as convolution 51 | # with a Lorentz function withOUT end point correction, valid if data `y` 52 | # is properly zero at both ends, else edge effects are visible: smoothed 53 | # data always goes to zero at both ends, even if original data doesn't. We 54 | # need to use a very wide kernel with at least 100*std b/c of long 55 | # Lorentz tails. Better 200*std to be safe. 56 | sig = np.zeros_like(y) 57 | for xi,yi in enumerate(y): 58 | sig += yi * std / ((x-xi)**2.0 + std**2.0) 59 | sig = scale(sig) 60 | plt.plot(sig, label='sum') 61 | # convolution with wide kernel 62 | klen = 200*std 63 | klen = klen+1 if klen % 2 == 0 else klen # odd kernel 64 | kern = lorentz(klen, std=std) 65 | plt.plot(scale(convolve(y, kern/float(kern.sum()), 'same')), 66 | label='conv, klen=%i' %klen) 67 | 68 | # Convolution with Lorentz function with end-point correction. 69 | for klen in [10*std, 100*std, 200*std]: 70 | klen = klen+1 if klen % 2 == 0 else klen # odd kernel 71 | kern = lorentz(klen, std=std) 72 | plt.plot(scale(smooth(y, kern)), label='conv+egde, klen=%i' %klen) 73 | plt.title("npoints=%i" %npoints) 74 | plt.legend() 75 | plt.show() 76 | -------------------------------------------------------------------------------- /examples/nd_matmul.py: -------------------------------------------------------------------------------- 1 | # Examples for nd matrix mult. This is a leftover from playing arounf with 2 | # crys.coord_trans(). 
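# The point demonstrated below: for an (natoms, nstep, 3) array `a` and a
# (3, 3) matrix `b`, np.dot(a, b) contracts the last axis of `a` with the
# first axis of `b`, so the vectorized c3 = np.dot(a, b) equals looping over
# all atoms i and steps j and computing np.dot(a[i, j, :], b) for each
# position vector (the reference implementations c0..c2 and c4).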
3 | 4 | import numpy as np 5 | 6 | # some random 3d array 7 | natoms = 5 8 | nstep = 10 9 | a = np.ones((natoms, nstep, 3)) 10 | for j in range(a.shape[1]): 11 | a[:,j,:] *= (j+1) 12 | print(a[:,j,:]) 13 | 14 | print("-----------------------") 15 | 16 | # transformation matrix b, transform from cartesian with basis vecs 17 | # np.identity(3) to b 18 | # 19 | # primitive lattice vectors (rows): 20 | # a1 = b[0,:] 21 | # a2 = b[1,:] 22 | # a3 = b[2,:] 23 | # 24 | b = np.array([[1,2,3],[4,5,6],[7,8,9]]) 25 | 26 | # reference implementation: a[i,j,:] = old postion vector of atom i at time 27 | # step j 28 | print("c0") 29 | c0 = np.zeros(a.shape) 30 | # time steps 31 | for j in range(a.shape[1]): 32 | # atoms 33 | for i in range(a.shape[0]): 34 | # vector c0[i,j,:] in new coords: 35 | # a[i,j,0]*a0 + a[i,j,1]*a1 + a[i,j,2]*a2 36 | for k in range(a.shape[2]): 37 | c0[i,j,:] += a[i,j,k] * b[k,:] 38 | print(c0[:,j,:]) 39 | 40 | print("-----------------------") 41 | 42 | print("c1") 43 | c1 = np.empty(a.shape) 44 | for j in range(a.shape[1]): 45 | for i in range(a.shape[0]): 46 | c1[i,j,:] = np.dot(a[i,j,:], b) 47 | print(c1[:,j,:]) 48 | 49 | print("-----------------------") 50 | 51 | print("c2") 52 | c2 = np.empty(a.shape) 53 | for i in range(a.shape[0]): 54 | c2[i,...] = np.dot(a[i,...], b) 55 | for j in range(a.shape[1]): 56 | print(c2[:,j,:]) 57 | 58 | print("-----------------------") 59 | 60 | # (m, n, 3) x (3, 3) = (m, n, 3) .. so simple 61 | # vectorization rocks! 62 | print("c3") 63 | c3 = np.dot(a, b) 64 | for j in range(a.shape[1]): 65 | print(c3[:,j,:]) 66 | 67 | print("-----------------------") 68 | 69 | print("c4") 70 | c4 = np.empty(a.shape) 71 | for j in range(a.shape[1]): 72 | c4[:,j,:] = np.dot(a[:,j,:], b) 73 | print(c4[:,j,:]) 74 | 75 | from scipy.linalg import norm 76 | print(norm(c0-c1)) 77 | print(norm(c0-c2)) 78 | print(norm(c0-c3)) 79 | print(norm(c0-c4)) 80 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/10input.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Write lammps input files for a E(V) curve calculation. 
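#
# How the pieces fit together: each sql.SQLEntry key below shows up as a
# XXX<KEY> placeholder in the template files, e.g. key='target_press' fills
# XXXTARGET_PRESS in calc.templ/lmp.in, 'struct' fills XXXSTRUCT in
# lmp.struct and 'symbols' fills XXXSYMBOLS in lmp.struct.symbols.
# batch.ParameterStudy also provides machine/study placeholders (XXXIDX,
# XXXCALC_NAME, XXXSTUDY_NAME, ...) used in the job template and writes one
# row per parameter set to calc.db, which 20parse.py .. 50eval.py use later.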
4 | 5 | import os 6 | import numpy as np 7 | from pwtools import common, batch, sql, crys, lammps 8 | 9 | local = batch.Machine(hostname='local', 10 | subcmd='bash', 11 | scratch='/tmp', 12 | filename='calc.templ/job.local', 13 | home='/home/schmerler') 14 | 15 | templates = [batch.FileTemplate(basename=x) for x in 16 | ['lmp.in', 'lmp.struct', 'lmp.struct.symbols']] 17 | 18 | # rs-AlN 19 | st = crys.Structure(coords_frac=np.array([[0.0]*3, [0.5]*3]), 20 | symbols=['Al','N'], 21 | cryst_const=np.array([2.78]*3 + [60]*3)) 22 | 23 | params_lst = [] 24 | for target_press in np.linspace(-20,20,15): # GPa, bar in lammps 25 | params_lst.append([sql.SQLEntry(key='target_press', sqlval=target_press*1e4), 26 | sql.SQLEntry(key='struct', sqlval=lammps.struct_str(st)), 27 | sql.SQLEntry(key='symbols', sqlval='\n'.join(st.symbols)), 28 | ]) 29 | 30 | calc = batch.ParameterStudy(machines=local, 31 | templates=templates, 32 | params_lst=params_lst, 33 | study_name='lammps_ev', 34 | ) 35 | calc.write_input(sleep=0, backup=False, mode='w') 36 | 37 | if not os.path.exists('calc'): 38 | os.symlink('calc_local', 'calc') 39 | 40 | common.system("cp -r potentials calc_local/") 41 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/20parse.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Parse each lammps output and write results/idx/traj.pk 4 | 5 | from pwtools import sql, io 6 | 7 | db = sql.SQLiteDB('calc.db', table='calc') 8 | 9 | for idx in db.get_list1d("select idx from calc"): 10 | print(idx) 11 | tr = io.read_lammps_md_txt('calc/%i/log.lammps' %idx) 12 | tr.dump('results/%i/traj.pk' %idx) 13 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/30gather.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Load parsed results and put some values in the database. 4 | 5 | from pwtools import sql, io, num 6 | 7 | db = sql.SQLiteDB('calc.db', table='calc') 8 | idx_lst = db.get_list1d("select idx from calc") 9 | 10 | cols = [('etot', 'float'), # eV 11 | ('pressure', 'float'), # GPa 12 | ('volume', 'float'), # Ang**3 13 | ('forces_rms', 'float'), # eV / Ang 14 | ('sxx', 'float'), # GPa 15 | ('syy', 'float'), # GPa 16 | ('szz', 'float'), # GPa 17 | ] 18 | db.add_columns(cols) 19 | 20 | for idx in idx_lst: 21 | print(idx) 22 | struct = io.cpickle_load('results/%i/traj.pk' %idx)[-1] 23 | db.execute("update calc set etot=? where idx==?", (struct.etot, idx)) 24 | db.execute("update calc set volume=? where idx==?", (struct.volume, idx)) 25 | db.execute("update calc set pressure=? where idx==?", (struct.pressure, idx)) 26 | db.execute("update calc set sxx=? where idx==?", (struct.stress[0,0], idx)) 27 | db.execute("update calc set syy=? where idx==?", (struct.stress[1,1], idx)) 28 | db.execute("update calc set szz=? where idx==?", (struct.stress[2,2], idx)) 29 | db.execute("update calc set forces_rms=? where idx==?", 30 | (num.rms(struct.forces), idx)) 31 | db.commit() 32 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/40eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Print some raw results from the database. 
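#
# target_press was stored in bar (GPa * 1e4, lammps "metal" units, see
# 10input.py), hence target_press/1e4 converts it back to GPa for direct
# comparison with the parsed pressure column [GPa].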
4 | 5 | sqlite3 -column -header calc.db "select idx,volume,etot,target_press/1e4,pressure from calc" 6 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/50eval.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Plot E(V) curve. 4 | 5 | from pwtools import sql, mpl, io 6 | 7 | db = sql.SQLiteDB('calc.db') 8 | data = db.get_array("select volume,etot from calc order by volume") 9 | natoms = io.cpickle_load('results/0/traj.pk').natoms 10 | 11 | # plotting 12 | fig,ax = mpl.fig_ax() 13 | ax.plot(data[:,0]/float(natoms), data[:,1]/float(natoms)) 14 | ax.set_ylabel('energy/atom [eV]') 15 | ax.set_xlabel('volume/atom [Ang^3]') 16 | 17 | mpl.plt.show() 18 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/README: -------------------------------------------------------------------------------- 1 | Example session for using pwtools.batch. The example is self-contained. Just 2 | run it and inspect the created files. Only the files in calc.templ/ and 3 | the potential file AlN.tersoff are needed. 4 | 5 | The example calculations are very small and should run only some seconds. 6 | 7 | Run the example, clean the directory (clean.sh). 8 | 9 | The work flow would be: 10 | 11 | 1) 10input.py 12 | 13 | 2) run calculations 14 | local: 15 | * go to calc_local 16 | * execute run.sh -> submitt all jobs 17 | cluster: 18 | * copy calc_theo to the cluster 19 | rsync -auvz calc_theo theo:path/to/calculation/ 20 | * ssh to cluster, go to path/to/calculation/calc_theo 21 | * execute run.sh -> submitt all jobs 22 | * copy calc_theo back from cluster 23 | rsync -auvz theo:path/to/calculation/calc_theo ./ 24 | 25 | 3) 20parse.py 26 | 27 | 4) 30gather.py 28 | 29 | 5) 40eval.sh, 50eval.py 30 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/calc.templ/job.local: -------------------------------------------------------------------------------- 1 | echo "job: XXXIDX" 2 | lmp < lmp.in > lmp.out 2>&1 3 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/calc.templ/lmp.in: -------------------------------------------------------------------------------- 1 | # 2 | # In order to simulate a fully flexible triclinic cell, use 3 | # fix .. npt .. tri .. 
scaleyz no scalexz no scalexy no 4 | 5 | # units metal: 6 | # 7 | # mass = grams/mole 8 | # distance = Angstroms 9 | # time = picoseconds 10 | # energy = eV 11 | # velocity = Angstroms/picosecond 12 | # force = eV/Angstrom 13 | # torque = eV 14 | # temperature = degrees K 15 | # pressure = bars 16 | # dynamic viscosity = Poise 17 | # charge = multiple of electron charge (+1.0 is a proton) 18 | # dipole = charge*Angstroms 19 | # electric field = volts/Angstrom 20 | # density = gram/cm^dim 21 | 22 | clear 23 | units metal 24 | boundary p p p 25 | atom_style atomic 26 | 27 | # lmp.struct written by pwtools 28 | read_data lmp.struct 29 | 30 | ### interactions 31 | pair_style tersoff 32 | pair_coeff * * ../potentials/AlN.tersoff Al N 33 | 34 | ### IO 35 | dump dump_txt all custom 100 lmp.out.dump id type xu yu zu fx fy fz 36 | dump_modify dump_txt sort id 37 | 38 | thermo_style custom step temp vol cella cellb cellc cellalpha cellbeta cellgamma & 39 | pe pxx pyy pzz pxy pxz pyz 40 | thermo_modify flush yes 41 | thermo 100 42 | 43 | min_modify dmax 0.2 44 | 45 | # keep angles constant, vary only a,b,c 46 | variable scale string yes 47 | 48 | fix box all box/relax aniso XXXTARGET_PRESS nreset 50 & 49 | scaleyz ${scale} scalexz ${scale} scalexy ${scale} 50 | minimize 1e-8 1e-8 50000 10000 51 | 52 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/calc.templ/lmp.struct: -------------------------------------------------------------------------------- 1 | XXXSTRUCT 2 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/calc.templ/lmp.struct.symbols: -------------------------------------------------------------------------------- 1 | XXXSYMBOLS 2 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rvf calc calc_* calc.db* results 4 | -------------------------------------------------------------------------------- /examples/parameter_study/10local_lammps_ev/potentials/AlN.tersoff: -------------------------------------------------------------------------------- 1 | Al Al Al 3 0.3168 1.5 0.0748 19.5691 -0.6593 6.0865 1.0949 0.927415 23.0295 2.7 0.1 2.58526 492.675 2 | Al N Al 3 0.3168 1.5 0.0748 19.5691 -0.6593 6.0865 1.0949 0.927415 23.0295 2.7 0.1 2.58526 492.675 3 | Al Al N 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 4 | Al N N 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 5 | N Al Al 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 6 | N N Al 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 7 | N Al N 1 0.7661 0 0.1785 0.2017 0.0452 1 2.0595 2.38431 423.797 2.2 0.2 3.55787 1044.87 8 | N N N 1 0.7661 0 0.1785 0.2017 0.0452 1 2.0595 2.38431 423.797 2.2 0.2 3.55787 1044.87 9 | -------------------------------------------------------------------------------- /examples/parameter_study/20cluster_pwscf_convergence/10input.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Write PWscf input files for a convergence study: vary ecutwfc. 
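#
# Layout produced by this script and relied on by the follow-up scripts: one
# calculation dir per parameter set below calc_theo/ (0, 1, ...), the symlink
# calc -> calc_theo created at the end, and calc.db with one row per set
# (column "idx"). 20parse.py reads calc/<idx>/pw.out, 30gather.py adds result
# columns to calc.db, 40eval.sh and 50eval.py query them.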
4 | 5 | import os 6 | import numpy as np 7 | from pwtools import common, batch, sql, crys, pwscf 8 | 9 | theo = batch.Machine(hostname='theo', 10 | subcmd='qsub', 11 | scratch='/scratch/schmerler', 12 | filename='calc.templ/job.pbs.theo', 13 | home='/home/schmerler') 14 | 15 | templates = [batch.FileTemplate(basename='pw.in')] 16 | 17 | # rs-AlN 18 | st = crys.Structure(coords_frac=np.array([[0.0]*3, [0.5]*3]), 19 | symbols=['Al','N'], 20 | cryst_const=np.array([2.76]*3 + [60]*3)) 21 | 22 | params_lst = [] 23 | for ecutwfc in np.linspace(30,100,8): 24 | params_lst.append([sql.SQLEntry(key='ecutwfc', sqlval=ecutwfc), 25 | sql.SQLEntry(key='ecutrho', sqlval=4.0*ecutwfc), 26 | sql.SQLEntry(key='cell', sqlval=common.str_arr(st.cell)), 27 | sql.SQLEntry(key='natoms', sqlval=st.natoms), 28 | sql.SQLEntry(key='atpos', 29 | sqlval=pwscf.atpos_str(st.symbols, 30 | st.coords_frac)), 31 | ]) 32 | 33 | calc = batch.ParameterStudy(machines=theo, 34 | templates=templates, 35 | params_lst=params_lst, 36 | study_name='convergence_test_cutoff', 37 | ) 38 | calc.write_input(sleep=0, backup=False, mode='w') 39 | 40 | if not os.path.exists('calc'): 41 | os.symlink('calc_theo', 'calc') 42 | 43 | common.system("cp -r ../../../test/files/qe_pseudos calc_theo/pseudo; gunzip calc_theo/pseudo/*") 44 | -------------------------------------------------------------------------------- /examples/parameter_study/20cluster_pwscf_convergence/20parse.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Parse each pw.out and write results/idx/struct.pk 4 | 5 | from pwtools import sql, io 6 | 7 | db = sql.SQLiteDB('calc.db', table='calc') 8 | 9 | for idx in db.get_list1d("select idx from calc"): 10 | print(idx) 11 | st = io.read_pw_scf('calc/%i/pw.out' %idx) 12 | st.dump('results/%i/struct.pk' %idx) 13 | -------------------------------------------------------------------------------- /examples/parameter_study/20cluster_pwscf_convergence/30gather.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Load parsed results and put some values in the database. 4 | 5 | from pwtools import sql, io, num 6 | 7 | db = sql.SQLiteDB('calc.db', table='calc') 8 | idx_lst = db.get_list1d("select idx from calc") 9 | 10 | cols = [('etot', 'float'), # eV 11 | ('pressure', 'float'), # GPa 12 | ('volume', 'float'), # Ang**3 13 | ('forces_rms', 'float'), # eV / Ang 14 | ('sxx', 'float'), # GPa 15 | ('syy', 'float'), # GPa 16 | ('szz', 'float'), # GPa 17 | ] 18 | db.add_columns(cols) 19 | 20 | for idx in idx_lst: 21 | print(idx) 22 | struct = io.cpickle_load('results/%i/struct.pk' %idx) 23 | db.execute("update calc set etot=? where idx==?", (struct.etot, idx)) 24 | db.execute("update calc set volume=? where idx==?", (struct.volume, idx)) 25 | db.execute("update calc set pressure=? where idx==?", (struct.pressure, idx)) 26 | db.execute("update calc set sxx=? where idx==?", (struct.stress[0,0], idx)) 27 | db.execute("update calc set syy=? where idx==?", (struct.stress[1,1], idx)) 28 | db.execute("update calc set szz=? where idx==?", (struct.stress[2,2], idx)) 29 | db.execute("update calc set forces_rms=? 
where idx==?", 30 | (num.rms(struct.forces), idx)) 31 | db.commit() 32 | -------------------------------------------------------------------------------- /examples/parameter_study/20cluster_pwscf_convergence/40eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Print some raw results from the database. 4 | 5 | sqlite3 -column -header calc.db "select idx,ecutwfc,etot,pressure from calc" 6 | -------------------------------------------------------------------------------- /examples/parameter_study/20cluster_pwscf_convergence/50eval.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Print result of convergence study: differences of etot, pressure 4 | 5 | from pwtools import sql, batch, mpl 6 | 7 | db = sql.SQLiteDB('calc.db') 8 | etot_fac = 1000.0/4 # eV -> meV/atom, 4 atoms 9 | data = db.get_array("select ecutwfc,etot,pressure from calc order by ecutwfc") 10 | print("ecutwfc, diff(etot) [meV/atom], diff(pressure) [GPa]") 11 | print(batch.conv_table(data[:,0], 12 | [data[:,1]*etot_fac, data[:,2]], 13 | mode='last', orig=False)) 14 | 15 | # plotting 16 | fig,ax = mpl.fig_ax() 17 | ax.plot(data[:,0], (data[:,1]-data[-1,1])*etot_fac, label='etot', color='b') 18 | ax.set_ylabel('diff(etot) [meV/atom]') 19 | ax.set_xlabel('ecutwfc [Ry]') 20 | ax.legend() 21 | 22 | mpl.plt.show() 23 | -------------------------------------------------------------------------------- /examples/parameter_study/20cluster_pwscf_convergence/README: -------------------------------------------------------------------------------- 1 | Example session for using pwtools.batch. The example is self-contained. Just 2 | run it and inspect the created files. Only the files in calc.templ/ and pseudo 3 | potential files are needed. 4 | 5 | The example calculations are very small and should run only some seconds. 6 | 7 | Run the example, clean the directory (clean.sh). 8 | 9 | The work flow would be: 10 | 11 | 1) 10input.py 12 | 13 | 2) run calculations 14 | local: 15 | * go to calc_local 16 | * execute run.sh -> submitt all jobs 17 | cluster: 18 | * copy calc_theo to the cluster 19 | rsync -auvz calc_theo theo:path/to/calculation/ 20 | * ssh to cluster, go to path/to/calculation/calc_theo 21 | * execute run.sh -> submitt all jobs 22 | * copy calc_theo back from cluster 23 | rsync -auvz theo:path/to/calculation/calc_theo ./ 24 | 25 | 3) 20parse.py 26 | 27 | 4) 30gather.py 28 | 29 | 5) 40eval.sh, 50eval.py 30 | -------------------------------------------------------------------------------- /examples/parameter_study/20cluster_pwscf_convergence/calc.templ/job.pbs.theo: -------------------------------------------------------------------------------- 1 | #PBS -q crunch 2 | #PBS -l nodes=1:fast:ppn=8 3 | #PBS -j oe 4 | #PBS -N XXXCALC_NAME 5 | 6 | module load espresso/5.0.3 7 | 8 | cd $PBS_O_WORKDIR 9 | echo "workdir: $(pwd)" 10 | echo "host: $(hostname)" 11 | cat $PBS_NODEFILE 12 | here=$(pwd) 13 | 14 | # same as outdir in pw.in 15 | scratch="XXXSCRATCH/XXXSTUDY_NAME/XXXIDX" 16 | mkdir -pv $scratch 17 | mpirun -np 8 pw.x < pw.in > pw.out 18 | rm -rv $scratch 19 | -------------------------------------------------------------------------------- /examples/parameter_study/20cluster_pwscf_convergence/calc.templ/pw.in: -------------------------------------------------------------------------------- 1 | &control 2 | calculation = 'scf' 3 | restart_mode='from_scratch', 4 | prefix='XXXCALC_NAME' 5 | tstress = .true. 
6 | tprnfor = .true. 7 | pseudo_dir = '../pseudo/', 8 | outdir='XXXSCRATCH/XXXSTUDY_NAME/XXXIDX' 9 | wf_collect = .true. 10 | forc_conv_thr = 1e-4 11 | etot_conv_thr = 1e-5 12 | / 13 | &system 14 | ibrav = 0, 15 | nat = XXXNATOMS, 16 | ntyp = 2, 17 | ecutwfc = XXXECUTWFC, 18 | ecutrho = XXXECUTRHO, 19 | / 20 | &electrons 21 | diagonalization='david' 22 | mixing_mode = 'plain' 23 | mixing_beta = 0.7 24 | conv_thr = 1.0d-6 25 | / 26 | CELL_PARAMETERS angstrom 27 | XXXCELL 28 | ATOMIC_SPECIES 29 | Al 26.981538 Al.pbe-n-kjpaw_psl.0.1.UPF 30 | N 14.00674 N.pbe-n-kjpaw_psl.0.1.UPF 31 | ATOMIC_POSITIONS crystal 32 | XXXATPOS 33 | K_POINTS automatic 34 | 4 4 4 0 0 0 35 | -------------------------------------------------------------------------------- /examples/parameter_study/20cluster_pwscf_convergence/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rvf calc calc_* calc.db* results 4 | -------------------------------------------------------------------------------- /examples/parameter_study/30two_hosts_extend/README: -------------------------------------------------------------------------------- 1 | Run the example (input.py), clean the directory (clean.sh). 2 | -------------------------------------------------------------------------------- /examples/parameter_study/30two_hosts_extend/calc.templ/input.in: -------------------------------------------------------------------------------- 1 | # input file for super fast simulation code 2 | data_dir = XXXHOME/share/pseudo 3 | calc_name = XXXCALC_NAME 4 | my_path = XXXHOME/calculations 5 | study_name = XXXSTUDY_NAME 6 | scratch = /scratch/XXXSTUDY_NAME/XXXIDX 7 | ecutwfc = XXXECUTWFC 8 | pseudo = XXXPSEUDO 9 | -------------------------------------------------------------------------------- /examples/parameter_study/30two_hosts_extend/calc.templ/job.host0: -------------------------------------------------------------------------------- 1 | # job file for host0 2 | #PBS -N XXXCALC_NAME 3 | #PBS -q short.q 4 | data_dir = XXXHOME/share/pseudo 5 | scratch = /scratch/XXXSTUDY_NAME/XXXIDX 6 | # running job 7 | mkdir -pv $scratch 8 | my_app.x < input.in > output.out 9 | -------------------------------------------------------------------------------- /examples/parameter_study/30two_hosts_extend/calc.templ/job.host1: -------------------------------------------------------------------------------- 1 | # job file for host1 2 | # 3 | #BSUB -N XXXCALC_NAME 4 | #BSUB -q long.q 5 | data_dir = XXXHOME/share/pseudo 6 | scratch = /big/share/fastfs/XXXSTUDY_NAME/XXXIDX 7 | # running job 8 | mkdir -pv $scratch 9 | /path/to/apps/bin/my_app.x < input.in > output.out 10 | -------------------------------------------------------------------------------- /examples/parameter_study/30two_hosts_extend/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rv calc_host* calc.db* excl_push 4 | -------------------------------------------------------------------------------- /examples/parameter_study/README: -------------------------------------------------------------------------------- 1 | Examples for using pwtools.batch for parameter studies (create input, run 2 | calculations, analyze results). 3 | 4 | A parameter study can be anything. For example, typical DFT-related tasks are 5 | * E(V) curve (vary V or target pressure) 6 | * convergence study of cutoff, k-points, ... 
any other code parameter 7 | * vary the q-grid density in phonon calculations 8 | * vary lattice parameters on a grid 9 | * ... anything where you vary one or more parameters and do a calculation 10 | for each parameter set 11 | 12 | 10local_lammps_ev 13 | Calculate an E(V) curve of rocksalt AlN using LAMMPS (apt-get install 14 | lammps) using a Tersoff interatomic potential. 15 | 16 | 20cluster_pwscf_convergence 17 | Plane wave cutoff convergence study using PWscf on a cluster. 18 | 19 | 30two_hosts_extend 20 | Complex example showing more features: extend study (add calculations 21 | later), generate input for many hosts at once. 22 | -------------------------------------------------------------------------------- /examples/print_struct_traj_api.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools import crys 3 | from pwtools.test.test_trajectory import get_rand_traj, get_rand_struct 4 | rand = np.random.rand 5 | 6 | tr = get_rand_traj() 7 | st = get_rand_struct() 8 | onlytr = set.difference(set(tr.attr_lst), set(st.attr_lst)) 9 | onlyst = set.difference(set(st.attr_lst), set(tr.attr_lst)) 10 | print(""" 11 | API (possible attributes in attr_lst): 12 | 13 | Structure: 14 | {st} 15 | 16 | only in Trajectory: 17 | {onlytr} 18 | 19 | only in Structure: 20 | {onlyst} 21 | 22 | Attributes which are None w.r.t. the Trajectory API after the following 23 | operation, starting with a fully populated struct or traj (all attrs not None): 24 | """.format(st=st.attr_lst, tr=tr.attr_lst, onlytr=list(onlytr), 25 | onlyst=list(onlyst))) 26 | 27 | items = [\ 28 | ('tr', tr), 29 | ('tr.copy', tr.copy()), 30 | ('tr[0:5]', tr[0:5]), 31 | ('st', st), 32 | ('st.copy', st.copy()), 33 | ('tr[0]', tr[0]), 34 | ('mean(tr)', crys.mean(tr)), 35 | ('concatenate([st,st])', crys.concatenate([st,st])), 36 | ('concatenate([st,tr])', crys.concatenate([st,tr])), 37 | ('concatenate([tr,tr])', crys.concatenate([tr,tr])), 38 | ] 39 | for name,obj in items: 40 | none_attrs = set.difference(set(tr.attr_lst), 41 | crys.populated_attrs([obj])) 42 | typ = 'traj' if obj.is_traj else 'struct' 43 | print("{:25} {:7} {}".format(name, typ, list(none_attrs))) 44 | -------------------------------------------------------------------------------- /examples/rbf/plot_rbfs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import numpy as np 4 | 5 | from pwtools import rbf, mpl 6 | plt = mpl.plt 7 | plt.rcParams["figure.autolayout"] = True 8 | 9 | fig,ax = plt.subplots() 10 | colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] 11 | 12 | r = np.linspace(-5,5,200) 13 | for color, tup in zip(colors, rbf.rbf_dct.items()): 14 | name, func = tup 15 | for p,ls in [(1, "-"), (0.1, "--")]: 16 | ax.plot(r, func(r**2, p=p), label=f"{name} p={p}", color=color, 17 | ls=ls) 18 | 19 | ax.set_xlabel("r") 20 | ax.set_ylabel("$\phi(r)$") 21 | ax.legend() 22 | fig.savefig('/tmp/rbfs.png') 23 | plt.show() 24 | -------------------------------------------------------------------------------- /examples/rpdf/compare_vmd/pw.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/examples/rpdf/compare_vmd/pw.out.gz -------------------------------------------------------------------------------- /examples/rpdf/compare_vmd/rpdf_random.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import numpy as np 4 | from pwtools import crys, mpl 5 | rand = np.random.rand 6 | 7 | # Cubic box with random points and L=20, so rmax_auto=10. We randomly choose 8 | # some atoms to be O, the rest H, which lets us test 2 selections. 9 | # 10 | # For rpdf(), g(r) goes to zero for r > 10 b/c the minimum image convention is 11 | # violated. VMD is correct up to 2*sqrt(0.5)*rmax_auto b/c they apply their 12 | # "spherical cap"-correction. 13 | # 14 | # norm_vmd: For debugging, we also calculate with norm_vmd=True, which results 15 | # in slightly wrong g(r) for the all-all case, while num_int is always correct. 16 | # 17 | # The blue curves rpdf(..., norm_vmd=False) are correct up to rmax_auto. 18 | 19 | t1=crys.Trajectory(coords_frac=rand(100,20,3), 20 | cell=np.identity(3)*20, 21 | symbols=['O']*5+['H']*15) 22 | sy = np.array(t1.symbols) 23 | dr = 0.1 24 | rmax = 25 25 | 26 | dct = {'amask': [[sy=='O', sy=='H'], None], 27 | 'sel': [['name O', 'name H'], ['all', 'all']]} 28 | 29 | plots = [] 30 | for ii in range(2): 31 | amask = dct['amask'][ii] 32 | sel = dct['sel'][ii] 33 | title = sel[0] + ',' + sel[1] 34 | 35 | aa = crys.rpdf(t1, dr=dr, rmax=rmax, amask=amask, norm_vmd=False) 36 | bb = crys.rpdf(t1, dr=dr, rmax=rmax, amask=amask, norm_vmd=True) 37 | cc = crys.vmd_measure_gofr(t1, dr=dr, rmax=rmax, sel=sel) 38 | 39 | plots.append(mpl.Plot()) 40 | plots[-1].ax.plot(aa[:,0], aa[:,1], 'b', label="g(r), norm_vmd=False") 41 | plots[-1].ax.plot(bb[:,0], bb[:,1], 'r', label="g(r), norm_vmd=True") 42 | plots[-1].ax.plot(cc[:,0], cc[:,1], 'g', label="g(r), vmd") 43 | plots[-1].legend() 44 | plots[-1].ax.set_title(title) 45 | 46 | plots.append(mpl.Plot()) 47 | plots[-1].ax.plot(aa[:,0], aa[:,2], 'b', label="int, norm_vmd=False") 48 | plots[-1].ax.plot(bb[:,0], bb[:,2], 'r', label="int, norm_vmd=True") 49 | plots[-1].ax.plot(cc[:,0], cc[:,2], 'g', label="int, vmd") 50 | plots[-1].legend(loc='lower right') 51 | plots[-1].ax.set_title(title) 52 | 53 | mpl.plt.show() 54 | -------------------------------------------------------------------------------- /examples/vinet_deriv.wxm: -------------------------------------------------------------------------------- 1 | /* [wxMaxima batch file version 1] [ DO NOT EDIT BY HAND! ]*/ 2 | /* [ Created with wxMaxima version 13.04.2 ] */ 3 | 4 | /* [wxMaxima: input start ] */ 5 | kill(all); 6 | eta0: (V/V0)**(1/3); 7 | /* with data scaling, not tested */ 8 | /*eta0: ((V-Vmin)/(Vmax-Vmin)/V0)**(1/3);*/ 9 | eta: eta0; 10 | E: E0 + 2*B0*V0/(B1-1)**2* (2 - (5 +3*B1*(eta-1)-3*eta)*exp(-3*(B1-1)*(eta-1)/2)); 11 | /* with data scaling, not tested */ 12 | /*expr: (E-Emin)/(Emax-Emin);*/ 13 | expr: E; 14 | /* [wxMaxima: input end ] */ 15 | 16 | /* [wxMaxima: input start ] */ 17 | dif1: factor(diff(expr,V)); 18 | /* [wxMaxima: input end ] */ 19 | 20 | /* [wxMaxima: input start ] */ 21 | dif2: factor(diff(expr, V, 2)); 22 | /* [wxMaxima: input end ] */ 23 | 24 | /* [wxMaxima: input start ] */ 25 | remvalue(eta); 26 | ratsubst(eta, eta0, dif1); 27 | /* [wxMaxima: input end ] */ 28 | 29 | /* [wxMaxima: input start ] */ 30 | remvalue(eta); 31 | factor(ratsubst(eta, eta0, dif2)); 32 | /* [wxMaxima: input end ] */ 33 | 34 | /* Maxima can't load/batch files which end with a comment! 
*/ 35 | "Created with wxMaxima"$ 36 | -------------------------------------------------------------------------------- /examples/vmd/nice_bonds.tcl: -------------------------------------------------------------------------------- 1 | # VMD script. 2 | # 3 | # Usage: 4 | # Load molecule on cmd line (or from the VMD File menu), set the variable 5 | # $molid (number of the current molecule, =0 for the first one) and source 6 | # the script on the VMD prompt. 7 | # $ vmd file1.axsf 8 | # vmd > set molid 0 9 | # vmd > source script.tcl 10 | # # load second file (VMD prompt or File menu), it gets ID 1 after 11 | # # loading, set $molid to refer to this mol and source script again 12 | # vmd > mol new file2.axsf type xsf first 0 last -1 step 1 waitfor -1 13 | # vmd > set molid 1 14 | # vmd > source script.tcl 15 | # 16 | # This script is a modified VMD log file (User Guide: 3.9 Tracking Script 17 | # Command Versions of the GUI Actions). In the original file, there were lines 18 | # like 19 | # mol modstyle 0 0 CPK 1.000000 0.000000 20.000000 6.000000 20 | # mol representation CPK 1.000000 0.000000 20.000000 6.000000 21 | # We replaced them by 22 | # set rep [mol modstyle 0 0 CPK 1.000000 0.000000 20.000000 6.000000] 23 | # mol representation $rep 24 | 25 | if !{[info exists molid]} then { 26 | set molid 0 27 | } 28 | 29 | set repnum -1 30 | display resetview 31 | animate style Loop 32 | display projection Orthographic 33 | color Display Background white 34 | display depthcue off 35 | axes location lowerright 36 | 37 | # Draw unit cell. This is a comand from the PBCtools plugin. 38 | pbc box 39 | 40 | # CPK 41 | # 42 | # Atoms with no bonds (bond radius 0.0). These bonds are calculated once when 43 | # the mol is loaded. If we have PBC wrap-around, some bonds span the whole box, 44 | # so make them invisible. The sphere scale is 1.0. Use 0.4 for atoms with the 45 | # same diameter as the bonds. 46 | incr repnum 47 | mol modcolor $repnum $molid Name 48 | mol modselect $repnum $molid all 49 | set rep [mol modstyle $repnum $molid CPK 1.000000 0.000000 20.000000 6.000000] 50 | mol representation $rep 51 | 52 | # Activate dynamic bond calculation in each frame. Set bond radius to 0.1 53 | # (default 0.3). Distance cut-off of 2.6. 54 | incr repnum 55 | mol addrep $molid 56 | mol color Name 57 | mol modselect $repnum $molid all 58 | mol modcolor $repnum $molid Name 59 | set rep [mol modstyle $repnum $molid DynamicBonds 2.60000 0.100000 6.000000] 60 | mol representation $rep 61 | -------------------------------------------------------------------------------- /examples/vmd/snap.tcl: -------------------------------------------------------------------------------- 1 | # Basic VMD example script for making snapshots and save them. 
2 | 3 | proc snap {base} { 4 | set tga $base.tga 5 | set png $base.png 6 | render snapshot $tga "convert $tga $png; rm $tga" 7 | } 8 | 9 | source very_nice_bonds.tcl 10 | display resetview 11 | axes location lowerright 12 | ##axes location off 13 | scale to 0.120 14 | 15 | # y 16 | # z x 17 | snap axis_c 18 | 19 | # z 20 | # xy 21 | rotate y by -90; rotate z by -90 22 | snap axis_a 23 | 24 | # z 25 | # xy 26 | rotate y by -90 27 | snap axis_b 28 | 29 | exit 30 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | PyCifRW 2 | h5py 3 | matplotlib 4 | numpy 5 | scipy 6 | spglib 7 | -------------------------------------------------------------------------------- /requirements_doc.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | myst-parser 3 | 4 | # For doc/generate-doc.sh 5 | git+https://github.com/elcorto/sphinx-autodoc 6 | -------------------------------------------------------------------------------- /requirements_optional.txt: -------------------------------------------------------------------------------- 1 | ase 2 | scikit-learn 3 | -------------------------------------------------------------------------------- /requirements_test.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | pytest-xdist 3 | pytest-timeout 4 | -------------------------------------------------------------------------------- /src/pwtools/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = [\ 2 | 'arrayio', 3 | 'atomic_data', 4 | 'base', 5 | 'batch', 6 | 'calculators', 7 | 'comb', 8 | 'common', 9 | 'config', 10 | 'constants', 11 | 'crys', 12 | 'dcd', 13 | 'decorators', 14 | 'eos', 15 | 'io', 16 | 'kpath', 17 | 'mpl', 18 | 'mttk', 19 | 'num', 20 | 'parse', 21 | 'pwscf', 22 | 'pydos', 23 | 'random', 24 | 'rbf', 25 | 'regex', 26 | 'signal', 27 | 'sql', 28 | 'symmetry', 29 | 'thermo', 30 | 'timer', 31 | 'verbose', 32 | 'visualize', 33 | ] 34 | -------------------------------------------------------------------------------- /src/pwtools/comb.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools.common import assert_cond as _assert 3 | from pwtools import common 4 | from functools import reduce 5 | import itertools 6 | 7 | 8 | def unique2d(arr, what='row'): 9 | """Reduce 2d array `arr` to a 2d array with unique rows (or cols). 
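(For plain numeric arrays, numpy >= 1.13 offers ``np.unique(arr, axis=0)``
resp. ``axis=1`` for the same purpose, but it returns the unique rows/cols
sorted, while this function keeps the order of first occurrence.)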
10 | 11 | Parameters 12 | ---------- 13 | arr : 2d-like 14 | what : str 15 | {'row', 'col'} 16 | 17 | Returns 18 | ------- 19 | numpy 2d array 20 | 21 | Examples 22 | -------- 23 | >>> a=array([[1,2,3], [1,2,3], [1,2,4]]) 24 | >>> unique2d(a, 'row') 25 | array([[1, 2, 3], 26 | [1, 2, 4]]) 27 | """ 28 | if what == 'row': 29 | arr = np.asarray(arr) 30 | elif what == 'col': 31 | arr = np.asarray(arr).T 32 | else: 33 | raise ValueError("illegal value of 'what': %s" %what) 34 | uniq = [arr[0,:]] 35 | for row_a in arr: 36 | is_in = False 37 | for row_u in uniq: 38 | if (row_a == row_u).all(): 39 | is_in = True 40 | break 41 | if not is_in: 42 | uniq.append(row_a) 43 | if what == 'row': 44 | return np.asarray(uniq) 45 | else: 46 | return np.asarray(uniq).T 47 | 48 | 49 | def _ensure_list(arg): 50 | if common.is_seq(arg): 51 | return [_ensure_list(xx) for xx in arg] 52 | else: 53 | return arg 54 | 55 | 56 | # XXX return iterator (py3) 57 | # legacy, we keep it for now b/c it is used in batch.py 58 | def nested_loops(lists, flatten=False): 59 | """Nested loops, optional flattening. 60 | 61 | Parameters 62 | ---------- 63 | lists : list of sequences 64 | The objects to permute. len(lists) == the depth (nesting levels) of the 65 | equivalent nested loops. Individual lists may contain a mix of 66 | different types/objects, e.g. [['a', 'b'], [Foo(), Bar(), Baz()], 67 | [1,2,3,4,5,6,7]]. 68 | flatten : bool 69 | Flatten each entry in returned list. 70 | 71 | Returns 72 | ------- 73 | list : nested lists 74 | 75 | Examples 76 | -------- 77 | >>> from pwtools import comb 78 | >>> comb.nested_loops([[1,2],['a','b']]) 79 | [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']] 80 | 81 | If values of different lists should be varied together, use zip(). Note 82 | that you get nested lists back. Use flatten=True to get flattened lists. 83 | 84 | >>> comb.nested_loops([(1,2), zip(['a','b'],(np.sin,np.cos))]) 85 | [[1, ['a', ]], 86 | [1, ['b', ]], 87 | [2, ['a', ]], 88 | [2, ['b', ]]] 89 | 90 | >>> comb.nested_loops([(1,2), zip(['a','b'],(np.sin,np.cos))], flatten=True) 91 | [[1, 'a', ], 92 | [1, 'b', ], 93 | [2, 'a', ], 94 | [2, 'b', ]] 95 | """ 96 | perms = itertools.product(*lists) 97 | ret = [common.flatten(xx) for xx in perms] if flatten else perms 98 | return _ensure_list(ret) 99 | -------------------------------------------------------------------------------- /src/pwtools/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | use_jax = False 4 | 5 | name = "PWTOOLS_USE_JAX" 6 | if name in os.environ: 7 | use_jax = bool(int(os.environ[name])) 8 | -------------------------------------------------------------------------------- /src/pwtools/lammps.py: -------------------------------------------------------------------------------- 1 | import time 2 | import numpy as np 3 | from pwtools import atomic_data, common 4 | 5 | 6 | def struct_str(struct): 7 | """Convert Structure object to lammps format. 8 | 9 | The returned string can be written to a file and read in a lammps input 10 | file by ``read_data``. 11 | 12 | Parameters 13 | ---------- 14 | struct : Structure 15 | 16 | Returns 17 | ------- 18 | str : string 19 | 20 | References 21 | ---------- 22 | ase.calculators.lammpsrun (ASE 3.8). 
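Examples
--------
Minimal sketch (the file name is only an example); the written file can be
read by lammps with ``read_data lmp.struct``, see
examples/parameter_study/10local_lammps_ev.

>>> import numpy as np
>>> from pwtools import crys
>>> st = crys.Structure(coords_frac=np.array([[0.0]*3, [0.5]*3]),
...                     symbols=['Al','N'],
...                     cryst_const=np.array([2.78]*3 + [60]*3))
>>> with open('lmp.struct', 'w') as fd:
...     _ = fd.write(struct_str(st))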
23 | """ 24 | # align cell to [[x,0,0],[xy,y,0],[xz, yz, z]] (row format, the transpose 25 | # is what lammps uses) 26 | st = struct.copy() 27 | st.coords = None 28 | st.cell = None 29 | st.set_all() 30 | head_str = "structure written by pwtools {0}".format(time.asctime()) 31 | info_str = '%i atoms\n%i atom types' %(st.natoms, len(st.symbols_unique)) 32 | cell_str = "0.0 {x:.14g} xlo xhi\n0.0 {y:.14g} ylo yhi\n0.0 {z:.14g} zlo zhi\n" 33 | cell_str += "{tilts} xy xz yz\n" 34 | cell_str = cell_str.format(x=st.cell[0,0], 35 | y=st.cell[1,1], 36 | z=st.cell[2,2], 37 | tilts=common.str_arr(np.array([st.cell[1,0], 38 | st.cell[2,0], 39 | st.cell[2,1]]), 40 | eps=1e-13, fmt='%.14g', 41 | delim=' ')) 42 | atoms_str = "Atoms\n\n" 43 | for iatom in range(st.natoms): 44 | atoms_str += "{iatom} {ispec} {xyz}".format( 45 | iatom=iatom+1, 46 | ispec=st.order[st.symbols[iatom]], 47 | xyz=common.str_arr(st.coords[iatom,:], eps=1e-13, fmt='%23.16e') + '\n') 48 | mass_str = "Masses\n\n" 49 | for idx,sy in enumerate(st.symbols_unique): 50 | mass_str += "%i %g\n" %(idx+1, atomic_data.pt[sy]['mass']) 51 | return head_str + '\n\n' + info_str + '\n\n' + cell_str + \ 52 | '\n' + atoms_str + '\n' + mass_str 53 | -------------------------------------------------------------------------------- /src/pwtools/rbf/__init__.py: -------------------------------------------------------------------------------- 1 | # Expose all stuff from core but none from hyperopt in the rbf name space. 2 | 3 | from .core import * 4 | -------------------------------------------------------------------------------- /src/pwtools/regex.py: -------------------------------------------------------------------------------- 1 | """Definition of useful regexes that we may use for parsing here and there.""" 2 | 3 | # Regex that matches every conveivable form of a float number, also Fortran 4 | # 1 5 | # 1.0 6 | # +1.0 7 | # -1.0 8 | # 1.0e3 9 | # 1.0e+03 10 | # 1.0E-003 11 | # -.1D03 12 | # ... 13 | float_re = r'[+-]*[\.0-9eEdD+-]+' 14 | -------------------------------------------------------------------------------- /src/pwtools/test/testenv.py: -------------------------------------------------------------------------------- 1 | # This module defines a dir where tests can write their temp files. It must be 2 | # imported by any test module which needs a temp dir. 3 | # 4 | # This file is only a dummy default fallback in case someone runs a test in 5 | # /path/to/pwtools/test (pytest test_foo.py). This file gets overwritten in a 6 | # safe location when tests are run by runtests.sh. 7 | testdir='/tmp' 8 | -------------------------------------------------------------------------------- /src/pwtools/test/utils/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = [] 2 | -------------------------------------------------------------------------------- /src/pwtools/test/utils/lammps/20run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | for d in md-{nvt,npt} vc-relax; do 4 | cd $d 5 | lmp < lmp.in 6 | cd .. 
7 | done 8 | -------------------------------------------------------------------------------- /src/pwtools/test/utils/lammps/30pack.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -fv *.tgz 4 | for d in md-{nvt,npt} vc-relax; do 5 | tar -vczf $d.tgz $d/ 6 | done 7 | -------------------------------------------------------------------------------- /src/pwtools/test/utils/lammps/AlN.tersoff: -------------------------------------------------------------------------------- 1 | Al Al Al 3 0.3168 1.5 0.0748 19.5691 -0.6593 6.0865 1.0949 0.927415 23.0295 2.7 0.1 2.58526 492.675 2 | Al N Al 3 0.3168 1.5 0.0748 19.5691 -0.6593 6.0865 1.0949 0.927415 23.0295 2.7 0.1 2.58526 492.675 3 | Al Al N 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 4 | Al N N 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 5 | N Al Al 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 6 | N N Al 3 1.1e-06 0 100390 16.217 -0.598 0.72 1.7289 1.86059 257.316 2.34 0.15 3.21306 1847.75 7 | N Al N 1 0.7661 0 0.1785 0.2017 0.0452 1 2.0595 2.38431 423.797 2.2 0.2 3.55787 1044.87 8 | N N N 1 0.7661 0 0.1785 0.2017 0.0452 1 2.0595 2.38431 423.797 2.2 0.2 3.55787 1044.87 9 | -------------------------------------------------------------------------------- /src/pwtools/test/utils/matdyn_modes.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Generate matdyn.modes file, filled with dummy data. Parsed and used in 4 | # test/test_read_matdyn.py . 5 | 6 | import numpy as np 7 | from pwtools.common import str_arr 8 | 9 | if __name__ == '__main__': 10 | natoms = 2 11 | qpoints = np.array([[0,0,0.], [0,0,0.5]]) 12 | nqpoints = len(qpoints) 13 | nmodes = 3*natoms 14 | 15 | freqs = np.empty((nqpoints, nmodes)) 16 | vecs = np.empty((nqpoints, nmodes, natoms, 3)) 17 | num = 0 18 | for iqpoint in range(nqpoints): 19 | print(" diagonalizing the dynamical matrix ...\n") 20 | print(" q = %s" %str_arr(qpoints[iqpoint], fmt='%f')) 21 | print("*"*79) 22 | for imode in range(nmodes): 23 | freqs[iqpoint, imode] = num 24 | print(" omega(%i) = %f [THz] = %f [cm-1]" %(imode+1, num*0.1, num)) 25 | num += 1 26 | for iatom in range(natoms): 27 | vec_str = " (" 28 | for icoord in range(3): 29 | vecs[iqpoint,imode,iatom,icoord] = num 30 | vec_str += " %f %f " %(num, 0.03*num) 31 | num += 1 32 | vec_str += ")" 33 | print(vec_str) 34 | print("*"*79) 35 | -------------------------------------------------------------------------------- /src/pwtools/test/utils/rand_container.py: -------------------------------------------------------------------------------- 1 | # Define a Structure and Trajectory filled with random data. 
All possible attrs 2 | # are used (I hope :) 3 | 4 | from pwtools.crys import Structure, Trajectory 5 | import numpy as np 6 | rand = np.random.rand 7 | 8 | def get_rand_traj(): 9 | natoms = 10 10 | nstep = 100 11 | cell = rand(nstep,3,3) 12 | stress = rand(nstep,3,3) 13 | forces = rand(nstep,natoms,3) 14 | etot=rand(nstep) 15 | coords_frac = rand(nstep,natoms,3) 16 | symbols = ['H']*natoms 17 | tr = Trajectory(coords_frac=coords_frac, 18 | cell=cell, 19 | symbols=symbols, 20 | forces=forces, 21 | stress=stress, 22 | etot=etot, 23 | timestep=1.11, 24 | ) 25 | return tr 26 | 27 | 28 | def get_rand_struct(): 29 | natoms = 10 30 | symbols = ['H']*natoms 31 | st = Structure(coords_frac=rand(natoms,3), 32 | symbols=symbols, 33 | forces=rand(natoms,3), 34 | cell=rand(3,3), 35 | etot=3.14, 36 | stress=rand(3,3)) 37 | return st 38 | -------------------------------------------------------------------------------- /src/pwtools/test/utils/rpdf_ref.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # gen_rpdf_ref.py 4 | # 5 | # For the two AlN structures pwtools/test/files/rpdf/*.cif, generate 6 | # pwtools/test/files/rpdf/result.*.txt files with reference results. 7 | # Also, generate results for a random set of atoms. 8 | # 9 | # The AlN structures were generated with pwtools/examples/rpdf/rpdf_aln.py . 10 | # $ python pwtools/examples/rpdf/rpdf_aln.py 11 | # $ cp /tmp/rpdf_test/* pwtools/test/files/rpdf/ 12 | # $ python pwtools/test/utils/gen_rpdf_ref.py 13 | # 14 | # Notes 15 | # ----- 16 | # Make sure that you use the same seed (np.random.seed()) for all random 17 | # structs! Otherwise, you will end up with a new structure and you will have 18 | # to "hg commit" that. 19 | 20 | import os 21 | import numpy as np 22 | from pwtools import crys, parse, arrayio 23 | pj = os.path.join 24 | 25 | if __name__ == '__main__': 26 | 27 | for name in ['rand_3d', 'aln_ibrav0_sc', 'aln_ibrav2_sc']: 28 | dd = '../files/rpdf' 29 | if name == 'rand_3d': 30 | # important! 31 | np.random.seed(3) 32 | natoms_O = 10 33 | natoms_H = 3 34 | symbols = ['O']*natoms_O + ['H']*natoms_H 35 | coords_in = np.random.rand(natoms_H + natoms_O, 3, 30) 36 | cell = np.identity(3)*10 37 | sy = np.array(symbols) 38 | msk1 = sy=='O' 39 | msk2 = sy=='H' 40 | coords = [coords_in[msk1, ..., 10:], coords_in[msk1, ..., 10:]] 41 | np.savetxt(pj(dd, name + '.cell.txt'), cell) 42 | arrayio.writetxt(pj(dd, name + '.coords0.txt'), coords[0]) 43 | arrayio.writetxt(pj(dd, name + '.coords1.txt'), coords[1]) 44 | else: 45 | pp = parse.CifFile(pj(dd, name + '.cif')) 46 | pp.parse() 47 | coords = pp.coords 48 | cell = pp.cell 49 | rad, hist, num_int = crys.rpdf(coords, 50 | rmax=5.0, 51 | cell=cell, 52 | dr=0.05, 53 | pbc=True, 54 | ) 55 | np.savetxt(pj(dd, "result.rad." + name + ".txt"), rad) 56 | np.savetxt(pj(dd, "result.hist." + name + ".txt"), hist) 57 | np.savetxt(pj(dd, "result.num_int." + name + ".txt"), num_int) 58 | np.savetxt(pj(dd, "result.rmax_auto." + name + ".txt"), 59 | [crys.rmax_smith(cell)]) 60 | -------------------------------------------------------------------------------- /src/pwtools/test/utils/vc_md_cell.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Create a PWscf MD "trajectory" where the CELL_PARAMETERS unit changes. This 4 | # is for testing if the parser can handle that. 
The output file consists only 5 | # of CELL_PARAMETERS blocks, which is enough for PwVCMDOutputFile.get_gell() . 6 | # 7 | # usage 8 | # ------ 9 | # ./make-vc-md-cell.py > ../files/pw.vc-md.cell.out 10 | 11 | import numpy as np 12 | from pwtools import common 13 | 14 | if __name__ == '__main__': 15 | cell = np.arange(1,10).reshape((3,3)) 16 | 17 | alat_lst = [2.0, 4.0] 18 | for ialat,alat in enumerate(alat_lst): 19 | for ii in range(5): 20 | cell_str = common.str_arr((cell + 0.02*ii + ialat)/alat) 21 | print("CELL_PARAMETERS (alat= %.5f)\n%s" %(alat, cell_str)) 22 | 23 | -------------------------------------------------------------------------------- /src/pwtools/verbose.py: -------------------------------------------------------------------------------- 1 | # Define message printing stuff. Used in all other modules. Use the global var 2 | # VERBOSE to turn chatty functions on/off. 3 | # 4 | # from pwtools import verbose, parse 5 | # verbose.VERBOSE = True 6 | # pp = parse.PwSCFOutputFile(...) 7 | 8 | VERBOSE = False 9 | 10 | def verbose(msg): 11 | if VERBOSE: 12 | print(msg) 13 | 14 | -------------------------------------------------------------------------------- /test/README: -------------------------------------------------------------------------------- 1 | To run the tests, use ./runtests.sh, see ``./runtests.sh -h``. 2 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = [] 2 | -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | import pathlib 3 | import os 4 | import datetime 5 | 6 | import pytest 7 | 8 | from pwtools.test.testenv import testdir 9 | 10 | 11 | # Intermediate way to transition all tests to using this fixture instead of 12 | # the testenv hack directly, while still keeping the mechanics in runtests.sh 13 | # unchanged, for now. 14 | # 15 | # Additional feature: each pwtools_tmpdir is unique by construction, which 16 | # should give us "thread-safe" dirs for free, i.e. for using pytest-xdist, w/o 17 | # the need to create random dirs inside tests. 18 | # 19 | # "request" is a pytest fixture that holds information about the fixture's 20 | # requesting scope, such as the module and the requesting test function inside 21 | # that, and much more. Cool! 
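#
# Usage sketch (hypothetical test, not part of this repo): request the
# fixture by name and write into the returned pathlib.Path, e.g.
#
#   def test_something(pwtools_tmpdir):
#       (pwtools_tmpdir / "data.txt").write_text("hello")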
22 | # 23 | @pytest.fixture 24 | def pwtools_tmpdir(request): 25 | base_path = os.path.join( 26 | testdir, 27 | "pwtools-test", 28 | request.module.__name__, 29 | request.function.__name__, 30 | ) 31 | os.makedirs(base_path, exist_ok=True) 32 | stamp = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H.%M.%SZ") 33 | path = tempfile.mkdtemp(dir=base_path, prefix=f"{stamp}_") 34 | return pathlib.Path(path) 35 | -------------------------------------------------------------------------------- /test/files/angle/rs.cif: -------------------------------------------------------------------------------- 1 | #====================================================================== 2 | 3 | # CRYSTAL DATA 4 | 5 | #---------------------------------------------------------------------- 6 | 7 | data_VESTA_phase_1 8 | 9 | 10 | _pd_phase_name 'Al1 N1' 11 | _cell_length_a 3.956(1) 12 | _cell_length_b 3.956(1) 13 | _cell_length_c 3.956(1) 14 | _cell_angle_alpha 90 15 | _cell_angle_beta 90 16 | _cell_angle_gamma 90 17 | _symmetry_space_group_name_H-M 'P 1 ' 18 | _symmetry_Int_Tables_number 1 19 | 20 | loop_ 21 | _symmetry_equiv_pos_as_xyz 22 | 'x, y, z' 23 | 24 | loop_ 25 | _atom_site_label 26 | _atom_site_occupancy 27 | _atom_site_fract_x 28 | _atom_site_fract_y 29 | _atom_site_fract_z 30 | _atom_site_thermal_displace_type 31 | _atom_site_B_iso_or_equiv 32 | _atom_site_type_symbol 33 | Al1 1.0 0 0 0 Biso 1.000 Al 34 | Al1 1.0 0 0.50000 0.50000 Biso 1.000 Al 35 | Al1 1.0 0.50000 0 0.50000 Biso 1.000 Al 36 | Al1 1.0 0.50000 0.50000 0 Biso 1.000 Al 37 | N1 1.0 0.50000 0.50000 0.50000 Biso 1.000 N 38 | N1 1.0 0.50000 0 0 Biso 1.000 N 39 | N1 1.0 0 0.50000 0 Biso 1.000 N 40 | N1 1.0 0 0 0.50000 Biso 1.000 N 41 | -------------------------------------------------------------------------------- /test/files/calc.templ/job.host0: -------------------------------------------------------------------------------- 1 | subcmd=XXXSUBCMD 2 | scratch=XXXSCRATCH 3 | home=XXXHOME 4 | calc_name=XXXCALC_NAME 5 | idx=XXXIDX 6 | revision=XXXREVISION 7 | study_name=XXXSTUDY_NAME 8 | 9 | -------------------------------------------------------------------------------- /test/files/calc.templ/job.host1: -------------------------------------------------------------------------------- 1 | subcmd=XXXSUBCMD 2 | scratch=XXXSCRATCH 3 | home=XXXHOME 4 | calc_name=XXXCALC_NAME 5 | idx=XXXIDX 6 | revision=XXXREVISION 7 | study_name=XXXSTUDY_NAME 8 | 9 | -------------------------------------------------------------------------------- /test/files/calc.templ/pw.in: -------------------------------------------------------------------------------- 1 | subcmd=XXXSUBCMD 2 | scratch=XXXSCRATCH 3 | home=XXXHOME 4 | calc_name=XXXCALC_NAME 5 | idx=XXXIDX 6 | revision=XXXREVISION 7 | study_name=XXXSTUDY_NAME 8 | param1=XXXPARAM1 9 | param2=XXXPARAM2 10 | param3=XXXPARAM3 11 | -------------------------------------------------------------------------------- /test/files/cif_struct.cif: -------------------------------------------------------------------------------- 1 | data_some_random_structure 2 | _audit_creation_date 2009/02/28 3 | _chemical_name_systematic 'foo bar' 4 | _chemical_formula_structural 'Si4 Al2 O2 N6' 5 | _chemical_formula_sum 'Si4 Al2 O2 N6' 6 | 7 | _cell_length_a 7.65900 8 | _cell_length_b 7.65900 9 | _cell_length_c 11.8240 10 | _cell_angle_alpha 90. 11 | _cell_angle_beta 90. 12 | _cell_angle_gamma 120. 
13 | _cell_formula_units_Z 6 14 | _symmetry_space_group_name_H-M 'P 1' 15 | _symmetry_Int_Tables_number 1 16 | 17 | loop_ 18 | _atom_type_symbol 19 | _atom_type_oxidation_number 20 | Si4+ 4 21 | N3- -3 22 | Al3+ 3 23 | O2- -2 24 | 25 | loop_ 26 | _atom_site_label 27 | _atom_site_type_symbol 28 | _atom_site_fract_x 29 | _atom_site_fract_y 30 | _atom_site_fract_z 31 | 32 | Al Al3+ 0.76868 0.17438 0.0625 33 | Al Al3+ 0.17438 0.4057 0.1875 34 | O O2- 0.02972 0.32953 0.06625 35 | O O2- 0.32953 0.2998 0.19125 36 | O2 O2- 0.33331 0.66669 0.0586 37 | Al Al3+ 0.76868 0.17438 0.3125 38 | Si1 Si4+ 0.76868 0.17438 0.5625 39 | Al Al3+ 0.4057 0.23132 0.3125 40 | Si1 Si4+ 0.4057 0.23132 0.5625 41 | N1 N3- 0.02972 0.32953 0.31625 42 | N2 N3- 0.33331 0.66669 0.3086 43 | 44 | #End of data_some_random_structure 45 | 46 | -------------------------------------------------------------------------------- /test/files/cp2k/cell_opt/cell_opt.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cp2k/cell_opt/cell_opt.tgz -------------------------------------------------------------------------------- /test/files/cp2k/dcd/npt_dcd.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cp2k/dcd/npt_dcd.tgz -------------------------------------------------------------------------------- /test/files/cp2k/dcd/npt_xyz.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cp2k/dcd/npt_xyz.tgz -------------------------------------------------------------------------------- /test/files/cp2k/md/npt_f_print_low.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cp2k/md/npt_f_print_low.tgz -------------------------------------------------------------------------------- /test/files/cp2k/md/nvt_print_low.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cp2k/md/nvt_print_low.tgz -------------------------------------------------------------------------------- /test/files/cp2k/scf/cp2k.scf.out.print_low.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cp2k/scf/cp2k.scf.out.print_low.gz -------------------------------------------------------------------------------- /test/files/cp2k/scf/cp2k.scf.out.print_medium.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cp2k/scf/cp2k.scf.out.print_medium.gz -------------------------------------------------------------------------------- /test/files/cpmd/md_bo_lanczos.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cpmd/md_bo_lanczos.tgz -------------------------------------------------------------------------------- 
/test/files/cpmd/md_bo_odiis.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cpmd/md_bo_odiis.tgz -------------------------------------------------------------------------------- /test/files/cpmd/md_bo_odiis_npt.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cpmd/md_bo_odiis_npt.tgz -------------------------------------------------------------------------------- /test/files/cpmd/md_cp_mttk.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cpmd/md_cp_mttk.tgz -------------------------------------------------------------------------------- /test/files/cpmd/md_cp_nve.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cpmd/md_cp_nve.tgz -------------------------------------------------------------------------------- /test/files/cpmd/md_cp_nvt_nose.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cpmd/md_cp_nvt_nose.tgz -------------------------------------------------------------------------------- /test/files/cpmd/md_cp_pr.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cpmd/md_cp_pr.tgz -------------------------------------------------------------------------------- /test/files/cpmd/scf.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/cpmd/scf.tgz -------------------------------------------------------------------------------- /test/files/dyn/ph.dyn1: -------------------------------------------------------------------------------- 1 | q = ( 0.000000 0.000000 0.000000 ) 2 | 3 | omega(1) = 0.000000 [THz] = 0.000000 [cm-1] 4 | ( 1.000000 0.030000 2.000000 0.060000 3.000000 0.090000 ) 5 | ( 4.000000 0.120000 5.000000 0.150000 6.000000 0.180000 ) 6 | omega(2) = 0.700000 [THz] = 7.000000 [cm-1] 7 | ( 8.000000 0.240000 9.000000 0.270000 10.000000 0.300000 ) 8 | ( 11.000000 0.330000 12.000000 0.360000 13.000000 0.390000 ) 9 | omega(3) = 1.400000 [THz] = 14.000000 [cm-1] 10 | ( 15.000000 0.450000 16.000000 0.480000 17.000000 0.510000 ) 11 | ( 18.000000 0.540000 19.000000 0.570000 20.000000 0.600000 ) 12 | omega(4) = 2.100000 [THz] = 21.000000 [cm-1] 13 | ( 22.000000 0.660000 23.000000 0.690000 24.000000 0.720000 ) 14 | ( 25.000000 0.750000 26.000000 0.780000 27.000000 0.810000 ) 15 | omega(5) = 2.800000 [THz] = 28.000000 [cm-1] 16 | ( 29.000000 0.870000 30.000000 0.900000 31.000000 0.930000 ) 17 | ( 32.000000 0.960000 33.000000 0.990000 34.000000 1.020000 ) 18 | omega(6) = 3.500000 [THz] = 35.000000 [cm-1] 19 | ( 36.000000 1.080000 37.000000 1.110000 38.000000 1.140000 ) 20 | ( 39.000000 1.170000 40.000000 1.200000 41.000000 1.230000 ) 21 | -------------------------------------------------------------------------------- /test/files/dyn/ph.dyn2: 
-------------------------------------------------------------------------------- 1 | q = ( 0.000000 0.000000 0.500000 ) 2 | 3 | omega(1) = 4.200000 [THz] = 42.000000 [cm-1] 4 | ( 43.000000 1.290000 44.000000 1.320000 45.000000 1.350000 ) 5 | ( 46.000000 1.380000 47.000000 1.410000 48.000000 1.440000 ) 6 | omega(2) = 4.900000 [THz] = 49.000000 [cm-1] 7 | ( 50.000000 1.500000 51.000000 1.530000 52.000000 1.560000 ) 8 | ( 53.000000 1.590000 54.000000 1.620000 55.000000 1.650000 ) 9 | omega(3) = 5.600000 [THz] = 56.000000 [cm-1] 10 | ( 57.000000 1.710000 58.000000 1.740000 59.000000 1.770000 ) 11 | ( 60.000000 1.800000 61.000000 1.830000 62.000000 1.860000 ) 12 | omega(4) = 6.300000 [THz] = 63.000000 [cm-1] 13 | ( 64.000000 1.920000 65.000000 1.950000 66.000000 1.980000 ) 14 | ( 67.000000 2.010000 68.000000 2.040000 69.000000 2.070000 ) 15 | omega(5) = 7.000000 [THz] = 70.000000 [cm-1] 16 | ( 71.000000 2.130000 72.000000 2.160000 73.000000 2.190000 ) 17 | ( 74.000000 2.220000 75.000000 2.250000 76.000000 2.280000 ) 18 | omega(6) = 7.700000 [THz] = 77.000000 [cm-1] 19 | ( 78.000000 2.340000 79.000000 2.370000 80.000000 2.400000 ) 20 | ( 81.000000 2.430000 82.000000 2.460000 83.000000 2.490000 ) 21 | -------------------------------------------------------------------------------- /test/files/dynmat/dynmat.in: -------------------------------------------------------------------------------- 1 | 2 | &input 3 | fildyn='ph.dyn', 4 | q(1)=1.000000,q(2)=0.000000,q(3)=0.000000 5 | amass(1)=26.981538, 6 | amass(2)=14.00674, 7 | asr='crystal', 8 | filout='dynmat.modes', 9 | filxsf='dynmat.axsf', 10 | / 11 | -------------------------------------------------------------------------------- /test/files/dynmat/dynmat_all.out: -------------------------------------------------------------------------------- 1 | 2 | Program DYNMAT v.5.0.2 (svn rev. 9656) starts on 29Oct2013 at 17:49:37 3 | 4 | This program is part of the open-source Quantum ESPRESSO suite 5 | for quantum simulation of materials; please cite 6 | "P. Giannozzi et al., J. Phys.:Condens. Matter 21 395502 (2009); 7 | URL http://www.quantum-espresso.org", 8 | in publications or presentations arising from this work. 
More details at 9 | http://www.quantum-espresso.org/quote.php 10 | 11 | Parallel version (MPI), running on 1 processors 12 | 13 | Reading Dynamical Matrix from file ph.dyn 14 | ...Force constants read 15 | ...epsilon and Z* read 16 | ...Raman cross sections read 17 | Acoustic Sum Rule: || Z*(ASR) - Z*(orig)|| = 0.223299E-01 18 | Acoustic Sum Rule: ||dyn(ASR) - dyn(orig)||= 0.405317E-03 19 | 20 | Polarizability (A^3 units) 21 | multiply by 0.470335 for Clausius-Mossotti correction 22 | 10.218921 0.000000 0.000000 23 | 0.000000 10.218921 0.000000 24 | 0.000000 0.000000 10.817797 25 | 26 | IR activities are in (D/A)^2/amu units 27 | Raman activities are in A^4/amu units 28 | multiply Raman by 0.221215 for Clausius-Mossotti correction 29 | 30 | # mode [cm-1] [THz] IR Raman depol.fact 31 | 1 0.00 0.0000 0.0000 0.0005 0.7414 32 | 2 0.00 0.0000 0.0000 0.0005 0.7465 33 | 3 0.00 0.0000 0.0000 0.0018 0.2647 34 | 4 252.27 7.5627 0.0000 0.0073 0.7500 35 | 5 252.27 7.5627 0.0000 0.0073 0.7500 36 | 6 548.44 16.4419 0.0000 0.0000 0.7434 37 | 7 603.32 18.0872 35.9045 18.9075 0.7366 38 | 8 656.82 19.6910 0.0000 7.9317 0.7500 39 | 9 656.82 19.6910 0.0000 7.9317 0.7500 40 | 10 669.67 20.0762 31.5712 5.0265 0.7500 41 | 11 738.22 22.1311 0.0000 0.0000 0.7306 42 | 12 922.64 27.6600 31.5712 5.0265 0.7500 43 | 44 | DYNMAT : 0.00s CPU 0.01s WALL 45 | 46 | 47 | This run was terminated on: 17:49:37 29Oct2013 48 | 49 | =------------------------------------------------------------------------------= 50 | JOB DONE. 51 | =------------------------------------------------------------------------------= 52 | -------------------------------------------------------------------------------- /test/files/dynmat/dynmat_min.out: -------------------------------------------------------------------------------- 1 | 2 | Program DYNMAT v.5.0.2 (svn rev. 9656) starts on 29Oct2013 at 17:49:37 3 | 4 | This program is part of the open-source Quantum ESPRESSO suite 5 | for quantum simulation of materials; please cite 6 | "P. Giannozzi et al., J. Phys.:Condens. Matter 21 395502 (2009); 7 | URL http://www.quantum-espresso.org", 8 | in publications or presentations arising from this work. More details at 9 | http://www.quantum-espresso.org/quote.php 10 | 11 | Parallel version (MPI), running on 1 processors 12 | 13 | Reading Dynamical Matrix from file ph.dyn 14 | ...Force constants read 15 | ...epsilon and Z* read 16 | ...Raman cross sections read 17 | Acoustic Sum Rule: || Z*(ASR) - Z*(orig)|| = 0.223299E-01 18 | Acoustic Sum Rule: ||dyn(ASR) - dyn(orig)||= 0.405317E-03 19 | 20 | Polarizability (A^3 units) 21 | multiply by 0.470335 for Clausius-Mossotti correction 22 | 10.218921 0.000000 0.000000 23 | 0.000000 10.218921 0.000000 24 | 0.000000 0.000000 10.817797 25 | 26 | IR activities are in (D/A)^2/amu units 27 | Raman activities are in A^4/amu units 28 | multiply Raman by 0.221215 for Clausius-Mossotti correction 29 | 30 | # mode [cm-1] [THz] IR 31 | 1 0.00 0.0000 0.0000 32 | 2 0.00 0.0000 0.0000 33 | 3 0.00 0.0000 0.0000 34 | 4 252.27 7.5627 0.0000 35 | 5 252.27 7.5627 0.0000 36 | 6 548.44 16.4419 0.0000 37 | 7 603.32 18.0872 35.9045 38 | 8 656.82 19.6910 0.0000 39 | 9 656.82 19.6910 0.0000 40 | 10 669.67 20.0762 31.5712 41 | 11 738.22 22.1311 0.0000 42 | 12 922.64 27.6600 31.5712 43 | 44 | DYNMAT : 0.00s CPU 0.01s WALL 45 | 46 | 47 | This run was terminated on: 17:49:37 29Oct2013 48 | 49 | =------------------------------------------------------------------------------= 50 | JOB DONE. 
51 | =------------------------------------------------------------------------------= 52 | -------------------------------------------------------------------------------- /test/files/ev/EVPAI.OUT.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/ev/EVPAI.OUT.gz -------------------------------------------------------------------------------- /test/files/ev/PARAM.OUT: -------------------------------------------------------------------------------- 1 | 2 | foo 3 | 4 | Universal EOS 5 | Vinet P et al., J. Phys.: Condens. Matter 1, p1941 (1989) 6 | 7 | (Default units are atomic: Hartree, Bohr etc.) 8 | 9 | V0 = 285.6821190 10 | E0 = -24.26320978 11 | B0 = 0.6496035784E-02 12 | B0' = 3.851617781 13 | 14 | B0 (GPa) = 191.1199391 15 | 16 | -------------------------------------------------------------------------------- /test/files/ev/PVPAI.OUT.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/ev/PVPAI.OUT.gz -------------------------------------------------------------------------------- /test/files/ev/evdata.txt: -------------------------------------------------------------------------------- 1 | # wz-AlN data -20 .. 20 GPa, PWscf, kpoints=6 6 4 0 0 0, 2 | # ecutwfc=100,ecutrho=400, *.GGA.fhi.UPF PPs 3 | # volume [Bohr^3] etot [Ry] 4 | 2.615008599999999888e+02 -4.851110056000000270e+01 5 | 2.635151900000000182e+02 -4.851370205999999996e+01 6 | 2.656082900000000109e+02 -4.851612060000000071e+01 7 | 2.677768800000000056e+02 -4.851833098000000177e+01 8 | 2.700260099999999852e+02 -4.852031739000000243e+01 9 | 2.723611300000000028e+02 -4.852206233999999796e+01 10 | 2.747892200000000003e+02 -4.852354717000000051e+01 11 | 2.773285999999999945e+02 -4.852475512000000180e+01 12 | 2.799821000000000026e+02 -4.852565644999999961e+01 13 | 2.827682100000000105e+02 -4.852622338999999840e+01 14 | 2.856871800000000121e+02 -4.852641964000000030e+01 15 | 2.887401399999999967e+02 -4.852621065999999672e+01 16 | 2.919842800000000125e+02 -4.852554793000000188e+01 17 | 2.954082399999999780e+02 -4.852438236999999788e+01 18 | 2.990459299999999985e+02 -4.852264945000000296e+01 19 | 3.029225099999999884e+02 -4.852027568999999829e+01 20 | 3.070866399999999885e+02 -4.851715905999999734e+01 21 | 3.115587600000000066e+02 -4.851320327999999904e+01 22 | 3.164141200000000254e+02 -4.850824805999999967e+01 23 | 3.217247699999999782e+02 -4.850210467000000136e+01 24 | 3.275587699999999813e+02 -4.849456201000000277e+01 25 | -------------------------------------------------------------------------------- /test/files/ev/min.txt: -------------------------------------------------------------------------------- 1 | # V0 [Bohr^3] 2 | # E0 [Ry] 3 | # P0 [GPa] 4 | # B0 [GPa] 5 | 2.85682119e+02 6 | -4.85264196e+01 7 | 1.33241459e-08 8 | 1.91119751e+02 9 | -------------------------------------------------------------------------------- /test/files/fqha.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/fqha.out.gz -------------------------------------------------------------------------------- /test/files/gibbs/1d/cartman.h5: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/gibbs/1d/cartman.h5 -------------------------------------------------------------------------------- /test/files/gibbs/1d/kenny.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/gibbs/1d/kenny.h5 -------------------------------------------------------------------------------- /test/files/gibbs/2d/cartman.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/gibbs/2d/cartman.h5 -------------------------------------------------------------------------------- /test/files/gibbs/2d/kenny.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/gibbs/2d/kenny.h5 -------------------------------------------------------------------------------- /test/files/gibbs/3d-fake-1d/deskbot.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/gibbs/3d-fake-1d/deskbot.h5 -------------------------------------------------------------------------------- /test/files/lammps/md-npt.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/lammps/md-npt.tgz -------------------------------------------------------------------------------- /test/files/lammps/md-nvt.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/lammps/md-nvt.tgz -------------------------------------------------------------------------------- /test/files/lammps/mix_output.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/lammps/mix_output.tgz -------------------------------------------------------------------------------- /test/files/lammps/vc-relax.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/lammps/vc-relax.tgz -------------------------------------------------------------------------------- /test/files/matdyn.freq: -------------------------------------------------------------------------------- 1 | &plot nbnd= 6, nks= 2 / 2 | 0.1 0.2 0.3 3 | 1 2 3 4 | 4 5 6.1 5 | 0.2 0.4 0.6 6 | 7 8 9e-7 10 11 12.0 7 | -------------------------------------------------------------------------------- /test/files/matdyn.modes: -------------------------------------------------------------------------------- 1 | diagonalizing the dynamical matrix ... 
2 | 3 | q = 0.000000 0.000000 0.000000 4 | ******************************************************************************* 5 | omega(1) = 0.000000 [THz] = 0.000000 [cm-1] 6 | ( 1.000000 0.030000 2.000000 0.060000 3.000000 0.090000 ) 7 | ( 4.000000 0.120000 5.000000 0.150000 6.000000 0.180000 ) 8 | omega(2) = 0.700000 [THz] = 7.000000 [cm-1] 9 | ( 8.000000 0.240000 9.000000 0.270000 10.000000 0.300000 ) 10 | ( 11.000000 0.330000 12.000000 0.360000 13.000000 0.390000 ) 11 | omega(3) = 1.400000 [THz] = 14.000000 [cm-1] 12 | ( 15.000000 0.450000 16.000000 0.480000 17.000000 0.510000 ) 13 | ( 18.000000 0.540000 19.000000 0.570000 20.000000 0.600000 ) 14 | omega(4) = 2.100000 [THz] = 21.000000 [cm-1] 15 | ( 22.000000 0.660000 23.000000 0.690000 24.000000 0.720000 ) 16 | ( 25.000000 0.750000 26.000000 0.780000 27.000000 0.810000 ) 17 | omega(5) = 2.800000 [THz] = 28.000000 [cm-1] 18 | ( 29.000000 0.870000 30.000000 0.900000 31.000000 0.930000 ) 19 | ( 32.000000 0.960000 33.000000 0.990000 34.000000 1.020000 ) 20 | omega(6) = 3.500000 [THz] = 35.000000 [cm-1] 21 | ( 36.000000 1.080000 37.000000 1.110000 38.000000 1.140000 ) 22 | ( 39.000000 1.170000 40.000000 1.200000 41.000000 1.230000 ) 23 | ******************************************************************************* 24 | diagonalizing the dynamical matrix ... 25 | 26 | q = 0.000000 0.000000 0.500000 27 | ******************************************************************************* 28 | omega(1) = 4.200000 [THz] = 42.000000 [cm-1] 29 | ( 43.000000 1.290000 44.000000 1.320000 45.000000 1.350000 ) 30 | ( 46.000000 1.380000 47.000000 1.410000 48.000000 1.440000 ) 31 | omega(2) = 4.900000 [THz] = 49.000000 [cm-1] 32 | ( 50.000000 1.500000 51.000000 1.530000 52.000000 1.560000 ) 33 | ( 53.000000 1.590000 54.000000 1.620000 55.000000 1.650000 ) 34 | omega(3) = 5.600000 [THz] = 56.000000 [cm-1] 35 | ( 57.000000 1.710000 58.000000 1.740000 59.000000 1.770000 ) 36 | ( 60.000000 1.800000 61.000000 1.830000 62.000000 1.860000 ) 37 | omega(4) = 6.300000 [THz] = 63.000000 [cm-1] 38 | ( 64.000000 1.920000 65.000000 1.950000 66.000000 1.980000 ) 39 | ( 67.000000 2.010000 68.000000 2.040000 69.000000 2.070000 ) 40 | omega(5) = 7.000000 [THz] = 70.000000 [cm-1] 41 | ( 71.000000 2.130000 72.000000 2.160000 73.000000 2.190000 ) 42 | ( 74.000000 2.220000 75.000000 2.250000 76.000000 2.280000 ) 43 | omega(6) = 7.700000 [THz] = 77.000000 [cm-1] 44 | ( 78.000000 2.340000 79.000000 2.370000 80.000000 2.400000 ) 45 | ( 81.000000 2.430000 82.000000 2.460000 83.000000 2.490000 ) 46 | ******************************************************************************* 47 | -------------------------------------------------------------------------------- /test/files/pdb_struct.pdb: -------------------------------------------------------------------------------- 1 | TITLE some_random_atoms 2 | REMARK THIS IS A SIMULATION BOX 3 | CRYST1 10.678 10.678 10.678 90.00 90.00 90.00 P 1 1 4 | MODEL 1 5 | HETATM 1 C 1 5.759 5.581 5.339 1.00 0.00 6 | ATOM 2 O 1 5.759 6.951 5.339 1.00 0.00 7 | ATOM 3 O 1 6.980 4.876 5.339 1.00 0.00 8 | ATOM 8 Na 1 7.406 6.575 5.339 1.00 0.00 9 | HETATM 9 H HOH 2 5.701 2.442 7.733 1.00 0.00 10 | HETATM 10 H HOH 2 5.908 0.887 7.280 1.00 0.00 11 | ATOM 11 O HOH 2 6.008 1.840 6.996 1.00 0.00 12 | ATOM 12 O HOH 20 2.880 7.979 2.600 1.00 0.00 13 | TER 14 | ENDMDL 15 | -------------------------------------------------------------------------------- /test/files/pw.constant_cell.txt: -------------------------------------------------------------------------------- 
1 | CELL_PARAMETERS 2 | 1 2 3 3 | 4 5 6 4 | 7 8 9 5 | CELL_PARAMETERS 6 | 1 2 3 7 | 4 5 6 8 | 7 8 9 9 | CELL_PARAMETERS 10 | 1 2 3 11 | 4 5 6 12 | 7 8 9 13 | CELL_PARAMETERS 14 | 1 2 3 15 | 4 5 6 16 | 7 8 9 17 | -------------------------------------------------------------------------------- /test/files/pw.md.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/pw.md.out.gz -------------------------------------------------------------------------------- /test/files/pw.md_london.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/pw.md_london.out.gz -------------------------------------------------------------------------------- /test/files/pw.md_one_atom.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/pw.md_one_atom.out.gz -------------------------------------------------------------------------------- /test/files/pw.scf.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/pw.scf.out.gz -------------------------------------------------------------------------------- /test/files/pw.scf_no_forces_stress.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/pw.scf_no_forces_stress.out.gz -------------------------------------------------------------------------------- /test/files/pw.scf_one_atom.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/pw.scf_one_atom.out.gz -------------------------------------------------------------------------------- /test/files/pw.scf_verbose_london.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/pw.scf_verbose_london.out.gz -------------------------------------------------------------------------------- /test/files/pw.vc_md.cell.out: -------------------------------------------------------------------------------- 1 | CELL_PARAMETERS (alat= 2.00000) 2 | 5.0000000000000000e-01 1.0000000000000000e+00 1.5000000000000000e+00 3 | 2.0000000000000000e+00 2.5000000000000000e+00 3.0000000000000000e+00 4 | 3.5000000000000000e+00 4.0000000000000000e+00 4.5000000000000000e+00 5 | CELL_PARAMETERS (alat= 2.00000) 6 | 5.1000000000000001e-01 1.0100000000000000e+00 1.5100000000000000e+00 7 | 2.0099999999999998e+00 2.5099999999999998e+00 3.0099999999999998e+00 8 | 3.5099999999999998e+00 4.0099999999999998e+00 4.5099999999999998e+00 9 | CELL_PARAMETERS (alat= 2.00000) 10 | 5.2000000000000002e-01 1.0200000000000000e+00 1.5200000000000000e+00 11 | 2.0200000000000000e+00 2.5200000000000000e+00 3.0200000000000000e+00 12 | 3.5200000000000000e+00 4.0199999999999996e+00 4.5199999999999996e+00 13 | CELL_PARAMETERS (alat= 2.00000) 14 | 5.3000000000000003e-01 1.0300000000000000e+00 1.5300000000000000e+00 15 | 2.0299999999999998e+00 
2.5299999999999998e+00 3.0299999999999998e+00 16 | 3.5299999999999998e+00 4.0300000000000002e+00 4.5300000000000002e+00 17 | CELL_PARAMETERS (alat= 2.00000) 18 | 5.4000000000000004e-01 1.0400000000000000e+00 1.5400000000000000e+00 19 | 2.0400000000000000e+00 2.5400000000000000e+00 3.0400000000000000e+00 20 | 3.5400000000000000e+00 4.0400000000000000e+00 4.5400000000000000e+00 21 | CELL_PARAMETERS (alat= 4.00000) 22 | 5.0000000000000000e-01 7.5000000000000000e-01 1.0000000000000000e+00 23 | 1.2500000000000000e+00 1.5000000000000000e+00 1.7500000000000000e+00 24 | 2.0000000000000000e+00 2.2500000000000000e+00 2.5000000000000000e+00 25 | CELL_PARAMETERS (alat= 4.00000) 26 | 5.0500000000000000e-01 7.5500000000000000e-01 1.0049999999999999e+00 27 | 1.2549999999999999e+00 1.5049999999999999e+00 1.7549999999999999e+00 28 | 2.0049999999999999e+00 2.2549999999999999e+00 2.5049999999999999e+00 29 | CELL_PARAMETERS (alat= 4.00000) 30 | 5.1000000000000001e-01 7.6000000000000001e-01 1.0100000000000000e+00 31 | 1.2600000000000000e+00 1.5100000000000000e+00 1.7600000000000000e+00 32 | 2.0099999999999998e+00 2.2599999999999998e+00 2.5099999999999998e+00 33 | CELL_PARAMETERS (alat= 4.00000) 34 | 5.1500000000000001e-01 7.6500000000000001e-01 1.0150000000000001e+00 35 | 1.2649999999999999e+00 1.5149999999999999e+00 1.7649999999999999e+00 36 | 2.0149999999999997e+00 2.2650000000000001e+00 2.5150000000000001e+00 37 | CELL_PARAMETERS (alat= 4.00000) 38 | 5.2000000000000002e-01 7.7000000000000002e-01 1.0200000000000000e+00 39 | 1.2700000000000000e+00 1.5200000000000000e+00 1.7700000000000000e+00 40 | 2.0200000000000000e+00 2.2700000000000000e+00 2.5200000000000000e+00 41 | -------------------------------------------------------------------------------- /test/files/pw.vc_relax.in: -------------------------------------------------------------------------------- 1 | &control 2 | calculation = 'vc-relax' 3 | restart_mode='from_scratch', 4 | prefix='vrc_run0' 5 | tstress = .true. 6 | tprnfor = .true. 7 | nstep = 100, 8 | pseudo_dir = '/home/schmerle/soft/lib/espresso/pseudo/pseudo_espresso', 9 | outdir='/fastfs/schmerle' 10 | disk_io = 'low' 11 | wf_collect = .true. 
12 | / 13 | &system 14 | ibrav = 4, 15 | celldm(1) = 5.878937999, 16 | celldm(3) = 1.600128576, 17 | nat = 4, 18 | ntyp = 2, 19 | ecutwfc = 50, 20 | ecutrho = 500, 21 | occupations = 'smearing' 22 | degauss = 0.002 23 | smearing = 'gaussian' 24 | nosym = .false., 25 | / 26 | &electrons 27 | diagonalization='david' 28 | mixing_mode = 'local-TF' 29 | mixing_beta = 0.4 30 | / 31 | &ions 32 | pot_extrapolation='second-order', 33 | wfc_extrapolation='second-order', 34 | / 35 | &cell 36 | press = -250.0 37 | press_conv_thr = 1 38 | cell_dynamics = 'bfgs' 39 | cell_dofree = 'xyz' 40 | / 41 | ATOMIC_SPECIES 42 | Al 26.981538 Al.pbe-n-van.UPF 43 | N 14.00674 N.pbe-van_ak.UPF 44 | ATOMIC_POSITIONS crystal 45 | Al 0.33333 0.66667 0 46 | Al 0.66667 0.33333 0.50000 47 | N 0.33333 0.66667 0.38800 48 | N 0.66667 0.33333 0.88800 49 | K_POINTS automatic 50 | 8 8 8 0 0 0 51 | 52 | -------------------------------------------------------------------------------- /test/files/pw.vc_relax.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/pw.vc_relax.out.gz -------------------------------------------------------------------------------- /test/files/pw.vc_relax_cell_unit.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/pw.vc_relax_cell_unit.out.gz -------------------------------------------------------------------------------- /test/files/pw.vc_relax_coords_fixed.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/pw.vc_relax_coords_fixed.out.gz -------------------------------------------------------------------------------- /test/files/pw.vc_relax_no_cell_unit.out.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/pw.vc_relax_no_cell_unit.out.gz -------------------------------------------------------------------------------- /test/files/qe_matdyn_disp/matdyn.freq.disp.gp.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/qe_matdyn_disp/matdyn.freq.disp.gp.gz -------------------------------------------------------------------------------- /test/files/qe_matdyn_disp/matdyn.freq.disp.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/qe_matdyn_disp/matdyn.freq.disp.gz -------------------------------------------------------------------------------- /test/files/qe_pseudos/Al.pbe-n-kjpaw_psl.0.1.UPF.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/qe_pseudos/Al.pbe-n-kjpaw_psl.0.1.UPF.gz -------------------------------------------------------------------------------- /test/files/qe_pseudos/N.pbe-n-kjpaw_psl.0.1.UPF.gz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/qe_pseudos/N.pbe-n-kjpaw_psl.0.1.UPF.gz -------------------------------------------------------------------------------- /test/files/ref_test_pdos/dd.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/ref_test_pdos/dd.txt.gz -------------------------------------------------------------------------------- /test/files/ref_test_pdos/dv.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/ref_test_pdos/dv.txt.gz -------------------------------------------------------------------------------- /test/files/ref_test_pdos/fd.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/ref_test_pdos/fd.txt.gz -------------------------------------------------------------------------------- /test/files/ref_test_pdos/fv.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/ref_test_pdos/fv.txt.gz -------------------------------------------------------------------------------- /test/files/rpdf/rand_3d.cell.txt: -------------------------------------------------------------------------------- 1 | 1.000000000000000000e+01 0.000000000000000000e+00 0.000000000000000000e+00 2 | 0.000000000000000000e+00 1.000000000000000000e+01 0.000000000000000000e+00 3 | 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+01 4 | -------------------------------------------------------------------------------- /test/files/rpdf/result.rmax_auto.aln_ibrav0_sc.txt: -------------------------------------------------------------------------------- 1 | 5.000000000000000000e+00 2 | -------------------------------------------------------------------------------- /test/files/rpdf/result.rmax_auto.aln_ibrav2_sc.txt: -------------------------------------------------------------------------------- 1 | 5.773502691883621196e+00 2 | -------------------------------------------------------------------------------- /test/files/rpdf/result.rmax_auto.rand_3d.txt: -------------------------------------------------------------------------------- 1 | 5.000000000000000000e+00 2 | -------------------------------------------------------------------------------- /test/files/si.phdos.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elcorto/pwtools/a42f7e0a831b7cf6d11fa5dfde7d07cffab24d2b/test/files/si.phdos.gz -------------------------------------------------------------------------------- /test/test_acorr.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools.signal import acorr 3 | 4 | def test_acorr(): 5 | arr = np.random.rand(100) 6 | for norm in [True,False]: 7 | ref = acorr(arr, method=1, norm=norm) 8 | for m in range(2,8): 9 | print("%i : 1" %m) 10 | np.testing.assert_array_almost_equal(acorr(arr, method=m, 11 | norm=norm), 12 | ref) 13 | -------------------------------------------------------------------------------- /test/test_angle.py: 
-------------------------------------------------------------------------------- 1 | from itertools import permutations 2 | 3 | import numpy as np 4 | 5 | from pwtools import crys,io 6 | 7 | 8 | def angles(struct, pbc=False, mask_val=999.0, deg=True): 9 | """Python implementation of angles.""" 10 | norm = np.linalg.norm 11 | anglesijk = np.ones((struct.natoms,)*3, dtype=float)*mask_val 12 | angleidx = np.array([x for x in permutations(list(range(struct.natoms)),3)]) 13 | for ijk in angleidx: 14 | ii,jj,kk = ijk 15 | ci = struct.coords_frac[ii,:] 16 | cj = struct.coords_frac[jj,:] 17 | ck = struct.coords_frac[kk,:] 18 | dvij = ci - cj 19 | dvik = ci - ck 20 | if pbc: 21 | dvij = np.dot(crys.min_image_convention(dvij), struct.cell) 22 | dvik = np.dot(crys.min_image_convention(dvik), struct.cell) 23 | else: 24 | dvij = np.dot(dvij, struct.cell) 25 | dvik = np.dot(dvik, struct.cell) 26 | cang = np.dot(dvij, dvik) / norm(dvij) / norm(dvik) 27 | ceps = 1.0-2.2e-16 28 | if cang > ceps: 29 | cang = 1.0 30 | elif cang < -ceps: 31 | cang = -1.0 32 | if deg: 33 | anglesijk[ii,jj,kk] = np.arccos(cang) * 180.0 / np.pi 34 | else: 35 | anglesijk[ii,jj,kk] = cang 36 | return anglesijk, angleidx 37 | 38 | 39 | def test_angle(): 40 | # CaCl struct, the supercell will have 0 and 180 degrees -> check corner 41 | # cases 42 | st = io.read_cif('files/angle/rs.cif') 43 | st = crys.scell(st, (2,1,1)) 44 | nang = st.natoms*(st.natoms-1)*(st.natoms-2) 45 | mask_val = 999.0 46 | for deg in [True,False]: 47 | for pbc in [True,False]: 48 | agf = crys.angles(st, pbc=pbc, mask_val=mask_val) 49 | agpy, aipy = angles(st, pbc=pbc, mask_val=mask_val) 50 | eps = np.finfo(float).eps*5 51 | assert np.allclose(agf, agpy) 52 | assert aipy.shape[0] == nang 53 | assert len((agf != mask_val).nonzero()[0]) == nang 54 | angleidx = np.array(list(zip(*(agf != mask_val).nonzero()))) 55 | assert (angleidx == aipy).all() 56 | assert not np.isnan(agpy).any(), "python angle nan" 57 | assert not np.isnan(agf).any(), "fortran angle nan" 58 | # do we have 0 and 180 degrees? 59 | assert (agf < eps).any(), "no zero degree cases" 60 | assert (agf - 180.0 < eps).any(), "no 180 degree cases" 61 | assert (agf >= 0.0).all(), "negative angles" 62 | -------------------------------------------------------------------------------- /test/test_ase.py: -------------------------------------------------------------------------------- 1 | from pwtools import crys 2 | from pwtools.test import tools 3 | import numpy as np 4 | rand = np.random.rand 5 | 6 | def test_get_ase_atoms(): 7 | natoms = 10 8 | st = crys.Structure(coords_frac=rand(natoms,3), 9 | symbols=['H']*10, 10 | cell=rand(3,3)) 11 | try: 12 | import ase 13 | st2 = crys.atoms2struct(crys.struct2atoms(st)) 14 | keys = ['natoms', 'coords', 'coords_frac', 'symbols', 'cryst_const', 15 | 'cell', 'volume', 'mass'] 16 | tools.assert_dict_with_all_types_almost_equal(\ 17 | st.__dict__, 18 | st2.__dict__, 19 | keys=keys, 20 | strict=True) 21 | # in case the test fails, use this to find out which key failed 22 | ## for kk in keys: 23 | ## print("testing: %s ..." 
%kk) 24 | ## tools.assert_all_types_almost_equal(st.__dict__[kk], st2.__dict__[kk]) 25 | for pbc in [True,False]: 26 | at = st.get_ase_atoms(pbc=pbc) 27 | assert (at.pbc == np.array([pbc]*3)).all() 28 | at = crys.struct2atoms(st, pbc=pbc) 29 | assert (at.pbc == np.array([pbc]*3)).all() 30 | except ImportError: 31 | tools.skip("cannot import ase, skipping test get_ase_atoms()") 32 | -------------------------------------------------------------------------------- /test/test_backup.py: -------------------------------------------------------------------------------- 1 | import os, tempfile, uuid 2 | from pwtools.common import backup, file_write, file_read 3 | from pwtools.test.testenv import testdir 4 | pj = os.path.join 5 | 6 | def create_full_dir(dn): 7 | os.makedirs(dn) 8 | for name in ['a', 'b', 'c']: 9 | file_write(pj(dn, name), 'foo') 10 | 11 | def test_backup(): 12 | # file 13 | name = tempfile.mktemp(prefix='testfile', dir=testdir) 14 | file_write(name, 'foo') 15 | backup(name) 16 | assert os.path.exists(name + '.0') 17 | backup(name) 18 | assert os.path.exists(name + '.1') 19 | backup(name) 20 | assert os.path.exists(name + '.2') 21 | 22 | # dir 23 | name = tempfile.mktemp(prefix='testdir', dir=testdir) 24 | create_full_dir(name) 25 | backup(name) 26 | assert os.path.exists(name + '.0') 27 | backup(name) 28 | assert os.path.exists(name + '.1') 29 | backup(name) 30 | assert os.path.exists(name + '.2') 31 | 32 | # link to file 33 | filename = tempfile.mktemp(prefix='testfile', dir=testdir) 34 | linkname = tempfile.mktemp(prefix='testlink', dir=testdir) 35 | file_write(filename, 'foo') 36 | os.symlink(filename, linkname) 37 | backup(linkname) 38 | assert os.path.exists(linkname + '.0') 39 | assert os.path.isfile(linkname + '.0') 40 | assert file_read(linkname + '.0') == file_read(filename) 41 | 42 | # link to dir 43 | dirname = tempfile.mktemp(prefix='testdir', dir=testdir) 44 | linkname = tempfile.mktemp(prefix='testlink', dir=testdir) 45 | create_full_dir(dirname) 46 | os.symlink(dirname, linkname) 47 | backup(linkname) 48 | assert os.path.exists(linkname + '.0') 49 | assert os.path.isdir(linkname + '.0') 50 | for name in ['a', 'b', 'c']: 51 | assert file_read(pj(dirname, name)) == \ 52 | file_read(pj(linkname + '.0', name)) 53 | 54 | # prefix 55 | name = tempfile.mktemp(prefix='testfile', dir=testdir) 56 | file_write(name, 'foo') 57 | backup(name, prefix="-bak") 58 | assert os.path.exists(name + '-bak0') 59 | 60 | # nonexisting src, should silently pass 61 | filename = str(uuid.uuid4()) 62 | assert not os.path.exists(filename) 63 | backup(filename) 64 | -------------------------------------------------------------------------------- /test/test_batch.py: -------------------------------------------------------------------------------- 1 | from pwtools.batch import Case 2 | 3 | def test_case(): 4 | 5 | c = Case(a=1, b='b') 6 | assert c.a == 1 7 | assert c.b == 'b' 8 | 9 | class MyCase(Case): 10 | pass 11 | 12 | c = MyCase(a=1, b='b') 13 | assert c.a == 1 14 | assert c.b == 'b' 15 | 16 | class MyCase(Case): 17 | def init(self): 18 | self.a += 1 19 | self.b += 'x' 20 | 21 | c = MyCase(a=1, b='b') 22 | assert c.a == 2 23 | assert c.b == 'bx' 24 | 25 | -------------------------------------------------------------------------------- /test/test_celldm_cryst_const.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools.crys import cc2celldm, celldm2cc 3 | 4 | def assrt(a,b): 5 | np.testing.assert_array_almost_equal(a, b) 6 | 7 
| def test_celldm_cryst_const(): 8 | cc = np.array([3,4,5, 30, 50, 123.0]) 9 | assrt(cc, celldm2cc(cc2celldm(cc))) 10 | 11 | cc = np.array([3,4,5, 30, 50, 123.0]) 12 | assrt(cc, celldm2cc(cc2celldm(cc, fac=10), fac=0.1)) 13 | 14 | cc = [3,3,3,90,90,90] 15 | assrt(cc2celldm(cc), np.array([3,1,1,0,0,0])) 16 | 17 | cc = [3,4,5, 90, 90, 120] 18 | assrt(cc2celldm(cc), 19 | np.array([3, 4/3., 5/3., 0,0, -0.5])) 20 | 21 | cc = [3,4,5, 90, 90, 120] 22 | assrt(cc2celldm(cc, fac=10), 23 | np.array([30, 4/3., 5/3., 0,0, -0.5])) 24 | 25 | -------------------------------------------------------------------------------- /test/test_center_on_atom.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools.crys import center_on_atom 3 | import pwtools.test.utils.rand_container as rc 4 | 5 | def test(): 6 | # Explicit code duplication even though we could use [...,0,:] which should 7 | # work for (natoms,3) and (nstep,natoms,3) arrays [we use it in 8 | # center_on_atom()], but check if it really does. 9 | st = rc.get_rand_struct() 10 | stc = center_on_atom(st, idx=0, copy=True) 11 | assert (st.coords_frac != stc.coords_frac).all() 12 | assert (st.coords != stc.coords).all() 13 | assert (st.coords_frac[0,:] != np.array([0.5]*3)).all() 14 | assert (stc.coords_frac[0,:] == np.array([0.5]*3)).all() 15 | assert (stc.coords_frac[1:,:] != np.array([0.5]*3)).all() 16 | 17 | tr = rc.get_rand_traj() 18 | trc = center_on_atom(tr, idx=0, copy=True) 19 | assert (tr.coords_frac != trc.coords_frac).all() 20 | assert (tr.coords != trc.coords).all() 21 | assert (tr.coords_frac[:,0,:] != np.array([0.5]*3)).all() 22 | assert (trc.coords_frac[:,0,:] == np.array([0.5]*3)).all() 23 | assert (trc.coords_frac[:,1:,:] != np.array([0.5]*3)).all() 24 | -------------------------------------------------------------------------------- /test/test_cif.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess as sp 3 | 4 | import numpy as np 5 | 6 | from pwtools.parse import CifFile 7 | from pwtools import io 8 | 9 | from pwtools.test.testenv import testdir 10 | 11 | 12 | def test_cif_parse(): 13 | for filename in ['files/cif_struct.cif', 'files/cif_cart_struct.cif']: 14 | p1 = CifFile(filename).get_struct() 15 | assert p1.cell is not None 16 | assert p1.cryst_const is not None 17 | assert p1.symbols is not None 18 | assert p1.coords is not None 19 | assert p1.coords_frac is not None 20 | 21 | # test writing 22 | filename = os.path.join(testdir, 'test_write_cif.cif') 23 | io.write_cif(filename, p1) 24 | p2 = CifFile(filename).get_struct() 25 | np.testing.assert_array_almost_equal(p1.coords_frac, p2.coords_frac) 26 | np.testing.assert_array_almost_equal(p1.coords, p2.coords) 27 | np.testing.assert_array_almost_equal(p1.cryst_const, p2.cryst_const) 28 | np.testing.assert_array_almost_equal(p1.cell, p2.cell) 29 | assert p1.symbols == p2.symbols 30 | 31 | 32 | def test_cif2any(): 33 | exe = os.path.join(os.path.dirname(__file__), 34 | '../bin/cif2any.py') 35 | cmd = '{e} files/cif_struct.cif > cif2any.log'.format(e=exe) 36 | sp.run(cmd, check=True, shell=True) 37 | 38 | 39 | def test_cif2sgroup(): 40 | exe = os.path.join(os.path.dirname(__file__), 41 | '../bin/cif2sgroup.py') 42 | cmd = '{e} files/cif_struct.cif > cif2sgroup.log'.format(e=exe) 43 | sp.run(cmd, check=True, shell=True) 44 | -------------------------------------------------------------------------------- /test/test_common.py: 
-------------------------------------------------------------------------------- 1 | from pwtools import common as co 2 | import tempfile, os 3 | from pwtools.test.testenv import testdir 4 | 5 | def test_makedirs(): 6 | tmpdir = tempfile.mkdtemp(dir=testdir, prefix=__file__) 7 | tgt = os.path.join(tmpdir, 'foo') 8 | co.makedirs(tgt) 9 | assert os.path.exists(tgt) 10 | # pass 11 | co.makedirs('') 12 | -------------------------------------------------------------------------------- /test/test_conv_table.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools.batch import conv_table 3 | 4 | def test_conv_table(): 5 | xx1 = ['a', 'b', 'c'] 6 | xx2 = np.array([1,2,3])*np.pi 7 | yy1 = [1,2,3] 8 | yy2 = [1.2,2.3,3.4] 9 | yy3 = [1.666,2.777,3.888] 10 | for xx in [xx1, xx2]: 11 | for mode in ['next','last']: 12 | st1 = conv_table(xx, yy1, mode=mode) 13 | st2 = conv_table(xx, [yy1], mode=mode) 14 | assert st1 == st2 15 | st1 = conv_table(xx, [yy1,yy2], mode=mode) 16 | st2 = conv_table(xx, np.array([yy1,yy2]), mode=mode) 17 | assert st1 == st2 18 | st1 = conv_table(xx, [yy1,yy2,yy3], mode=mode) 19 | st2 = conv_table(xx, np.array([yy1,yy2,yy3]), mode=mode) 20 | assert st1 == st2 21 | 22 | # API 23 | conv_table(xx, [yy1,yy2,yy3], mode='last', orig=True) 24 | -------------------------------------------------------------------------------- /test/test_cpmd_md.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | from pwtools.parse import CpmdMDOutputFile 3 | from pwtools import common, verbose 4 | from pwtools.test.tools import assert_attrs_not_none, unpack_compressed 5 | verbose.VERBOSE = True 6 | pj = os.path.join 7 | 8 | def run(filename, none_attrs=[]): 9 | # filename = 'files/cpmd/md_bo/cpmd.bo.out' 10 | # basename = 'cpmd.bo.out' 11 | # archive = 'files/cpmd/md_bo.tgz' 12 | bar = '='*78 13 | print(bar) 14 | print("@@testing: %s" %filename) 15 | print(bar) 16 | basename = os.path.basename(filename) 17 | archive = os.path.dirname(filename) + '.tgz' 18 | workdir = unpack_compressed(archive) 19 | pp = CpmdMDOutputFile(filename=pj(workdir, basename)) 20 | pp.parse() 21 | assert_attrs_not_none(pp, none_attrs=none_attrs) 22 | traj = pp.get_traj() 23 | attrs3d = ['coords', 24 | 'coords_frac', 25 | 'forces', 26 | 'cell', 27 | 'stress', 28 | ] 29 | for attr_name in traj.attr_lst: 30 | attr = getattr(traj, attr_name) 31 | if attr_name not in none_attrs: 32 | if hasattr(attr, 'ndim'): 33 | print("%s: ndim: %s, shape: %s" %(attr_name, attr.ndim, attr.shape)) 34 | assert attr is not None, "FAILED - None: %s" %attr_name 35 | if attr_name in attrs3d: 36 | assert attr.ndim == 3, "FAILED - not 3d: %s" %attr_name 37 | 38 | def test_cpmd_md(): 39 | # For BO-MD w/ ODIIS optimizer, ekin_elec = [0,0,...,0] but not None. 
40 | run(filename='files/cpmd/md_bo_odiis/cpmd.bo.out', 41 | none_attrs=['stress', 42 | 'pressure', 43 | 'ekin_cell', 44 | ## 'ekin_elec', 45 | 'temperature_cell', 46 | ]) 47 | run(filename='files/cpmd/md_bo_odiis_npt/cpmd.out', 48 | none_attrs=['forces', 49 | 'ekin_cell', 50 | ## 'ekin_elec', 51 | 'temperature_cell', 52 | ]) 53 | run(filename='files/cpmd/md_bo_lanczos/cpmd.bo.out', 54 | none_attrs=['stress', 55 | 'pressure', 56 | 'ekin_cell', 57 | 'ekin_elec', 58 | 'temperature_cell', 59 | ]) 60 | run(filename='files/cpmd/md_cp_mttk/cpmd.out', 61 | none_attrs=['forces', 62 | ]) 63 | run(filename='files/cpmd/md_cp_pr/cpmd.out', 64 | none_attrs=['forces', 65 | ]) 66 | run(filename='files/cpmd/md_cp_nvt_nose/cpmd.out', 67 | none_attrs=['ekin_cell', 68 | 'temperature_cell', 69 | ]) 70 | run(filename='files/cpmd/md_cp_nve/cpmd.out', 71 | none_attrs=['ekin_cell', 72 | 'temperature_cell', 73 | ]) 74 | -------------------------------------------------------------------------------- /test/test_cpmd_scf.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | from pwtools.parse import CpmdSCFOutputFile 3 | from pwtools import common 4 | from pwtools.test.tools import assert_attrs_not_none, unpack_compressed 5 | pj = os.path.join 6 | 7 | def test_cpmd_scf(): 8 | filename = 'files/cpmd/scf/cpmd.out' 9 | basename = os.path.basename(filename) 10 | archive = os.path.dirname(filename) + '.tgz' 11 | workdir = unpack_compressed(archive) 12 | pp = CpmdSCFOutputFile(filename=pj(workdir, basename)) 13 | pp.parse() 14 | assert_attrs_not_none(pp, none_attrs=[]) 15 | -------------------------------------------------------------------------------- /test/test_crys_cell_tools_fortran.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools import _flib, crys, timer, num 3 | rand = np.random.rand 4 | 5 | def test_solve(): 6 | n = 5 7 | A = rand(n,n) 8 | b = rand(n) 9 | bold = b.copy() 10 | assert np.allclose(np.linalg.solve(A,b), _flib.solve(A,b)) 11 | assert (b == bold).all() 12 | 13 | 14 | def test_frac2cart(): 15 | coords_frac = rand(20,3) 16 | coords_frac_copy = coords_frac.copy() 17 | cell = rand(3,3) 18 | c1 = np.dot(coords_frac, cell) 19 | c2 = _flib.frac2cart(coords_frac, cell) 20 | c3 = crys.coord_trans(coords_frac, old=cell, new=np.identity(3)) 21 | assert (coords_frac == coords_frac_copy).all() 22 | assert np.allclose(c1, c2) 23 | assert np.allclose(c1, c3) 24 | assert c2.flags.f_contiguous 25 | 26 | 27 | def test_cart2frac(): 28 | coords = rand(20,3) 29 | coords_copy = coords.copy() 30 | cell = rand(3,3) 31 | c1 = np.dot(coords, np.linalg.inv(cell)) 32 | c2 = np.linalg.solve(cell.T, coords.T).T 33 | c3 = _flib.cart2frac(coords, cell) 34 | c4 = crys.coord_trans(coords, new=cell, old=np.identity(3)) 35 | assert (coords == coords_copy).all() 36 | assert np.allclose(c1, c2) 37 | assert np.allclose(c1, c3) 38 | assert np.allclose(c1, c4) 39 | assert c3.flags.f_contiguous 40 | 41 | 42 | def test_frac2cart_traj(): 43 | nstep = 100 44 | coords_frac = rand(nstep,20,3) 45 | coords_frac_copy = coords_frac.copy() 46 | cell = rand(nstep,3,3) 47 | c1 = np.array([np.dot(coords_frac[ii,...], cell[ii,...]) for ii in \ 48 | range(nstep)]) 49 | c2 = _flib.frac2cart_traj(coords_frac, cell) 50 | c3 = crys.coord_trans3d(coords_frac, old=cell, 51 | new=num.extend_array(np.identity(3), 52 | nstep=nstep, axis=0)) 53 | assert (coords_frac == coords_frac_copy).all() 54 | assert np.allclose(c1, c2) 55 | assert 
np.allclose(c1, c3) 56 | assert c2.flags.f_contiguous 57 | 58 | 59 | def test_cart2frac_traj(): 60 | nstep = 100 61 | coords = rand(nstep,20,3) 62 | coords_copy = coords.copy() 63 | cell = rand(nstep,3,3) 64 | c1 = np.array([np.dot(coords[ii,...], np.linalg.inv(cell[ii,...])) for ii in \ 65 | range(nstep)]) 66 | c2 = _flib.cart2frac_traj(coords, cell) 67 | c3 = crys.coord_trans3d(coords, new=cell, 68 | old=num.extend_array(np.identity(3), 69 | nstep=nstep, axis=0)) 70 | assert (coords == coords_copy).all() 71 | assert np.allclose(c1, c2) 72 | assert np.allclose(c1, c3) 73 | assert c2.flags.f_contiguous 74 | 75 | 76 | -------------------------------------------------------------------------------- /test/test_cut_cpmd.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import subprocess as sp 3 | from pwtools.test.tools import unpack_compressed 4 | 5 | def run(dr): 6 | dr = dr[:-1] if dr.endswith('/') else dr 7 | workdir = unpack_compressed(dr + '.tgz') 8 | exe = os.path.join(os.path.dirname(__file__), 9 | '../bin/cut-cpmd.sh') 10 | cmd = '{e} {w} 20 > {w}/cut-cpmd.log'.format(e=exe, w=workdir) 11 | sp.run(cmd, check=True, shell=True) 12 | 13 | def test_cut_cpmd(): 14 | run(dr='files/cpmd/md_cp_pr') 15 | 16 | -------------------------------------------------------------------------------- /test/test_datand.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools.num import DataND 3 | rand = np.random.rand 4 | 5 | def test_datand(): 6 | x0 = np.sort(rand(3)) 7 | x1 = np.sort(rand(2)) 8 | x2 = np.sort(rand(5)) 9 | x3 = np.sort(rand(7)) 10 | axes = [x0,x1,x2,x3] 11 | shape = tuple([len(x) for x in axes]) 12 | a2 = np.empty((np.prod(shape), len(shape)+1), dtype=x0.dtype) 13 | 14 | an_ref = np.empty(shape, dtype=x0.dtype) 15 | idx = 0 16 | for i0,_x0 in enumerate(x0): 17 | for i1,_x1 in enumerate(x1): 18 | for i2,_x2 in enumerate(x2): 19 | for i3,_x3 in enumerate(x3): 20 | val = _x0*_x1*_x2*_x3 21 | an_ref[i0,i1,i2,i3] = val 22 | a2[idx,0] = _x0 23 | a2[idx,1] = _x1 24 | a2[idx,2] = _x2 25 | a2[idx,3] = _x3 26 | a2[idx,4] = val 27 | idx += 1 28 | 29 | nd = DataND(a2=a2) 30 | assert (nd.an == an_ref).all() 31 | 32 | for x,y in zip(axes, nd.axes): 33 | assert (x == y).all() 34 | -------------------------------------------------------------------------------- /test/test_deriv.py: -------------------------------------------------------------------------------- 1 | # Test numerical derivatives. 2 | # 3 | # deriv_spl: Test correctness of results for the case where we use (x,y) 4 | # as input. Note the low "decimal" values for 5 | # testing.assert_array_almost_equal: 6 | # n=1: 4 7 | # n=2: 2 8 | # You see that the derivatives are not very accurate, using the default spline 9 | # parameters (order, smoothing)! However, plotting y - yd reveals that the 10 | # errors are only big near the x-range edges x[0] and x[-1], not in between, so 11 | # it is safe to use the derivatitves after testing for further calculations. 
12 | 13 | import numpy as np 14 | from pwtools import num 15 | asrt = np.testing.assert_array_almost_equal 16 | 17 | def test_deriv(): 18 | x = np.linspace(0,10,100) 19 | y = np.sin(x) 20 | for n, func, decimal in [(1, np.cos, 4), (2, lambda x: -np.sin(x), 2)]: 21 | print(n, func) 22 | xd, yd = num.deriv_spl(y, n=n, fullout=True) 23 | assert [len(xd), len(yd)] == [len(x)]*2 24 | xd, yd = num.deriv_spl(y, x, n=n, fullout=True) 25 | asrt(func(xd), yd, decimal=decimal) 26 | assert [len(xd), len(yd)] == [len(x)]*2 27 | 28 | -------------------------------------------------------------------------------- /test/test_dist.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.spatial.distance import cdist 3 | from pwtools import num 4 | from pwtools.test.tools import aaae 5 | rand = np.random.rand 6 | 7 | def test_dist(): 8 | X = rand(100,5) 9 | Y = rand(80,5) 10 | d1 = num.distsq(X,Y) 11 | d2 = ((X[:,None,...] - Y[None,...])**2.0).sum(axis=-1) 12 | d3 = cdist(X,Y, metric='sqeuclidean') 13 | print("d1 - d2") 14 | aaae(d1,d2) 15 | print("d1 - d3") 16 | aaae(d1,d3) 17 | -------------------------------------------------------------------------------- /test/test_dist_traj.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools.crys import Trajectory 3 | from pwtools import crys, num 4 | rand = np.random.rand 5 | 6 | def test_dist_traj(): 7 | natoms = 10 8 | nstep = 100 9 | cell = rand(nstep,3,3) 10 | stress = rand(nstep,3,3) 11 | forces = rand(nstep,natoms,3) 12 | etot=rand(nstep) 13 | cryst_const = crys.cell2cc3d(cell, axis=0) 14 | coords_frac = np.random.rand(nstep,natoms,3) 15 | coords = crys.coord_trans3d(coords=coords_frac, 16 | old=cell, 17 | new=num.extend_array(np.identity(3), 18 | nstep,axis=0), 19 | axis=1, 20 | timeaxis=0) 21 | assert cryst_const.shape == (nstep, 6) 22 | assert coords.shape == (nstep,natoms,3) 23 | symbols = ['H']*natoms 24 | 25 | traj = Trajectory(coords_frac=coords_frac, 26 | cell=cell, 27 | symbols=symbols, 28 | forces=forces, 29 | stress=stress, 30 | etot=etot, 31 | timestep=1, 32 | ) 33 | 34 | for pbc in [True, False]: 35 | # (nstep, natoms, natoms, 3) 36 | distvecs_frac = traj.coords_frac[:,:,None,:] - \ 37 | traj.coords_frac[:,None,:,:] 38 | assert distvecs_frac.shape == (nstep, natoms, natoms, 3) 39 | if pbc: 40 | distvecs_frac = crys.min_image_convention(distvecs_frac) 41 | distvecs = np.empty((nstep, natoms, natoms, 3)) 42 | for ii in range(traj.nstep): 43 | distvecs[ii,...] = np.dot(distvecs_frac[ii,...], traj.cell[ii,...]) 44 | # (nstep, natoms, natoms) 45 | dists = np.sqrt((distvecs**2.0).sum(axis=-1)) 46 | assert np.allclose(dists, crys.distances_traj(traj, pbc=pbc)) 47 | -------------------------------------------------------------------------------- /test/test_distsq_frac.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools import _flib, crys, timer 3 | from pwtools.num import fempty 4 | 5 | def pydist(coords_frac, cell, pbc=0): 6 | distvecs_frac = coords_frac[:,None,:] - coords_frac[None,...] 
7 | if pbc == 1: 8 | distvecs_frac = crys.min_image_convention(distvecs_frac) 9 | distvecs = np.dot(distvecs_frac, cell) 10 | distsq = (distvecs**2.0).sum(axis=2) 11 | return distsq, distvecs, distvecs_frac 12 | 13 | def test_fdist(): 14 | 15 | natoms = 5 16 | coords_frac = np.random.rand(natoms,3) 17 | cell = np.random.rand(3,3)*3 18 | struct = crys.Structure(coords_frac=coords_frac, 19 | cell=cell) 20 | 21 | for pbc in [0,1]: 22 | print("pbc:", pbc) 23 | pyret = pydist(coords_frac, cell, pbc) 24 | # uses _flib.distsq_frac() 25 | pyret2 = crys.distances(struct, pbc=pbc, squared=True, fullout=True) 26 | for ii in [0,1,2]: 27 | print(ii) 28 | assert np.allclose(pyret[ii], pyret2[ii]) 29 | -------------------------------------------------------------------------------- /test/test_extend_array.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | from pwtools import num, common 4 | rand = np.random.rand 5 | 6 | def equal(a,b): 7 | assert (a == b).all() 8 | 9 | def test_extend_array(): 10 | arr = rand(3,3) 11 | nrep = 5 12 | a0 = num.extend_array(arr, nrep, axis=0) 13 | a1 = num.extend_array(arr, nrep, axis=1) 14 | a2 = num.extend_array(arr, nrep, axis=2) 15 | am1 = num.extend_array(arr, nrep, axis=-1) 16 | assert a0.shape == (nrep,3,3) 17 | assert a1.shape == (3,nrep,3) 18 | assert a2.shape == (3,3,nrep) 19 | assert am1.shape == (3,3,nrep) 20 | equal(a2, am1) 21 | 22 | for axis, aa in enumerate([a0, a1, a2]): 23 | for ii in range(nrep): 24 | # slicetake(a0, 3, 0) -> a0[3,:,:] 25 | equal(arr, num.slicetake(aa, ii, axis=axis)) 26 | -------------------------------------------------------------------------------- /test/test_fft.py: -------------------------------------------------------------------------------- 1 | def test_fft(): 2 | import numpy as np 3 | from pwtools import signal 4 | from scipy.fftpack import fft 5 | 6 | y = np.random.rand(1000) 7 | pwtools_ffty = signal.dft(y) 8 | scipy_ffty = fft(y) 9 | np.testing.assert_almost_equal(scipy_ffty, pwtools_ffty) 10 | np.testing.assert_almost_equal(scipy_ffty.real, pwtools_ffty.real) 11 | np.testing.assert_almost_equal(scipy_ffty.imag, pwtools_ffty.imag) 12 | -------------------------------------------------------------------------------- /test/test_file_template.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pwtools.batch import FileTemplate 3 | from pwtools.common import file_write, file_read 4 | from pwtools import sql 5 | from pwtools.test.testenv import testdir 6 | pj = os.path.join 7 | 8 | def test_placeholders(): 9 | templ_dir = pj(testdir, 'calc.templ') 10 | templ_fn = pj(templ_dir, 'foo.in') 11 | tgt_dir = pj(testdir, 'calc') 12 | tgt_fn = pj(tgt_dir, 'foo.in') 13 | for dr in [templ_dir, tgt_dir]: 14 | if not os.path.exists(dr): 15 | os.makedirs(dr) 16 | 17 | templ_txt = "XXXFOO XXXBAR XXXBAZ" 18 | file_write(templ_fn, templ_txt) 19 | 20 | # specify keys 21 | templ = FileTemplate(basename='foo.in', 22 | keys=['foo', 'bar'], 23 | templ_dir=templ_dir) 24 | rules = {'foo': 1, 'bar': 'lala', 'baz': 3} 25 | templ.write(rules, calc_dir=tgt_dir) 26 | assert file_read(tgt_fn).strip() == "1 lala XXXBAZ" 27 | 28 | # no keys 29 | templ = FileTemplate(basename='foo.in', 30 | templ_dir=templ_dir) 31 | rules = {'foo': 1, 'bar': 'lala', 'baz': 3} 32 | templ.write(rules, calc_dir=tgt_dir) 33 | assert file_read(tgt_fn).strip() == "1 lala 3" 34 | 35 | # sql 36 | rules = {'foo': sql.SQLEntry(sqltype='integer', sqlval=1), 37 
| 'bar': sql.SQLEntry(sqltype='text', sqlval='lala'), 38 | 'baz': sql.SQLEntry(sqltype='integer', sqlval=3)} 39 | templ.writesql(rules, calc_dir=tgt_dir) 40 | assert file_read(tgt_fn).strip() == "1 lala 3" 41 | 42 | # non-default placefolders 43 | templ_txt = "@foo@ @bar@" 44 | file_write(templ_fn, templ_txt) 45 | templ = FileTemplate(basename='foo.in', 46 | templ_dir=templ_dir, 47 | func=lambda x: "@%s@" %x) 48 | rules = {'foo': 1, 'bar': 'lala'} 49 | templ.write(rules, calc_dir=tgt_dir) 50 | assert file_read(tgt_fn).strip() == "1 lala" 51 | 52 | # pass txt directly 53 | templ_txt = "XXXFOO XXXBAR XXXBAZ" 54 | templ = FileTemplate(basename='foo.in', 55 | txt=templ_txt) 56 | rules = {'foo': 1, 'bar': 'lala', 'baz': 3} 57 | templ.write(rules, calc_dir=tgt_dir) 58 | assert file_read(tgt_fn).strip() == "1 lala 3" 59 | 60 | def test_file_names(): 61 | templ = FileTemplate(filename='/foo/bar.in') 62 | assert templ.basename == 'bar.in' 63 | assert templ.templ_dir == '/foo' 64 | templ = FileTemplate(basename='bar.in') 65 | assert templ.templ_dir == 'calc.templ' 66 | assert templ.filename == 'calc.templ/bar.in' 67 | 68 | -------------------------------------------------------------------------------- /test/test_frepr.py: -------------------------------------------------------------------------------- 1 | from pwtools.common import frepr 2 | 3 | def test_frepr(): 4 | assert frepr(1) == '1' 5 | assert frepr(1.0) == '1.0000000000000000e+00' 6 | assert frepr(None) == 'None' 7 | assert frepr('abc') == 'abc' 8 | -------------------------------------------------------------------------------- /test/test_fromstring.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # test that fromstring does what we expect, don't get broken code by numpy 4 | # update :) 5 | def test_fromstring(): 6 | txt1 = """1 7 | 2.0 3.0 8 | 4 9 | 10 | 5 6 11 | """ 12 | txt2 = """ 13 | 1 2 3 14 | 4 5 6""" 15 | 16 | txt3 = "1 2 3 4 5 6" 17 | 18 | arr = np.array([1,2,3,4,5.0, 6]) 19 | for sep in [' ', ' ']: 20 | for txt in [txt1, txt2, txt3]: 21 | assert (arr == np.fromstring(txt, sep=sep, dtype=float)).all() 22 | -------------------------------------------------------------------------------- /test/test_get_cont.py: -------------------------------------------------------------------------------- 1 | from pwtools import parse, num 2 | from pwtools.test import tools 3 | 4 | def test_get_cont(): 5 | filename = tools.unpack_compressed('files/pw.md.out.gz', prefix=__file__) 6 | pp = parse.PwMDOutputFile(filename=filename) 7 | tr1 = pp.get_traj() 8 | 9 | # Need new parser instance, since pp.cont is already used, i.e. set_all() 10 | # called -> all attrs set. Also units are already applied, thus won't be 11 | # applied again since self.units_applied=True. 
12 | pp = parse.PwMDOutputFile(filename=filename) 13 | tr2 = pp.get_traj(auto_calc=False) 14 | 15 | # specific for the used pw.out file, None is everything which is not parsed 16 | # since nothing is calculated from parsed data 17 | none_attrs = [ 18 | 'coords_frac', 19 | 'cryst_const', 20 | 'pressure', 21 | 'velocity', 22 | 'volume', 23 | 'mass', 24 | 'mass_unique', 25 | 'nspecies', 26 | 'ntypat', 27 | 'order', 28 | 'symbols_unique', 29 | 'typat', 30 | 'time', 31 | 'znucl', 32 | 'znucl_unique', 33 | ] 34 | 35 | for name in tr1.attr_lst: 36 | a1 = getattr(tr1, name) 37 | a2 = getattr(tr2, name) 38 | if name in none_attrs: 39 | assert a1 is not None, ("a1 %s is None" %name) 40 | assert a2 is None, ("a2 %s is not None" %name) 41 | else: 42 | tools.assert_all_types_equal(a1, a2) 43 | -------------------------------------------------------------------------------- /test/test_h5.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | 5 | from pwtools import io 6 | from pwtools.test import tools 7 | 8 | import numpy as np 9 | 10 | rand = np.random.rand 11 | 12 | 13 | def test_h5(tmp_path): 14 | h5py = pytest.importorskip("h5py") 15 | dct1 = { 16 | "/a": "abcgs", 17 | "/b/c/x1": 3, 18 | "/b/c/x2": rand(2, 3), 19 | } 20 | # writing a dct w/o leading slash will always be read back in *with* 21 | # leading slash 22 | dct2 = { 23 | "a": "abciqo4iki", 24 | "b/c/x1": 3, 25 | "b/c/x2": rand(2, 3), 26 | } 27 | for idx, dct in enumerate([dct1, dct2]): 28 | h5fn = os.path.join(tmp_path, "test_%i.h5" % idx) 29 | io.write_h5(h5fn, dct) 30 | read_dct = io.read_h5(h5fn) 31 | for kk in read_dct.keys(): 32 | assert kk.startswith("/") 33 | for kk in dct.keys(): 34 | key = "/" + kk if not kk.startswith("/") else kk 35 | tools.assert_all_types_equal(dct[kk], read_dct[key]) 36 | 37 | # write mode='a' is default, test appending 38 | h5fn = os.path.join(tmp_path, "test_append.h5") 39 | io.write_h5(h5fn, {"/a": 1.0}) 40 | read_dct = io.read_h5(h5fn) 41 | assert list(read_dct.keys()) == ["/a"] 42 | assert read_dct["/a"] == 1.0 43 | # append '/b', using {'/a': 1.0, '/b': 2.0} would be an error since /a 44 | # already exists, use mode='w' then, but this overwrites all! 
45 | io.write_h5(h5fn, {"/b": 2.0}) 46 | read_dct2 = io.read_h5(h5fn) 47 | # sort(...): sort possible [/b, /a] -> [/a, /b] 48 | assert np.sort(np.array(list(read_dct2.keys()))).tolist() == ["/a", "/b"] 49 | assert read_dct2["/a"] == 1.0 50 | assert read_dct2["/b"] == 2.0 51 | # overwrite 52 | io.write_h5(h5fn, {"/b": 22.0, "/c": 33.0}, mode="w") 53 | read_dct3 = io.read_h5(h5fn) 54 | assert np.sort(np.array(list(read_dct3.keys()))).tolist() == ["/b", "/c"] 55 | -------------------------------------------------------------------------------- /test/test_ibrav.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from math import pi, cos 3 | from pwtools.pwscf import ibrav2cell 4 | from pwtools import crys 5 | 6 | def test_ibrav(): 7 | # bogus 8 | aa = 3.0 9 | bb = 4.0 10 | cc = 5.0 11 | alpha = 66*pi/180 12 | beta = 77*pi/180 13 | gamma = 88*pi/180 14 | 15 | ibrav = 1 16 | celldm = [aa] + [None]*5 17 | ibrav2cell(ibrav, celldm) 18 | 19 | ibrav = 2 20 | celldm = [aa] + [None]*5 21 | ibrav2cell(ibrav, celldm) 22 | 23 | ibrav = 3 24 | celldm = [aa] + [None]*5 25 | ibrav2cell(ibrav, celldm) 26 | 27 | ibrav = 4 28 | celldm = [aa, None, cc/aa, None, None, None] 29 | ibrav2cell(ibrav, celldm) 30 | 31 | ibrav = 5 32 | celldm = [aa, None, None, cos(alpha), None, None] 33 | ibrav2cell(ibrav, celldm) 34 | 35 | ibrav = 6 36 | celldm = [aa, None, cc/aa, None, None, None] 37 | ibrav2cell(ibrav, celldm) 38 | 39 | ibrav = 7 40 | celldm = [aa, None, cc/aa, None, None, None] 41 | ibrav2cell(ibrav, celldm) 42 | 43 | ibrav = 8 44 | celldm = [aa, bb/aa, cc/aa, None, None, None] 45 | ibrav2cell(ibrav, celldm) 46 | 47 | ibrav = 9 48 | celldm = [aa, bb/aa, cc/aa, None, None, None] 49 | ibrav2cell(ibrav, celldm) 50 | 51 | ibrav = 10 52 | celldm = [aa, bb/aa, cc/aa, None, None, None] 53 | ibrav2cell(ibrav, celldm) 54 | 55 | ibrav = 11 56 | celldm = [aa, bb/aa, cc/aa, None, None, None] 57 | ibrav2cell(ibrav, celldm) 58 | 59 | # celldm(4)=cos(ab) in doc!? 60 | ibrav = 12 61 | ## celldm = [aa, bb/aa, cc/aa, cos(alpha), None, None] 62 | celldm = [aa, bb/aa, cc/aa, None, None, cos(gamma)] 63 | ibrav2cell(ibrav, celldm) 64 | 65 | # celldm(4)=cos(ab) in doc!? 66 | ibrav = 13 67 | ## celldm = [aa, bb/aa, cc/aa, cos(alpha), None, None] 68 | celldm = [aa, bb/aa, cc/aa, None, None, cos(gamma)] 69 | ibrav2cell(ibrav, celldm) 70 | 71 | # WOHOO! 
72 | ibrav = 14 73 | celldm = [aa, bb/aa, cc/aa, cos(alpha), cos(beta), cos(gamma)] 74 | cell=ibrav2cell(ibrav, celldm) 75 | np.testing.assert_array_almost_equal(cell, 76 | crys.cc2cell(crys.celldm2cc(celldm))) 77 | -------------------------------------------------------------------------------- /test/test_import.py: -------------------------------------------------------------------------------- 1 | def test_import_io(): 2 | # pwtools.io 3 | from pwtools import io 4 | assert hasattr(io, 'write_axsf') 5 | 6 | # std lib io 7 | import io 8 | assert 'pwtools' not in io.__file__ 9 | assert hasattr(io, 'TextIOWrapper') 10 | 11 | def test_absolute_signal(): 12 | # std lib signal 13 | from pwtools.common import signal 14 | assert not hasattr(signal, 'fftsample') 15 | from pwtools import common 16 | print(common.backtick('ls')) 17 | -------------------------------------------------------------------------------- /test/test_is_seq.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from pwtools.common import is_seq, file_write 4 | from pwtools.test.testenv import testdir 5 | 6 | def test_is_seq(): 7 | fn = os.path.join(testdir, 'is_seq_test_file') 8 | file_write(fn, 'lala') 9 | fd = open(fn , 'r') 10 | for xx in ([1,2,3], (1,2,3), np.array([1,2,3])): 11 | print(type(xx)) 12 | assert is_seq(xx) is True 13 | for xx in ('aaa', fd): 14 | print(type(xx)) 15 | assert is_seq(xx) is False 16 | fd.close() 17 | -------------------------------------------------------------------------------- /test/test_kpath.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from pwtools import kpath, mpl 4 | from pwtools.test import tools 5 | 6 | def test_kpath(): 7 | # only API 8 | vecs = np.random.rand(10,3) 9 | kpath.kpath(vecs, N=15) 10 | 11 | def test_plot_dis(): 12 | spp = kpath.SpecialPointsPath(ks=np.array([[0,0,0], [.5,0,0], [.7,0,0]]), 13 | ks_frac=np.array([[0,0,0], [.55,0,0], [.77,0,0]]), 14 | symbols=['A','B', 'C']) 15 | path_norm = np.linspace(0,1,100) 16 | nfreq = 5 17 | freqs = np.random.rand(100, nfreq) 18 | try: 19 | print(os.environ['DISPLAY']) 20 | fig1,ax1,axdos1 = kpath.plot_dis(path_norm, freqs, spp, 21 | show_coords='cart') 22 | assert axdos1 is None 23 | fig2,ax2 = mpl.fig_ax() 24 | kpath.plot_dis(path_norm, freqs, spp, ax=ax2, show_coords='frac') 25 | lines1 = ax1.get_lines() 26 | lines2 = ax2.get_lines() 27 | for idx in range(nfreq): 28 | x1 = lines1[idx].get_xdata() 29 | x2 = lines2[idx].get_xdata() 30 | y1 = lines1[idx].get_ydata() 31 | y2 = lines2[idx].get_ydata() 32 | assert (x1 == x2).all() 33 | assert (y1 == y2).all() 34 | faxis = np.linspace(freqs.min(), freqs.max(), 30) 35 | dos = np.array([faxis, np.random.rand(len(faxis))]).T 36 | fig3,ax3,ax3dos = kpath.plot_dis(path_norm, freqs, spp, dos=dos, 37 | show_coords=None) 38 | # plot 90 rotated -> x and y swapped 39 | assert (ax3dos.get_lines()[0].get_xdata() == dos[:,1]).all() 40 | assert (ax3dos.get_lines()[0].get_ydata() == dos[:,0]).all() 41 | except KeyError: 42 | tools.skip("no DISPLAY environment variable, skipping test") 43 | 44 | -------------------------------------------------------------------------------- /test/test_lazyprop.py: -------------------------------------------------------------------------------- 1 | """Test lazy evaluation of properties.""" 2 | 3 | from pwtools.decorators import lazyprop 4 | 5 | class Foo: 6 | def __init__(self): 7 | self.lazy_called = False 8 | 9 | 
@lazyprop
10 |     def prop(self):
11 |         self.lazy_called = True
12 |         print("Hi there, I'm the lazy prop.")
13 |         return 123
14 | 
15 | def test_lazy():
16 |     foo = Foo()
17 |     assert not foo.lazy_called
18 | 
19 |     # calling hasattr(foo, 'prop') would already define foo.prop, so we need to
20 |     # inspect __dict__ directly
21 |     assert 'prop' not in foo.__dict__
22 | 
23 |     # The first "foo.prop" defines foo.prop by calling the getter foo.prop =
24 |     # foo.prop() [actually something like setattr(foo, 'prop', foo.prop())].
25 |     # The method prop() gets overwritten by the return value 123, i.e. from now
26 |     # on foo.prop == 123.
27 |     assert foo.prop == 123
28 |     assert foo.lazy_called
29 | 
--------------------------------------------------------------------------------
/test/test_match_mask.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pwtools import num
3 | 
4 | def test_match_mask():
5 |     msk = np.array([ True, False,  True, False, False], dtype=bool)
6 |     idx = np.array([0, 2])
7 |     arr = np.array([1,2,3,4,5])
8 |     values = np.array([1,3])
9 |     assert (num.match_mask(arr, values) == msk).all()
10 |     ret = num.match_mask(arr, values, fullout=True)
11 |     assert (ret[0] == msk).all()
12 |     assert (ret[1] == idx).all()
13 |     assert (arr[msk] == np.array([1, 3])).all()
14 |     assert (ret[0] == np.in1d(arr, values)).all()
15 | 
16 |     # handle cases where len(values) > len(arr) and values not contained in arr
17 |     values = np.array([1,3,3,3,7,9,-3,-4,-5])
18 |     ret = num.match_mask(arr, values, fullout=True)
19 |     assert (ret[0] == msk).all()
20 |     assert (ret[1] == idx).all()
21 |     assert (ret[0] == np.in1d(arr, values)).all()
22 | 
23 |     # float values: use eps
24 |     ret = num.match_mask(arr+0.1, values, fullout=True, eps=0.2)
25 |     assert (ret[0] == msk).all()
26 |     assert (ret[1] == idx).all()
27 | 
28 |     msk = num.match_mask(np.array([1,2]), np.array([3,4]))
29 |     assert (msk == np.array([False]*2)).all()
30 | 
--------------------------------------------------------------------------------
/test/test_mpl.py:
--------------------------------------------------------------------------------
1 | # If we work over ssh, we really want to run matplotlib in non-X mode by
2 | # telling it to use a non-X backend by ``matplotlib.use('Agg')``. This works if
3 | # we call this test alone::
4 | #
5 | #   $ ./runtests.sh test_mpl.py
6 | #
7 | # but if we run the whole test suite, test runners (tested with nose back in
8 | # the day) apparently import matplotlib earlier and we get the annoying
9 | # warning::
10 | #
11 | #   $ ./runtests.sh
12 | #
13 | #   /usr/lib/pymodules/python2.7/matplotlib/__init__.py:923: UserWarning: This
14 | #   call to matplotlib.use() has no effect because the the backend has already
15 | #   been chosen; matplotlib.use() must be called *before* pylab,
16 | #   matplotlib.pyplot, or matplotlib.backends is imported for the first time.
17 | #
18 | # This happens over ssh and on localhost, which is a big PITA!
19 | #
20 | # The only way to turn that off is to NOT use ``use('Agg')``. Over ``ssh -X``,
21 | # this test will then be very slow b/c the whole TkAgg (default backend)
22 | # machinery is running for no reason.
23 | #
24 | # If you wish, disable the test:
25 | #
26 | #   $ ./runtests.sh -e 'test_mpl'
27 | 
28 | from pwtools.test import tools
29 | 
30 | def test_mpl():
31 |     try:
32 |         from pwtools import mpl
33 |         try:
34 |             import os
35 |             print(os.environ['DISPLAY'])
36 |             fig,ax = mpl.fig_ax(dpi=15,num=20)
37 |             assert fig.dpi == 15
38 |             assert fig.number == 20
39 | 
40 |             pl = mpl.Plot(dpi=15,num=20)
41 |             assert pl.fig.dpi == 15
42 |             assert pl.fig.number == 20
43 | 
44 |             dct = mpl.prepare_plots(['test'], dpi=15,num=20)
45 |             assert dct['test'].fig.dpi == 15
46 |             assert dct['test'].fig.number == 20
47 | 
48 |             fig, ax = mpl.fig_ax3d(dpi=15)
49 |             assert fig.dpi == 15
50 |         except KeyError:
51 |             tools.skip("no DISPLAY environment variable, skipping test")
52 |     except ImportError:
53 |         tools.skipping("couldn't import matplotlib, skipping test")
54 | 
55 | 
--------------------------------------------------------------------------------
/test/test_nearest_neighbor.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pwtools.crys import nearest_neighbors, Structure
3 | 
4 | def aequal(a,b):
5 |     ret = (a == b)
6 |     if type(ret) == type(True):
7 |         return ret
8 |     else:
9 |         return ret.all()
10 | 
11 | def test_nn():
12 |     # make cell big enough to avoid pbc wrap, we only test cases where pbc=True
13 |     # == pbc=False, we trust that pbc distances work, see crys.rpdf() etc
14 |     cell = np.identity(3) * 10
15 |     xcoords = np.array(\
16 |         [ 1,   2.02, 3.1, 4,    4.9, 6.01, 7.03])
17 |     symbols = \
18 |         ['H', 'H',  'O', 'Ca', 'O', 'Cl', 'Cl']
19 |     coords = np.zeros((len(xcoords),3), dtype=float)
20 |     coords[:,0] = xcoords
21 |     struct = Structure(coords=coords, cell=cell, symbols=symbols)
22 |     asym = np.array(struct.symbols)
23 | 
24 |     # [2, 4, 1, 5, 0, 6]
25 |     assert aequal(nearest_neighbors(struct, idx=3, num=2),
26 |                   np.array([2,4]))
27 | 
28 |     # [1, 5, 0, 6]
29 |     assert aequal(nearest_neighbors(struct, idx=3, num=2, skip='O'),
30 |                   np.array([1,5]))
31 | 
32 |     # [1, 0]
33 |     assert aequal(nearest_neighbors(struct, idx=3, num=2, skip=['O','Cl']),
34 |                   np.array([1,0]))
35 | 
36 |     # [2, 4]
37 |     assert aequal(nearest_neighbors(struct, idx=3, cutoff=1.2),
38 |                   np.array([2,4]))
39 | 
40 |     # []
41 |     assert aequal(nearest_neighbors(struct, idx=3, cutoff=1.2, skip='O'),
42 |                   np.array([]))
43 | 
44 |     # [2,4,1,5]
45 |     assert aequal(nearest_neighbors(struct, idx=3, cutoff=2.1, skip=None),
46 |                   np.array([2,4,1,5]))
47 | 
48 |     # [1]
49 |     assert aequal(nearest_neighbors(struct, idx=3, cutoff=2.1, skip=['O','Cl']),
50 |                   np.array([1]))
51 | 
52 |     # [1,0], with dist
53 |     d=nearest_neighbors(struct, idx=3, num=2, skip=['O','Cl'], fullout=True)[1]
54 |     assert np.allclose(d, np.array([1.98,3.0]))
55 | 
56 | 
--------------------------------------------------------------------------------
/test/test_norm_int.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.integrate import simpson as simps
3 | from pwtools.num import norm_int
4 | 
5 | def test_norm_int():
6 |     # simps(y, x) == 2.0
7 |     x = np.linspace(0, np.pi, 100)
8 |     y = np.sin(x)
9 | 
10 |     for scale in [True, False]:
11 |         yy = norm_int(y, x, area=10.0, scale=scale)
12 |         assert np.allclose(simps(yy,x=x), 10.0)
--------------------------------------------------------------------------------
/test/test_num.py:
--------------------------------------------------------------------------------
1 | from itertools import product
2 | 
3 | import numpy as np
4 | from pwtools import num
5 | 
6 | rand
= np.random.rand 7 | 8 | 9 | def test_round_mult(): 10 | assert num.round_up_next_multiple(144,8) == 144 11 | assert num.round_up_next_multiple(145,8) == 152 12 | 13 | 14 | def test_euler_matrix(): 15 | # http://mathworld.wolfram.com/EulerAngles.html 16 | 17 | # 0 degree rotation 18 | assert np.allclose(num.euler_matrix(0,0,0), np.identity(3)) 19 | 20 | # degree and radians API 21 | degs = rand(3) 22 | degs[0] *= 360 23 | degs[1] *= 180 24 | degs[2] *= 360 25 | rads = np.radians(degs) 26 | assert np.allclose(num.euler_matrix(*rads), 27 | num.euler_matrix(*degs, deg=True)) 28 | # rotation about z 29 | vec = np.array([0,0,1.0]) 30 | rr = rand(50) 31 | for ri in rr: 32 | rot = num.euler_matrix(ri*2*np.pi,0,0) 33 | assert (np.dot(rot,vec) == vec).all() 34 | 35 | # rotation about x' 36 | vec = np.array([1.0,0,0]) 37 | rr = rand(50) 38 | for ri in rr: 39 | rot = num.euler_matrix(0, ri*np.pi,0) 40 | assert (np.dot(rot,vec) == vec).all() 41 | 42 | # rotation about z' 43 | vec = np.array([0,0,1.0]) 44 | rr = rand(50) 45 | for ri in rr: 46 | rot = num.euler_matrix(0, 0, ri*2*np.pi) 47 | assert (np.dot(rot,vec) == vec).all() 48 | 49 | 50 | def test_inner_points_mask(): 51 | # ndim = dimension of the domain, works for > 3 of course, but this is 52 | # just a test. ndim > 1 uses qhull. ndim==1 requires ordered points. 53 | for ndim in [1,2,3]: 54 | a = np.array([x for x in product([0,1,2,3],repeat=ndim)]) 55 | ai = a[num.inner_points_mask(a)] 56 | assert (ai == np.array([x for x in product([1,2],repeat=ndim)])).all() 57 | 58 | 59 | def test_meshgrid(): 60 | x = rand(10) 61 | y = rand(5) 62 | Xp,Yp = num.meshgridt(x, y) 63 | Xn,Yn = np.meshgrid(x, y) 64 | assert (Xp == Xn.T).all() 65 | assert (Yp == Yn.T).all() 66 | -------------------------------------------------------------------------------- /test/test_parser_units.py: -------------------------------------------------------------------------------- 1 | from pwtools import parse 2 | from pwtools.crys import UnitsHandler 3 | 4 | parsers = [parse.CifFile, 5 | parse.PDBFile, 6 | parse.PwSCFOutputFile, 7 | parse.PwMDOutputFile, 8 | parse.PwVCMDOutputFile, 9 | parse.CpmdSCFOutputFile, 10 | parse.CpmdMDOutputFile, 11 | parse.Cp2kSCFOutputFile, 12 | parse.Cp2kMDOutputFile, 13 | parse.LammpsTextMDOutputFile, 14 | parse.LammpsDcdMDOutputFile, 15 | ] 16 | 17 | def test_parser_units(): 18 | units = list(UnitsHandler().units_map.keys()) 19 | for pa in parsers: 20 | pp = pa() 21 | print("testing:", str(pp)) 22 | for key in units: 23 | if key in pp.default_units: 24 | # Check if default units go correctly into self.units. 25 | assert pp.default_units[key] == pp.units[key], ("default unit " 26 | "not passed on correct: key={0}, default={1}, " 27 | "current={2}".format(key, pp.default_units[key], 28 | pp.units[key])) 29 | # Check if units passed by the user are correctly passed on. 
30 |                 dval = pp.default_units[key]
31 |                 val = pp.units[key]
32 |                 print("  key, default, current:", key, dval, val)
33 |                 pp2 = pa(units={key: val*20})
34 |                 dval2 = pp2.default_units[key]
35 |                 val2 = pp2.units[key]
36 |                 print("  key, default, current:", key, dval2, val2)
37 |                 assert dval2 == dval
38 |                 assert val2 == 20*val
39 |             else:
40 |                 if key in pp.units:
41 |                     val = pp.units[key]
42 |                     print("  key, current:", key, val)
43 |                     assert val == 1.0
44 | 
45 | 
--------------------------------------------------------------------------------
/test/test_pdb.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pwtools.parse import PDBFile
3 | from pwtools import common
4 | 
5 | def test_pdb():
6 |     struct = PDBFile('files/pdb_struct.pdb',
7 |                      units={'length': 1.0}).get_struct()
8 | 
9 |     assert struct.cell is not None
10 |     assert struct.cryst_const is not None
11 |     assert struct.symbols is not None
12 |     assert struct.coords is not None
13 |     assert struct.coords_frac is not None
14 | 
15 |     assert struct.symbols == ['C' ,'O' ,'O' ,'Na','H' ,'H' ,'O' ,'O']
16 |     coords = np.array(\
17 |         [[5.759, 5.581, 5.339],
18 |          [5.759, 6.951, 5.339],
19 |          [6.980, 4.876, 5.339],
20 |          [7.406, 6.575, 5.339],
21 |          [5.701, 2.442, 7.733],
22 |          [5.908, 0.887, 7.280],
23 |          [6.008, 1.840, 6.996],
24 |          [2.880, 7.979, 2.600]])
25 |     cryst_const = np.array([10.678, 10.678,10.678,90.00,90.00, 90.00])
26 |     assert np.allclose(coords, struct.coords)
27 |     assert np.allclose(cryst_const, struct.cryst_const)
--------------------------------------------------------------------------------
/test/test_pdos_coord_trans.py:
--------------------------------------------------------------------------------
1 | # Test which shows that pydos.*_dos() needs cartesian coords.
2 | #
3 | # This test shows that scaling the coords does not matter b/c we normalize the
4 | # integral area in pydos.*_pdos(). But using a different coord sys does not work.
5 | # One must convert coords to cartesian before calculating the PDOS.
6 | #
7 | # "cart" and "cart2" must be exactly the same. "cell1"
8 | # must match in principle, but not overlay the other two.
9 | 
10 | 
11 | import numpy as np
12 | from pwtools import pydos as pd
13 | from pwtools.crys import coord_trans, velocity_traj
14 | from pwtools.test import tools
15 | rand = np.random.random
16 | 
17 | def pdos(coords_arr_3d, axis=0):
18 |     f, d = pd.direct_pdos(velocity_traj(coords_arr_3d, axis=axis))
19 |     return d
20 | 
21 | def test_pdos_coord_trans():
22 |     coords = {}
23 | 
24 |     # cartesian: first axis is the time axis
25 |     coords['cart'] = rand((100, 10, 3))
26 | 
27 |     # cartesian scaled, e.g. Angstrom instead of Bohr
28 |     coords['cart2'] = coords['cart']*5
29 | 
30 |     # some other coord sys
31 |     cell1 = rand((3,3))
32 |     # coord_trans: axis=-1 specifies the "x,y,z"-axis of dimension 3
33 |     coords['cell1'] = coord_trans(coords['cart'],
34 |                                   old=np.identity(3),
35 |                                   new=cell1,
36 |                                   axis=-1)
37 | 
38 |     dos = {}
39 |     for key, val in coords.items():
40 |         dos[key] = pdos(val)
41 | 
42 |     np.testing.assert_array_almost_equal(dos['cart'], dos['cart2'])
43 |     assert not tools.array_almost_equal(dos['cart'], dos['cell1'])
--------------------------------------------------------------------------------
/test/test_pw_md_out.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tempfile, os
3 | from pwtools.parse import PwMDOutputFile
4 | from pwtools import common
5 | from pwtools.constants import Bohr, Ang
6 | from pwtools.test.tools import assert_attrs_not_none, adae
7 | from pwtools.test import tools
8 | from pwtools.test.testenv import testdir
9 | 
10 | def test_pw_md_out():
11 |     filename = tools.unpack_compressed('files/pw.md.out.gz', prefix=__file__)
12 |     alat = 5.9098 # Bohr
13 |     pp1 = PwMDOutputFile(filename=filename, use_alat=True)
14 |     pp1.parse()
15 |     none_attrs = [\
16 |         'coords_frac',
17 |         ]
18 |     assert_attrs_not_none(pp1, none_attrs=none_attrs)
19 |     assert np.allclose(pp1.timestep, 150.0) # tryd
20 |     traj1 = pp1.get_traj()
21 |     assert_attrs_not_none(traj1)
22 | 
23 |     pp2 = PwMDOutputFile(filename=filename,
24 |                          use_alat=False,
25 |                          units={'length': alat*Bohr/Ang})
26 |     pp2.parse()
27 |     assert np.allclose(pp2.timestep, 150.0) # tryd
28 |     assert_attrs_not_none(pp2, none_attrs=none_attrs)
29 | 
30 |     # Skip coords and cell b/c they are modified by self.alat and
31 |     # pp1.alat = 1.0, pp2.alat = 5.9098
32 |     attr_lst = common.pop_from_list(pp1.attr_lst, ['coords', 'cell'])
33 |     adae(pp1.__dict__, pp2.__dict__, keys=attr_lst)
34 | 
35 |     traj2 = pp2.get_traj()
36 |     adae(traj1.__dict__, traj2.__dict__, keys=traj1.attr_lst)
37 | 
38 |     pp3 = PwMDOutputFile(filename=filename)
39 |     assert alat == pp3.get_alat() # self.use_alat=True default
--------------------------------------------------------------------------------
/test/test_pw_more_forces.py:
--------------------------------------------------------------------------------
1 | # Parse verbose PWscf force printing, i.e. more than one force block per time
2 | # step, e.g. one block for ions, one for vdw forces, ...
3 | 
4 | import numpy as np
5 | from pwtools.parse import PwMDOutputFile, PwSCFOutputFile
6 | from pwtools import common
7 | from pwtools.constants import Bohr,Ang,Ry,eV
8 | 
9 | 
10 | def test_pw_more_forces():
11 |     fac = Ry / eV / Bohr * Ang
12 | 
13 |     # MD: london=.true.
14 | 15 | filename = 'files/pw.md_london.out' 16 | common.system('gunzip %s.gz' %filename) 17 | natoms = 141 18 | nstep = 10 19 | # traj case 20 | pp = PwMDOutputFile(filename=filename) 21 | tr = pp.get_traj() 22 | assert tr.natoms == natoms 23 | assert tr.forces.shape == (nstep,natoms,3) 24 | assert tr.coords.shape == (nstep,natoms,3) 25 | assert pp._forces_raw.shape == (nstep+1,2*natoms,3) 26 | assert np.allclose(tr.forces, pp._forces_raw[1:,:natoms,:] * fac) 27 | 28 | # scf case, return only 1st step 29 | pp = PwSCFOutputFile(filename=filename) 30 | st = pp.get_struct() 31 | assert st.natoms == natoms 32 | assert st.forces.shape == (natoms,3) 33 | assert st.coords.shape == (natoms,3) 34 | assert pp._forces_raw.shape == (nstep+1,2*natoms,3) 35 | assert np.allclose(st.forces, pp._forces_raw[0,:natoms,:] * fac) 36 | common.system('gzip %s' %filename) 37 | 38 | # SCF: verbosity='high' + london=.true. 39 | 40 | filename = 'files/pw.scf_verbose_london.out' 41 | common.system('gunzip %s.gz' %filename) 42 | natoms = 4 43 | nstep = 1 44 | pp = PwSCFOutputFile(filename=filename) 45 | st = pp.get_struct() 46 | assert st.natoms == natoms 47 | assert st.forces.shape == (natoms,3) 48 | assert st.coords.shape == (natoms,3) 49 | assert pp._forces_raw.shape == (nstep, 8*natoms,3) 50 | assert np.allclose(st.forces, pp._forces_raw[0,:natoms,:] * fac) 51 | common.system('gzip %s' %filename) 52 | 53 | 54 | # MD: verbosity='high' + natoms=1 55 | 56 | filename = 'files/pw.md_one_atom.out' 57 | common.system('gunzip %s.gz' %filename) 58 | natoms = 1 59 | nstep = 4 60 | # traj case 61 | pp = PwMDOutputFile(filename=filename) 62 | tr = pp.get_traj() 63 | assert tr.natoms == natoms 64 | assert tr.forces.shape == (nstep,natoms,3) 65 | assert tr.coords.shape == (nstep,natoms,3) 66 | assert pp._forces_raw.shape == (nstep+1,7*natoms,3) 67 | assert np.allclose(tr.forces, pp._forces_raw[1:,:natoms,:] * fac) 68 | 69 | # scf case, return only 1st step 70 | pp = PwSCFOutputFile(filename=filename) 71 | st = pp.get_struct() 72 | assert st.natoms == natoms 73 | assert st.forces.shape == (natoms,3) 74 | assert st.coords.shape == (natoms,3) 75 | assert pp._forces_raw.shape == (nstep+1,7*natoms,3) 76 | assert np.allclose(st.forces, pp._forces_raw[0,:natoms,:] * fac) 77 | common.system('gzip %s' %filename) 78 | 79 | -------------------------------------------------------------------------------- /test/test_pw_vc_md_cell_alat.py: -------------------------------------------------------------------------------- 1 | # See utils/make-vc-md-cell.py 2 | 3 | import numpy as np 4 | from pwtools import parse 5 | from pwtools.test.testenv import testdir 6 | 7 | def test_pw_vc_md_cell_alat(): 8 | nstep = 10 9 | cell_single = np.arange(1,10).reshape((3,3)) 10 | cell = np.empty((nstep,3,3)) 11 | cell_no_unit = np.empty((nstep,3,3)) 12 | # copy from make-vc-md-cell.py 13 | alat_lst = [2.0, 4.0] 14 | for ialat,alat in enumerate(alat_lst): 15 | for ii in range(5): 16 | this_cell = cell_single+0.02*ii + ialat 17 | cell[ialat*5 + ii,...] = this_cell 18 | cell_no_unit[ialat*5 + ii,...] 
= this_cell/alat 19 | 20 | # Even though PwVCMDOutputFile.get_cell is derived from 21 | # PwMDOutputFile, we test the API here 22 | for parser in [parse.PwMDOutputFile, parse.PwVCMDOutputFile]: 23 | pp = parser('files/pw.vc_md.cell.out') 24 | assert np.allclose(pp.get_cell(), cell) 25 | assert pp.get_cell_unit() == 'alat' 26 | assert np.allclose(pp._get_cell_3d_factors(), np.array([2.0]*5 + [4.0]*5)) 27 | 28 | pp = parser('files/pw.constant_cell.txt') 29 | assert pp._get_cell_3d_factors() is None 30 | 31 | # respect use_alat=False, then self.alat=1.0 32 | pp = parser('files/pw.vc_md.cell.out', use_alat=False) 33 | assert np.allclose(pp.get_cell(), cell_no_unit) 34 | # We found 'CELL_PARAMETERS.*alat' but don't use it. 35 | assert pp.get_cell_unit() == 'alat' 36 | assert pp._get_cell_3d_factors() is None 37 | 38 | -------------------------------------------------------------------------------- /test/test_pw_vc_relax_cell_unit.py: -------------------------------------------------------------------------------- 1 | # Test that we correctly parse cell_unit from 2 | # 3 | # CELL_PARAMETERS (alat = 1.234) 4 | # 5 | # as 'alat'. 6 | 7 | import os 8 | from pwtools.parse import PwMDOutputFile 9 | from pwtools import common, crys, num 10 | from pwtools.test.tools import assert_attrs_not_none 11 | from pwtools.test.testenv import testdir 12 | pj = os.path.join 13 | 14 | def test_pw_vc_relax_out(): 15 | filename = 'files/pw.vc_relax_cell_unit.out' 16 | common.system('gunzip %s.gz' %filename) 17 | pp = PwMDOutputFile(filename=filename) 18 | pp.parse() 19 | common.system('gzip %s' %filename) 20 | none_attrs = ['coords', 21 | 'ekin', 22 | 'temperature', 23 | 'timestep', 24 | ] 25 | assert_attrs_not_none(pp, none_attrs=none_attrs) 26 | traj = pp.get_traj() 27 | none_attrs = [\ 28 | 'ekin', 29 | 'temperature', 30 | 'timestep', 31 | 'velocity', 32 | 'time', 33 | ] 34 | assert_attrs_not_none(traj, none_attrs=none_attrs) 35 | assert pp.cell_unit == 'alat' 36 | assert pp.cell.shape == (6,3,3) 37 | for idx in range(1, pp.cell.shape[0]): 38 | assert num.rms(pp.cell[idx,...] - pp.cell[0,...]) > 0.0 39 | 40 | # Test _get_block_header_unit, which is used in get_cell_unit(). 41 | dct = \ 42 | {'FOO': None, 43 | 'FOO alat': 'alat', 44 | 'FOO (alat)': 'alat', 45 | 'FOO {alat}': 'alat', 46 | 'FOO (alat=1.23)': 'alat', 47 | 'FOO (alat= 1.23)': 'alat', 48 | } 49 | 50 | for txt,val in dct.items(): 51 | fn = pj(testdir, 'test_block_header_unit.txt') 52 | common.file_write(fn, txt) 53 | pp.filename = fn 54 | assert pp._get_block_header_unit('FOO') == val 55 | -------------------------------------------------------------------------------- /test/test_pw_vc_relax_coords_fixed.py: -------------------------------------------------------------------------------- 1 | # Test parsing the correct SCF cell from a [vc-]relax run (QE 5.x only, IIRC), 2 | # which performs a final SCF run after the relax has converged. 
3 | 4 | import os 5 | import numpy as np 6 | from pwtools import common, crys, parse, io 7 | 8 | # from pw.out: "reduced cell" = cell / alat 9 | cell_2d_red_ref = parse.arr2d_from_txt(""" 10 | 1.000000 0.000000 0.000000 11 | 0.000000 2.902399 0.000000 12 | 0.000000 0.000000 2.304846 13 | """) 14 | 15 | def test_scf_cell(): 16 | filename = 'files/pw.vc_relax_coords_fixed.out' 17 | common.system('gunzip %s.gz' %filename) 18 | 19 | pp = parse.PwSCFOutputFile(filename, use_alat=False) 20 | cell_2d_red = pp.get_cell() 21 | assert np.allclose(cell_2d_red, cell_2d_red_ref, atol=1e-15, rtol=0) 22 | 23 | pp = parse.PwSCFOutputFile(filename, use_alat=True) 24 | assert np.allclose(pp.get_cell(), 25 | cell_2d_red*pp.get_alat(), 26 | atol=1e-15, 27 | rtol=0) 28 | 29 | st = io.read_pw_scf(filename) 30 | tr = io.read_pw_md(filename) 31 | 32 | # tr is from a vc-relax w/ fixed fractional coords, check that 33 | assert np.allclose(np.zeros((tr.nstep,tr.natoms,3)), 34 | tr.coords_frac - tr.coords_frac[0,...].copy(), 35 | rtol=0, atol=1e-15) 36 | 37 | # check if scf parser gets the same coords_frac as the trajectory parser 38 | # Note: this and the next test have the same max error of 39 | # 4.33868466709e-08 (b/c of limited accuracy in printed numbers in 40 | # pwscf output) 41 | assert np.allclose(st.coords_frac,tr.coords_frac[0,...], atol=1e-7, rtol=0) 42 | 43 | # same test, plus test of concatenate() works 44 | trcat = crys.concatenate((st,tr)) 45 | assert np.allclose(np.zeros((trcat.nstep,trcat.natoms,3)), 46 | trcat.coords_frac - trcat.coords_frac[0,...].copy(), 47 | rtol=0, atol=1e-7) 48 | 49 | -------------------------------------------------------------------------------- /test/test_pw_vc_relax_out.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools.parse import PwMDOutputFile 3 | from pwtools import common, parse 4 | from pwtools.test.tools import assert_attrs_not_none 5 | from pwtools.test import tools 6 | 7 | def test_pw_vc_relax_out(): 8 | filename = 'files/pw.vc_relax.out' 9 | common.system('gunzip %s.gz' %filename) 10 | pp = PwMDOutputFile(filename=filename) 11 | pp.parse() 12 | common.system('gzip %s' %filename) 13 | none_attrs = ['coords', 14 | 'ekin', 15 | 'temperature', 16 | 'timestep', 17 | ] 18 | assert_attrs_not_none(pp, none_attrs=none_attrs) 19 | traj = pp.get_traj() 20 | none_attrs = [\ 21 | 'ekin', 22 | 'temperature', 23 | 'timestep', 24 | 'velocity', 25 | 'time', 26 | ] 27 | assert_attrs_not_none(traj, none_attrs=none_attrs) 28 | 29 | 30 | # for test_return_3d_if_no_cell_unit 31 | _cell = parse.traj_from_txt(""" 32 | 1.004152675 0.000000000 0.000000000 33 | -0.502076337 0.869621726 0.000000000 34 | 0.000000000 0.000000000 1.609289155 35 | 1.004147458 0.000000000 0.000000000 36 | -0.502073729 0.869617208 0.000000000 37 | 0.000000000 0.000000000 1.609759673 38 | 1.004050225 0.000000000 0.000000000 39 | -0.502025112 0.869533001 0.000000000 40 | 0.000000000 0.000000000 1.610320650 41 | 1.003992235 0.000000000 0.000000000 42 | -0.501996117 0.869482780 0.000000000 43 | 0.000000000 0.000000000 1.610416170 44 | 1.003981055 0.000000000 0.000000000 45 | -0.501990527 0.869473099 0.000000000 46 | 0.000000000 0.000000000 1.610369398 47 | 1.003981055 0.000000000 0.000000000 48 | -0.501990527 0.869473099 0.000000000 49 | 0.000000000 0.000000000 1.610369398 50 | """, shape=(6,3,3)) 51 | 52 | def test_return_3d_if_no_cell_unit(): 53 | filename = tools.unpack_compressed('files/pw.vc_relax_no_cell_unit.out.gz', 54 | 
prefix=__file__) 55 | pp = PwMDOutputFile(filename=filename) 56 | pp.parse() 57 | assert np.allclose(pp.cell, _cell*pp.get_alat()) 58 | -------------------------------------------------------------------------------- /test/test_pwscf.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools import pwscf 3 | 4 | def test_atpos_str(): 5 | assert pwscf.atpos_str(['X'], np.array([[0.1,1,1]]), fmt='%g', delim='_', 6 | eps=0.0) == 'X_0.1_1_1' 7 | assert pwscf.atpos_str(['X'], np.array([[0.1,1,1]]), fmt='%g', delim='_', 8 | eps=0.2) == 'X_0_1_1' 9 | 10 | 11 | -------------------------------------------------------------------------------- /test/test_pytest_stuff.py: -------------------------------------------------------------------------------- 1 | def test_tmpdir_1(pwtools_tmpdir): 2 | print(">>>>>>", pwtools_tmpdir) 3 | 4 | 5 | def test_tmpdir_2(pwtools_tmpdir): 6 | print(">>>>>>", pwtools_tmpdir) 7 | -------------------------------------------------------------------------------- /test/test_rand_struct.py: -------------------------------------------------------------------------------- 1 | from pwtools import random as rand 2 | 3 | def test_rand_struct(): 4 | # close_scale is small -> make sure that struct generation doesn't fail, 5 | # only API test here 6 | rs = rand.RandomStructure(symbols=['Si']*10, 7 | vol_scale=3, 8 | angle_range=[60.0, 120.0], 9 | vol_range_scale=[0.7, 1.3], 10 | length_range_scale=[0.7, 1.3], 11 | close_scale=0.7, 12 | cell_maxtry=100, 13 | atom_maxtry=1000) 14 | st = rs.get_random_struct(fail=True) 15 | st = rs.get_random_struct(fail=False) 16 | assert st.is_struct 17 | 18 | for arr in [st.coords, 19 | st.coords_frac, 20 | st.cell, 21 | st.symbols, 22 | st.cryst_const]: 23 | assert arr is not None 24 | assert st.natoms == len(st.symbols) == 10 25 | 26 | # catch exception 27 | rs = rand.RandomStructure(symbols=['Si']*100, 28 | vol_scale=3, 29 | angle_range=[60.0, 120.0], 30 | vol_range_scale=[0.7, 1.3], 31 | length_range_scale=[0.7, 1.3], 32 | close_scale=100, 33 | cell_maxtry=1, 34 | atom_maxtry=1) 35 | try: 36 | st = rs.get_random_struct(fail=True) 37 | except rand.RandomStructureFail: 38 | pass 39 | -------------------------------------------------------------------------------- /test/test_rms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools import crys, num 3 | 4 | 5 | def test_rms(): 6 | natoms = 5 7 | nstep = 10 8 | arr1 = np.random.rand(nstep, 3, natoms) 9 | arr2 = np.random.rand(natoms, nstep, 3) 10 | arr3 = np.random.rand(natoms, 3, nstep) 11 | 12 | # Test if rms() works. 13 | r3_rms_all1 = num.rms(arr3, nitems='all') 14 | r3_rms_natoms1 = num.rms(arr3, nitems=natoms) 15 | r3_rms_all2 = np.sqrt((arr3**2.0).sum() / float(3*natoms*nstep)) 16 | r3_rms_natoms2 = np.sqrt((arr3**2.0).sum() / float(natoms)) 17 | np.testing.assert_almost_equal(r3_rms_all1, r3_rms_all2) 18 | np.testing.assert_almost_equal(r3_rms_natoms1, r3_rms_natoms2) 19 | 20 | # Test if rms3d() operates correctly along each axis. 
21 | r1_3d = num.rms3d(arr1, axis=0, nitems='all') 22 | r2_3d = num.rms3d(arr2, axis=1, nitems='all') 23 | r3_3d = num.rms3d(arr3, axis=2, nitems='all') 24 | r1_loop = np.empty((nstep,), dtype=float) 25 | r2_loop = np.empty((nstep,), dtype=float) 26 | r3_loop = np.empty((nstep,), dtype=float) 27 | for k in range(nstep): 28 | r1_loop[k] = num.rms(arr1[k,...], nitems='all') 29 | r2_loop[k] = num.rms(arr2[:,k,:], nitems='all') 30 | r3_loop[k] = num.rms(arr3[...,k], nitems='all') 31 | np.testing.assert_array_almost_equal(r1_3d,r1_loop) 32 | np.testing.assert_array_almost_equal(r2_3d,r2_loop) 33 | np.testing.assert_array_almost_equal(r3_3d,r3_loop) 34 | 35 | # Test if rmsd() works. 36 | # 37 | # NOTE: Subtle numpy issue here: 38 | # It is very important NOT to use 39 | # R -= R[...,0][...,None] 40 | # or 41 | # for k in range(R.shape[-1]): 42 | # R[...,k] -= R[...,0][...,None] 43 | # because R itself is changed in the loop! You have to copy the reference 44 | # R[...,0] first and then broadcast it for subtracting. What also works is 45 | # this: 46 | # R = R - R[...,0][...,None] 47 | # HOWEVER, THIS DOES NOT: 48 | # for k in range(R.shape[-1]): 49 | # R[...,k] = R[...,k] - R[...,0][...,None] 50 | traj = crys.Trajectory(coords=np.random.rand(nstep, natoms, 3)) 51 | assert traj.timeaxis == 0 52 | assert traj.nstep == nstep 53 | from_rmsd = crys.rmsd(traj, ref_idx=0) 54 | from_loop = np.empty((nstep,), dtype=float) 55 | from_rms3d = num.rms3d(traj.coords - traj.coords[0,...][None,...], 56 | nitems=natoms, axis=0) 57 | R = traj.coords.copy() 58 | ref = R[0,...].copy() 59 | for k in range(nstep): 60 | R[k,...] -= ref 61 | from_loop[k] = np.sqrt((R[k,...]**2.0).sum() / natoms) 62 | 63 | np.testing.assert_array_almost_equal(from_rmsd, from_loop) 64 | np.testing.assert_array_almost_equal(from_rmsd, from_rms3d) 65 | -------------------------------------------------------------------------------- /test/test_save_object.py: -------------------------------------------------------------------------------- 1 | # Data persistence. Parse some data into a PwMDOutputFile object and save the 2 | # whole object in binary to disk using the dump() method, which actually uses 3 | # pickle. 4 | 5 | import os, tempfile 6 | import numpy as np 7 | from pwtools.parse import PwMDOutputFile 8 | from pwtools import common, crys, io 9 | from pwtools.test.testenv import testdir 10 | from pwtools.test import tools 11 | from pwtools.test.tools import ade 12 | 13 | rand = np.random.rand 14 | 15 | def test_save_object(): 16 | filename = tools.unpack_compressed('files/pw.md.out.gz', prefix=__file__) 17 | dumpfile = os.path.join(testdir, 'pw.md.pk') 18 | 19 | c = PwMDOutputFile(filename=filename) 20 | print(">>> parsing ...") 21 | c.parse() 22 | print(">>> ... done") 23 | 24 | print(">>> saving %s ..." %dumpfile) 25 | c.dump(dumpfile) 26 | print(">>> ... done") 27 | 28 | print(">>> loading ...") 29 | c2 = io.read_pickle(dumpfile) 30 | print(">>> ... 
done") 31 | 32 | print(">>> checking equalness of attrs in loaded object ...") 33 | known_fails = {'fd': 'closed/uninitialized file', 34 | 'cont': 'container object'} 35 | arr_t = type(np.array([1])) 36 | dict_t = type({}) 37 | for attr in c.__dict__.keys(): 38 | c_val = getattr(c, attr) 39 | c2_val = getattr(c2, attr) 40 | dotest = True 41 | for name, string in known_fails.items(): 42 | if name == attr: 43 | print("known fail: %s: %s: %s" %(name, string, attr)) 44 | dotest = False 45 | if dotest: 46 | print("testing:", attr, type(c_val), type(c2_val)) 47 | type_c = type(c_val) 48 | type_c2 = type(c2_val) 49 | assert type_c is type_c2, "attr: %s: types differ: %s, %s" \ 50 | %(attr, str(type_c), str(type_c2)) 51 | if type(c_val) is arr_t: 52 | assert (c_val == c2_val).all(), "fail: %s: %s, %s" \ 53 | %(attr, c_val, c2_val) 54 | elif type(c_val) is dict_t: 55 | ade(c_val, c2_val) 56 | else: 57 | assert c_val == c2_val, "fail: %s: %s, %s" \ 58 | %(attr, c_val, c2_val) 59 | 60 | def test_save_mkdir(): 61 | path = os.path.join(testdir, 'foo', 'bar', 'baz') 62 | assert not os.path.exists(path) 63 | fn = os.path.join(path, 'grr.pk') 64 | st = crys.Structure(coords=rand(10,3), cell=rand(3,3), symbols=['H']*10) 65 | st.dump(fn, mkdir=True) 66 | io.read_pickle(fn) 67 | -------------------------------------------------------------------------------- /test/test_spline.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.interpolate import splev 3 | from pwtools.num import Spline 4 | 5 | def test_spline(): 6 | x = np.linspace(0,10,100) 7 | y = np.sin(x) 8 | spl = Spline(x,y) 9 | assert (y - spl(x) < spl.eps).all() 10 | assert (y - splev(x, spl.tck) < spl.eps).all() 11 | assert not spl.is_mono() 12 | np.testing.assert_array_almost_equal(spl(x,der=1), np.cos(x), decimal=4) 13 | 14 | x = np.linspace(0,10,100) 15 | y = x**2.0 - 5 16 | spl = Spline(x,y) 17 | assert spl.is_mono() 18 | 19 | y = -(x**2.0 - 5) 20 | spl = Spline(x,y) 21 | assert spl.is_mono() 22 | 23 | y0s = [5,0,-40] 24 | xabs = [[0,2], [1,3], [6,8]] 25 | x0s = [1,2,7] 26 | # use bracket [x[0], x[-1]] for brentq() 27 | for y0 in y0s: 28 | np.testing.assert_almost_equal(y0, spl(spl.invsplev(y0))) 29 | # use smaller bracket 30 | for y0,xab in zip(y0s, xabs): 31 | np.testing.assert_almost_equal(y0, spl(spl.invsplev(y0, xab=xab))) 32 | # use start guess for newton() 33 | for y0,x0 in zip(y0s, x0s): 34 | np.testing.assert_almost_equal(y0, spl(spl.invsplev(y0, x0=x0))) 35 | 36 | # root 37 | np.testing.assert_almost_equal(spl.invsplev(0.0), spl.get_root()) 38 | 39 | def test_min_max(): 40 | # min: num.Fit1D.get_min() 41 | x = np.linspace(-10,10,100) 42 | y = (x-5)**2.0 + 1.0 43 | spl = Spline(x,y) 44 | xmin = spl.get_min() 45 | ymin = spl(xmin) 46 | np.testing.assert_almost_equal(xmin, 5.0) 47 | np.testing.assert_almost_equal(ymin, 1.0) 48 | 49 | # max: num.Fit1D.get_max() 50 | y = -(x-5)**2.0 + 1.0 51 | spl = Spline(x,y) 52 | xmax = spl.get_max() 53 | ymax = spl(xmin) 54 | np.testing.assert_almost_equal(xmax, 5.0) 55 | np.testing.assert_almost_equal(ymax, 1.0) 56 | 57 | def test_api(): 58 | x = np.linspace(-10,10,100) 59 | y = (x-5)**2.0 + 1.0 60 | spl = Spline(x,y,k=2,s=0.1,eps=0.11) 61 | for kw in ['k', 's']: 62 | assert kw in list(spl.splrep_kwargs.keys()) 63 | assert spl.splrep_kwargs['k'] == 2 64 | assert spl.splrep_kwargs['s'] == 0.1 65 | 66 | # scalar 67 | assert type(spl(1.0)) != type(np.array(1.0)) 68 | 69 | 
-------------------------------------------------------------------------------- /test/test_sql_column.py: -------------------------------------------------------------------------------- 1 | from pwtools.sql import sql_column 2 | import pytest 3 | 4 | 5 | def test_sql_column(): 6 | x = sql_column(key='foo', 7 | sqltype='integer', 8 | lst=[1,2,3]) 9 | for num, xx in zip([1,2,3], x): 10 | assert xx.sqlval == num 11 | assert xx.fileval == num 12 | 13 | x = sql_column(key='foo', 14 | sqltype='integer', 15 | lst=[1,2,3], 16 | fileval_func=lambda z: "k=%i"%z) 17 | for num, xx in zip([1,2,3], x): 18 | assert xx.sqlval == num 19 | assert xx.fileval == "k=%i" %num 20 | 21 | x = sql_column(key='foo', 22 | sqltype='integer', 23 | lst=[1,2,3], 24 | sqlval_func=lambda z: z**2, 25 | fileval_func=lambda z: "k=%i"%z) 26 | for num, xx in zip([1,2,3], x): 27 | assert xx.sqlval == num**2 28 | assert xx.fileval == "k=%i" %num 29 | 30 | for xx in sql_column('foo', [1,2]): 31 | assert xx.sqltype == 'INTEGER' 32 | for xx in sql_column('foo', [1.0,2.0]): 33 | assert xx.sqltype == 'REAL' 34 | 35 | 36 | def test_sql_column_fail_for_mixed_types(): 37 | with pytest.raises(AssertionError): 38 | s = sql_column('foo', [1,2.0]) 39 | -------------------------------------------------------------------------------- /test/test_sum.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools.num import sum 3 | from pwtools.test.tools import aaae, aae 4 | rand = np.random.rand 5 | 6 | def test_sum(): 7 | arr = rand(2,3,4) 8 | 9 | # this all goes thru np.sum(), must produce exact same result 10 | assert sum(arr) == sum(arr, axis=None) == arr.sum() 11 | 12 | # must use aaae() b/c summing order is apparently different in 13 | # np.sum(axis=None) -> small numerical noise 14 | aaae(sum(arr, axis=(0,1,2)), arr.sum()) 15 | 16 | aaae(sum(arr, axis=0), arr.sum(axis=0)) 17 | aaae(sum(arr, axis=1), arr.sum(axis=1)) 18 | aaae(sum(arr, axis=2), arr.sum(axis=2)) 19 | 20 | aaae(sum(arr, axis=-1), arr.sum(axis=-1)) 21 | aaae(sum(arr, axis=-2), arr.sum(axis=-2)) 22 | 23 | aaae(sum(arr, axis=(0,)), arr.sum(axis=0)) 24 | aaae(sum(arr, axis=(1,)), arr.sum(axis=1)) 25 | aaae(sum(arr, axis=(2,)), arr.sum(axis=2)) 26 | 27 | assert sum(arr, axis=(0,1)).shape == (4,) 28 | assert sum(arr, axis=(0,2)).shape == (3,) 29 | assert sum(arr, axis=(1,2)).shape == (2,) 30 | 31 | aaae(sum(arr, axis=(0,1)), arr.sum(axis=0).sum(axis=0)) 32 | aaae(sum(arr, axis=(0,2)), arr.sum(axis=0).sum(axis=1)) 33 | aaae(sum(arr, axis=(1,2)), arr.sum(axis=1).sum(axis=1)) 34 | 35 | assert sum(arr, axis=(0,), keepdims=True).shape == (2,) 36 | assert sum(arr, axis=(1,), keepdims=True).shape == (3,) 37 | assert sum(arr, axis=(2,), keepdims=True).shape == (4,) 38 | 39 | assert sum(arr, axis=(0,1), keepdims=True).shape == (2,3) 40 | assert sum(arr, axis=(0,2), keepdims=True).shape == (2,4) 41 | assert sum(arr, axis=(1,2), keepdims=True).shape == (3,4) 42 | 43 | aaae(sum(arr, axis=(0,1)), sum(arr, axis=(1,0))) 44 | aaae(sum(arr, axis=(0,2)), sum(arr, axis=(2,0))) 45 | aaae(sum(arr, axis=(1,2)), sum(arr, axis=(2,1))) 46 | -------------------------------------------------------------------------------- /test/test_template_replace.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pwtools.common import template_replace 3 | pj = os.path.join 4 | 5 | 6 | def test_dict(): 7 | # default 'dct' mode 8 | templ_txt = "%(foo)i %(bar)s" 9 | rules = {'foo': 1, 'bar': 'lala'} 10 | 
ref_txt = "1 lala" 11 | tgt_txt = template_replace(templ_txt, rules, mode='dct') 12 | assert tgt_txt == ref_txt, ("ref_txt={} " 13 | "tgt_txt={}".format(ref_txt, tgt_txt)) 14 | 15 | def test_mode_txt_conv_true(): 16 | # 'txt' mode, not default, but actually more often used b/c placeholders 17 | # are much simpler (no type formatting string, just convert values with 18 | # str() or pass in string-only values in `rules`). 19 | templ_txt = "XXXFOO XXXBAR" 20 | rules = {'XXXFOO': 1, 'XXXBAR': 'lala'} 21 | tgt_txt = template_replace(templ_txt, rules, mode='txt', conv=True) 22 | ref_txt = "1 lala" 23 | assert tgt_txt == ref_txt, ("ref_txt={} " 24 | "tgt_txt={}".format(ref_txt, tgt_txt)) 25 | 26 | def test_mode_txt_conv_true_single(): 27 | # string-only is required, note that conv=False 28 | templ_txt = "XXXFOO" 29 | rules = {'XXXFOO': str(1)} 30 | tgt_txt = template_replace(templ_txt, rules, mode='txt', conv=False) 31 | ref_txt = "1" 32 | assert tgt_txt == ref_txt, ("ref_txt={} " 33 | "tgt_txt={}".format(ref_txt, tgt_txt)) 34 | 35 | def test_mode_txt_conv_true_warn_more_rules(): 36 | # warn but pass not found placeholders in `rules` 37 | templ_txt = "XXXFOO XXXBAR" 38 | rules = {'XXXFOO': 1, 'XXXBAR': 'lala', 'XXXBAZ': 3} 39 | tgt_txt = template_replace(templ_txt, rules, mode='txt', conv=True) 40 | ref_txt = "1 lala" 41 | assert tgt_txt == ref_txt, ("ref_txt={} " 42 | "tgt_txt={}".format(ref_txt, tgt_txt)) 43 | 44 | def test_mode_txt_conv_true_warn_more_placeholders(): 45 | # warn but do duplicate placeholders 46 | templ_txt = "XXXFOO XXXBAR XXXBAR" 47 | rules = {'XXXFOO': 1, 'XXXBAR': 'lala'} 48 | tgt_txt = template_replace(templ_txt, rules, mode='txt', conv=True) 49 | ref_txt = "1 lala lala" 50 | assert tgt_txt == ref_txt, ("ref_txt={} " 51 | "tgt_txt={}".format(ref_txt, tgt_txt)) 52 | 53 | -------------------------------------------------------------------------------- /test/test_timer.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | from pwtools.timer import TagTimer 3 | 4 | tt = TagTimer() 5 | 6 | def test_timer(): 7 | tt.t('outer loop') 8 | for ii in range(10): 9 | sleep(0.01) 10 | tt.t('inner loop') 11 | for jj in range(2): 12 | sleep(0.01) 13 | tt.pt('inner loop') 14 | tt.pt('outer loop') 15 | 16 | -------------------------------------------------------------------------------- /test/test_vacf_methods.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | def test_vacf_methods(): 4 | import numpy as np 5 | import math 6 | from pwtools import pydos 7 | 8 | def assrt(a, b): 9 | np.testing.assert_array_almost_equal(a,b) 10 | 11 | # random velocity array: 10 atoms, 50 steps, 50 x (10,3) arrays 12 | a = np.random.rand(50,10,3) + 1.0 13 | # random mass vector 14 | m = np.random.rand(10) * 10.0 + 1.0 15 | 16 | p1 = pydos.pyvacf(a, method=1) 17 | p2 = pydos.pyvacf(a, method=2) 18 | p3 = pydos.pyvacf(a, method=3) 19 | p1m = pydos.pyvacf(a, method=1, m=m) 20 | p2m = pydos.pyvacf(a, method=2, m=m) 21 | p3m = pydos.pyvacf(a, method=3, m=m) 22 | 23 | assrt(p1, p2) 24 | assrt(p1, p3) 25 | assrt(p2, p3) 26 | assrt(p1m, p2m) 27 | assrt(p1m, p3m) 28 | assrt(p2m, p3m) 29 | 30 | f1 = pydos.fvacf(a, method=1) 31 | f2 = pydos.fvacf(a, method=2) 32 | f1m = pydos.fvacf(a, method=1, m=m) 33 | f2m = pydos.fvacf(a, method=2, m=m) 34 | 35 | assrt(f1, f2) 36 | assrt(f1m, f2m) 37 | 38 | assrt(p1, f1) 39 | assrt(p2, f1) 40 | assrt(p3, f1) 41 | assrt(p1, f2) 42 | assrt(p2, f2) 43 | 
assrt(p3, f2) 44 | 45 | assrt(p1m, f1m) 46 | assrt(p2m, f1m) 47 | assrt(p3m, f1m) 48 | assrt(p1m, f2m) 49 | assrt(p2m, f2m) 50 | assrt(p3m, f2m) 51 | 52 | -------------------------------------------------------------------------------- /test/test_velocity.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from pwtools import common, io, crys 4 | 5 | def test_velocity_traj(): 6 | # Test Trajectory.get_velocity() against velocities output from CP2K. The 7 | # agreement is very good. Works only for fixed-cell MDs, however! 8 | dr = 'files/cp2k/md/nvt_print_low' 9 | base = os.path.dirname(dr) 10 | fn = '%s/cp2k.out' %dr 11 | print(common.backtick('tar -C {0} -xzf {1}.tgz'.format(base,dr))) 12 | tr = io.read_cp2k_md(fn) 13 | # read from data file 14 | v1 = tr.velocity.copy() 15 | # If tr.velocity != None, then get_velocity() doesn't calculate it. Then, 16 | # it simply returns tr.velocity, which is what we of course usually want. 17 | tr.velocity = None 18 | # calculate from coords + time step, b/c of central diffs, only steps 1:-1 19 | # are the same 20 | v2 = tr.get_velocity() 21 | print(">>>> np.abs(v1).max()", np.abs(v1).max()) 22 | print(">>>> np.abs(v1).min()", np.abs(v1).min()) 23 | print(">>>> np.abs(v2).max()", np.abs(v2).max()) 24 | print(">>>> np.abs(v2).min()", np.abs(v2).min()) 25 | print(">>>> np.abs(v1-v2).max()", np.abs(v1-v2).max()) 26 | print(">>>> np.abs(v1-v2).min()", np.abs(v1-v2).min()) 27 | assert np.allclose(v1[1:-1,...], v2[1:-1,...], atol=1e-4) 28 | 29 | ##from pwtools import mpl 30 | ##fig,ax = mpl.fig_ax() 31 | ##ax.plot(v1[1:-1,:,0], 'b') 32 | ##ax.plot(v2[1:-1,:,0], 'r') 33 | ##mpl.plt.show() 34 | 35 | shape = (100,10,3) 36 | arr = np.random.rand(*shape) 37 | assert crys.velocity_traj(arr, axis=0).shape == shape 38 | assert crys.velocity_traj(arr, axis=0, endpoints=False).shape == (98,10,3) 39 | -------------------------------------------------------------------------------- /test/test_vlinspace.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pwtools.num import vlinspace 3 | 4 | def test_vlinspace(): 5 | aa = np.array([0,0,0]) 6 | bb = np.array([1,1,1]) 7 | 8 | np.testing.assert_array_equal(vlinspace(aa,bb,1), aa[None,:]) 9 | 10 | np.testing.assert_array_equal(vlinspace(aa,bb,2), 11 | np.array([aa,bb])) 12 | 13 | tgt = np.array([aa, [0.5,0.5,0.5], bb]) 14 | np.testing.assert_array_equal(vlinspace(aa,bb,3), tgt) 15 | 16 | tgt = np.array([aa, [1/3.]*3, [2/3.]*3, bb]) 17 | np.testing.assert_array_almost_equal(vlinspace(aa,bb,4), tgt) 18 | 19 | tgt = np.array([[ 0. , 0. , 0. ], 20 | [ 0.25, 0.25, 0.25], 21 | [ 0.5 , 0.5 , 0.5 ], 22 | [ 0.75, 0.75, 0.75]]) 23 | np.testing.assert_array_equal(vlinspace(aa,bb,4,endpoint=False), 24 | tgt) 25 | 26 | aa = np.array([-1,-1,-1]) 27 | bb = np.array([1,1,1]) 28 | tgt = np.array([[ -1. , -1. , -1. ], 29 | [ 0, 0, 0], 30 | [ 1, 1, 1]]) 31 | np.testing.assert_array_equal(vlinspace(aa,bb,3), 32 | tgt) 33 | 34 | --------------------------------------------------------------------------------