├── .gitignore ├── LICENSE ├── README.md ├── mcpm ├── __init__.py ├── grainboundary.py ├── io.py ├── kinetic.py ├── mcpm.py ├── rejection.py ├── spatial.py ├── stats.py └── utils │ ├── __init__.py │ ├── draw.py │ └── unique.pyx └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.dream3d 3 | *.png 4 | *.gif 5 | 6 | # emacs backup folders 7 | *.backups 8 | 9 | # setuptools directories 10 | /build/ 11 | /dist/ 12 | 13 | # python egg metadata from setuptools 14 | /*.egg-info -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 Brian DeCost 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # mcpm 2 | Monte Carlo grain growth solver 3 | 4 | Supports kinetic and rejection MC schemes with a traditional (uniform) or Gaussian pixel neighborhood (Mason, J. K., et al. "Kinetics and anisotropy of the Monte Carlo model of grain growth." Acta Materialia 82 (2015): 155-166). 5 | 6 | The solver currently reads and writes a partially implemented subset of the DREAM3Dv6 file format. 7 | 8 | Command-line tools: 9 | - mcpm: run a grain growth simulation 10 | - mcpm-draw: draw a 2D snapshot 11 | - mcpm-animate: make a movie from a series of snapshots 12 | -------------------------------------------------------------------------------- /mcpm/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Monte Carlo grain growth solver 3 | kinetic or rejection MC schemes 4 | traditional or gaussian pixel neighborhood 5 | """ 6 | 7 | -------------------------------------------------------------------------------- /mcpm/grainboundary.py: -------------------------------------------------------------------------------- 1 | """ Grain boundary properties -- default to cubic symmetry """ 2 | 3 | import numpy as np 4 | from misori import misori 5 | 6 | from . 
import io 7 | 8 | # grain growth model parameters 9 | # find the best way to set these at runtime 10 | high_angle = np.pi*30/180 11 | mobility_ratio = None 12 | energy_ratio = 0.6 13 | quaternions = None 14 | colors = None 15 | mobility_cache = {} 16 | energy_cache = {} 17 | 18 | def energy(*args): 19 | return uniform_energy() 20 | 21 | 22 | def mobility(*args): 23 | return uniform_mobility() 24 | 25 | 26 | def uniform_mobility(*args): 27 | return 1 28 | 29 | 30 | def uniform_energy(*args): 31 | return 1 32 | 33 | 34 | def threshold_mobility(a, b): 35 | global mobility_cache 36 | key = tuple(sorted([a,b])) 37 | try: 38 | return mobility_cache[key] 39 | except KeyError: 40 | angle = misori(quaternions[a], quaternions[b]) 41 | mobility = mobility_ratio if angle < high_angle else 1 42 | mobility_cache[key] = mobility 43 | return mobility 44 | 45 | 46 | def threshold_energy(qa, qb): 47 | angle = misori(qa, qb) 48 | return energy_ratio if angle < high_angle else 1 49 | 50 | def discrete_texture_mobility(a, b): 51 | global mobility_cache 52 | key = tuple(sorted([a,b])) 53 | try: 54 | return mobility_cache[key] 55 | except KeyError: 56 | mobility = 1.0 57 | if colors[a] == 0 or colors[b] == 0: 58 | pass 59 | elif colors[a] == colors[b]: 60 | mobility = mobility_ratio 61 | mobility_cache[key] = mobility 62 | return mobility 63 | 64 | def setup_discrete_texture_energy(colors, energy_ratio): 65 | """ make a lookup table for energy values """ 66 | energy_table = np.ones((len(colors), len(colors)), dtype=float) 67 | 68 | for color in np.unique(colors): 69 | if color > 0: 70 | idx = np.where(colors == color) 71 | y,x = np.meshgrid(idx,idx) 72 | energy_table[x,y] = energy_ratio 73 | 74 | np.fill_diagonal(energy_table, 0) 75 | 76 | return energy_table 77 | 78 | def discrete_texture_energy(neighstates, refstate): 79 | global energy_cache 80 | return energy_cache[neighstates, refstate] 81 | 82 | def setup(options): 83 | global quaternions 84 | global colors 85 | global mobility 86 | global mobility_ratio 87 | global energy 88 | global energy_ratio 89 | global high_angle 90 | mobility_ratio = options.mobility 91 | energy_ratio = options.energy 92 | 93 | if options.discrete == True: 94 | colors = io.load_colors(options.infile) 95 | # mobility = discrete_texture_mobility 96 | global energy 97 | global energy_cache 98 | energy_cache = setup_discrete_texture_energy(colors, options.energy) 99 | energy = discrete_texture_energy 100 | else: 101 | if mobility_ratio != 1.0: 102 | quaternions = io.load_quaternions(options.infile) 103 | mobility = threshold_mobility 104 | high_angle = options.angle*np.pi/180 105 | 106 | mobility = uniform_mobility 107 | -------------------------------------------------------------------------------- /mcpm/io.py: -------------------------------------------------------------------------------- 1 | """ simple HDF5 i/o routines (DREAM3D format) """ 2 | 3 | import numpy as np 4 | import h5py 5 | 6 | GRAIN_ID_PATH = 'DataContainers/SyntheticVolume/CellData/FeatureIds' 7 | QUATERNION_PATH = 'DataContainers/SyntheticVolume/CellFeatureData/AvgQuats' 8 | COLOR_PATH = 'DataContainers/SyntheticVolume/CellFeatureData/Color' 9 | PRNG_STATE_PATH = 'DataContainers/SytheticVolume/prng_state' 10 | ARGS_PATH = 'DataContainers/SytheticVolume/mcpm_args' 11 | 12 | def load_dream3d(path): 13 | with h5py.File(path, 'r') as f: 14 | grain_ids = np.array(f[GRAIN_ID_PATH]) 15 | shape = tuple([s for s in grain_ids.shape if s > 1]) 16 | return grain_ids.reshape(shape) 17 | 18 | def dimensions(dim_line): 19 | 
min_val, max_val = dim_line.split() 20 | return int(float(max_val) - float(min_val)) 21 | 22 | def load_spparks(path): 23 | """ load microstructure from spparks text dump """ 24 | grain_ids = None 25 | with open(path, 'r') as f: 26 | for line in f: 27 | if "TIMESTEP" in line: 28 | time = next(f) # float() 29 | elif "NUMBER" in line: 30 | num_sites = next(f) # float() 31 | elif "BOX" in line: 32 | x = dimensions(next(f)) 33 | y = dimensions(next(f)) 34 | z = dimensions(next(f)) 35 | grain_ids = np.zeros((x,y,z)) 36 | # skip forward a line 37 | line = next(f) 38 | else: 39 | # get id, spin, x, y, z 40 | idx, grain_id, x, y, z = map(int, line.split()) 41 | grain_ids[(x,y,z)] = grain_id 42 | shape = tuple([s for s in grain_ids.shape if s > 1]) 43 | return grain_ids.reshape(shape) 44 | 45 | def load_prng_state(path): 46 | state = list(np.random.get_state()) 47 | with h5py.File(path) as f: 48 | try: 49 | saved_state = f[PRNG_STATE_PATH] 50 | state[1] = saved_state 51 | np.random.set_state(tuple(state)) 52 | except KeyError: 53 | print('unable to load saved state') 54 | save_prng_state(path) 55 | return 56 | 57 | def save_prng_state(path): 58 | # RandomState is a tuple 59 | # the important part is the 624 element array of integers. 60 | state = np.random.get_state() 61 | with h5py.File(path) as f: 62 | try: 63 | f[PRNG_STATE_PATH][...] = state[1] 64 | except KeyError: 65 | f[PRNG_STATE_PATH] = state[1] 66 | return 67 | 68 | def dump_dream3d(sites, time, prefix='mcpm_dump'): 69 | path = '{0}{1:06d}.dream3d'.format(prefix, time) 70 | with h5py.File(path) as f: 71 | f[GRAIN_ID_PATH] = sites 72 | return 73 | 74 | def load_quaternions(path): 75 | with h5py.File(path, 'r') as f: 76 | quaternions = np.array(f[QUATERNION_PATH], dtype=np.float32) 77 | return quaternions 78 | 79 | def load_colors(path): 80 | """ load discrete texture-component states """ 81 | with h5py.File(path, 'r') as f: 82 | colors = np.array(f[COLOR_PATH], dtype=np.int) 83 | return colors 84 | 85 | 86 | def save_args(args): 87 | """ save command line arguments """ 88 | try: 89 | with h5py.File(args.infile) as f: 90 | try: 91 | f.create_group(ARGS_PATH) 92 | except ValueError: 93 | del f[ARGS_PATH] 94 | h_args = f[ARGS_PATH] 95 | for key, value in args.__dict__.items(): 96 | h_args[key] = value 97 | except: 98 | print('could not save arguments...') 99 | return 100 | 101 | def load_prev_args(args): 102 | """ load args from a previous run 103 | modifies values in the arguments namespace """ 104 | with h5py.File(args.infile) as f: 105 | h_args = f[ARGS_PATH] 106 | for key in args.__dict__.keys(): 107 | args.__dict__[key] = h_args[key].value 108 | return args 109 | -------------------------------------------------------------------------------- /mcpm/kinetic.py: -------------------------------------------------------------------------------- 1 | """ Kinetic Monte Carlo solver """ 2 | 3 | import argparse 4 | import numpy as np 5 | import cython 6 | 7 | from . import io 8 | from . import stats 9 | from . import spatial 10 | from . 
import grainboundary as gb 11 | from .utils.unique import _unique 12 | 13 | radius = 1 14 | dims = None 15 | 16 | def site_propensity(site, nearest, kT, sites, weights): 17 | current_state = sites[site] 18 | neighs = spatial.neighbors(site, dims=dims, radius=radius) 19 | nearest_sites = neighs[nearest] 20 | nearest_states = sites[nearest_sites] 21 | # states = _unique(np.ascontiguousarray(nearest_states.astype(np.int32))) 22 | states = np.unique(nearest_states) 23 | states = states[states != current_state] 24 | if states.size == 0: 25 | return 0 26 | 27 | delta = gb.energy(sites[neighs], current_state) 28 | current_energy = np.sum(np.multiply(delta, weights)) 29 | 30 | prob = 0 31 | for proposed_state in states: 32 | sites[site] = proposed_state 33 | delta = gb.energy(sites[neighs], current_state) 34 | proposed_energy = np.sum(np.multiply(delta, weights)) 35 | energy_change = proposed_energy - current_energy 36 | 37 | mobility = gb.mobility(current_state, proposed_state) 38 | if energy_change <= 0: 39 | prob += mobility 40 | elif kT > 0.0: 41 | prob += mobility * np.exp(-energy_change/kT) 42 | 43 | sites[site] = current_state 44 | return prob 45 | 46 | 47 | def site_event(site, nearest, kT, weights, sites, propensity): 48 | threshold = np.random.uniform() * propensity[site] 49 | current_state = sites[site] 50 | neighs = spatial.neighbors(site, dims=dims, radius=radius) 51 | nearest_sites = neighs[nearest] 52 | nearest_states = sites[nearest_sites] 53 | 54 | # states = _unique(np.ascontiguousarray(nearest_states.astype(np.int32))) 55 | states = np.unique(nearest_states) 56 | states = states[states != current_state] 57 | 58 | delta = gb.energy(sites[neighs], current_state) 59 | current_energy = np.sum(np.multiply(delta, weights)) 60 | 61 | prob = 0 62 | for proposed_state in states: 63 | sites[site] = proposed_state 64 | 65 | delta = gb.energy(sites[neighs], current_state) 66 | proposed_energy = np.sum(np.multiply(delta, weights)) 67 | energy_change = proposed_energy - current_energy 68 | 69 | mobility = gb.mobility(current_state, proposed_state) 70 | if energy_change <= 0: 71 | prob += mobility 72 | elif kT > 0.0: 73 | prob += mobility * np.exp(-energy_change/kT) 74 | if prob >= threshold: 75 | break 76 | 77 | # neighs = neighs[np.nonzero(weights)] 78 | for neigh in np.nditer(neighs): 79 | propensity[neigh] = site_propensity(neigh, nearest, 80 | kT, sites, weights) 81 | return 82 | 83 | 84 | def select_site_iter(propensity): 85 | total_propensity = np.sum(propensity) 86 | index = np.argsort(propensity, axis=None) 87 | target = total_propensity * np.random.uniform() 88 | partial = np.array(0) 89 | # iterate in reverse: 90 | for site in np.nditer(index[::-1]): 91 | partial += propensity.ravel()[site] 92 | if partial >= target: 93 | break 94 | time_step = -1.0/total_propensity * np.log(np.random.uniform()) 95 | return site, time_step 96 | 97 | 98 | def select_site(propensity): 99 | cumprop= np.cumsum(propensity) 100 | target = cumprop[-1] * np.random.uniform() 101 | site = np.searchsorted(cumprop, target) 102 | time_step = -1.0/cumprop[-1] * np.log(np.random.uniform()) 103 | return site, time_step 104 | 105 | 106 | def all_propensity(sites, nearest, kT, weights): 107 | print('initializing kmc propensity') 108 | propensity = np.zeros(sites.shape, dtype=float) 109 | for site,__ in np.ndenumerate(sites.ravel()): 110 | propensity[site] = site_propensity(site, nearest, kT, sites, weights) 111 | return propensity 112 | 113 | 114 | def iterate(sites, weights, options): 115 | 116 | global radius 117 | 
radius = options.radius 118 | global dims 119 | dims = sites.shape 120 | kT = options.kT 121 | length = options.length 122 | dump_frequency = options.freq 123 | 124 | time = 0 125 | spatial.setup(sites, options) 126 | gb.setup(options) 127 | nearest = spatial.nearest_neighbor_mask(radius,sites.ndim) 128 | propensity = all_propensity(sites.ravel(), 129 | nearest, kT, weights) 130 | while time < length: 131 | inner_time = 0 132 | print('time: {}'.format(time)) 133 | if not options.nodump: 134 | io.dump_dream3d(sites, int(time), prefix=options.prefix) 135 | if not options.nostats: 136 | stats.compute(sites, time=time) 137 | while inner_time < dump_frequency: 138 | site, time_step = select_site(propensity) 139 | site_event(site, nearest, kT, weights, sites.ravel(), propensity) 140 | inner_time += time_step 141 | time += inner_time 142 | if not options.nodump: 143 | io.dump_dream3d(sites, int(time), prefix=options.prefix) 144 | if not options.nostats: 145 | stats.compute(sites, time=time) 146 | return time 147 | -------------------------------------------------------------------------------- /mcpm/mcpm.py: -------------------------------------------------------------------------------- 1 | """ 2 | Monte Carlo grain growth solver 3 | kinetic or rejection MC schemes 4 | traditional or gaussian pixel neighborhood 5 | """ 6 | 7 | from . import io 8 | from . import stats 9 | from . import kinetic 10 | from . import rejection 11 | from .spatial import uniform_mask, gaussian_mask, strained_mask 12 | 13 | import argparse 14 | import numpy as np 15 | 16 | def main(): 17 | parser = argparse.ArgumentParser(prog='mcpm', 18 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 19 | description='''Kinetic Monte Carlo grain growth 20 | simulations in 2 and 3 dimensions.''') 21 | 22 | parser.add_argument('-i', '--infile', nargs='?', default='input.dream3d', 23 | help='DREAM3D file containing initial structure') 24 | parser.add_argument('--style', default='kmc', 25 | choices=['kmc', 'reject'], 26 | help='Monte Carlo style') 27 | parser.add_argument('--nbrhd', nargs='?', default='gaussian', 28 | choices=['uniform', 'gaussian', 'strained'], 29 | help='pixel neighborhood weighting') 30 | parser.add_argument('--sigma', type=float, default=3, 31 | help='smoothing parameter for gaussian neighborhood') 32 | parser.add_argument('--radius', type=int, default=9, 33 | help='pixel neighborhood radius') 34 | parser.add_argument('--kT', type=float, default=.001, 35 | help='Monte Carlo temperature kT') 36 | parser.add_argument('-l', '--length', type=float, default=100, 37 | help='Simulation length in MCS') 38 | parser.add_argument('--cutoff', type=float, default=0.01, 39 | help='interaction weight cutoff value') 40 | parser.add_argument('--norm', type=float, default=1, 41 | help='gaussian kernel normalization constant a') 42 | parser.add_argument('--freq', type=float, default=10, 43 | help='timesteps between system snapshots') 44 | parser.add_argument('--prefix', nargs='?', default='mcpm_dump', 45 | help='prefix for dump file') 46 | parser.add_argument('--neighborlist', action='store_true', 47 | help='''Compute explicit neighbor lists. 48 | Problematic with large 3D systems.''') 49 | parser.add_argument('--mobility', type=float, default=1.0, 50 | help='''use misorientation-threshold mobility. This is the mobility ratio.''') 51 | parser.add_argument('--energy', type=float, default=1.0, 52 | help='''use misorientation-threshold energy. 
This is the energy ratio.''') 53 | parser.add_argument('--angle', type=float, default=30.0, 54 | help='high angle boundary cutoff in degrees') 55 | parser.add_argument('--statsfile', nargs='?', default='stats.h5', 56 | help='HDF5 file for grain growth stats') 57 | parser.add_argument('--neighborfile', nargs='?', default='') 58 | parser.add_argument('--load_prng_state', action='store_true', 59 | help='use the PRNG state stored in the input file') 60 | parser.add_argument('--nostats', action='store_true', 61 | help='no statistics file.') 62 | parser.add_argument('--nodump', action='store_true', 63 | help='no dream3d dump files.') 64 | parser.add_argument('--discrete', action='store_true', 65 | help='''use discrete orientation states for TRD simulations''') 66 | 67 | args = parser.parse_args() 68 | sites = io.load_dream3d(args.infile) 69 | 70 | if args.load_prng_state: 71 | io.load_prng_state(args.infile) 72 | else: 73 | io.save_prng_state(args.infile) 74 | if not args.nostats: 75 | stats.initialize(sites, args) 76 | 77 | if args.nbrhd == 'uniform': 78 | weights = uniform_mask(sites, radius=args.radius) 79 | elif args.nbrhd == 'strained': 80 | weights = strained_mask(sites, radius=args.radius, strain=0.1) 81 | elif args.nbrhd == 'gaussian': 82 | weights = gaussian_mask(sites, args.radius, a=args.norm, 83 | sigma_squared=np.square(args.sigma), 84 | cutoff=args.cutoff) 85 | 86 | if args.style == 'reject': 87 | rejection.iterate(sites, weights, args) 88 | elif args.style == 'kmc': 89 | kinetic.iterate(sites, weights, args) 90 | 91 | io.save_args(args) 92 | if not args.nostats: 93 | stats.finalize() 94 | -------------------------------------------------------------------------------- /mcpm/rejection.py: -------------------------------------------------------------------------------- 1 | """ Rejection Monte Carlo solver """ 2 | 3 | import argparse 4 | import numpy as np 5 | 6 | from . import io 7 | from . import stats 8 | from . 
import spatial 9 | 10 | def site_event(site, kT, sites, weights): 11 | s = sites.ravel() 12 | current_state = s[site] 13 | nearest = spatial.neighbors(site, None, dims=sites.shape, radius=1) 14 | states = np.unique(s[nearest]) 15 | states = states[states != current_state] 16 | if states.size == 0: 17 | return current_state 18 | 19 | neighs = spatial.neighbors(site, None, dims=sites.shape, radius=radius) 20 | delta = s[neighs] != current_state 21 | current_energy = np.sum(np.multiply(delta, weights)) 22 | 23 | proposed_state = np.random.choice(states) 24 | s[site] = proposed_state 25 | 26 | delta = s[neighs] != proposed_state 27 | proposed_energy = np.sum(np.multiply(delta, weights)) 28 | s[site] = current_state 29 | energy_change = proposed_energy - current_energy 30 | 31 | if energy_change > 0: 32 | if kT == 0: 33 | return current_state 34 | elif np.random.uniform() > np.exp(-energy_change/kT): 35 | return current_state 36 | 37 | return proposed_state 38 | 39 | 40 | def timestep(sites, kT, weights): 41 | rejects = 0 42 | s = sites.ravel() 43 | for i in range(sites.size): 44 | site = np.random.randint(sites.size) 45 | current = s[site] 46 | s[site] = site_event(site, kT, sites, weights) 47 | rejects += (current == s[site]) 48 | return rejects 49 | 50 | 51 | def iterate(sites, weights, options): 52 | radius = options.radius 53 | kT = options.kT 54 | length = options.length 55 | dump_frequency = options.freq 56 | spatial.setup(sites, options) 57 | 58 | rejects = 0 59 | for time in np.arange(0, length+1, dump_frequency): 60 | print('time: {}'.format(time)) 61 | io.dump_dream3d(sites, time) 62 | stats.compute(sites, time) 63 | accepts = time*sites.size - rejects 64 | print('accepts: {}, rejects: {}'.format(accepts,rejects)) 65 | for step in range(dump_frequency): 66 | rej = timestep(sites, kT, weights) 67 | rejects += rej 68 | io.dump_dream3d(sites, time) 69 | stats.compute(sites, time) 70 | return 71 | -------------------------------------------------------------------------------- /mcpm/spatial.py: -------------------------------------------------------------------------------- 1 | """ pixel neighborhood definitions """ 2 | import numpy as np 3 | 4 | nbrlist = None 5 | neighbors = None 6 | nbr_range = np.arange(-1,2) 7 | lattice_basis = None 8 | 9 | def gaussian_mask(sites, radius, sigma_squared=1, a=None, cutoff=0.01): 10 | if a is None: 11 | a = 1/(np.sqrt(2*sigma_squared*np.pi)) 12 | id_range = np.arange(-radius,radius+1) 13 | dist = np.meshgrid( *[id_range for d in range(sites.ndim)] ) 14 | square_dist = np.sum(np.square(list(dist)), axis=0) 15 | weights = a * np.exp(-0.5 * square_dist / sigma_squared) 16 | weights[weights < cutoff] = 0 17 | return weights.ravel() 18 | 19 | 20 | def uniform_mask(sites, radius=1): 21 | dims = tuple([2*radius+1 for __ in range(sites.ndim)]) 22 | return np.ones(dims).ravel() 23 | 24 | # def uniform_mask(sites, radius=1): 25 | # arr = np.array([[1,1,1], 26 | # [1,1,1], 27 | # [1,1,1]], dtype=float) 28 | # return arr.ravel() 29 | 30 | # def uniform_mask(sites, radius=1): 31 | # arr = np.array([[0,0,0,0,0], 32 | # [0,1,1,1,0], 33 | # [0,1,1,1,0], 34 | # [0,1,1,1,0], 35 | # [0,0,0,0,0]], dtype=float) 36 | # return arr.ravel() 37 | 38 | def strained_mask(sites, radius=1, strain=0.1): 39 | dims = tuple([2*radius+1 for __ in range(sites.ndim)]) 40 | if dims != (3,3): 41 | import sys; sys.exit('strained_mask only implemented for 2D/8n square lattice') 42 | mask = np.array([[1, 1, 1], 43 | [1+strain, 1, 1+strain], 44 | [1, 1, 1]]) 45 | return mask.ravel() 46 | 47 | 
def nearest_neighbor_mask(radius, ndim): 48 | id_range = np.arange(-radius,radius+1) 49 | dist = np.meshgrid( *[id_range for d in range(ndim)] ) 50 | square_dist = np.sum(np.square(list(dist)), axis=0) 51 | nearest = np.zeros_like(square_dist, dtype=bool) 52 | nearest[square_dist <= 2] = 1 53 | return nearest.ravel() 54 | 55 | 56 | def lookup_neighbors(site, dims=None, radius=1): 57 | return nbrlist[site] 58 | 59 | 60 | def meshgrid_neighbors(site, dims=None, radius=1): 61 | """ N-dimensional pixel neighborhood 62 | for periodic images on regular grids """ 63 | index = np.unravel_index(site, dims=dims) 64 | id_range = [np.arange(idx-radius, idx+radius+1) 65 | for idx in index] 66 | neigh_ids = np.meshgrid(*id_range) 67 | neighs = np.ravel_multi_index(neigh_ids,dims=dims, mode='wrap') 68 | return neighs.ravel() 69 | 70 | def slice_neighbors(site, dims=None, radius=1): 71 | """ N-dimensional pixel neighborhood 72 | for periodic images on regular grids """ 73 | index = np.unravel_index(site, dims=dims) 74 | islice = [(nbr_range + idx)%dims[i] for i,idx in enumerate(index)] 75 | neighs = np.ravel_multi_index(islice,dims=dims, mode='wrap') 76 | return neighs.ravel() 77 | 78 | def ix_neighbors(site, dims=None, radius=1): 79 | """ use np.ix_ instead of np.meshgrid -- requires less memory """ 80 | index = np.unravel_index(site, dims=dims) 81 | id_range = [np.arange(idx-radius, idx+radius+1) 82 | for idx in index] 83 | neighs = np.ravel_multi_index(np.ix_(*id_range), 84 | dims=dims, mode='wrap') 85 | return neighs.ravel() 86 | 87 | def view_neighbors(site, dims=None, radius=1): 88 | ishape = np.eye(len(dims))*(2*radius + 1) 89 | ishape[ishape == 0] += 1 90 | index = np.unravel_index(site, dims=dims) 91 | id_range = [np.arange(idx-radius, idx+radius+1).reshape(ishape[i]) 92 | for i,idx in enumerate(index)] 93 | neighs = np.ravel_multi_index(tuple(id_range), dims=dims, mode='wrap') 94 | return neighs.ravel() 95 | 96 | def view_neighbors_2d(site, dims=None, radius=1): 97 | ''' the listcomp in view_neighbors takes a lot of time. 98 | here's a 2d-specific way to do it hopefully faster? ''' 99 | index = np.unravel_index(site, dims=dims) 100 | id_range = ( (nbr_range+index[0]).reshape(lattice_basis[0]), 101 | (nbr_range+index[1]).reshape(lattice_basis[1]) ) 102 | neighs = np.ravel_multi_index(id_range, dims=dims, mode='wrap') 103 | return neighs.ravel() 104 | 105 | def view_neighbors_3d(site, dims=None, radius=1): 106 | ''' the listcomp in view_neighbors takes a lot of time. 107 | here's a 3d-specific way to do it hopefully faster? 
''' 108 | index = np.unravel_index(site, dims=dims) 109 | id_range = ( (nbr_range+index[0]).reshape(lattice_basis[0]), 110 | (nbr_range+index[1]).reshape(lattice_basis[1]), 111 | (nbr_range+index[2]).reshape(lattice_basis[2]) ) 112 | neighs = np.ravel_multi_index(id_range, dims=dims, mode='wrap') 113 | return neighs.ravel() 114 | 115 | def roll_neighbors(sites, site, dims=None, radius=1): 116 | """ N-dimensional pixel neighborhood 117 | for periodic images on regular grids """ 118 | index = np.unravel_index(site, dims=dims) 119 | neighs = sites.take(nbr_range+index, axis=0, mode='wrap') 120 | return neighs.flatten() 121 | 122 | def build_neighbor_list(sites, radius=1): 123 | global nbrlist 124 | if nbrlist is not None: 125 | print('nbrlist already exists') 126 | return 127 | global neighbors 128 | # neighbors = meshgrid_neighbors 129 | print('building neighbor list') 130 | check_neighs = neighbors(0, dims=sites.shape, radius=radius) 131 | num_neighs = check_neighs.size 132 | print('each site has {} neighbors'.format(num_neighs-1)) 133 | 134 | nbrlist = np.zeros((sites.size, num_neighs),dtype=int) 135 | for index, __ in np.ndenumerate(sites): 136 | site = np.ravel_multi_index(index, sites.shape) 137 | nbrlist[site] = neighbors(site, dims=sites.shape, radius=radius) 138 | 139 | # reassign neighbors function to use lookup list 140 | neighbors = lookup_neighbors 141 | return 142 | 143 | def load_neighbor_list(neighborpath): 144 | global nbrlist 145 | global neighbors 146 | nbrlist = np.load(neighborpath) 147 | neighbors = lookup_neighbors 148 | return 149 | 150 | def setup(sites, options): 151 | global neighbors 152 | global nbr_range 153 | global lattice_basis 154 | if options.neighborfile: 155 | load_neighbor_list(options.neighborfile) 156 | return 157 | # neighbors = meshgrid_neighbors 158 | neighbors = view_neighbors_2d if (sites.ndim == 2) else view_neighbors_3d 159 | nbr_range = np.arange(-options.radius, options.radius+1) 160 | lattice_basis = np.eye(len(sites.shape))*(2*options.radius + 1) 161 | lattice_basis[lattice_basis == 0] += 1 162 | 163 | if options.neighborlist: 164 | build_neighbor_list(sites, radius=options.radius) 165 | return 166 | -------------------------------------------------------------------------------- /mcpm/stats.py: -------------------------------------------------------------------------------- 1 | """ microstructure data output """ 2 | import h5py 3 | import numpy as np 4 | 5 | from . 
import io 6 | 7 | stats_df = None # pandas dataframe for stats output 8 | unique_ids = None # list of Potts states 9 | output_index = 0 10 | stats_file = None 11 | 12 | def initialize(sites, args): 13 | global unique_ids 14 | global stats_file 15 | stats_file = args.statsfile 16 | 17 | num_snapshots = 1 + (args.length / args.freq) 18 | unique_ids = np.sort(np.unique(sites)) 19 | np.insert(unique_ids, 0, 0) # insert zero to match index with grain id 20 | zero_data = np.zeros((num_snapshots,unique_ids.size), dtype=float) 21 | with h5py.File(stats_file) as f: 22 | f['time'] = np.zeros(num_snapshots) 23 | f['grainsize'] = zero_data 24 | return 25 | 26 | def compute(sites, time=None): 27 | global output_index 28 | global unique_ids 29 | global stats_file 30 | # get grain size in voxels 31 | grainsize = np.array([np.sum(sites == state) for state in unique_ids]) 32 | with h5py.File(stats_file) as f: 33 | f['time'][output_index] = time 34 | f['grainsize'][output_index] = grainsize 35 | 36 | print('computed stats!') 37 | output_index += 1 38 | return 39 | 40 | def finalize(): 41 | pass 42 | return 43 | -------------------------------------------------------------------------------- /mcpm/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bdecost/mcpm/cb7e2d5d3d61bfe6b83605a186e83c53da40088b/mcpm/utils/__init__.py -------------------------------------------------------------------------------- /mcpm/utils/draw.py: -------------------------------------------------------------------------------- 1 | """ draw 2D simulation snapshots """ 2 | # import matplotlib 3 | # matplotlib.use('Agg') 4 | import matplotlib.pyplot as plt 5 | 6 | 7 | import numpy as np 8 | import h5py 9 | import os 10 | import glob 11 | from skimage.segmentation import find_boundaries 12 | import argparse 13 | import subprocess 14 | 15 | import misori 16 | 17 | from .. 
import io 18 | 19 | def rodrigues_colormap(quats): 20 | """ rodrigues vector mapped to unit cube as rgb values """ 21 | quats = np.sort(np.abs(quats), axis=1) # sort ascending 22 | fz_half = np.tan(np.pi/8) # half cubic fundamental zone width 23 | rf = [misori.fz_rod(q)+fz_half for q in quats] 24 | return {index: a / (2*np.tan(np.pi/8)) for index, a in enumerate(rf)} 25 | 26 | def binary_colormap(quats): 27 | cmap = {1: np.array([1.0, 1.0, 1.0]), 28 | 2: np.array([.45, .57, 0.63]), 29 | 3: np.array([0.6, 0.0, 0.0])} 30 | return {i: cmap[np.sum(quats[i,:] > 0)] 31 | for i in range(1,quats.shape[0])} 32 | 33 | def map_colors(sites, color_map): 34 | ''' apply color mapping and fill in boundaries ''' 35 | shape = sites.shape + (3,) 36 | colors = np.zeros(shape, dtype=float) 37 | for (x,y), s in np.ndenumerate(sites): 38 | colors[(x,y)] = color_map[s] 39 | colors[find_boundaries(sites), :] = np.array([.3,.3,.3]) 40 | return colors 41 | 42 | def mark_grain(sites, grain, idgrain): 43 | colors = ['r', 'g', 'b', 'y', 'c', 'm', 'k'] 44 | 45 | # get grain centroids 46 | index = np.indices(sites.shape) 47 | idx , idy = index[0], index[1] 48 | size_x, size_y = sites.shape 49 | mask = sites == grain 50 | if np.any(mask): 51 | y, x = idy[mask], idx[mask] 52 | # fix periodic boundary splinching 53 | if np.max(y) - np.min(y) == size_y-1: 54 | y[y < size_y/2] += size_y 55 | if np.max(x) - np.min(x) == size_x-1: 56 | x[x < size_x/2] += size_x 57 | y, x = np.mean(y), np.mean(x) 58 | y = y if y < size_y else y - size_y 59 | x = x if x < size_x else x - size_x 60 | # use image coordinates -- forgot to change imshow 61 | plt.scatter(y, x, color=colors[idgrain], edgecolor='w') 62 | return 63 | 64 | def draw(sites, outfile=None, colormap=None, 65 | vmin=None, vmax=None, mark_grains=[]): 66 | cmap = None 67 | if colormap is not None: 68 | colors = map_colors(sites, colormap) 69 | else: 70 | colors = sites 71 | colors[find_boundaries(sites)] = 0 72 | cmap=plt.get_cmap('Spectral') 73 | plt.imshow(colors, interpolation='none', 74 | cmap=cmap, 75 | vmin=vmin, vmax=vmax, origin='lower') 76 | for idg,grain in enumerate(mark_grains): 77 | mark_grain(sites, grain, idg) # calls plt.plot 78 | if outfile is not None: 79 | plt.savefig(outfile) 80 | plt.clf() 81 | else: 82 | return 83 | 84 | def propensity(sites, outfile=None): 85 | return 86 | 87 | def sequence(tmpdir='images'): 88 | snapshots = glob.glob('dump*.dream3d') 89 | snapshots.sort() 90 | vmin, vmax = 0, 0 91 | for i,snapshot in enumerate(snapshots): 92 | print(snapshot) 93 | # name, ext = os.path.splitext(snapshot) 94 | outfile = 'snapshot{0:04d}.png'.format(i) 95 | s = load_dream3d(snapshot) 96 | if (vmin, vmax) == (0,0): 97 | vmax = s.max() 98 | draw(s, '{}/{}'.format(tmpdir,outfile), vmin=0, vmax=vmax) 99 | 100 | 101 | def draw_snapshot(): 102 | parser = argparse.ArgumentParser(prog='draw-snapshot', 103 | description='''Draw a 2D snapshot from a DREAM3D file''') 104 | parser.add_argument('-i', '--infile', nargs='?', default='input.dream3d', 105 | help='path to dream3d file to draw') 106 | parser.add_argument('-o', '--outfile', nargs='?', default=None, 107 | help='save image file to this path') 108 | parser.add_argument('-c', '--color', nargs='?', default='grain_id', 109 | help='color scheme') 110 | parser.add_argument('--initial', nargs='?', default='input.dream3d', 111 | help='initial snapshot with quaternions') 112 | 113 | args = parser.parse_args() 114 | 115 | sites = io.load_dream3d(args.infile) 116 | cmap = None 117 | if args.color == 'quaternion': 118 | 
quats = io.load_quaternions(args.initial) 119 | cmap = rodrigues_colormap(quats) 120 | draw(sites, colormap=cmap, outfile=args.outfile) 121 | 122 | 123 | def animate_snapshots(): 124 | parser = argparse.ArgumentParser(prog='mcpm-animate', 125 | description='''Animate 2D snapshots from file sequence''') 126 | parser.add_argument('-i', '--snapshots', nargs='+', required=True, 127 | help='list of snapshots to animate') 128 | parser.add_argument('-o', '--outfile', nargs='?', default='grains.gif', 129 | help='save image file to this path') 130 | parser.add_argument('--cleanup', action='store_true', 131 | help='clean up temporary image files') 132 | parser.add_argument('-c', '--color', nargs='?', default='grain_id', 133 | help='color scheme') 134 | parser.add_argument('--initial', nargs='?', default='input.dream3d', 135 | help='initial snapshot with quaternions') 136 | parser.add_argument('--format', choices=['dream3d', 'spparks'], default='dream3d', 137 | help='input file format') 138 | parser.add_argument('--ffmpeg', action='store_true', 139 | help='use ffmpeg -> *.mov instead of imagemagick') 140 | 141 | args = parser.parse_args() 142 | 143 | plt.switch_backend('Agg') 144 | tmpdir = 'temp' 145 | try: 146 | os.mkdir(tmpdir) 147 | except FileExistsError: 148 | pass 149 | mark_grains = [] # list of grains to mark 150 | # mark_grains = [158, 2136, 2482, 300, 335, 39, 823] 151 | 152 | print('processing snapshots') 153 | if '*' in args.snapshots: 154 | # zsh expands the wildcard and passes python a list of filenames 155 | # bash doesn't expand the wildcard like this: check and glob 156 | args.snapshots = glob.glob(snapshots) 157 | args.snapshots.sort() 158 | 159 | vmin, vmax = 0, 0 160 | cmap = None 161 | for i,snapshot in enumerate(args.snapshots): 162 | print(snapshot) 163 | # name, ext = os.path.splitext(snapshot) 164 | name = 'snapshot{:04d}'.format(i) 165 | if args.format == 'dream3d': 166 | s = io.load_dream3d(snapshot) 167 | elif args.format == 'spparks': 168 | s = io.load_spparks(snapshot) 169 | if (vmin, vmax) == (0,0): 170 | vmax = s.max() 171 | if args.color == 'quaternion': 172 | quats = io.load_quaternions(args.initial) 173 | cmap = rodrigues_colormap(quats) 174 | elif args.color == 'binary': 175 | quats = io.load_quaternions(args.initial) 176 | cmap = binary_colormap(quats) 177 | 178 | draw(s, '{}/{}.png'.format(tmpdir,name), 179 | colormap=cmap, vmin=0, vmax=vmax, 180 | mark_grains=mark_grains) 181 | 182 | images = glob.glob('{}/*.png'.format(tmpdir)) 183 | images.sort() 184 | if not args.ffmpeg: 185 | print('calling imagemagick') 186 | subprocess.call(['convert'] + images + ['-delay', '25', args.outfile]) 187 | else: 188 | print('calling ffmpeg') 189 | subprocess.call(['ffmpeg'] + '-i temp/snapshot%04d.png -codec png grains.mov'.split(' ')) 190 | 191 | if args.cleanup: 192 | print('cleaning up') 193 | for i in images: 194 | os.remove(i) 195 | os.rmdir(tmpdir) 196 | -------------------------------------------------------------------------------- /mcpm/utils/unique.pyx: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import numpy as np 3 | cimport numpy as np 4 | import cython 5 | 6 | # profiling suggests removing bounds checking doesn't 7 | # increase performance very significantly 8 | # @cython.boundscheck(False) 9 | # @cython.wraparound(False) 10 | def _unique(np.ndarray[np.int32_t,ndim=1] arr not None): 11 | cdef int imax = arr.size 12 | cdef np.ndarray[np.int32_t,ndim=1] unique = np.zeros(imax, dtype=np.int32) 13 | 
cdef int i, j 14 | cdef int n_unq = 0 15 | cdef int value 16 | for i in range(imax): 17 | value = arr[i] 18 | for j in range(n_unq+1): 19 | if unique[j] == value: 20 | break 21 | if j == n_unq: 22 | unique[n_unq] = value 23 | n_unq += 1 24 | 25 | return unique[:n_unq] 26 | 27 | 28 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from Cython.Build import cythonize 3 | import numpy 4 | 5 | setup(name='mcpm', 6 | version='0.1', 7 | description='Kinetic Monte Carlo grain growth model', 8 | url='tbd', 9 | author='Brian DeCost', 10 | author_email='bdecost@andrew.cmu.edu', 11 | license='MIT', 12 | packages=['mcpm', 'mcpm.utils'], 13 | ext_modules=cythonize('mcpm/utils/unique.pyx'), 14 | include_dirs=[numpy.get_include()], 15 | entry_points={ 16 | 'console_scripts': [ 17 | 'mcpm = mcpm.mcpm:main', 18 | 'mcpm-draw = mcpm.utils.draw:draw_snapshot', 19 | 'mcpm-animate = mcpm.utils.draw:animate_snapshots', 20 | ], 21 | }, 22 | zip_safe=False) 23 | --------------------------------------------------------------------------------
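A minimal driver sketch, assuming an `input.dream3d` file that contains the FeatureIds dataset read by `mcpm.io.load_dream3d`: it wires the modules together the way `mcpm.mcpm.main()` does, but from Python with an explicit options namespace whose fields mirror the argparse defaults, rather than through the `mcpm` console script.

```python
# Minimal sketch: run the kinetic solver directly from Python.
# The options namespace mirrors the defaults defined in mcpm/mcpm.py;
# kinetic.iterate() and the grainboundary/spatial setup read these fields by name.
from argparse import Namespace

import numpy as np

from mcpm import io, kinetic
from mcpm.spatial import gaussian_mask

options = Namespace(
    infile='input.dream3d', style='kmc', nbrhd='gaussian',
    sigma=3.0, radius=9, kT=0.001, length=100.0, cutoff=0.01,
    norm=1.0, freq=10.0, prefix='mcpm_dump', neighborlist=False,
    neighborfile='', mobility=1.0, energy=1.0, angle=30.0,
    statsfile='stats.h5', discrete=False, load_prng_state=False,
    nostats=True,   # skip the stats file for this sketch
    nodump=False)   # write mcpm_dump*.dream3d snapshots

sites = io.load_dream3d(options.infile)           # grain-id array from the DREAM3D file
weights = gaussian_mask(sites, options.radius,    # gaussian pixel-neighborhood weights
                        a=options.norm,
                        sigma_squared=np.square(options.sigma),
                        cutoff=options.cutoff)
kinetic.iterate(sites, weights, options)          # run the kinetic MC solver
```

The equivalent command-line invocation through the console scripts registered in setup.py would be `mcpm -i input.dream3d --style kmc`.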