├── pycola ├── __init__.py ├── aux.rst ├── box_smooth.rst ├── cic.rst ├── evolve.rst ├── taskfarmer │ ├── writeTasks.py │ ├── taskwrapper.sh │ └── taskfarmer.sh ├── acceleration.rst ├── potential.rst ├── growth.rst ├── readOutput.py ├── OmSiNs │ ├── makeList.py │ └── copyFiles.py ├── example.rst ├── README.md ├── ic.rst ├── reformatNBody.py ├── box_smooth.pyx ├── index.rst ├── aux.py ├── potential.pyx ├── growth.py ├── Makefile └── setup.py ├── MUSIC ├── plugins │ ├── nyx_plugin │ │ ├── Make.package │ │ ├── Make.ic │ │ └── GNUmakefile │ ├── random_music.cc │ ├── transfer_inflation.cc │ ├── transfer_bbks.cc │ ├── point_file_reader.hh │ └── region_convex_hull.cc ├── OmSiNs │ ├── ics_template.conf │ ├── runCode.py~ │ ├── runCode.py │ └── ics_template.conf_log.txt ├── defaults.hh ├── Numerics.cc ├── LICENSE ├── defaults.cc ├── output.cc ├── README.md ├── transfer_function.cc ├── Numerics.hh ├── LICENSE.md ├── region_generator.hh ├── log.cc ├── convolution_kernel.hh ├── Makefile ├── log.hh ├── tools │ └── point_file_reader.hh ├── general.hh ├── output.hh ├── poisson.hh ├── fft_operators.hh ├── schemes.hh ├── tests.hh └── cosmology.hh ├── README.md ├── .gitignore ├── janWorkflow ├── runMusic.sh ├── rdMeta.py ├── batchMakeUniverse.slr ├── bigShovel.sh ├── makeOneUniverse.sh ├── projectNBody.py ├── pack_hd5_Pk.py ├── Readme ├── prepMusic_4par.sh └── sliceBigCube.py ├── LICENSE.md ├── CosmoFlow ├── hyper_parameters_Cosmo.py └── io_Cosmo-3param.py └── README /pycola/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /MUSIC/plugins/nyx_plugin/Make.package: -------------------------------------------------------------------------------- 1 | CEXE_sources += output_nyx.cpp 2 | 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cosmoflow-sims 2 | Running the simulations for the CosmoFlow project 3 | -------------------------------------------------------------------------------- /pycola/aux.rst: -------------------------------------------------------------------------------- 1 | 2 | 3 | .. automodule:: aux 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | -------------------------------------------------------------------------------- /pycola/box_smooth.rst: -------------------------------------------------------------------------------- 1 | 2 | 3 | .. automodule:: box_smooth 4 | :members: 5 | :undoc-members: 6 | :show-inheritance: 7 | -------------------------------------------------------------------------------- /pycola/cic.rst: -------------------------------------------------------------------------------- 1 | Cloud-in-Cell 2 | ------------- 3 | 4 | .. automodule:: cic 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /pycola/evolve.rst: -------------------------------------------------------------------------------- 1 | Evolution 2 | -------------- 3 | 4 | .. 
automodule:: evolve 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | -------------------------------------------------------------------------------- /pycola/taskfarmer/writeTasks.py: -------------------------------------------------------------------------------- 1 | outfile = open("tasklist.txt", "w") 2 | 3 | for i in range(0,2005): 4 | print >> outfile, "taskwrapper.sh "+str(i) 5 | -------------------------------------------------------------------------------- /pycola/acceleration.rst: -------------------------------------------------------------------------------- 1 | Accelerations 2 | ---------------------------------- 3 | 4 | .. automodule:: acceleration 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /pycola/potential.rst: -------------------------------------------------------------------------------- 1 | Potential 2 | ------------------------- 3 | 4 | .. autofunction:: potential.initialize_density 5 | 6 | .. autofunction:: potential.get_phi 7 | 8 | .. automodule:: potential 9 | :undoc-members: 10 | :show-inheritance: 11 | -------------------------------------------------------------------------------- /pycola/taskfarmer/taskwrapper.sh: -------------------------------------------------------------------------------- 1 | module load python 2 | source activate cola2 3 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/common/software/fftw/3.3.4/hsw/gnu/lib/ 4 | cd /global/cscratch1/sd/djbard/MUSIC_pyCola/github/cosmoflow-sims/pycola/ 5 | python pycola-OmSiNs-template.py $1 6 | -------------------------------------------------------------------------------- /pycola/growth.rst: -------------------------------------------------------------------------------- 1 | Growth factors 2 | ------------------------- 3 | 4 | .. autofunction:: growth.growth_factor_solution 5 | .. autofunction:: growth.growth_2lpt 6 | .. autofunction:: growth.d_growth2 7 | 8 | 9 | 10 | 11 | .. 
automodule:: growth 12 | :show-inheritance: 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Add any directories, files, or patterns you don't want to be tracked by version control 2 | *~ 3 | *pyc 4 | *yaml 5 | data* 6 | out* 7 | __pycache__/ 8 | L* 9 | web/ 10 | slurm-* 11 | *svg 12 | logs/* 13 | plot/ 14 | *.so 15 | *.npz 16 | pycola/build/ 17 | wnoise_*.bin 18 | ics*.conf 19 | MUSIC 20 | *log.txt 21 | -------------------------------------------------------------------------------- /pycola/taskfarmer/taskfarmer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #SBATCH -N 10 -c 1 3 | ##SBATCH -p debug 4 | #SBATCH -t 00:12:00 5 | #SBATCH -C haswell 6 | #SBATCH -J OmSiNs 7 | #SBATCH -q premium 8 | rm *.tfin 9 | cd /global/cscratch1/sd/djbard/MUSIC_pyCola/github/cosmoflow-sims/pycola/taskfarmer/ 10 | export PATH=$PATH:/usr/common/tig/taskfarmer/1.5/bin:$(pwd) 11 | export THREADS=1 12 | runcommands.sh tasklist.txt 13 | -------------------------------------------------------------------------------- /MUSIC/plugins/random_music.cc: -------------------------------------------------------------------------------- 1 | #include "random.hh" 2 | 3 | class RNG_music : public RNG_plugin{ 4 | public: 5 | explicit RNG_music( config_file& cf ) 6 | : RNG_plugin( cf ) 7 | { } 8 | 9 | ~RNG_music() { } 10 | 11 | bool is_multiscale() const 12 | { return true; } 13 | }; 14 | 15 | 16 | namespace{ 17 | RNG_plugin_creator_concrete< RNG_music > creator("MUSIC"); 18 | 19 | 20 | } 21 | -------------------------------------------------------------------------------- /janWorkflow/runMusic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -u ; # exit if you try to use an uninitialized variable 4 | set -e ; # bash exits if any statement returns a non-true return value 5 | set -o errexit ; # exit if any statement returns a non-true return value 6 | 7 | pwd 8 | musicConf=$1 9 | outPath=$2 10 | cd $outPath 11 | pwd 12 | MusicExe=/global/homes/b/balewski/prj/cosmoflow-sims/MUSIC/MUSIC 13 | echo start MUSIC $musicConf ' '`date` 14 | time $MusicExe ../$musicConf 15 | 16 | echo 'done Music '`date` 17 | 18 | -------------------------------------------------------------------------------- /pycola/readOutput.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ROOT import * 3 | import time 4 | 5 | data = np.load('OmSi/pycola_0.325_0.928.npz') 6 | print np.shape(data['px']) 7 | 8 | px = data['px'] 9 | py = data['py'] 10 | pz = data['pz'] 11 | 12 | print px[1][1][1], py[1][1][1], pz[1][1][1] 13 | ncells = 128 # np.shape(px)[0] 14 | hi = 256 15 | lo = 0 16 | nbins = 128 17 | h = TH3F("","",nbins, lo, hi, nbins, lo, hi, nbins, lo, hi) ##xbinx, xlo, xhi... 
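# NOTE: the triple loop below calls TH3F.Fill() once per grid point, i.e.
# 128^3 ~ 2 million Python-level calls, which is slow.  A vectorized sketch
# (an alternative approach, not the original code) would histogram the
# flattened coordinates with NumPy instead, as projectNBody.py does:
#   coords = np.vstack((px.ravel(), py.ravel(), pz.ravel())).T
#   counts, edges = np.histogramdd(coords, bins=nbins, range=((lo, hi),) * 3)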
18 | for i in range(ncells): 19 | for j in range(ncells): 20 | for k in range(ncells): 21 | #h.SetBinContent(i, j, k, px[i][j][k] 22 | h.Fill(px[i][j][k], py[i][j][k], pz[i][j][k]) 23 | 24 | h.Draw() 25 | 26 | time.sleep(300) 27 | -------------------------------------------------------------------------------- /pycola/OmSiNs/makeList.py: -------------------------------------------------------------------------------- 1 | import os, shutil 2 | 3 | ### making the list of cosmological params, which will be used by the CosmoFlow IO code to make the tensorflow files 4 | 5 | list = open("list.txt", "w") 6 | print >> list, "## seed, om, si" 7 | counter = 999 8 | 9 | start = 0 10 | stop = 2000 11 | 12 | ct = 0 13 | for afile in os.listdir("./"): 14 | if "pycola" not in afile or "npz" in afile: 15 | continue 16 | else: 17 | print afile 18 | counter+=1 19 | 20 | if counter>3000: 21 | continue 22 | 23 | om = afile.split("_")[1] 24 | si = afile.split("_")[2] 25 | ns = afile.split("_")[3] 26 | print counter, om, si, ns 27 | 28 | print >> list, str(counter)+","+str(om)+","+str(si)+","+str(ns) 29 | 30 | -------------------------------------------------------------------------------- /MUSIC/OmSiNs/ics_template.conf: -------------------------------------------------------------------------------- 1 | [setup] 2 | boxlength = 512 3 | zstart = 0 4 | levelmin = 9 5 | levelmax = 9 6 | overlap = 4 7 | align_top = no 8 | baryons = no 9 | use_2LPT = no 10 | use_LLA = no 11 | periodic_TF = yes 12 | 13 | 14 | [cosmology] 15 | Omega_m = 0.276 16 | Omega_L = 0.724 17 | w0 = -1.0 18 | wa = 0.0 19 | Omega_b = 0.045 20 | H0 = 70.3 21 | sigma_8 = 0.811 22 | nspec = 0.961 23 | transfer = eisenstein 24 | 25 | [random] 26 | seed[9] = 34567 27 | 28 | [output] 29 | ##generic MUSIC data format (used for testing) 30 | ##requires HDF5 installation and HDF5 enabled in Makefile 31 | format = generic 32 | filename = ics_nozoom.hdf5 33 | 34 | [poisson] 35 | fft_fine = yes 36 | accuracy = 1e-5 37 | pre_smooth = 3 38 | post_smooth = 3 39 | smoother = gs 40 | laplace_order = 6 41 | grad_order = 6 42 | 43 | -------------------------------------------------------------------------------- /MUSIC/defaults.hh: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | defaults.hh - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | 12 | #ifndef __DEFAULTS_HH 13 | #define __DEFAULTS_HH 14 | 15 | #include 16 | #include 17 | 18 | struct default_conf{ 19 | std::string sec; 20 | std::string tag; 21 | std::string val; 22 | default_conf( std::string sec_, std::string tag_, std::string val_ ) 23 | : sec(sec_), tag(tag_), val(val_) 24 | { } 25 | }; 26 | 27 | 28 | class default_options{ 29 | protected: 30 | std::map def; 31 | public: 32 | default_options(); 33 | 34 | template 35 | void query( std::string tag ) 36 | {} 37 | 38 | }; 39 | 40 | extern default_options defaults; 41 | 42 | 43 | #endif //__DEFAULTS_HH 44 | 45 | -------------------------------------------------------------------------------- /janWorkflow/rdMeta.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from ruamel.yaml import YAML 3 | from pprint import pprint 4 | import sys 5 | 6 | def read_yaml(yaml_fn,verb=1): 7 | data={} 8 | 9 | if verb: print(' read yaml:',yaml_fn) 10 | with open(yaml_fn) as yamlfile: 11 | for key, val in YAML().load(yamlfile).items(): 12 | 
print('hpar:',key, val) 13 | data[key]=val 14 | assert len(data['namePar']) == len(data['unitPar']) 15 | assert len(data['physPar']) == len(data['unitPar']) 16 | 17 | return data 18 | 19 | 20 | 21 | # = = = = = = = = = = = = = 22 | # = = = = = = = = = = = = = 23 | if __name__ == '__main__': 24 | 25 | print('inp num args:',len(sys.argv)) 26 | #ymlF='../janWorkflow/out/cosmoMeta.yaml' 27 | ymlF='./cosmoMeta.yaml' 28 | if len(sys.argv)>1: 29 | ymlF=sys.argv[1] 30 | print ("read YAML from ",ymlF,' and pprint it:') 31 | 32 | blob=read_yaml(ymlF) 33 | pprint(blob) 34 | 35 | assert len(blob['namePar']) == len(blob['unitPar']) 36 | assert len(blob['physPar']) == len(blob['unitPar']) 37 | 38 | 39 | -------------------------------------------------------------------------------- /MUSIC/Numerics.cc: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | numerics.cc - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | #ifdef WITH_MPI 12 | #ifdef MANNO 13 | #include 14 | #else 15 | #include 16 | #endif 17 | #endif 18 | #include 19 | #include "Numerics.hh" 20 | 21 | 22 | #ifndef REL_PRECISION 23 | #define REL_PRECISION 1.e-5 24 | #endif 25 | 26 | real_t integrate( double (* func) (double x, void * params), double a, double b, void *params ) 27 | { 28 | gsl_function F; 29 | F.function = func; 30 | F.params = params; 31 | 32 | double result; 33 | double error; 34 | 35 | 36 | gsl_set_error_handler_off (); 37 | gsl_integration_workspace *w = gsl_integration_workspace_alloc(100000); 38 | gsl_integration_qag( &F, a, b, 0, REL_PRECISION, 100000, 6, w, &result, &error ); 39 | 40 | 41 | gsl_integration_workspace_free(w); 42 | 43 | gsl_set_error_handler(NULL); 44 | 45 | if( error/result > REL_PRECISION ) 46 | std::cerr << " - Warning: no convergence in function 'integrate', rel. error=" << error/result << std::endl; 47 | 48 | return (real_t)result; 49 | } 50 | -------------------------------------------------------------------------------- /janWorkflow/batchMakeUniverse.slr: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | #SBATCH --nodes=4 --time=28:00 # takes 18 min wall time 3 | #SBATCH --account m3363 # ExaLearn, Peter Nugent, ECP-AD-2.2.6 4 | #SBATCH --qos=regular -J uni-reg 5 | #-SBATCH --qos=debug -J univ-dbg 6 | #-SBATCH --qos=premium -J univ-pr 7 | #SBATCH -C haswell 8 | 9 | #SBATCH --array=9-9 10 | 11 | arrIdx=${SLURM_ARRAY_TASK_ID} 12 | 13 | coreN='cosmoUniverse4d/'${SLURM_ARRAY_JOB_ID} 14 | codeList=" makeOneUniverse.sh *Music*.sh rdMeta.py pycola-OmSiNs-jan.py projectNBody.py sliceBigCube.py ics_template.conf pack_hd5_Pk.py batchMakeUniverse.slr" 15 | date 16 | echo SLURM_CLUSTER_NAME=$SLURM_CLUSTER_NAME numNodes=$SLURM_NNODES 17 | #env|grep SLURM 18 | 19 | srcDir=`pwd` 20 | wrkDir=$CSCRATCH/${coreN}-${arrIdx} 21 | mkdir -p ${wrkDir} 22 | cp -rp $codeList $wrkDir 23 | cd $wrkDir 24 | echo PWD=`pwd` 25 | ls -l $dataH5 26 | 27 | module load python/3.6-anaconda-4.4 28 | module list 29 | ./prepMusic_4par.sh $SLURM_NNODES ./ 30 | 31 | echo 'U: PWD2 '`pwd` 32 | ./rdMeta.py 33 | 34 | echo M_start-`date` 35 | srun --label ./makeOneUniverse.sh 36 | echo M_done-`date` 37 | 38 | # mv slurm log to final destination - it is alwasy a job-array 39 | mv $srcDir/slurm-${SLURM_ARRAY_JOB_ID}_${SLURM_ARRAY_TASK_ID}.out . 
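# Usage sketch (assumed workflow, not part of the original script): submit from
# the directory holding the helper scripts listed in $codeList, e.g.
#   sbatch --array=1-50 batchMakeUniverse.slr
# The command-line --array overrides the "#SBATCH --array=9-9" directive above,
# so a single submission can generate many universes, one per array task, each
# in its own $CSCRATCH/cosmoUniverse4d/<jobid>-<taskid> work directory.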
40 | 41 | 42 | -------------------------------------------------------------------------------- /janWorkflow/bigShovel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -u ; # exit if you try to use an uninitialized variable 3 | set -e ; # bash exits if any statement returns a non-true return value 4 | set -o errexit ; # exit if any statement returns a non-true return value 5 | 6 | 7 | nSleep=1 8 | n=0 9 | date 10 | 11 | inpM=~/prj/cosmoflow-sims/janWorkflow/old3 12 | cd $inpM 13 | for i in $( ls slurm-18625370* ); do 14 | echo item: $i 15 | arrIdx=${i:15:-4} 16 | dirN=/global/cscratch1/sd/balewski/cosmoUniverse4a/18625370-$arrIdx 17 | echo $arrIdx $dirN 18 | 19 | rm -rf $dirN 20 | done 21 | exit 22 | outM=/global/cscratch1/sd/balewski/cosmoData_Jan3/meta/ 23 | for K in {1..50} ; do 24 | echo -n submit K=$K ' ' 25 | cd /global/cscratch1/sd/balewski/cosmoUnivers2/10787415-${K}/out/ 26 | #cd /global/cscratch1/sd/balewski/cosmoUnivers4/15954636-${K}/out/ 27 | name1=`ls *conf` 28 | name2=cosmos_${name1/conf/meta.yaml} 29 | name0=cosmoMeta.yaml 30 | echo $name0 $name1 $name2 31 | cp $name0 $outM/$name2 32 | 33 | 34 | #./pack_tfrec.py --npzPath /global/cscratch1/sd/balewski/cosmoUnivers2/10787415-${K}//out/ -X --tfrPath /global/cscratch1/sd/balewski/cosmoData_JanX/ 35 | n=$[ $n +1] 36 | #sleep $nSleep 37 | done 38 | date 39 | echo sent $n jobs 40 | -------------------------------------------------------------------------------- /MUSIC/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | MUSIC - Multi-scale Initial Conditions for Cosmological Simulations 3 | 4 | Copyright(c) 2011 by Oliver Hahn. All rights reserved. 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to use 8 | the Software under the following conditions: 9 | 10 | 1. Redistributions of the Software must contain the above copyright 11 | notice, this list of conditions and the following disclaimers. 12 | 13 | 2. Scientific publications that make use of results obtained with 14 | the Software must properly reference that the 'MUSIC' software 15 | has been used and include a reference to Hahn & Abel (2011), 16 | published in MNRAS Vol 415(3), the paper describing the 17 | algorithms used in the Software. 18 | 19 | 20 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 23 | CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 24 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 25 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH 26 | THE SOFTWARE. -------------------------------------------------------------------------------- /pycola/OmSiNs/copyFiles.py: -------------------------------------------------------------------------------- 1 | import os, shutil 2 | 3 | ## need to rename all the files in the numbering scheme required by the CosmoFlow IO sceme. 4 | ## Previously, the dir had a useful name containing the comso params used in the sim 5 | ## Now, this dir needs to be re-named so that it has a number, which corresponds to the position in the list of cosmo params 6 | ## I know, this is not very sensible. 
But then the CosmoFlow IO code picks random sets of sims + sub-volumes to make the TFRecord files, and it's easier to do that when the file names are the numbers. 7 | 8 | ### In case you don't want to run this over all the files you have 9 | start = 1000 10 | stop = 1010 11 | 12 | for line in open("list.txt"): 13 | if "#" in line: 14 | continue 15 | cols = line.split(",") 16 | num = cols[0] 17 | om = cols[1] 18 | si = cols[2] 19 | ns = cols[3][:-1] 20 | print line, num, om, si, ns 21 | 22 | if int(num)stop: 23 | print"skipping!" 24 | continue 25 | 26 | outdir = "./"+num 27 | if os.path.exists(outdir): 28 | if len(os.listdir(outdir))==8: 29 | print "done this one!" 30 | continue 31 | else: 32 | shutil.rmtree(outdir) 33 | 34 | ## now move the files 35 | indir = "pycola_"+om+"_"+si+"_"+ns 36 | shutil.copytree(indir, outdir) 37 | 38 | -------------------------------------------------------------------------------- /MUSIC/plugins/nyx_plugin/Make.ic: -------------------------------------------------------------------------------- 1 | 2 | COMP = Intel 3 | FCOMP = Intel 4 | DEBUG = FALSE 5 | include $(BOXLIB_HOME)/Tools/C_mk/Make.defs 6 | NYX = TRUE 7 | VERBOSE = TRUE 8 | 9 | DEFINES += -DHAVE_BOXLIB 10 | 11 | #These are the directories in Nyx 12 | 13 | Bpack += $(TOP)/Make.package 14 | Blocs += $(TOP) 15 | 16 | #include $(TOP)/Make.package 17 | 18 | include $(Bpack) 19 | INCLUDE_LOCATIONS += $(Blocs) 20 | VPATH_LOCATIONS += $(Blocs) 21 | 22 | #These are the directories in BoxLib 23 | 24 | Pdirs := C_BaseLib 25 | Ppack += $(foreach dir, $(Pdirs), $(BOXLIB_HOME)/Src/$(dir)/Make.package) 26 | Plocs += $(foreach dir, $(Pdirs), $(BOXLIB_HOME)/Src/$(dir)) 27 | 28 | include $(Ppack) 29 | INCLUDE_LOCATIONS += $(Plocs) 30 | VPATH_LOCATIONS += $(Plocs) 31 | 32 | 33 | INCLUDE_LOCATIONS += $(BOXLIB_HOME)/Src/F_BaseLib 34 | VPATH_LOCATIONS += $(BOXLIB_HOME)/Src/F_BaseLib 35 | 36 | 37 | 38 | vpath %.c . $(VPATH_LOCATIONS) 39 | vpath %.cpp . $(VPATH_LOCATIONS) 40 | vpath %.h . $(VPATH_LOCATIONS) 41 | vpath %.H . $(VPATH_LOCATIONS) 42 | vpath %.F . $(VPATH_LOCATIONS) 43 | vpath %.f90 . $(VPATH_LOCATIONS) 44 | vpath %.f . $(VPATH_LOCATIONS) 45 | vpath %.fi . $(VPATH_LOCATIONS) 46 | 47 | ifeq ($(IN_MUSIC), NO) 48 | all: $(objForExecs) 49 | @echo BoxLib compiled ... 50 | @touch ../../output.cc 51 | 52 | include $(BOXLIB_HOME)/Tools/C_mk/Make.rules 53 | 54 | endif 55 | 56 | -------------------------------------------------------------------------------- /MUSIC/defaults.cc: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | defaults.cc - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | #include "defaults.hh" 12 | 13 | #define ADD_DEF(x,a,b,c) def.insert(std::pair(x,default_conf(a,b,c))); 14 | 15 | default_options defaults; 16 | 17 | default_options::default_options() 18 | { 19 | //... [setup] ... 
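	// Each ADD_DEF(key, section, tag, value) line below registers one default
	// MUSIC configuration value: it inserts a default_conf(section, tag, value)
	// into the "def" map declared in defaults.hh, keyed by the short name "key".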
20 | ADD_DEF("align_top", "setup", "align_top", "yes"); 21 | ADD_DEF("baryons", "setup", "baryons", "no"); 22 | ADD_DEF("center_v", "setup", "center_velocities","no"); 23 | ADD_DEF("deconvolve", "setup", "deconvolve", "yes"); 24 | ADD_DEF("exact_shotnoise", "setup", "exact_shotnoise", "yes"); 25 | ADD_DEF("overlap", "setup", "overlap", "8"); 26 | ADD_DEF("padding", "setup", "padding", "16"); 27 | ADD_DEF("periodic_TF", "setup", "periodic_TF", "yes"); 28 | ADD_DEF("use_2LPT", "setup", "use_2LPT", "yes"); 29 | ADD_DEF("use_LLA", "setup", "use_LLA", "no"); 30 | 31 | //... [poisson] ... 32 | ADD_DEF("mgacc", "poisson", "accuracy", "1e-4"); 33 | ADD_DEF("mggrad", "poisson", "grad_order", "6"); 34 | ADD_DEF("mglapl", "poisson", "laplce_order", "6"); 35 | ADD_DEF("fft_fine", "poisson", "fft_fine", "yes"); 36 | ADD_DEF("kspace_poisson", "poisson", "kspace", "no"); 37 | 38 | 39 | //... deprecated 40 | ADD_DEF("avg_fine", "setup", "avg_fine", "no"); 41 | } 42 | 43 | 44 | 45 | 46 | #undef ADD_DEF 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /pycola/example.rst: -------------------------------------------------------------------------------- 1 | The following example is contained in :download:`example.py`. It 2 | reads in a MUSIC-generated initial conditions at first order; 3 | calculates the second-order initial displacements on the full box; 4 | then runs COLA and finally outputs a figure containing a density 5 | slice. The script needs `matplotlib `_ to be 6 | installed. 7 | 8 | To run the script, first generate MUSIC initial conditions with the 9 | included configuration file:: 10 | 11 | MUSIC ics.conf 12 | 13 | Then change the variable ``music_file`` below to point to the MUSIC 14 | snapshot, and then execute the example script by issuing:: 15 | 16 | python ./example.py 17 | 18 | This example script was used with minor modifications in making the 19 | figures for the paper. It fits comfortably on 24GB ram. If that is not 20 | available, decreasing ``gridscale`` to 1, reduces ram consumption to 21 | 11GB at the cost of reducing force resolution and producing artifacts. 22 | 23 | Most of the time, the script spends on calculating the second-order 24 | displacement field for the whole box, i.e. not on the COLA calculation. 25 | On the 4-core laptop the calculations for the paper were performed, the 26 | COLA calculation itself takes about 40 seconds. 27 | 28 | The script produces the following figure: 29 | 30 | .. figure:: slab.jpg 31 | :align: center 32 | :width: 5cm 33 | :height: 5cm 34 | 35 | .. literalinclude:: example.py 36 | :lines: 25- 37 | 38 | .. automodule:: example 39 | .. :members: 40 | .. 
:undoc-members: 41 | :show-inheritance: 42 | 43 | 44 | -------------------------------------------------------------------------------- /MUSIC/output.cc: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | output.cc - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | #include "output.hh" 12 | 13 | 14 | std::map< std::string, output_plugin_creator *>& 15 | get_output_plugin_map() 16 | { 17 | static std::map< std::string, output_plugin_creator* > output_plugin_map; 18 | return output_plugin_map; 19 | } 20 | 21 | void print_output_plugins() 22 | { 23 | std::map< std::string, output_plugin_creator *>& m = get_output_plugin_map(); 24 | 25 | std::map< std::string, output_plugin_creator *>::iterator it; 26 | it = m.begin(); 27 | std::cout << " - Available output plug-ins:\n"; 28 | while( it!=m.end() ) 29 | { 30 | if( (*it).second ) 31 | std::cout << "\t\'" << (*it).first << "\'\n"; 32 | ++it; 33 | } 34 | 35 | } 36 | 37 | output_plugin *select_output_plugin( config_file& cf ) 38 | { 39 | std::string formatname = cf.getValue( "output", "format" ); 40 | 41 | output_plugin_creator *the_output_plugin_creator 42 | = get_output_plugin_map()[ formatname ]; 43 | 44 | if( !the_output_plugin_creator ) 45 | { 46 | std::cerr << " - Error: output plug-in \'" << formatname << "\' not found." << std::endl; 47 | print_output_plugins(); 48 | throw std::runtime_error("Unknown output plug-in"); 49 | 50 | }else 51 | std::cout << " - Selecting output plug-in \'" << formatname << "\'..." << std::endl; 52 | 53 | output_plugin *the_output_plugin 54 | = the_output_plugin_creator->create( cf ); 55 | 56 | return the_output_plugin; 57 | } 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /MUSIC/plugins/nyx_plugin/GNUmakefile: -------------------------------------------------------------------------------- 1 | TOP = ${PWD} 2 | PRECISION = DOUBLE 3 | DEBUG = FALSE 4 | DIM = 3 5 | COMP = Intel 6 | FCOMP = Intel 7 | USE_MPI = FALSE 8 | USE_OMP = FALSE 9 | 10 | IN_MUSIC = NO 11 | 12 | #hmm....awful (needed for general.hh) 13 | ifeq ($(FFTW3), yes) 14 | DEFINES += -DFFTW3 15 | endif 16 | 17 | ifeq ($(SINGLE), yes) 18 | PRECISION = SINGLE 19 | endif 20 | 21 | 22 | include $(BOXLIB_HOME)/Tools/C_mk/Make.defs 23 | NYX = TRUE 24 | VERBOSE = TRUE 25 | 26 | DEFINES += -DHAVE_BOXLIB 27 | 28 | #These are the directories in Nyx 29 | 30 | Bpack += $(TOP)/Make.package 31 | Blocs += $(TOP) 32 | 33 | #include $(TOP)/Make.package 34 | 35 | include $(Bpack) 36 | INCLUDE_LOCATIONS += $(Blocs) 37 | VPATH_LOCATIONS += $(Blocs) 38 | 39 | #These are the directories in BoxLib 40 | 41 | Pdirs := C_BaseLib 42 | Ppack += $(foreach dir, $(Pdirs), $(BOXLIB_HOME)/Src/$(dir)/Make.package) 43 | Plocs += $(foreach dir, $(Pdirs), $(BOXLIB_HOME)/Src/$(dir)) 44 | 45 | include $(Ppack) 46 | INCLUDE_LOCATIONS += $(Plocs) 47 | VPATH_LOCATIONS += $(Plocs) 48 | 49 | 50 | INCLUDE_LOCATIONS += $(BOXLIB_HOME)/Src/F_BaseLib 51 | VPATH_LOCATIONS += $(BOXLIB_HOME)/Src/F_BaseLib 52 | 53 | 54 | 55 | vpath %.c . $(VPATH_LOCATIONS) 56 | vpath %.cpp . $(VPATH_LOCATIONS) 57 | vpath %.h . $(VPATH_LOCATIONS) 58 | vpath %.H . $(VPATH_LOCATIONS) 59 | vpath %.F . $(VPATH_LOCATIONS) 60 | vpath %.f90 . $(VPATH_LOCATIONS) 61 | vpath %.f . $(VPATH_LOCATIONS) 62 | vpath %.fi . 
$(VPATH_LOCATIONS) 63 | 64 | ifeq ($(IN_MUSIC), NO) 65 | all: $(objForExecs) 66 | @echo BoxLib compiled ... 67 | @touch ../../output.cc 68 | 69 | include $(BOXLIB_HOME)/Tools/C_mk/Make.rules 70 | 71 | endif 72 | -------------------------------------------------------------------------------- /MUSIC/README.md: -------------------------------------------------------------------------------- 1 | MUSIC - multi-scale cosmological initial conditions 2 | =================================================== 3 | 4 | MUSIC is a computer program to generate nested grid initial conditions for 5 | high-resolution "zoom" cosmological simulations. A detailed description 6 | of the algorithms can be found in [Hahn & Abel (2011)][1]. You can 7 | download the user's guide [here][3]. Please consider joining the 8 | [user mailing list][2]. 9 | 10 | Current MUSIC key features are: 11 | 12 | - Supports output for RAMSES, ENZO, Arepo, Gadget-2/3, ART, Pkdgrav/Gasoline 13 | and NyX via plugins. New codes can be added. 14 | 15 | - Support for first (1LPT) and second order (2LPT) Lagrangian perturbation 16 | theory, local Lagrangian approximation (LLA) for baryons with grid codes. 17 | 18 | - Pluggable transfer functions, currently CAMB, Eisenstein&Hu, BBKS, Warm 19 | Dark Matter variants. Distinct baryon+CDM fields. 20 | 21 | - Minimum bounding ellipsoid and convex hull shaped high-res regions supported 22 | with most codes, supports refinement mask generation for RAMSES. 23 | 24 | - Parallelized with OpenMP 25 | 26 | - Requires FFTW (v2 or v3), GSL (and HDF5 for output for some codes) 27 | 28 | 29 | This program is distributed in the hope that it will be useful, but 30 | WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 31 | or FITNESS FOR A PARTICULAR PURPOSE. By downloading and using MUSIC, you 32 | agree to the LICENSE, distributed with the source code in a text 33 | file of the same name. 34 | 35 | 36 | [1]: http://arxiv.org/abs/1103.6031 37 | [2]: https://groups.google.com/forum/#!forum/cosmo_music 38 | [3]: https://bitbucket.org/ohahn/music/downloads/MUSIC_Users_Guide.pdf 39 | -------------------------------------------------------------------------------- /MUSIC/plugins/transfer_inflation.cc: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | tranfer_inflation.cc - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | This program is free software: you can redistribute it and/or modify 10 | it under the terms of the GNU General Public License as published by 11 | the Free Software Foundation, either version 3 of the License, or 12 | (at your option) any later version. 13 | 14 | This program is distributed in the hope that it will be useful, 15 | but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | GNU General Public License for more details. 18 | 19 | You should have received a copy of the GNU General Public License 20 | along with this program. If not, see . 
21 | 22 | */ 23 | 24 | #include "transfer_function.hh" 25 | 26 | class transfer_inflation_plugin : public transfer_function_plugin 27 | { 28 | protected: 29 | 30 | double ns2_; 31 | 32 | public: 33 | 34 | transfer_inflation_plugin( config_file& cf ) 35 | : transfer_function_plugin( cf ) 36 | { 37 | ns2_ = 0.5*cf.getValue("cosmology","nspec"); 38 | tf_distinct_ = true; 39 | } 40 | 41 | ~transfer_inflation_plugin(){ }; 42 | 43 | double compute( double k, tf_type type=baryon) 44 | { 45 | return pow(k,ns2_); 46 | } 47 | 48 | double get_kmax( void ) 49 | { 50 | return 1e10; 51 | } 52 | 53 | double get_kmin( void ) 54 | { 55 | return 1e-30; 56 | } 57 | 58 | }; 59 | 60 | 61 | namespace{ 62 | transfer_function_plugin_creator_concrete< transfer_inflation_plugin > creator("inflation"); 63 | } 64 | 65 | -------------------------------------------------------------------------------- /pycola/README.md: -------------------------------------------------------------------------------- 1 | **Author:** Svetlin V. Tassev (Harvard U, Princeton U) 2 | 3 | **Initial public release date:** Jul 3,2014 4 | 5 | pyCOLA is a multithreaded Python/Cython N-body code, implementing the 6 | Comoving Lagrangian Acceleration (COLA) method in the temporal and 7 | spatial domains. 8 | 9 | pyCOLA is based on the following two papers: 10 | 11 | 1. Solving Large Scale Structure in Ten Easy Steps with 12 | COLA, S. Tassev, M. Zaldarriaga, D. J. Eisenstein, Journal of 13 | Cosmology and Astroparticle Physics, 06, 036 14 | (2013), [arXiv:1301.0322](http://arxiv.org/abs/arXiv:1301.0322) 15 | 16 | 2. sCOLA: The N-body COLA Method Extended to the Spatial Domain, S. Tassev, D. 17 | J. Eisenstein, B. D. Wandelt, M. Zaldarriaga, (2015) 18 | 19 | If you use pyCOLA or the COLA method in the spatial and/or time domains for scientific work, we kindly ask you to reference the papers above. 20 | 21 | * pyCOLA is free and open-source software, distributed under the GPLv3 license. 22 | 23 | * To build the code, you need to run: 24 | 25 | 26 | ``` 27 | #!bash 28 | 29 | python setup.py build_ext --inplace 30 | ``` 31 | 32 | 33 | * To compile successfully, you need to have the following packages installed: [Python 2.7](https://www.python.org/), [Cython](http://cython.org/), [NumPy](http://www.numpy.org/), [SciPy](http://www.scipy.org/),[pyFFTW](https://hgomersall.github.io/pyFFTW/index.html), [h5py](http://www.h5py.org/), as well as their respective dependencies. Note that pyFFTW v0.9.2 does not support large arrays, so one needs to install the development version from [github](https://github.com/hgomersall/pyFFTW), where the bug has been fixed. 34 | 35 | * Read the manual [here](https://bitbucket.org/tassev/pycola/downloads/pyCOLA.pdf). 
-------------------------------------------------------------------------------- /MUSIC/transfer_function.cc: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | transfer_function.cc - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | 12 | #include "transfer_function.hh" 13 | 14 | 15 | std::map< std::string, transfer_function_plugin_creator *>& 16 | get_transfer_function_plugin_map() 17 | { 18 | static std::map< std::string, transfer_function_plugin_creator* > transfer_function_plugin_map; 19 | return transfer_function_plugin_map; 20 | } 21 | 22 | void print_transfer_function_plugins() 23 | { 24 | std::map< std::string, transfer_function_plugin_creator *>& m = get_transfer_function_plugin_map(); 25 | std::map< std::string, transfer_function_plugin_creator *>::iterator it; 26 | it = m.begin(); 27 | std::cout << " - Available transfer function plug-ins:\n"; 28 | while( it!=m.end() ) 29 | { 30 | if( (*it).second ) 31 | std::cout << "\t\'" << (*it).first << "\'\n"; 32 | ++it; 33 | } 34 | 35 | 36 | } 37 | 38 | transfer_function_plugin *select_transfer_function_plugin( config_file& cf ) 39 | { 40 | std::string tfname = cf.getValue( "cosmology", "transfer" ); 41 | 42 | transfer_function_plugin_creator *the_transfer_function_plugin_creator 43 | = get_transfer_function_plugin_map()[ tfname ]; 44 | 45 | if( !the_transfer_function_plugin_creator ) 46 | { 47 | std::cerr << " - Error: transfer function plug-in \'" << tfname << "\' not found." << std::endl; 48 | LOGERR("Invalid/Unregistered transfer function plug-in encountered : %s",tfname.c_str() ); 49 | print_transfer_function_plugins(); 50 | throw std::runtime_error("Unknown transfer function plug-in"); 51 | 52 | }else 53 | { 54 | std::cout << " - Selecting transfer function plug-in \'" << tfname << "\'..." 
<< std::endl; 55 | LOGUSER("Selecting transfer function plug-in : %s",tfname.c_str() ); 56 | } 57 | 58 | transfer_function_plugin *the_transfer_function_plugin 59 | = the_transfer_function_plugin_creator->create( cf ); 60 | 61 | return the_transfer_function_plugin; 62 | } 63 | 64 | -------------------------------------------------------------------------------- /MUSIC/Numerics.hh: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | numerics.hh - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | #ifndef __NUMERICS_HH 12 | #define __NUMERICS_HH 13 | 14 | #ifdef WITH_MPI 15 | #ifdef MANNO 16 | #include 17 | #else 18 | #include 19 | #endif 20 | #endif 21 | 22 | #include 23 | #include 24 | #include 25 | 26 | #include 27 | #include 28 | #include "general.hh" 29 | 30 | 31 | 32 | real_t integrate( double (* func) (double x, void * params), double a, double b, void *params=NULL); 33 | 34 | typedef __attribute__((__may_alias__)) int aint; 35 | 36 | inline float fast_log2 (float val) 37 | { 38 | //if( sizeof(int) != sizeof(float) ) 39 | // throw std::runtime_error("fast_log2 will fail on this system!!"); 40 | aint * const exp_ptr = reinterpret_cast (&val); 41 | aint x = *exp_ptr; 42 | const int log_2 = ((x >> 23) & 255) - 128; 43 | x &= ~(255 << 23); 44 | x += 127 << 23; 45 | *exp_ptr = x; 46 | 47 | val = ((-1.0f/3) * val + 2) * val - 2.0f/3; // (1) 48 | 49 | return (val + log_2); 50 | } 51 | 52 | inline float fast_log (const float &val) 53 | { 54 | return (fast_log2 (val) * 0.69314718f); 55 | } 56 | 57 | inline float fast_log10 (const float &val) 58 | { 59 | return (fast_log2 (val) * 0.3010299956639812f); 60 | } 61 | 62 | inline unsigned locate( const double x, const std::vector vx ) 63 | { 64 | long unsigned ju,jm,jl; 65 | bool ascnd=(vx[vx.size()-1]>=vx[0]); 66 | jl = 0; 67 | ju = vx.size()-1; 68 | while( ju-jl > 1 ) { 69 | jm = (ju+jl)>>1; 70 | if( (x >= vx[jm]) == ascnd ) 71 | jl = jm; 72 | else 73 | ju = jm; 74 | } 75 | return std::max((long unsigned)0,std::min((long unsigned)(vx.size()-2),(long unsigned)jl)); 76 | } 77 | 78 | 79 | inline real_t linint( const double x, const std::vector& xx, const std::vector& yy ) 80 | { 81 | unsigned i = locate(x,xx); 82 | 83 | if( x=xx[xx.size()-1] ) 86 | return yy[yy.size()-1]; 87 | double a = 1.0/(xx[i+1]-xx[i]); 88 | double dy = (yy[i+1]-yy[i])*a; 89 | double y0 = (yy[i]*xx[i+1]-xx[i]*yy[i+1])*a; 90 | return dy*x+y0; 91 | } 92 | 93 | 94 | #endif 95 | 96 | 97 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | CosmoFlow simulations Copyright (c) 2018, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | 7 | (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
8 | 9 | (3) Neither the name of the University of California, Lawrence Berkeley National Laboratory, U.S. Dept. of Energy nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 12 | 13 | You are under no obligation whatsoever to provide any bug fixes, patches, or upgrades to the features, functionality or performance of the source code ("Enhancements") to anyone; however, if you choose to make your Enhancements available either publicly, or directly to Lawrence Berkeley National Laboratory, without imposing a separate written license agreement for such Enhancements, then you hereby grant the following license: a non-exclusive, royalty-free perpetual license to install, use, modify, prepare derivative works, incorporate into other computer software, distribute, and sublicense such enhancements or derivative works thereof, in binary and source code form. 14 | -------------------------------------------------------------------------------- /MUSIC/plugins/transfer_bbks.cc: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | transfer_bbks.cc - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | #include "transfer_function.hh" 12 | 13 | //! Implementation of class TransferFunction_BBKS for the BBKS transfer function 14 | /*! 15 | This class implements the analytical fit to the matter transfer 16 | function by Bardeen, Bond, Kaiser & Szalay (BBKS). 17 | ( see Bardeen et al. (1986) ) 18 | */ 19 | class transfer_bbks_plugin : public transfer_function_plugin{ 20 | private: 21 | double m_Gamma; 22 | 23 | public: 24 | //! Constructor 25 | /*! 
26 | \param aCosm Structure of type Cosmology carrying the cosmological parameters 27 | \param bSugiyama flag whether the Sugiyama (1995) correction shall be applied (default=true) 28 | */ 29 | transfer_bbks_plugin( config_file& cf ) 30 | : transfer_function_plugin( cf ) 31 | { 32 | double Omega0 = cosmo_.Omega_m; 33 | double FreeGamma = -1.0; 34 | 35 | bool bSugiyama(true); 36 | 37 | try{ 38 | bSugiyama= pcf_->getValue( "cosmology", "sugiyama_corr" ); 39 | }catch(...){ 40 | throw std::runtime_error("Error in \'tranfer_bbks_plugin\': need to specify \'[cosmology]/sugiyama_corr = [true/false]"); 41 | } 42 | 43 | FreeGamma = pcf_->getValueSafe( "cosmology", "gamma", FreeGamma ); 44 | 45 | if( FreeGamma <= 0.0 ){ 46 | m_Gamma = Omega0*0.01*cosmo_.H0; 47 | if( bSugiyama ) 48 | m_Gamma *= exp(-cosmo_.Omega_b*(1.0+sqrt(2.0*0.01*cosmo_.H0)/Omega0)); 49 | }else 50 | m_Gamma = FreeGamma; 51 | 52 | tf_distinct_ = false; 53 | tf_withvel_ = false; 54 | 55 | } 56 | 57 | //! computes the value of the BBKS transfer function for mode k (in h/Mpc) 58 | inline double compute( double k, tf_type type ){ 59 | double q, f1, f2; 60 | 61 | if(k < 1e-7 ) 62 | return 1.0; 63 | 64 | q = k/(m_Gamma); 65 | f1 = log(1.0 + 2.34*q)/(2.34*q); 66 | f2 = 1.0 + q*(3.89 + q*(259.21 + q*(162.771336 + q*2027.16958081))); 67 | 68 | return f1/sqrt(sqrt(f2)); 69 | 70 | } 71 | 72 | inline double get_kmin( void ){ 73 | return 1e-4; 74 | } 75 | 76 | inline double get_kmax( void ){ 77 | return 1.e4; 78 | } 79 | }; 80 | 81 | 82 | namespace{ 83 | transfer_function_plugin_creator_concrete< transfer_bbks_plugin > creator("bbks"); 84 | } 85 | 86 | -------------------------------------------------------------------------------- /MUSIC/LICENSE.md: -------------------------------------------------------------------------------- 1 | CosmoFlow simulations Copyright (c) 2018, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | 7 | (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | 9 | (3) Neither the name of the University of California, Lawrence Berkeley National Laboratory, U.S. Dept. of Energy nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 12 | 13 | You are under no obligation whatsoever to provide any bug fixes, patches, or upgrades to the features, functionality or performance of the source code ("Enhancements") to anyone; however, if you choose to make your Enhancements available either publicly, or directly to Lawrence Berkeley National Laboratory, without imposing a separate written license agreement for such Enhancements, then you hereby grant the following license: a non-exclusive, royalty-free perpetual license to install, use, modify, prepare derivative works, incorporate into other computer software, distribute, and sublicense such enhancements or derivative works thereof, in binary and source code form. 14 | -------------------------------------------------------------------------------- /janWorkflow/makeOneUniverse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -u ; # exit if you try to use an uninitialized variable 4 | set -e ; # bash exits if any statement returns a non-true return value 5 | set -o errexit ; # exit if any statement returns a non-true return value 6 | 7 | ( sleep 40; echo "TTTTTTTTT1"; date; hostname; free -g; top ibn1)& 8 | ( sleep 500; echo "TTTTTTTT2"; date; hostname; free -g; top ibn1)& 9 | 10 | procIdx=${SLURM_PROCID} 11 | module unload darshan 12 | module load cray-fftw gsl cray-hdf5 13 | 14 | 15 | echo U: PWD1= `pwd` 16 | coreStr=`grep coreStr ./cosmoMeta.yaml | awk '{printf "%s", $3 }' ` 17 | musicConf=${coreStr}_${procIdx}.conf 18 | echo 'U: prep MUSIC procIdx='$procIdx 'musicCon='$musicConf 19 | 20 | outPath=out_$procIdx 21 | mkdir $outPath 22 | ./runMusic.sh $musicConf $outPath 23 | 24 | module unload python/3.6-anaconda-4.4 25 | module load python/2.7-anaconda-4.4 26 | source activate cola_jan1 27 | 28 | 29 | echo U: start PyCola coreStr=$coreStr procIdx=$procIdx 30 | #On Haswell: 31 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/common/software/fftw/3.3.4/hsw/gnu/lib/ 32 | #OR on Edison: 33 | #export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/common/software/fftw/3.3.4/gnu/lib/ 34 | 35 | time ./pycola-OmSiNs-jan.py $outPath cosmoMeta.yaml 36 | 37 | echo U: start projection 38 | time ./projectNBody.py $outPath ./cosmoMeta.yaml 39 | ls -l $outPath 40 | echo U: start slicing 41 | ./sliceBigCube.py $outPath ./cosmoMeta.yaml 42 | 43 | echo U: done $outPath 44 | touch $outPath/done.job 45 | ls -lh $outPath/*hdf5 46 | rm $outPath/*hdf5 47 | rm $outPath/*npz 48 | rm $outPath/wnoise*bin 49 | 50 | 51 | exit 52 | 53 | 54 | echo U: optional produce input for Pk 55 | module unload python/2.7-anaconda-4.4 56 | module load python/3.6-anaconda-4.4 57 | time ./pack_hd5_Pk.py $outPath/$coreStr 58 | 59 | # this part will fail because it is calling srun inside srun 60 | srun -n 32 -c 2 --cpu_bind=cores /project/projectdirs/mpccc/balewski/cosmo-gimlet2/apps/matter_pk/matter_pk.ex $outPath/${coreStr}.nyx.hdf5 $outPath/${coreStr} 61 | 62 | #gnuplot> set logscale 63 | #gnuplot> plot "ics_2018-12_a12383763_rhom_ps3d.txt" u 3:4 w lines 64 | 
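# The heredoc below automates the manual gnuplot commands sketched above: it
# renders the matter power spectrum text file written by matter_pk.ex into a
# log-log PNG next to the ${coreStr}_rhom_ps3d.txt output.  Note that, like the
# matter_pk.ex call, this block sits after the unconditional "exit" above, so
# it only runs if that exit is removed.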
echo U: plot Pk 65 | gnuplot <<-EOFMarker 66 | set title "Pk for ${coreStr}" font ",14" textcolor rgbcolor "royalblue" 67 | set pointsize 1 68 | set logscale 69 | set terminal png 70 | set output "$outPath/${coreStr}_rhom_ps3d.png" 71 | plot "$outPath/${coreStr}_rhom_ps3d.txt" u 3:4 w lines 72 | EOFMarker 73 | 74 | -------------------------------------------------------------------------------- /janWorkflow/projectNBody.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import print_function 3 | __author__ = "Jan Balewski" 4 | __email__ = "janstar1122@gmail.com" 5 | 6 | import numpy as np 7 | #import os 8 | import math 9 | import sys 10 | from rdMeta import read_yaml as read_yaml 11 | 12 | ### This code will take the output of one NBody simulation (ie from the pycola code), and split it into ? sub-volumes, and histogram them. 13 | 14 | 15 | ######## Loop over the files! 16 | def projectOne(infile,boxlength,coreName): 17 | nbins=512 18 | histFile=coreName+'_dim%d_full'%nbins 19 | print('input=',infile,'boxlength=',boxlength,' nbins=',nbins,'histFile=',histFile) 20 | ### First, read in the px/py/pz from the pycola output file 21 | data = np.load(infile) 22 | 23 | px = data['px'] 24 | py = data['py'] 25 | pz = data['pz'] 26 | 27 | print ('pxyz: ',px[0][0][0], py[0][0][0], pz[0][0][0]) 28 | 29 | 30 | #### Try using this hp.histogramdd function... 31 | ### For this I need to turn the particl elists into coord lists, 32 | ### so ( (px[i][j][k], py[i][j][k], pz[i][j][k]), ....) 33 | pxf = np.ndarray.flatten(px) 34 | pyf = np.ndarray.flatten(py) 35 | pzf = np.ndarray.flatten(pz) 36 | 37 | print ('pxf.shape', pxf.shape) 38 | print ('pxf sample', pxf[0], pyf[0], pzf[0]) 39 | print ('pxf min/max', pxf.min(), pxf.max()) 40 | 41 | ### so the flattening is working. Now make this into a 3d array... 42 | ps = np.vstack( (pxf, pyf, pzf) ).T 43 | 44 | del(pxf); del(pyf); del(pzf) 45 | 46 | print ("one big vector list ", ps.shape, ps[77,:],'\naccumulate 3D histo...') 47 | 48 | ## OK! Then this is indeed a big old array. Now I want to histogram it. 
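    ## Memory note (rough estimate, assuming the usual 512^3-particle setup from
    ## ics_template.conf): the stacked float64 coordinate array "ps" is ~3.2 GB
    ## and the 512^3 float64 histogram built below adds another ~1 GB, so this
    ## step dominates the memory footprint of the projection.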
49 | ## this step goes from a set of parcile coordinates to a histogram of particle counts 50 | 51 | H, bins = np.histogramdd(ps, nbins, range=((0,boxlength),(0,boxlength),(0,boxlength)) ) 52 | 53 | print ("histo dshape!", H.shape, H[0][0][0]) 54 | print ('mass sum=%.3g'%np.sum(H)) # takes many seconds, just for QA 55 | np.save(histFile, H) 56 | return H 57 | # - - - - - - - - - - - - - - - - - - - - - - 58 | # - - - - - - - - - - - - - - - - - - - - - - 59 | # - - - - - - - - - - - - - - - - - - - - - - 60 | 61 | from pprint import pprint 62 | if __name__ == '__main__': 63 | 64 | 65 | ioPath=sys.argv[1] 66 | ymlF=sys.argv[2] 67 | print ("read YAML from ",ymlF,' and pprint it:') 68 | 69 | blob=read_yaml(ymlF) 70 | 71 | pprint(blob) 72 | core=blob['coreStr'] 73 | vectFile=ioPath+'/'+core+'.npz' 74 | boxlength = blob['boxlength'] 75 | fnameSeed=vectFile.replace('.npz','') 76 | 77 | bigH=projectOne(vectFile,boxlength,fnameSeed) 78 | print('projection completed') 79 | 80 | -------------------------------------------------------------------------------- /janWorkflow/pack_hd5_Pk.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import sys 3 | (va,vb,vc,vd,ve)=sys.version_info ; assert(va==3) # needes Python3 4 | 5 | import sys 6 | import h5py 7 | import numpy as np 8 | 9 | # - - - - - - - - 10 | def _QA_dataH3(dataH3): 11 | print('got H3:',dataH3.shape) 12 | sum=np.sum(dataH3) 13 | nx,ny,nz=dataH3.shape 14 | fac=sum/nx/ny/nz 15 | print('sum0',sum,'fac=',fac) 16 | assert np.abs(fac-1.) <1.e-3 17 | 18 | 19 | # - - - - - - - - 20 | gDeep=0 21 | 22 | def _list_all(ds,name): 23 | global gDeep 24 | print('list content for %s gDeep=%d'%(name,gDeep)) 25 | gDeep+=1 26 | print('ds atr:') 27 | aN=list(ds.attrs.keys()) 28 | aV=list(ds.attrs.values()) 29 | 30 | print('aN',aN) 31 | print('aV',aV) 32 | 33 | try: 34 | for g in ds.keys(): 35 | _list_all(ds[g],g) 36 | except: 37 | a=1 38 | gDeep-=1 39 | 40 | # - - - - - - - - 41 | def _copy_all(src,dst): 42 | # head 43 | for atr in src.attrs.keys(): 44 | val=src.attrs[atr] 45 | #print('add attr',atr,val) 46 | dst.attrs.create(atr,val) 47 | 48 | # work with data sets 49 | try: 50 | for g in src.keys(): 51 | src[g].copy(src[g],dst,g) 52 | except Exception as e: 53 | print ('error in copy:',e) 54 | pass 55 | 56 | # - - - - - - - - 57 | # - - - - - - - - 58 | if __name__=="__main__": 59 | 60 | if (len(sys.argv)) < 2: 61 | print ('args: coreName missing') 62 | exit() 63 | templHDF5='/global/homes/b/balewski/prj/cosmoflow-sims/janWorkflow/example-z5.hdf5' 64 | 65 | coreName=sys.argv[1] 66 | inpH3=coreName+'_dim512_full.npy' 67 | outHDF5=coreName+'.nyx.hdf5' 68 | print('pack-h5 core:') 69 | 70 | src = h5py.File(templHDF5,'r') 71 | dst = h5py.File(outHDF5,'w') 72 | dataH3 = np.load(inpH3) 73 | 74 | _QA_dataH3(dataH3) 75 | #_list_all(src,'main') 76 | _copy_all(src,dst) 77 | print ('modifying cloned dset') 78 | #dataH3 = np.zeros((512,512,512)) 79 | #dataH3 = np.zeros((12,12,12)) 80 | 81 | del dst['native_fields/matter_density'] 82 | dset = dst.create_dataset('native_fields/matter_density', data=dataH3) 83 | 84 | dd=dst['domain'] 85 | newAtr={'shape':[512,512,512], 'size':[ 512., 512., 512.]} 86 | 87 | for atr in dd.attrs.keys(): 88 | val=dd.attrs[atr] 89 | #print('DD attr',atr,val) 90 | dd.attrs.create(atr,newAtr[atr]) 91 | 92 | # erase cosmo params 93 | dd=dst['universe'] 94 | for atr in dd.attrs.keys(): 95 | dd.attrs.create(atr,0.5) 96 | 97 | #_list_all(dd,'BBB') 98 | try: 99 | src.close() 100 | 
dst.close() 101 | except Exception as e: 102 | print ('error in file closing') 103 | 104 | 105 | -------------------------------------------------------------------------------- /pycola/ic.rst: -------------------------------------------------------------------------------- 1 | .. ######################################################################## 2 | .. ######################################################################## 3 | .. # Copyright (c) 2013,2014 Svetlin Tassev 4 | .. # Princeton University,Harvard University 5 | .. # 6 | .. # This file is part of pyCOLA. 7 | .. # 8 | .. # pyCOLA is free software: you can redistribute it and/or modify 9 | .. # it under the terms of the GNU General Public License as published by 10 | .. # the Free Software Foundation, either version 3 of the License, or 11 | .. # (at your option) any later version. 12 | .. # 13 | .. # pyCOLA is distributed in the hope that it will be useful, 14 | .. # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | .. # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | .. # GNU General Public License for more details. 17 | .. # 18 | .. # You should have received a copy of the GNU General Public License 19 | .. # along with pyCOLA. If not, see . 20 | .. # 21 | .. ######################################################################## 22 | .. ######################################################################## 23 | 24 | Initial conditions 25 | ------------------ 26 | 27 | First order initial conditions for pyCOLA can be calculated using 28 | either MUSIC [MUSIC]_, or internally. With MUSIC, however, one can 29 | do refinements on a region, which is not supported internally. 30 | 31 | The second-order displacement field is generated using a novel 32 | algorithm using force evaluations. See the Algorithm section of 33 | :func:`ic_2lpt_engine` for details. 34 | 35 | .. warning:: 36 | As of MUSIC `rev. 116353436ee6 37 | `_, 38 | the second-order displacement field returned by MUSIC gets 39 | unphysical large-scale deviations when a refined subvolume is 40 | requested (seems to be fine for single grid). Until that problem is 41 | fixed, use the function :func:`ic.ic_2lpt` to get the second order 42 | displacements from the first order result. Update: MUSIC received a 43 | fix with `rev. ed51fcaffee 44 | `_, 45 | which supposedly fixes the problem. 46 | 47 | 48 | 49 | 50 | Initial displacements at first order 51 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 52 | 53 | .. autofunction:: ic.import_music_snapshot 54 | 55 | .. autofunction:: ic.ic_za 56 | 57 | Initial displacements at second order 58 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 59 | 60 | .. autofunction:: ic.ic_2lpt 61 | 62 | .. autofunction:: ic.ic_2lpt_engine 63 | 64 | Obtaining the Eulerian positions 65 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 66 | 67 | .. autofunction:: ic.initial_positions 68 | 69 | 70 | .. automodule:: ic 71 | :undoc-members: 72 | :show-inheritance: 73 | -------------------------------------------------------------------------------- /MUSIC/region_generator.hh: -------------------------------------------------------------------------------- 1 | #ifndef __REGION_GENERATOR_HH 2 | #define __REGION_GENERATOR_HH 3 | 4 | #include 5 | #include "config_file.hh" 6 | 7 | //! Abstract base class for region generators 8 | /*! 9 | This class implements a purely virtual interface that can be 10 | used to derive instances implementing various region generators. 
11 | */ 12 | class region_generator_plugin{ 13 | public: 14 | config_file *pcf_; 15 | unsigned levelmin_, levelmax_; 16 | 17 | public: 18 | region_generator_plugin( config_file& cf ) 19 | : pcf_( &cf ) 20 | { 21 | levelmin_ = cf.getValue("setup","levelmin"); 22 | levelmax_ = cf.getValue("setup","levelmax"); 23 | } 24 | 25 | //! destructor 26 | virtual ~region_generator_plugin() { }; 27 | 28 | //! compute the bounding box of the region 29 | virtual void get_AABB( double *left, double *right, unsigned level) = 0; 30 | 31 | //! query whether a point intersects the region 32 | virtual bool query_point( double *x, int level ) = 0; 33 | 34 | //! query whether the region generator explicitly forces the grid dimensions 35 | virtual bool is_grid_dim_forced( size_t *ndims ) = 0; 36 | 37 | //! get the center of the region 38 | virtual void get_center( double *xc ) = 0; 39 | 40 | //! get the center of the region with a possible re-centering unapplied 41 | virtual void get_center_unshifted( double *xc ) = 0; 42 | 43 | //! update the highres bounding box to what the grid generator actually uses 44 | virtual void update_AABB( double *left, double *right ) = 0; 45 | }; 46 | 47 | //! Implements abstract factory design pattern for region generator plug-ins 48 | struct region_generator_plugin_creator 49 | { 50 | //! create an instance of a transfer function plug-in 51 | virtual region_generator_plugin * create( config_file& cf ) const = 0; 52 | 53 | //! destroy an instance of a plug-in 54 | virtual ~region_generator_plugin_creator() { } 55 | }; 56 | 57 | //! Write names of registered region generator plug-ins to stdout 58 | std::map< std::string, region_generator_plugin_creator *>& get_region_generator_plugin_map(); 59 | void print_region_generator_plugins( void ); 60 | 61 | //! Concrete factory pattern for region generator plug-ins 62 | template< class Derived > 63 | struct region_generator_plugin_creator_concrete : public region_generator_plugin_creator 64 | { 65 | //! register the plug-in by its name 66 | region_generator_plugin_creator_concrete( const std::string& plugin_name ) 67 | { 68 | get_region_generator_plugin_map()[ plugin_name ] = this; 69 | } 70 | 71 | //! create an instance of the plug-in 72 | region_generator_plugin * create( config_file& cf ) const 73 | { 74 | return new Derived( cf ); 75 | } 76 | }; 77 | 78 | typedef region_generator_plugin region_generator; 79 | 80 | region_generator_plugin *select_region_generator_plugin( config_file& cf ); 81 | 82 | extern region_generator_plugin *the_region_generator; 83 | 84 | #endif 85 | -------------------------------------------------------------------------------- /pycola/reformatNBody.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import print_function 3 | 4 | import numpy as np 5 | import os, shutil 6 | import math 7 | 8 | import sys 9 | 10 | ### This code will take the output of one NBody simulation (ie from the pycola code), and split it into 8 sub-volumes, and histogram them. 11 | 12 | ## ddefining which files to run over. Basically this was added just to allow me to run multiple jobs in parallel. 13 | 14 | start = 0 15 | stop = 2 16 | 17 | counter = -1 18 | ######## Loop over the files! 
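## only files whose running index falls inside the [start, stop] window defined
## above are processed, so several copies of this script can work over the same
## pycola output directory in parallel without picking up the same files.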
19 | for afile in os.listdir("OmSiNs/"): 20 | if "npz" not in afile or "pycola" not in afile: 21 | continue 22 | infile = "OmSiNs/"+afile 23 | 24 | print ('Input=',infile,' of',counter) 25 | counter+=1 26 | if counterstop: 27 | continue 28 | 29 | print (counter, infile) 30 | 31 | outdir = infile[:-4] 32 | if os.path.exists(outdir): 33 | if len(os.listdir(outdir))==8: 34 | print ("done this one!") 35 | continue 36 | else: 37 | print ('see len:',len(os.listdir(outdir))) 38 | ### this is a half-empty folder. delete it! 39 | shutil.rmtree(outdir) 40 | 41 | os.mkdir(outdir) 42 | 43 | ### First, read in the px/py/pz from the pycola output file 44 | data = np.load(infile) 45 | 46 | px = data['px'] 47 | py = data['py'] 48 | pz = data['pz'] 49 | 50 | print ('pxyz: ',px[0][0][0], py[0][0][0], pz[0][0][0]) 51 | 52 | 53 | 54 | #### Try using this hp.histogramdd function... 55 | ### For this I need to turn the particl elists into coord lists, 56 | ### so ( (px[i][j][k], py[i][j][k], pz[i][j][k]), ....) 57 | pxf = np.ndarray.flatten(px) 58 | pyf = np.ndarray.flatten(py) 59 | pzf = np.ndarray.flatten(pz) 60 | 61 | print ('a', pxf.shape) 62 | print ('b', pxf[0], pyf[0], pzf[0]) 63 | ### so the flattening is working. Now make this into a 3d array... 64 | ps = np.vstack( (pxf, pyf, pzf) ).T 65 | 66 | del(pxf); del(pyf); del(pzf) 67 | 68 | print ("one big array!", ps.shape, ps[0,:]) 69 | 70 | 71 | ## OK! Then this is indeed a big old array. Now I want to histogram it. 72 | ## this step goes from a set of parcile coordinates to a histogram of particle counts 73 | nbins = 256 74 | H, bins = np.histogramdd(ps, nbins, range=((0,512),(0,512),(0,512)) ) 75 | 76 | print ("histo dshape!", H.shape, H[0][0][0]) 77 | #print ('mass sum=%.3g'%np.sum(H)) 78 | 79 | 80 | ### now I have my histogram of particle density, I split it up into 8 subvolumes and write it out 81 | ### note that the file structure here is required from the legacy CosmoFlow code. One dir is created for each NBody output file, then the 8 sub-volumes are named [0-7].npy inside that dir. 
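    ### with nbins = 256 and 128^3 sub-volumes, the loops below produce
    ### 2 x 2 x 2 = 8 cubes per simulation, saved as 0.npy ... 7.npy inside outdir.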
82 | count = -1 83 | for i in range(0, 256, 128): 84 | for j in range(0, 256, 128): 85 | for k in range(0, 256, 128): 86 | 87 | count+=1 88 | d = H[i:(i+128),j:(j+128),k:(k+128)] 89 | filename = outdir+"/"+str(count)+".npy" 90 | print (count,'mass sum=%.3g'%np.sum(d)) 91 | np.save(filename, d) 92 | print ("got count :", count) 93 | 94 | print ("**************************") 95 | -------------------------------------------------------------------------------- /MUSIC/OmSiNs/runCode.py~: -------------------------------------------------------------------------------- 1 | import random 2 | import time 3 | import math 4 | import subprocess 5 | import multiprocessing 6 | import os 7 | 8 | def func(argus): 9 | omi = argus[0] 10 | sii = argus[1] 11 | nsi = argus[2] 12 | om = str(round(omi, 3)) 13 | si = str(round(sii, 3)) 14 | ns = str(round(nsi, 3)) 15 | #print omi, sii 16 | 17 | tfile = "ics_template.conf" 18 | hdffilename = "ics_"+om+"_"+si+"_"+ns+".hdf5" 19 | if os.path.exists(hdffilename) and os.path.getsize(hdffilename)>10300000000: 20 | #print "done this one" 21 | return 22 | else: 23 | print "***********", omi, sii, nsi 24 | outfilename = "ics_"+om+"_"+si+"_"+ns+".conf" 25 | outfile = open(outfilename, "w") 26 | for line in open(tfile): 27 | 28 | if "Omega_m" in line: 29 | print >> outfile, "Omega_m = "+om 30 | elif "sigma_8" in line: 31 | print >> outfile, "sigma_8 = "+si 32 | elif "nspec" in line: 33 | print >> outfile, "nspec = "+ns 34 | elif "filename" in line: 35 | print >> outfile, "filename = "+hdffilename 36 | elif "seed" in line: 37 | print >> outfile, "seed[9] = "+str(random.randrange(30000,40000)) 38 | else: 39 | print >> outfile, line[:-1] 40 | 41 | outfile.close() 42 | 43 | 44 | ##########3 Now run MUSIC on this! 45 | cmd = ['../MUSIC', outfilename] 46 | print cmd 47 | 48 | q = subprocess.Popen(cmd) 49 | q.wait() 50 | 51 | 52 | 53 | ####################################### 54 | if __name__ == '__main__': 55 | ## get random params for omM and si8 56 | random.seed(18885294) ### have this be repeatably random! 57 | 58 | 59 | nsamples = 1001 ## has tpo be 508 because for some reason tehre are repetitions in teh random number sequence!!! 60 | oms, sis, nss = [], [], [] 61 | for i in range(0, nsamples): 62 | r1 = random.randrange(2500, 3500) 63 | r2 = random.randrange(7800, 9500) 64 | r3 = random.randrange(9000, 10000) 65 | 66 | oms.append(r1/10000.) 67 | sis.append(r2/10000.) 68 | nss.append(r3/10000.) 69 | 70 | 71 | ### check if this file already exists, delete it if too small 72 | ct = 0 73 | for i in range(len(oms)): 74 | 75 | om = str(round(oms[i], 3)) 76 | si = str(round(sis[i], 3)) 77 | ns = str(round(nss[i], 3)) 78 | hdffilename = "ics_"+om+"_"+si+"_"+ns+".hdf5" 79 | if os.path.exists(hdffilename): 80 | if os.path.getsize(hdffilename)>10300000000: 81 | print i 82 | else: 83 | print "file size", os.path.getsize(hdffilename) 84 | os.remove(hdffilename) 85 | else: 86 | print om, si, ns 87 | ct +=1 88 | print "need to run: ", ct, "more sims! 
" 89 | 90 | 91 | ### run this in parallel - each worker in teh pool will run one set of omM/si8 params 92 | argus = zip(oms, sis, nss) 93 | print len(oms) 94 | pool = multiprocessing.Pool(20) 95 | pool.map(func, argus) 96 | 97 | 98 | 99 | -------------------------------------------------------------------------------- /MUSIC/log.cc: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | log.cc - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | #include "log.hh" 12 | #include 13 | #include 14 | 15 | std::string RemoveMultipleWhiteSpaces( std::string s ); 16 | 17 | 18 | std::string MUSIC::log::outputFile_; 19 | std::ofstream MUSIC::log::outputStream_; 20 | std::list MUSIC::log::messages_; 21 | void (*MUSIC::log::receiver)(const message&) = NULL; 22 | MUSIC::log::messageType MUSIC::log::logLevel_; 23 | 24 | 25 | std::string RemoveMultipleWhiteSpaces( std::string s ) 26 | { 27 | std::string search = " "; // this is 2 spaces 28 | size_t index; 29 | 30 | while( (index = s.find(search)) != std::string::npos ) 31 | { // remove 1 character from the string at index 32 | s.erase(index,1); 33 | } 34 | 35 | return s; 36 | } 37 | 38 | void MUSIC::log::send(messageType type, const std::string& text_) 39 | //void MUSIC::log::send(messageType type, std::stringstream& textstr) 40 | { 41 | std::string text(text_);// = textstr.str(); 42 | // Skip logging if minimum level is higher 43 | if (logLevel_) 44 | if (type < logLevel_) return; 45 | // log message 46 | MUSIC::log::message m; 47 | m.type = type; 48 | m.text = text; 49 | time_t t = time(NULL); 50 | m.when = localtime(&t); 51 | messages_.push_back(m); 52 | 53 | if( type==Info||type==Warning||type==Error||type==FatalError ) 54 | { 55 | std::cout << " - "; 56 | if(type==Warning) 57 | std::cout << "WARNING: "; 58 | if(type==Error) 59 | std::cout << "ERROR: "; 60 | if(type==FatalError) 61 | std::cout << "FATAL: "; 62 | std::cout << text << std::endl; 63 | } 64 | 65 | std::replace(text.begin(),text.end(),'\n',' '); 66 | RemoveMultipleWhiteSpaces(text); 67 | 68 | // if enabled logging to file 69 | if(outputStream_.is_open()) 70 | { 71 | // print time 72 | char buffer[9]; 73 | strftime(buffer, 9, "%X", m.when); 74 | outputStream_ << buffer; 75 | 76 | // print type 77 | switch(type) 78 | { 79 | case Info: outputStream_ << " | info | "; break; 80 | case DebugInfo: outputStream_ << " | debug | "; break; 81 | case Warning: outputStream_ << " | warning | "; break; 82 | case Error: outputStream_ << " | ERROR | "; break; 83 | case FatalError:outputStream_ << " | FATAL | "; break; 84 | case User: outputStream_ << " | info | "; break; 85 | default: outputStream_ << " | "; 86 | } 87 | 88 | // print description 89 | outputStream_ << text << std::endl; 90 | } 91 | 92 | // if user wants to catch messages, send it to him 93 | if(receiver) 94 | receiver(m); 95 | } 96 | 97 | 98 | void MUSIC::log::setOutput(const std::string& filename) 99 | { 100 | //logDebug("Setting output log file: " + filename); 101 | outputFile_ = filename; 102 | 103 | // close old one 104 | if(outputStream_.is_open()) 105 | outputStream_.close(); 106 | 107 | // create file 108 | outputStream_.open(filename.c_str()); 109 | if(!outputStream_.is_open()) 110 | LOGERR("Cannot create/open logfile \'%s\'.",filename.c_str()); 111 | } 112 | 113 | void MUSIC::log::setLevel(const MUSIC::log::messageType level) 114 | { 115 | logLevel_ = level; 116 | } 
117 | 118 | 119 | MUSIC::log::~log() 120 | { 121 | if(outputStream_.is_open()) 122 | outputStream_.close(); 123 | } 124 | 125 | -------------------------------------------------------------------------------- /janWorkflow/Readme: -------------------------------------------------------------------------------- 1 | Oryginal, repo: https://github.com/NERSC/cosmoflow-sims/ 2 | 3 | cd cosmoflow-sims/janWorkflow 4 | 5 | 6 | Jan's scheme simulating a single universe 7 | 8 | Single node Haswell Slumr job array : batchMakeUniverse.slr 9 | It executes script: makeOneUniverse.sh 10 | Update1: -N4 would eun on 4 nodes and each node would do different stop-time 11 | Update2: add constraint Omega_L = 1-Omega_m 12 | 13 | 14 | The job sandbox is at : wrkDir=$CSCRATCH/${coreN}-${arrIdx} 15 | 16 | 17 | Processing steps 18 | A)= = = = MUSIC (runMusic.sh) creates those files (0.5 min wall time) 19 | balewski@cori12:~/prj/cosmoflow-sims/janWorkflow/out> ls -lh 20 | total 11G 21 | -rw-rw---- 1 balewski nstaff 220 Nov 6 15:28 cosmoMeta.yaml 22 | -rw-rw---- 1 balewski nstaff 730 Nov 6 15:28 ics_2018-12_a12383763.conf 23 | -rw-rw---- 1 balewski nstaff 6.7K Nov 6 15:28 ics_2018-12_a12383763.conf_log.txt 24 | -rw-rw---- 1 balewski nstaff 9.7G Nov 6 15:28 ics_2018-12_a12383763.hdf5 25 | -rw-rw---- 1 balewski nstaff 20K Nov 6 15:28 input_powerspec.txt 26 | -rw-rw---- 1 balewski nstaff 130 Nov 6 15:28 tmp.fix_ics_2018-12_a12383763 27 | -rw-rw---- 1 balewski nstaff 1.1G Nov 6 15:28 wnoise_0009.bin 28 | 29 | This is the meta-data file: 30 | 31 | ./rdMeta.py 32 | inp num args: 1 33 | read YAML from out/cosmoMeta.yaml and pprint it: 34 | {'boxlength': 512, 35 | 'coreStr': 'ics_2018-12_a12383763', 36 | 'date': 'Tue Nov 6 15:28:09 PST 2018', 37 | 'namePar': ['Omega_m', 'sigma_8'], 38 | 'physOmega_m': 0.275069, 39 | 'physPar': [0.275069, 0.783454], 40 | 'seed9': 12383763, 41 | 'unitPar': [-0.0674912, -0.6793091]} 42 | 43 | B) = = = = = = = run Pycola 44 | Time elapsed on small box (including IC): 523.199383974 seconds. 45 | -rw-rw---- 1 balewski nstaff 1.6G Nov 6 15:53 ics_2018-12_a12383763.npz 46 | 47 | C) = = = = = = = run slicing 48 | 1.1G Nov 6 15:57 ics_2018-12_a12383763_dim512_full.npy 49 | ... 50 | 17M Nov 6 15:57 ics_2018-12_a12383763_dim128_cube0.npy 51 | 17M Nov 6 15:57 ics_2018-12_a12383763_dim128_cube1.npy 52 | 17M Nov 6 15:57 ics_2018-12_a12383763_dim128_cube2.npy 53 | ... 
54 | 17M Nov 6 15:57 ics_2018-12_a12383763_dim512_sheet21.npy 55 | 17M Nov 6 15:57 ics_2018-12_a12383763_dim512_sheet22.npy 56 | 57 | D) = = = = = = Extract P(k) in 2 steps 58 | produce correct hd5 bsed on a template 59 | pack_hd5_Pk.py 60 | = = = = = = = = One time compilations = = = = = = = = 61 | 62 | A) - - - - compile MUSIC , on Haswell 63 | 64 | balewski@cori01:~/prj/cosmoGenerator/cosmoflow-sims> cd MUSIC/ 65 | 66 | module unload darshan 67 | module load cray-fftw gsl cray-hdf5 68 | time make 69 | real 3m29.408s 70 | 71 | # to test-run manually for three cosmological parameters, nsamples = 3 ## How many simulations 72 | 73 | cd OmSiNs 74 | # cleanup old outputs: rm ics_0* 75 | $ time python runCode.py 76 | real 1m46.831s 77 | user 32m3.399s 78 | OUT: MUSIC/OmSiNs/ics_0.275_0.859_0.933.hdf5 79 | 80 | 81 | B) - - - - - PyCola 82 | module load python/2.7-anaconda-4.4 83 | source activate cola_jan1 84 | (cola_jan1) balewski@cori09:~/prj/cosmoGenerator/cosmoflow-sims> 85 | cd ../../pycola/ 86 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/common/software/fftw/3.3.4/hsw/gnu/lib/ 87 | 88 | time python pycola-OmSiNs-template.py 1 89 | OUT: OmSiNs/pycola_0.275_0.859_0.933.npz 90 | real 8m56.339s 91 | user 113m47.176s 92 | 93 | 94 | C) - - - - (optional) compile gimlet2 + matter_pk for Zarija 95 | 96 | git clone https://balewski@bitbucket.org/zarija/gimlet2 cosmo-gimlet2 97 | module load cray-fftw gsl cray-hdf5-parallel 98 | 99 | cd gimlet2 100 | make -j 101 | cd apps/matter_pk 102 | make (to make the executable) 103 | -------------------------------------------------------------------------------- /MUSIC/OmSiNs/runCode.py: -------------------------------------------------------------------------------- 1 | import random 2 | import time 3 | import math 4 | import subprocess 5 | import multiprocessing 6 | import os 7 | 8 | def func(argus): 9 | omi = argus[0] 10 | sii = argus[1] 11 | nsi = argus[2] 12 | om = str(round(omi, 3)) 13 | si = str(round(sii, 3)) 14 | ns = str(round(nsi, 3)) 15 | #print omi, sii 16 | 17 | tfile = "ics_template.conf" ## read in template config file 18 | hdffilename = "ics_"+om+"_"+si+"_"+ns+".hdf5" 19 | if os.path.exists(hdffilename) and os.path.getsize(hdffilename)>10300000000: 20 | #print "done this one" 21 | return 22 | else: 23 | print "***********", omi, sii, nsi 24 | outfilename = "ics_"+om+"_"+si+"_"+ns+".conf" ## make the config file name for this combination of cosmo params 25 | outfile = open(outfilename, "w") 26 | for line in open(tfile): 27 | 28 | if "Omega_m" in line: 29 | print >> outfile, "Omega_m = "+om 30 | elif "sigma_8" in line: 31 | print >> outfile, "sigma_8 = "+si 32 | elif "nspec" in line: 33 | print >> outfile, "nspec = "+ns 34 | elif "filename" in line: 35 | print >> outfile, "filename = "+hdffilename 36 | elif "seed" in line: 37 | print >> outfile, "seed[9] = "+str(random.randrange(30000,40000)) 38 | else: 39 | print >> outfile, line[:-1] 40 | 41 | outfile.close() 42 | 43 | 44 | ##########3 Now run MUSIC on this! 45 | cmd = ['../MUSIC', outfilename] 46 | print cmd 47 | 48 | q = subprocess.Popen(cmd) 49 | q.wait() 50 | 51 | 52 | 53 | ####################################### 54 | if __name__ == '__main__': 55 | ## get random params for omM and si8 56 | random.seed(18885294) ### have this be repeatably random! Note that this is for the param values, NOT the random initial conditions 57 | 58 | 59 | nsamples = 2 ## How many simulations are we going to make? 
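    ## each parameter below is drawn as an integer with randrange and divided by
    ## 1e4, i.e. uniform over the quoted range in steps of 0.0001; the values are
    ## later rounded to 3 decimals to build the config and output file names.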
60 | oms, sis, nss = [], [], [] 61 | for i in range(0, nsamples): ### defining the ranges for the simulation parameters 62 | r1 = random.randrange(2500, 3500) ## Om_m 0.25 - 0.35 63 | r2 = random.randrange(7800, 9500) ## Si_8 0.78 - 0.95 64 | r3 = random.randrange(9000, 10000) ## Ns 0.9 - 1.0 65 | 66 | oms.append(r1/10000.) ## taking the params down to the correct range/number of decimal places 67 | sis.append(r2/10000.) 68 | nss.append(r3/10000.) 69 | 70 | 71 | ### check if this file already exists, delete it if too small 72 | ct = 0 73 | for i in range(len(oms)): 74 | 75 | om = str(round(oms[i], 3)) 76 | si = str(round(sis[i], 3)) 77 | ns = str(round(nss[i], 3)) 78 | hdffilename = "ics_"+om+"_"+si+"_"+ns+".hdf5" 79 | if os.path.exists(hdffilename): 80 | if os.path.getsize(hdffilename)>10300000000: ### this is about the size of a successful run 81 | print i 82 | else: 83 | print "file size", os.path.getsize(hdffilename) 84 | os.remove(hdffilename) 85 | else: 86 | print om, si, ns 87 | ct +=1 88 | print "need to run: ", ct, "more sims! " 89 | 90 | 91 | ### run this in parallel - each worker in the pool will run one set of omM/si8 params 92 | argus = zip(oms, sis, nss) 93 | print len(oms) 94 | pool = multiprocessing.Pool(1) ## 20 processes is about as many as you'd want to run on a single node interactively. 95 | pool.map(func, argus) 96 | 97 | 98 | 99 | -------------------------------------------------------------------------------- /CosmoFlow/hyper_parameters_Cosmo.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | magic_number = 64 4 | 5 | DATAPARAM={ 6 | "output_dim" : 3, 7 | #Ns 8 | "zsAVG" : [0.3, 0.8628, 0.95], 9 | "zsSTD" : [0.02853, 0.04887, 0.028] 10 | #H0 11 | #"zsAVG" : [0.3, 0.8628, 0.701], 12 | #"zsSTD" : [0.02853, 0.04887, 0.05691] 13 | #"zsAVG": [2.995679839999998983e-01,8.610806619999996636e-01], 14 | #"zsSTD": [2.905168635566176411e-02,4.023372385668218254e-02] 15 | } 16 | 17 | Input = { 18 | "BATCH_SIZE" : 1, #mini-batch size for training and validation 19 | "NUM_THREADS" : 2, #number of threads to read data 20 | "CAPACITY" : 0, 21 | "MIN_AFTER_DEQUEUE" : 200 #the minimum number in the queue after dequeue (Min_after_dequeue and capacity together determines the shuffling of input data) 22 | } 23 | 24 | Input["CAPACITY"] = Input["BATCH_SIZE"]*4 + Input["MIN_AFTER_DEQUEUE"] 25 | 26 | Input_Test = { 27 | "BATCH_SIZE" : 1, #mini-batch size for test data 28 | "NUM_THREADS" : 2, #number of threads to read data 29 | "CAPACITY" : 0, 30 | "MIN_AFTER_DEQUEUE" : 64 31 | } 32 | 33 | Input_Test["CAPACITY"] = Input_Test["BATCH_SIZE"]*4 + Input_Test["MIN_AFTER_DEQUEUE"] 34 | 35 | Model = { 36 | "REG_RATE": 0., #regularization of weights: currently set to 0 since batch_normalization has the same effect of regularization 37 | "LEAK_PARAMETER": 0.01, #leaky parameter for leaky relu 38 | "LEARNING_RATE" : 0.0001, #adam_optimizer to do the update. 39 | "DROP_OUT": 0.5 #apply drop out in fully connected layer. this value gives the probabilty of keep the node. 40 | } 41 | 42 | RUNPARAM={ 43 | "num_epoch": 80, #each epoch means a fully pass over the data. The program might stop before running num_epoch (see next line). 44 | "require_improvement": 50, #if with require_improvement, there is no improvement in validation error, then stop running. 
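  # note: num_train/num_val/num_test count whole simulations; each simulation
  # contributes magic_number (= 64) sub-volumes, which is why the batch counts
  # computed just below this dict are scaled by magic_number.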
45 | "num_train":400, #total number of simulations for training 46 | "num_val":50, #total number of simulations for validation 47 | "num_test":49, #total number of simulations for testing 48 | "batch_per_epoch":0, 49 | "batch_per_epoch_val":0, 50 | "iter_test":0 51 | } 52 | 53 | RUNPARAM["batch_per_epoch"] = RUNPARAM['num_train']*magic_number/Input['BATCH_SIZE'] 54 | RUNPARAM["batch_per_epoch_val"] = RUNPARAM['num_val']*magic_number/Input['BATCH_SIZE'] 55 | RUNPARAM['iter_test'] = RUNPARAM['num_test']*magic_number/Input_Test['BATCH_SIZE'] 56 | 57 | 58 | ##### CHANGE THIS TO LOCAL DIRECTORY 59 | 60 | ## data on scratch 61 | main_dir = "/global/cscratch1/sd/djbard/cosmoML/data-March20Runs/" 62 | target_dir = "500/" #"orig_paper" 63 | 64 | 65 | ## BB dir 66 | #main_dir = someVariable = (os.environ['DW_PERSISTENT_STRIPED_CosmoFlow']) 67 | #target_dir = "/two-param-500-128cubefrom256-64perTfrecord-64From500perTfrecord/" #"orig_paper" 68 | 69 | Path={ 70 | 71 | "init_data" : '.', #Path where the init data is 72 | "Model_path" : './result/', #Path to save the best model where the validation error is the smallest. And then we use this model for test 73 | "train_data" : main_dir + target_dir + '/train/', #path where the train data is 74 | "train_result" : './result/', #path to store the train result 75 | "val_data" : main_dir + target_dir + '/val/', #path where the validation data is 76 | "val_result" : './result/', #path to st/data0/jamesarnemann/cosmoNet/' + target_dir + '/result/'ore the validation result 77 | "test_data" : main_dir + target_dir + '/test/', #path where the test data is 78 | "test_result" : './result/', #path to store the test result 79 | 80 | } 81 | -------------------------------------------------------------------------------- /MUSIC/convolution_kernel.hh: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | convolution_kernel.hh - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | #ifndef __CONVOLUTION_KERNELS_HH 12 | #define __CONVOLUTION_KERNELS_HH 13 | 14 | #include 15 | #include 16 | 17 | #include "config_file.hh" 18 | #include "densities.hh" 19 | #include "transfer_function.hh" 20 | 21 | 22 | #define ACC_RF(i,j,k) (((((size_t)(i)+nx)%nx)*ny+(((size_t)(j)+ny)%ny))*2*(nz/2+1)+(((size_t)(k)+nz)%nz)) 23 | #define ACC_RC(i,j,k) (((((size_t)(i)+nxc)%nxc)*nyc+(((size_t)(j)+nyc)%nyc))*2*(nzc/2+1)+(((size_t)(k)+nzc)%nzc)) 24 | 25 | namespace convolution{ 26 | 27 | //! encapsulates all parameters required for transfer function convolution 28 | struct parameters 29 | { 30 | int nx,ny,nz; 31 | double lx,ly,lz;//,boxlength; 32 | config_file *pcf; 33 | transfer_function* ptf; 34 | unsigned coarse_fact; 35 | bool deconvolve; 36 | bool is_finest; 37 | bool smooth; 38 | }; 39 | 40 | 41 | ///////////////////////////////////////////////////////////////// 42 | 43 | 44 | //! abstract base class for a transfer function convolution kernel 45 | class kernel{ 46 | public: 47 | 48 | //! all parameters (physical/numerical) 49 | parameters cparam_; 50 | 51 | config_file *pcf_; 52 | transfer_function* ptf_; 53 | refinement_hierarchy* prefh_; 54 | tf_type type_; 55 | 56 | //! constructor 57 | kernel( config_file& cf, transfer_function* ptf, refinement_hierarchy& refh, tf_type type ) 58 | : pcf_(&cf), ptf_(ptf), prefh_(&refh), type_(type)//cparam_( cp ) 59 | { } 60 | 61 | //! 
dummy constructor 62 | /*kernel( void ) 63 | { }*/ 64 | 65 | //! compute/load the kernel 66 | virtual kernel* fetch_kernel( int ilevel, bool isolated=false ) = 0; 67 | 68 | //! virtual destructor 69 | virtual ~kernel(){ }; 70 | 71 | //! purely virtual method to obtain a pointer to the underlying data 72 | virtual void* get_ptr() = 0; 73 | 74 | //! purely virtual method to determine whether the kernel is k-sampled or not 75 | virtual bool is_ksampled() = 0; 76 | 77 | //! purely virtual vectorized method to compute the kernel value if is_ksampled 78 | virtual void at_k( size_t len, const double* in_k, double* out_Tk ) = 0; 79 | 80 | //! free memory 81 | virtual void deallocate() = 0; 82 | }; 83 | 84 | 85 | //! abstract factory class to create convolution kernels 86 | struct kernel_creator 87 | { 88 | //! creates a convolution kernel object 89 | virtual kernel * create( config_file& cf, transfer_function* ptf, refinement_hierarchy& refh, tf_type type ) const = 0; 90 | 91 | //! destructor 92 | virtual ~kernel_creator() { } 93 | }; 94 | 95 | 96 | //! access map to the various kernel classes through the factory 97 | std::map< std::string, kernel_creator *>& get_kernel_map(); 98 | 99 | 100 | //! actual implementation of the factory class for kernel objects 101 | template< class Derived > 102 | struct kernel_creator_concrete : public kernel_creator 103 | { 104 | //! constructor inserts the kernel class in the map 105 | kernel_creator_concrete( const std::string& kernel_name ) 106 | { get_kernel_map()[ kernel_name ] = this; } 107 | 108 | //! creates an instance of the kernel object 109 | kernel * create( config_file& cf, transfer_function* ptf, refinement_hierarchy& refh, tf_type type ) const 110 | { return new Derived( cf, ptf, refh, type ); } 111 | }; 112 | 113 | 114 | //! actual implementation of the FFT convolution (independent of the actual kernel) 115 | template< typename real_t > 116 | void perform( kernel* pk, void *pd, bool shift ); 117 | 118 | 119 | 120 | 121 | } //namespace convolution 122 | 123 | 124 | #endif //__CONVOLUTION_KERNELS_HH 125 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | JAN 2 | 3 | MUSIC: 4 | to compile: 5 | 6 | cd MUSIC 7 | 8 | module unload darshan 9 | 10 | module load cray-fftw gsl cray-hdf5 11 | 12 | make 13 | 14 | to run for three cosmological parameters: 15 | 16 | cd OmSiNs 17 | 18 | python runCode.py 19 | 20 | 21 | -------------------- 22 | 23 | 24 | pycola: 25 | 26 | First step: make a conda env with the correct python modules in it. I called this "cola", could obviously be anything you like. 27 | 28 | conda create -n cola python=2.7 numpy 29 | source activate cola 30 | pip install --global-option=build_ext --global-option=-L/usr/common/software/fftw/3.3.4/hsw/gnu/lib --global-option=-I/usr/common/software/fftw/3.3.4/hsw/gnu/include pyfftw 31 | conda install scipy 32 | conda install h5py 33 | conda install matplotlib 34 | conda install cython 35 | 36 | to compile: 37 | 38 | module load python 39 | 40 | source activate NAME_OF_YOUR_CONDA_ENV 41 | 42 | python setup.py build_ext --inplace 43 | 44 | 45 | 46 | to run: 47 | 48 | module load python 49 | 50 | source activate NAME_OF_YOUR_CONDA_ENV 51 | 52 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/common/software/fftw/3.3.4/hsw/gnu/lib/ 53 | 54 | 55 | Need a MUSIC file to run over as input file. 56 | pycola will need to run one job for every input file. 
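(For orientation only -- a minimal sketch, not one of the repo scripts, of how the pending per-file jobs could be enumerated. It assumes the ics_*.hdf5 / pycola_*.npz naming used by runCode.py and the pycola OmSiNs output dir; the two directory paths are placeholders you would adjust.)

import os

music_dir  = "../MUSIC/OmSiNs"   # placeholder: dir holding the MUSIC ics_*.hdf5 files
pycola_dir = "OmSiNs"            # placeholder: dir where pycola writes pycola_*.npz

pending = []
for f in sorted(os.listdir(music_dir)):
    if not (f.startswith("ics_") and f.endswith(".hdf5")):
        continue                              # skip configs, logs, white-noise files
    out = f.replace("ics_", "pycola_").replace(".hdf5", ".npz")
    if not os.path.exists(os.path.join(pycola_dir, out)):
        pending.append(f)                     # MUSIC output with no pycola result yet
print("pending pycola jobs: %d" % len(pending))
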
I used taskfarmer to bundle all the jobs into units that would run effectively on Cori. 57 | 58 | In pycola/OmSiNs/taskfarmer, 59 | - writeTasks.py makes the list of tasks that you will feed into taskfarmer. 60 | - taskwrapper.sh actually contains the tasks to be run. *You will need to change the path in this file to point to your own directory, and to activate your own conda env* 61 | - taskfarmer.sh is the batch file to submit. Usual options apply. You'll at least need to edit to poitn to the correct dir for your installation. 62 | 63 | This will run the file: 64 | cosmoflow-sims/pycola/pycola-OmSiNs-template.py 65 | which looks at all the hdf5 files in the directory cosmoflow-sims/MUSIC/OmSiNs/ that were produced by the MUSIC code you just ran. It'll run the pycola code (the N-body simulation code) over each of those hdf5 files. The output is put into the pycola/OmSiNs directory. 66 | 67 | Note that when I was running this, I'd delete the tfin files after each taskfarmer run, as there seemed to be some confusion if I tried to use the in-built taskfarmer checks for which jobs had completed. Instead I had the code check for completion of the task list. 68 | 69 | 70 | -------------------- 71 | 72 | Reformatting the NBody files 73 | This code does two things 74 | 1) makes a histogram of particle counts out of the list of particle coordinates 75 | 2) Splits that histogram into 8 sub-volumes 76 | 77 | just run : 78 | 79 | cosmoflow-sims/pycola> python reformatNBody.py 0 80 | 81 | The argument for this defines which file in the pycola output dir you start with. I put this in so I could run multiple versions of this code at the same time over the same output dir. 82 | 83 | The output of this code is a directory inside the OmSiNs dir that has the name of the 3 cosmo params behind the simulation. Inside that dir are 8 npy files, each of which is a histogram of particle counts. 84 | 85 | 86 | -------------------- 87 | 88 | Making the list of files for input into the CosmoFlow IO code 89 | 90 | The next stage of formatting involves putting these sub-volume files into a format required by the CosmoFlow IO code, for when it makes the TFRecord files you'll actually use in the tensorflow training. For this we need a list of cosmologies, and we need to re-name all the dirs we just made to numbers that correspond to the position of this simulation on the list. 91 | I know this is not a very sensible solution. 92 | 93 | First run : 94 | cosmoflow-sims/pycola/OmSiNs/makeList.py 95 | to make the list, then run: 96 | 97 | cosmoflow-sims/pycola/OmSiNs/copyFiles.py 98 | to make new dirs with new names. Note that I'm copying them, rather than moving the dir, because I am cautious and didn't want to have to re-make all these files again. 99 | -------------------------------------------------------------------------------- /MUSIC/OmSiNs/ics_template.conf_log.txt: -------------------------------------------------------------------------------- 1 | 12:46:07 | info | Opening log file 'ics_template.conf_log.txt'. 2 | 12:46:07 | info | Running music!, version 1.53 3 | 12:46:07 | info | Log is for run started Fri Aug 31 12:46:07 2018 4 | 12:46:07 | info | Code was compiled using FFTW version 3.x 5 | 12:46:07 | info | Code was compiled for multi-threaded FFTW 6 | 12:46:07 | info | Running with a maximum of 64 OpenMP threads 7 | 12:46:07 | info | Code was compiled for double precision. 8 | 12:46:07 | info | Using k-space sampled transfer functions... 
9 | 12:46:07 | info | Selecting transfer function plug-in : eisenstein 10 | 12:46:07 | info | Selecting region generator plug-in : box 11 | 12:46:07 | info | Selecting random number generator plug-in : MUSIC 12 | 12:46:07 | info | Grid structure for Poisson solver: 13 | 12:46:07 | info | Domain shifted by ( 0, 0, 0) 14 | 12:46:07 | info | Level 9 : offset = ( 0, 0, 0) 15 | 12:46:07 | info | size = ( 512, 512, 512) 16 | 12:46:07 | info | Grid structure for density convolution: 17 | 12:46:07 | info | Domain shifted by ( 0, 0, 0) 18 | 12:46:07 | info | Level 9 : offset = ( 0, 0, 0) 19 | 12:46:07 | info | size = ( 512, 512, 512) 20 | 12:46:07 | info | Computing white noise... 21 | 12:46:07 | info | Found 0 density constraint(s) to be obeyed. 22 | 12:46:07 | info | Generating random numbers (2) with seed 34567 23 | 12:46:07 | info | Generating random numbers w/ sample cube size of 32 24 | 12:46:09 | info | Storing white noise field in file 'wnoise_0009.bin'... 25 | 12:46:15 | info | Entering 1LPT branch 26 | 12:46:15 | info | Computing dark matter displacements... 27 | 12:46:15 | info | Using k-space transfer function kernel. 28 | 12:46:16 | info | Performing noise convolution on level 9 29 | 12:46:16 | info | Loading white noise from file 'wnoise_0009.bin'... 30 | 12:46:17 | info | Performing kernel convolution on ( 512, 512, 512) grid 31 | 12:46:17 | info | Performing forward FFT... 32 | 12:46:18 | info | Performing backward FFT... 33 | 12:46:20 | info | Finished computing the density field in 5.347166s 34 | 12:46:21 | info | Grid mean density is 3.06321e-322. Correcting... 35 | 12:46:21 | info | Writing CDM data 36 | 12:46:42 | info | Global density extrema: 37 | 12:46:42 | info | minimum: delta=-17.011135 at (0.870117,0.350586,0.286133) 38 | 12:46:42 | info | shifted back at (0.870117,0.350586,0.286133) 39 | 12:46:42 | info | maximum: delta=17.697845 at (0.877930,0.219727,0.233398) 40 | 12:46:42 | info | shifted back at (0.877930,0.219727,0.233398) 41 | 12:46:44 | info | Entering k-space Poisson solver... 42 | 12:46:44 | info | Performing forward transform. 43 | 12:46:44 | info | Performing backward transform. 44 | 12:46:46 | info | Done with k-space Poisson solver. 45 | 12:46:46 | info | Writing CDM potential 46 | 12:46:50 | info | Computing a gradient in k-space... 47 | 12:46:53 | info | Done with k-space gradient. 48 | 12:46:53 | info | max. x-displacement of HR particles is -27.695793 [mean dx] 49 | 12:46:53 | info | Writing CDM displacements 50 | 12:46:57 | info | Computing a gradient in k-space... 51 | 12:47:00 | info | Done with k-space gradient. 52 | 12:47:00 | info | max. y-displacement of HR particles is 27.702524 [mean dx] 53 | 12:47:01 | info | Writing CDM displacements 54 | 12:47:05 | info | Computing a gradient in k-space... 55 | 12:47:07 | info | Done with k-space gradient. 56 | 12:47:07 | info | max. z-displacement of HR particles is -27.444410 [mean dx] 57 | 12:47:07 | info | Writing CDM displacements 58 | 12:47:12 | info | Computing velocitites... 59 | 12:47:13 | info | Computing a gradient in k-space... 60 | 12:47:15 | info | Done with k-space gradient. 61 | 12:47:16 | info | sigma of x-velocity of high-res particles is 0.563714 62 | 12:47:16 | info | Writing CDM velocities 63 | 12:47:21 | info | Computing a gradient in k-space... 
64 | -------------------------------------------------------------------------------- /janWorkflow/prepMusic_4par.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -u ; # exit if you try to use an uninitialized variable 3 | set -e ; # bash exits if any statement returns a non-true return value 4 | set -o errexit ; # exit if any statement returns a non-true return value 5 | nTime=${1-1} 6 | outPath=${2-out} 7 | zRedShift=(0.0 0.5 1.5 3.0) 8 | 9 | echo nTime=$nTime mapps to: ${zRedShift[*]} 10 | # this function returns 3 type of values derived from the same 3-byte random int 11 | myRandomF () { 12 | rndI=`od -A n -t d -N 3 /dev/random | tr -d ' '` 13 | # This is 3-byte truly random generator, max value: 256^3=16777216-1 14 | #rndI=8340000 #tmp, gives randF~0. 15 | rndF=` echo $rndI | awk '{printf "%.7f", $1/ 16777216 *2 -1. }'` 16 | #echo rndI=$rndI rndF=$rndF 17 | } 18 | 19 | 20 | #OUTPUTS: 21 | 22 | # STEP 1 - - - - generate unique seed for MUSIC 23 | myRandomF 24 | seed9=$rndI 25 | core=ics_2019-03_a$rndI 26 | musicTempl=ics_template.conf 27 | #musicConf=${core}.conf 28 | musicHDF5=${core}.hdf5 29 | # yaml w/ metaData 30 | ymlF=${outPath}/'cosmoMeta.yaml' 31 | 32 | # unit-params 33 | myRandomF 34 | uOmega_m=$rndF 35 | myRandomF 36 | uSigma_8=$rndF 37 | myRandomF 38 | uN_spec=$rndF 39 | #myRandomF 40 | #uOmega_b=$rndF 41 | myRandomF 42 | uH_0=$rndF 43 | 44 | echo $uOmega_m $uSigma_8 $uN_spec nTime=$nTime 45 | 46 | #Marcelo: recommend varying around Omegam=0.3, h=0.7, ns=0.96, sigma8=0.8, 47 | # Const: Omegab=0.045, and OmegaL=1-Omegam. 48 | 49 | #phys-params 50 | Omega_m=` echo $uOmega_m | awk '{printf "%f", (1.+$1*0.10)* 0.3 }'` 51 | Omega_L=` echo $Omega_m | awk '{printf "%f", 1.-$1 }'` 52 | sigma_8=` echo $uSigma_8 | awk '{printf "%f", (1.+$1*0.10)* 0.80 }'` 53 | N_spec=` echo $uN_spec | awk '{printf "%f", (1.+$1*0.10)* 0.96 }'` 54 | Omega_b=0.045 #` echo $uOmega_b | awk '{printf "%f", (1.+$1*0.20)* 0.045 }'` 55 | H_0=` echo $uH_0 | awk '{printf "%f", (1.+$1*0.10)* 70.0 }'` 56 | 57 | # Marcelo: if you want to observe a region of a fixed comoving size measured in Mpc (not Mpc/h) , then = 512 * (H0 / 70) 58 | boxlength=`echo $H_0 | awk '{printf "%d",512*$1 / 70.0}'` 59 | 60 | echo $Omega_m $Omega_L $sigma_8 $N_spec $Omega_b $H_0 $boxlength 61 | 62 | 63 | # - - - - - - create meta-data file 64 | echo "date: "`date` > $ymlF 65 | echo 'namePar: ' >> $ymlF 66 | echo '- Omega_m ' >> $ymlF 67 | echo '- sigma_8 ' >> $ymlF 68 | echo '- N_spec ' >> $ymlF 69 | #echo '- Omega_b ' >> $ymlF 70 | echo '- H_0 ' >> $ymlF 71 | 72 | echo 'unitPar:' >> $ymlF 73 | echo "- $uOmega_m" >> $ymlF 74 | echo "- $uSigma_8" >> $ymlF 75 | echo "- $uN_spec" >> $ymlF 76 | #echo "- $uOmega_b" >> $ymlF 77 | echo "- $uH_0" >> $ymlF 78 | 79 | echo 'physPar:' >> $ymlF 80 | echo "- $Omega_m" >> $ymlF 81 | echo "- $sigma_8" >> $ymlF 82 | echo "- $N_spec" >> $ymlF 83 | #echo "- $Omega_b" >> $ymlF 84 | echo "- $H_0" >> $ymlF 85 | 86 | echo "coreStr : $core" >> $ymlF 87 | echo "boxlength : $boxlength" >> $ymlF 88 | echo "seed9 : $seed9" >> $ymlF 89 | echo "physOmega_m: $Omega_m" >> $ymlF 90 | echo "zRedShift: [${zRedShift[*]}]" >> $ymlF 91 | 92 | # - - - - - -prepare SED edits 93 | SEDINP=${outPath}/tmp.fix_${core} 94 | echo "s//$boxlength/g" >$SEDINP 95 | echo "s//$seed9/g" >>$SEDINP 96 | echo "s//$Omega_m/g" >>$SEDINP 97 | echo "s//$sigma_8/g" >>$SEDINP 98 | echo "s//$N_spec/g" >>$SEDINP 99 | echo "s//$Omega_b/g" >>$SEDINP 100 | echo "s//$Omega_L/g" >>$SEDINP 101 | echo 
"s//$H_0/g" >>$SEDINP 102 | #echo "s/<>/$/g" >>$SEDINP 103 | echo "s||$musicHDF5|g" >>$SEDINP 104 | 105 | echo make new config with $musicTempl 106 | cat $SEDINP 107 | [ ! -f $musicTempl ] && { echo "$musicTempl file not found, abort"; exit 98; } 108 | #cat $musicTempl |sed -f $SEDINP > ${outPath}/$musicConf 109 | 110 | 111 | for (( iT=0; iT<$nTime; iT++ )); do 112 | zstart=${zRedShift[$iT]} 113 | echo prep iT=$iT zstart=$zstart 114 | cat $musicTempl |sed -f $SEDINP | sed "s//$zstart/g"> ${outPath}/${core}_${iT}.conf 115 | 116 | done 117 | exit 118 | 119 | It should be: 120 | 121 | zRedShift: 122 | - 0.0 123 | - 0.5 124 | - 1.5 125 | - 3.0 126 | 127 | -------------------------------------------------------------------------------- /MUSIC/Makefile: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | ### compile time configuration options 3 | FFTW3 = yes 4 | MULTITHREADFFTW = yes 5 | SINGLEPRECISION = no 6 | HAVEHDF5 = yes 7 | HAVEBOXLIB = no 8 | BOXLIB_HOME = ${HOME}/nyx_tot_sterben/BoxLib 9 | 10 | ############################################################################## 11 | ### compiler and path settings 12 | CC = CC 13 | OPT = -Wall -Wno-unknown-pragmas -O3 -g -mtune=native 14 | CFLAGS = 15 | LFLAGS = -lgsl -lgslcblas 16 | CPATHS = -I. -I$(HOME)/local/include -I/opt/local/include -I/usr/local/include -I/usr/common/software/gsl/2.1/intel/include/ 17 | LPATHS = -L$(HOME)/local/lib -L/opt/local/lib -L/usr/local/lib -L/usr/common/software/gsl/2.1/intel/lib 18 | 19 | ############################################################################## 20 | # if you have FFTW 2.1.5 or 3.x with multi-thread support, you can enable the 21 | # option MULTITHREADFFTW 22 | ifeq ($(strip $(MULTITHREADFFTW)), yes) 23 | ifeq ($(CC), mpiicpc) 24 | CFLAGS += -openmp 25 | LFLAGS += -openmp 26 | else 27 | CFLAGS += -fopenmp 28 | LFLAGS += -fopenmp 29 | endif 30 | ifeq ($(strip $(FFTW3)),yes) 31 | ifeq ($(strip $(SINGLEPRECISION)), yes) 32 | LFLAGS += -lfftw3f_threads 33 | else 34 | LFLAGS += -lfftw3_threads 35 | endif 36 | else 37 | ifeq ($(strip $(SINGLEPRECISION)), yes) 38 | LFLAGS += -lsrfftw_threads -lsfftw_threads 39 | else 40 | LFLAGS += -ldrfftw_threads -ldfftw_threads 41 | endif 42 | endif 43 | else 44 | CFLAGS += -DSINGLETHREAD_FFTW 45 | endif 46 | 47 | ifeq ($(strip $(FFTW3)),yes) 48 | CFLAGS += -DFFTW3 49 | endif 50 | 51 | ############################################################################## 52 | # this section makes sure that the correct FFTW libraries are linked 53 | ifeq ($(strip $(SINGLEPRECISION)), yes) 54 | CFLAGS += -DSINGLE_PRECISION 55 | ifeq ($(FFTW3),yes) 56 | LFLAGS += -lfftw3f 57 | else 58 | LFLAGS += -lsrfftw -lsfftw 59 | endif 60 | else 61 | ifeq ($(strip $(FFTW3)),yes) 62 | LFLAGS += -lfftw3 63 | else 64 | LFLAGS += -ldrfftw -ldfftw 65 | endif 66 | endif 67 | 68 | ############################################################################## 69 | #if you have HDF5 installed, you can also enable the following options 70 | ifeq ($(strip $(HAVEHDF5)), yes) 71 | OPT += -DH5_USE_16_API -DHAVE_HDF5 72 | LFLAGS += -lhdf5 73 | endif 74 | 75 | ############################################################################## 76 | CFLAGS += $(OPT) 77 | TARGET = MUSIC 78 | OBJS = output.o transfer_function.o Numerics.o defaults.o constraints.o random.o\ 79 | convolution_kernel.o region_generator.o densities.o cosmology.o poisson.o\ 80 | densities.o cosmology.o poisson.o 
log.o main.o \ 81 | $(patsubst plugins/%.cc,plugins/%.o,$(wildcard plugins/*.cc)) 82 | 83 | ############################################################################## 84 | # stuff for BoxLib 85 | BLOBJS = "" 86 | ifeq ($(strip $(HAVEBOXLIB)), yes) 87 | IN_MUSIC = YES 88 | TOP = ${PWD}/plugins/nyx_plugin 89 | CCbla := $(CC) 90 | include plugins/nyx_plugin/Make.ic 91 | CC := $(CCbla) 92 | CPATHS += $(INCLUDE_LOCATIONS) 93 | LPATHS += -L$(objEXETempDir) 94 | BLOBJS = $(foreach obj,$(objForExecs),plugins/boxlib_stuff/$(obj)) 95 | # 96 | endif 97 | 98 | ############################################################################## 99 | all: $(OBJS) $(TARGET) Makefile 100 | # cd plugins/boxlib_stuff; make 101 | 102 | bla: 103 | echo $(BLOBJS) 104 | 105 | ifeq ($(strip $(HAVEBOXLIB)), yes) 106 | $(TARGET): $(OBJS) plugins/nyx_plugin/*.cpp 107 | cd plugins/nyx_plugin; make BOXLIB_HOME=$(BOXLIB_HOME) FFTW3=$(FFTW3) SINGLE=$(SINGLEPRECISION) 108 | $(CC) $(LPATHS) -o $@ $^ $(LFLAGS) $(BLOBJS) -lifcore 109 | else 110 | $(TARGET): $(OBJS) 111 | $(CC) $(LPATHS) -o $@ $^ $(LFLAGS) 112 | endif 113 | 114 | %.o: %.cc *.hh Makefile 115 | $(CC) $(CFLAGS) $(CPATHS) -c $< -o $@ 116 | 117 | clean: 118 | rm -rf $(OBJS) 119 | ifeq ($(strip $(HAVEBOXLIB)), yes) 120 | oldpath=`pwd` 121 | cd plugins/nyx_plugin; make realclean BOXLIB_HOME=$(BOXLIB_HOME) 122 | endif 123 | cd $(oldpath) 124 | -------------------------------------------------------------------------------- /MUSIC/log.hh: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | log.hh - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | #ifndef __LOG_HH 12 | #define __LOG_HH 13 | 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | /*! 22 | * \brief System for logging runtime library errors, warnings, etc. 23 | * 24 | * This is the class that catches every (debug) info, warning, error, or user message and 25 | * processes it. Messages can be written to files and/or forwarded to user function for 26 | * processing messages. 27 | */ 28 | namespace MUSIC 29 | { 30 | 31 | class log 32 | { 33 | public: 34 | log(){} 35 | ~log(); 36 | 37 | /*! 38 | * \brief Types of logged messages. 39 | */ 40 | enum messageType 41 | { 42 | Info, 43 | DebugInfo, 44 | Warning, 45 | Error, 46 | FatalError, 47 | User 48 | }; 49 | 50 | /*! 51 | * \brief Logged message of type MessageType with some info. 52 | */ 53 | struct message 54 | { 55 | messageType type; 56 | std::string text; 57 | tm* when; 58 | }; 59 | 60 | /*! 61 | * \brief Open file where to log the messages. 62 | */ 63 | static void setOutput(const std::string& filename); 64 | 65 | /*! 66 | * \brief Get the filename of log. 67 | */ 68 | static const std::string& output() { return outputFile_; } 69 | 70 | /*! 71 | * \brief Add a new message to log. 72 | * \param type Type of the new message. 73 | * \param text Message. 74 | * \remarks Message is directly passes to user reciever if one is set. 75 | */ 76 | static void send(messageType type, const std::string& text); 77 | //static void send(messageType type, std::string& text); 78 | 79 | /*! 80 | * \brief Get the list of all of the logged messages. 81 | */ 82 | static const std::list& messages() { return messages_; } 83 | 84 | /*! 85 | * \brief Get the last logged message. 86 | */ 87 | static const message& lastMessage() { return messages_.back(); } 88 | 89 | /*! 
90 | * \brief Set user function to receive newly sent messages to logger. 91 | */ 92 | static void setUserReceiver(void (*userFunc)(const message&)) { receiver = userFunc; } 93 | 94 | /*! 95 | * \brief Set minimum level of message to be logged. 96 | */ 97 | static void setLevel(const log::messageType level); 98 | 99 | private: 100 | 101 | static std::string outputFile_; 102 | static std::ofstream outputStream_; 103 | static std::list messages_; 104 | static messageType logLevel_; 105 | static void (*receiver)(const message&); 106 | }; 107 | 108 | } 109 | 110 | 111 | inline void LOGERR( const char* str, ... ) 112 | { 113 | char out[1024]; 114 | va_list argptr; 115 | va_start(argptr,str); 116 | va_end(argptr); 117 | vsprintf(out,str,argptr); 118 | MUSIC::log::send(MUSIC::log::Error, std::string(out)); 119 | } 120 | 121 | inline void LOGWARN( const char* str, ... ) 122 | { 123 | char out[1024]; 124 | va_list argptr; 125 | va_start(argptr,str); 126 | va_end(argptr); 127 | vsprintf(out,str,argptr); 128 | MUSIC::log::send(MUSIC::log::Warning, std::string(out)); 129 | } 130 | 131 | inline void LOGFATAL( const char* str, ... ) 132 | { 133 | char out[1024]; 134 | va_list argptr; 135 | va_start(argptr,str); 136 | va_end(argptr); 137 | vsprintf(out,str,argptr); 138 | MUSIC::log::send(MUSIC::log::FatalError, std::string(out)); 139 | } 140 | 141 | inline void LOGDEBUG( const char* str, ... ) 142 | { 143 | char out[1024]; 144 | va_list argptr; 145 | va_start(argptr,str); 146 | va_end(argptr); 147 | vsprintf(out,str,argptr); 148 | MUSIC::log::send(MUSIC::log::DebugInfo, std::string(out)); 149 | } 150 | 151 | inline void LOGUSER( const char* str, ... ) 152 | { 153 | char out[1024]; 154 | va_list argptr; 155 | va_start(argptr,str); 156 | va_end(argptr); 157 | vsprintf(out,str,argptr); 158 | MUSIC::log::send(MUSIC::log::User, std::string(out)); 159 | } 160 | 161 | inline void LOGINFO( const char* str, ... ) 162 | { 163 | char out[1024]; 164 | va_list argptr; 165 | va_start(argptr,str); 166 | va_end(argptr); 167 | vsprintf(out,str,argptr); 168 | MUSIC::log::send(MUSIC::log::Info, std::string(out)); 169 | } 170 | 171 | #endif //__LOG_HH 172 | 173 | 174 | -------------------------------------------------------------------------------- /janWorkflow/sliceBigCube.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | __author__ = "Jan Balewski" 4 | __email__ = "janstar1122@gmail.com" 5 | 6 | from ruamel.yaml import YAML 7 | 8 | import numpy as np 9 | import os, shutil 10 | import math, sys 11 | 12 | def read_yaml(yaml_fn,verb=1): 13 | data={} 14 | 15 | if verb: print(' read yaml:',yaml_fn) 16 | with open(yaml_fn) as yamlfile: 17 | for key, val in YAML().load(yamlfile).items(): 18 | print('hpar:',key, val) 19 | data[key]=val 20 | assert len(data['namePar']) == len(data['unitPar']) 21 | assert len(data['physPar']) == len(data['unitPar']) 22 | 23 | return data 24 | 25 | 26 | # - - - - - - - - - - - - - - - - - - - - - - 27 | # - - - - - - - - - - - - - - - - - - - - - - 28 | # - - - - - - - - - - - - - - - - - - - - - - 29 | 30 | def slice_to_cubes( H, newDim,coreName): 31 | 32 | ### now I have my histogram of particle density, I split it up into 8 subvolumes and write it out 33 | ### note that the file structure here is required from the legacy CosmoFlow code. One dir is created for each NBody output file, then the 8 sub-volumes are named [0-7].npy inside that dir. 
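    ### note: with a 512^3 input histogram and newDim = 128 the loops below
    ### produce (512/128)^3 = 64 cubes, written as <core>_dim128_cube0.npy ...
    ### _cube63.npy (the "8 subvolumes" wording above applies to the older
    ### 256^3 reformatNBody.py layout, not here).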
34 | print('slice_to_cubes dim=',newDim) 35 | bigDim=H.shape[0] 36 | assert bigDim%newDim==0 37 | count = -1 38 | for i in range(0,bigDim, newDim ): 39 | for j in range(0, bigDim, newDim): 40 | for k in range(0, bigDim, newDim): 41 | count+=1 42 | d = H[i:(i+newDim),j:(j+newDim),k:(k+newDim)] 43 | histFile=coreName+'_dim%d_cube%d'%(d.shape[0],count) 44 | print (count,'mass sum=%.3g'%np.sum(d)) 45 | np.save(histFile, d) 46 | 47 | print ("got cube count :", count) 48 | 49 | print ("**************************") 50 | 51 | # - - - - - - - - - - - - - - - - - - - - - - 52 | # - - - - - - - - - - - - - - - - - - - - - - 53 | # - - - - - - - - - - - - - - - - - - - - - - 54 | 55 | def slice_to_fullSheets( H, newDim, coreName): 56 | print('slice_to_shits shortDim=',newDim) 57 | bigDim=H.shape[0] 58 | assert bigDim%newDim==0 59 | 60 | count = -1 61 | step=newDim 62 | for i in range(0, bigDim, step): 63 | count+=1 64 | d = H[:,:,i:(i+step)] # the last dim is shorter 65 | histFile=coreName+'_dim%d_sheet%d'%(bigDim,count) 66 | print (count,'mass sum=%.3g'%np.sum(d)) 67 | np.save(histFile, d) 68 | 69 | print ("got sheet count :", count) 70 | 71 | print ("**************************") 72 | 73 | 74 | 75 | # - - - - - - - - - - - - - - - - - - - - - - 76 | # - - - - - - - - - - - - - - - - - - - - - - 77 | # - - - - - - - - - - - - - - - - - - - - - - 78 | 79 | 80 | def slice_to_subSheets( H, stepXY,stepZ, coreName): 81 | print('slice_to_sheets stepXY=%d stepZ=%d'%(stepXY,stepZ)) 82 | bigDim=H.shape[0] 83 | assert bigDim%stepZ==0 84 | assert bigDim%stepXY==0 85 | 86 | count = -1 87 | for iz in range(0, bigDim, stepZ): 88 | for ix in range(0, bigDim, stepXY): 89 | for iy in range(0, bigDim, stepXY): 90 | count+=1 91 | d = H[ix:(ix+stepXY),iy:(iy+stepXY),iz:(iz+stepZ)] # the last dim is shorter 92 | histFile=coreName+'_xy%d_z%d_slice%d'%(stepXY,stepZ,count) 93 | if count%23==0: 94 | print (count,'mass sum=%.3g'%np.sum(d), d.shape,ix,iy,iz) 95 | np.save(histFile, d) 96 | 97 | print ("got sheet count :", count) 98 | 99 | print ("**************************") 100 | 101 | # - - - - - - - - - - - - - - - - - - - - - - 102 | # - - M A I N E 103 | # - - - - - - - - - - - - - - - - - - - - - - 104 | 105 | from pprint import pprint 106 | if __name__ == '__main__': 107 | 108 | inPath=sys.argv[1]+'/' 109 | ymlF=sys.argv[2] 110 | print ("read YAML from ",ymlF,' and pprint it:') 111 | 112 | blob=read_yaml(ymlF) 113 | outPath=inPath 114 | pprint(blob) 115 | 116 | core=blob['coreStr'] 117 | #ics_2018-11_d14600087_dim512_full.npy 118 | bigFileN=inPath+core+'_dim512_full.npy' 119 | nbins = blob['boxlength'] 120 | 121 | bigH=np.load(bigFileN) 122 | print('shape1:',bigH.shape) 123 | fnameSeed=outPath+core 124 | 125 | slice_to_cubes( bigH, 128, fnameSeed) 126 | #slice_to_cubes( bigH, 256, fnameSeed) 127 | slice_to_fullSheets( bigH, 1, fnameSeed) 128 | #slice_to_subSheets( bigH, 256,4, fnameSeed) 129 | -------------------------------------------------------------------------------- /MUSIC/tools/point_file_reader.hh: -------------------------------------------------------------------------------- 1 | #ifndef POINT_FILE_READER_HH 2 | #define POINT_FILE_READER_HH 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | struct point_reader{ 11 | 12 | int num_columns; 13 | 14 | point_reader( void ) 15 | : num_columns( 0 ) 16 | { } 17 | 18 | bool isFloat( std::string myString ) 19 | { 20 | std::istringstream iss(myString); 21 | double f; 22 | //iss >> std::noskipws >> f; // noskipws considers leading whitespace invalid 23 | 
// Check the entire string was consumed and if either failbit or badbit is set 24 | iss >> f; 25 | return iss.eof() && !iss.fail(); 26 | } 27 | 28 | template< typename real_t > 29 | void read_points_from_file( std::string fname, float vfac_, std::vector& p ) 30 | { 31 | std::ifstream ifs(fname.c_str()); 32 | if( !ifs ) 33 | { 34 | printf("region_ellipsoid_plugin::read_points_from_file : Could not open file \'%s\'",fname.c_str()); 35 | throw std::runtime_error("region_ellipsoid_plugin::read_points_from_file : cannot open point file."); 36 | } 37 | 38 | int colcount = 0, colcount1 = 0, row = 0; 39 | p.clear(); 40 | 41 | while( ifs ) 42 | { 43 | std::string s; 44 | if( !getline(ifs,s) )break; 45 | std::stringstream ss(s); 46 | colcount1 = 0; 47 | while(ss) 48 | { 49 | if( !getline(ss,s,' ') ) break; 50 | if( !isFloat( s ) ) continue; 51 | p.push_back( strtod(s.c_str(),NULL) ); 52 | 53 | if( row == 0 ) 54 | colcount++; 55 | else 56 | colcount1++; 57 | } 58 | ++row; 59 | 60 | if( row>1 && colcount != colcount1 ) 61 | printf("error on line %d of input file",row); 62 | 63 | //std::cout << std::endl; 64 | } 65 | 66 | printf("region point file appears to contain %d columns",colcount); 67 | 68 | if( p.size()%3 != 0 && p.size()%6 != 0 ) 69 | { 70 | printf("Region point file \'%s\' does not contain triplets (%d elems)",fname.c_str(),p.size()); 71 | throw std::runtime_error("region_ellipsoid_plugin::read_points_from_file : file does not contain triplets."); 72 | } 73 | 74 | 75 | double x0[3] = { p[0],p[1],p[2] }, dx; 76 | 77 | if( colcount == 3 ) 78 | { 79 | // only positions are given 80 | 81 | for( size_t i=3; i 0.5 ) dx -= 1.0; 88 | p[i+j] = x0[j] + dx; 89 | } 90 | } 91 | } 92 | else if( colcount == 6 ) 93 | { 94 | // positions and velocities are given 95 | 96 | //... include the velocties to unapply Zeldovich approx. 
97 | 98 | for( size_t j=3; j<6; ++j ) 99 | { 100 | dx = (p[j-3]-p[j]/vfac_)-x0[j-3]; 101 | if( dx < -0.5 ) dx += 1.0; 102 | else if( dx > 0.5 ) dx -= 1.0; 103 | p[j] = x0[j-3] + dx; 104 | } 105 | 106 | for( size_t i=6; i 0.5 ) dx -= 1.0; 113 | p[i+j] = x0[j] + dx; 114 | } 115 | 116 | for( size_t j=3; j<6; ++j ) 117 | { 118 | dx = (p[i+j-3]-p[i+j]/vfac_)-x0[j-3]; 119 | if( dx < -0.5 ) dx += 1.0; 120 | else if( dx > 0.5 ) dx -= 1.0; 121 | p[i+j] = x0[j-3] + dx; 122 | } 123 | } 124 | } 125 | else 126 | printf("Problem interpreting the region point file \'%s\'", fname.c_str() ); 127 | 128 | num_columns = colcount; 129 | } 130 | 131 | 132 | }; 133 | 134 | 135 | #endif 136 | -------------------------------------------------------------------------------- /MUSIC/plugins/point_file_reader.hh: -------------------------------------------------------------------------------- 1 | #ifndef POINT_FILE_READER_HH 2 | #define POINT_FILE_READER_HH 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "log.hh" 9 | 10 | struct point_reader{ 11 | 12 | int num_columns; 13 | 14 | point_reader( void ) 15 | : num_columns( 0 ) 16 | { } 17 | 18 | bool isFloat( std::string myString ) 19 | { 20 | std::istringstream iss(myString); 21 | double f; 22 | //iss >> std::noskipws >> f; // noskipws considers leading whitespace invalid 23 | // Check the entire string was consumed and if either failbit or badbit is set 24 | iss >> f; 25 | return iss.eof() && !iss.fail(); 26 | } 27 | 28 | template< typename real_t > 29 | void read_points_from_file( std::string fname, float vfac_, std::vector& p ) 30 | { 31 | std::ifstream ifs(fname.c_str()); 32 | if( !ifs ) 33 | { 34 | LOGERR("region_ellipsoid_plugin::read_points_from_file : Could not open file \'%s\'",fname.c_str()); 35 | throw std::runtime_error("region_ellipsoid_plugin::read_points_from_file : cannot open point file."); 36 | } 37 | 38 | int colcount = 0, colcount1 = 0, row = 0; 39 | p.clear(); 40 | 41 | while( ifs ) 42 | { 43 | std::string s; 44 | if( !getline(ifs,s) )break; 45 | std::stringstream ss(s); 46 | colcount1 = 0; 47 | while(ss) 48 | { 49 | if( !getline(ss,s,' ') ) break; 50 | if( !isFloat( s ) ) continue; 51 | p.push_back( strtod(s.c_str(),NULL) ); 52 | 53 | if( row == 0 ) 54 | colcount++; 55 | else 56 | colcount1++; 57 | } 58 | ++row; 59 | 60 | if( row>1 && colcount != colcount1 ) 61 | LOGERR("error on line %d of input file",row); 62 | 63 | //std::cout << std::endl; 64 | } 65 | 66 | LOGINFO("region point file appears to contain %d columns",colcount); 67 | 68 | if( p.size()%3 != 0 && p.size()%6 != 0 ) 69 | { 70 | LOGERR("Region point file \'%s\' does not contain triplets (%d elems)",fname.c_str(),p.size()); 71 | throw std::runtime_error("region_ellipsoid_plugin::read_points_from_file : file does not contain triplets."); 72 | } 73 | 74 | 75 | double x0[3] = { p[0],p[1],p[2] }, dx; 76 | 77 | if( colcount == 3 ) 78 | { 79 | // only positions are given 80 | 81 | for( size_t i=3; i 0.5 ) dx -= 1.0; 88 | p[i+j] = x0[j] + dx; 89 | } 90 | } 91 | } 92 | else if( colcount == 6 ) 93 | { 94 | // positions and velocities are given 95 | 96 | //... include the velocties to unapply Zeldovich approx. 
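      // the velocity columns are overwritten with the estimated Lagrangian
      // (pre-Zeldovich) position x - v/vfac_, with the offset relative to the
      // first point wrapped back into the periodic unit box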
97 | 98 | for( size_t j=3; j<6; ++j ) 99 | { 100 | dx = (p[j-3]-p[j]/vfac_)-x0[j-3]; 101 | if( dx < -0.5 ) dx += 1.0; 102 | else if( dx > 0.5 ) dx -= 1.0; 103 | p[j] = x0[j-3] + dx; 104 | } 105 | 106 | for( size_t i=6; i 0.5 ) dx -= 1.0; 113 | p[i+j] = x0[j] + dx; 114 | } 115 | 116 | for( size_t j=3; j<6; ++j ) 117 | { 118 | dx = (p[i+j-3]-p[i+j]/vfac_)-x0[j-3]; 119 | if( dx < -0.5 ) dx += 1.0; 120 | else if( dx > 0.5 ) dx -= 1.0; 121 | p[i+j] = x0[j-3] + dx; 122 | } 123 | } 124 | } 125 | else 126 | LOGERR("Problem interpreting the region point file \'%s\'", fname.c_str() ); 127 | 128 | num_columns = colcount; 129 | } 130 | 131 | 132 | }; 133 | 134 | 135 | #endif -------------------------------------------------------------------------------- /pycola/box_smooth.pyx: -------------------------------------------------------------------------------- 1 | ######################################################################## 2 | ######################################################################## 3 | # Copyright (c) 2013,2014 Svetlin Tassev 4 | # Princeton University,Harvard University 5 | # 6 | # This file is part of pyCOLA. 7 | # 8 | # pyCOLA is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # pyCOLA is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with pyCOLA. If not, see . 20 | # 21 | ######################################################################## 22 | ######################################################################## 23 | 24 | 25 | import numpy as np 26 | cimport numpy as np 27 | cimport cython 28 | 29 | 30 | @cython.boundscheck(False) 31 | @cython.wraparound(False) 32 | @cython.cdivision(True) 33 | @cython.embedsignature(True) 34 | def box_smooth( 35 | np.ndarray[np.float32_t, ndim=3] arr, 36 | np.ndarray[np.float32_t, ndim=3] arr1 37 | ): 38 | """ 39 | :math:`\\vspace{-1mm}` 40 | 41 | Do a 3x3x3 boxcar smoothing. 42 | 43 | **Arguments**: 44 | 45 | * ``arr`` -- a 3-dim float32 array serving as input. 46 | 47 | * ``arr1`` -- a 3-dim float32 array serving as output. 48 | 49 | .. note:: This should really be replaced with a Gaussian smoothing, 50 | so that one can change the amount of smoothing. Gaussian 51 | smoothing can be trivially implemented by modifying 52 | :func:`potential.get_phi` as indicated in the source file of 53 | that function. Not done here as this worked well enough for the 54 | paper. 
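
    **Example** (an illustrative sketch; the array shape is arbitrary)::

        >>> import numpy as np
        >>> from box_smooth import box_smooth
        >>> arr = np.random.rand(64, 64, 64).astype(np.float32)
        >>> arr1 = np.empty_like(arr)   # overwritten on entry; any float32 array of the same shape works
        >>> box_smooth(arr, arr1)

    On exit, each interior cell of ``arr1`` holds the 27-cell boxcar average
    of ``arr``, while the one-cell boundary layer simply keeps a copy of
    ``arr``.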
55 | 56 | """ 57 | 58 | cdef int i,j,k 59 | 60 | 61 | cdef int ngrid_x,ngrid_y,ngrid_z 62 | 63 | ngrid_x=arr.shape[0] 64 | ngrid_y=arr.shape[1] 65 | ngrid_z=arr.shape[2] 66 | 67 | 68 | from cython.parallel cimport prange,parallel 69 | cdef int nthreads 70 | from multiprocessing import cpu_count 71 | nthreads=cpu_count() 72 | #print 'nthreads,npart_x = ', nthreads,npart_x 73 | 74 | if ngrid_x-2>nthreads: 75 | chunksize=(ngrid_x-2)//nthreads 76 | else: 77 | chunksize=1 78 | arr1[:]=arr[:] 79 | 80 | with nogil, parallel(num_threads=nthreads): 81 | for i in prange(1,ngrid_x-1,schedule='static',chunksize=chunksize): 82 | for j in range(1,ngrid_y-1): 83 | for k in range(1,ngrid_z-1): 84 | 85 | arr1[i,j,k]+=arr[i-1, j, k] 86 | arr1[i,j,k]+=arr[i, j-1, k] 87 | arr1[i,j,k]+=arr[i, j, k-1] 88 | arr1[i,j,k]+=arr[i-1, j-1, k] 89 | arr1[i,j,k]+=arr[i-1, j, k-1] 90 | arr1[i,j,k]+=arr[i, j-1, k-1] 91 | arr1[i,j,k]+=arr[i-1, j-1, k-1] 92 | 93 | arr1[i,j,k]+=arr[i+1, j, k] 94 | arr1[i,j,k]+=arr[i, j+1, k] 95 | arr1[i,j,k]+=arr[i, j, k+1] 96 | arr1[i,j,k]+=arr[i+1, j+1, k] 97 | arr1[i,j,k]+=arr[i+1, j, k+1] 98 | arr1[i,j,k]+=arr[i, j+1, k+1] 99 | arr1[i,j,k]+=arr[i+1, j+1, k+1] 100 | 101 | arr1[i,j,k]+=arr[i-1, j+1, k] 102 | arr1[i,j,k]+=arr[i+1, j-1, k] 103 | 104 | arr1[i,j,k]+=arr[i+1, j, k-1] 105 | arr1[i,j,k]+=arr[i-1, j, k+1] 106 | 107 | arr1[i,j,k]+=arr[i, j+1, k-1] 108 | arr1[i,j,k]+=arr[i, j-1, k+1] 109 | 110 | arr1[i,j,k]+=arr[i+1, j-1, k-1] 111 | arr1[i,j,k]+=arr[i-1, j+1, k-1] 112 | arr1[i,j,k]+=arr[i-1, j-1, k+1] 113 | arr1[i,j,k]+=arr[i-1, j+1, k+1] 114 | arr1[i,j,k]+=arr[i+1, j-1, k+1] 115 | arr1[i,j,k]+=arr[i+1, j+1, k-1] 116 | 117 | arr1[i,j,k]/=27.0 118 | 119 | -------------------------------------------------------------------------------- /pycola/index.rst: -------------------------------------------------------------------------------- 1 | .. ######################################################################## 2 | .. ######################################################################## 3 | .. # Copyright (c) 2013,2014 Svetlin Tassev 4 | .. # Princeton University,Harvard University 5 | .. # 6 | .. # This file is part of pyCOLA. 7 | .. # 8 | .. # pyCOLA is free software: you can redistribute it and/or modify 9 | .. # it under the terms of the GNU General Public License as published by 10 | .. # the Free Software Foundation, either version 3 of the License, or 11 | .. # (at your option) any later version. 12 | .. # 13 | .. # pyCOLA is distributed in the hope that it will be useful, 14 | .. # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | .. # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | .. # GNU General Public License for more details. 17 | .. # 18 | .. # You should have received a copy of the GNU General Public License 19 | .. # along with pyCOLA. If not, see . 20 | .. # 21 | .. ######################################################################## 22 | .. ######################################################################## 23 | 24 | 25 | Welcome to pyCOLA's documentation! 26 | ================================== 27 | 28 | :Author: Svetlin Tassev 29 | :Version: |release| 30 | :Date: June 26, 2014 31 | :Homepage: `pyCOLA Homepage`_ 32 | :Documentation: `PDF Documentation `_ 33 | :License: `GPLv3+ License`_ 34 | 35 | .. _pyCOLA Homepage: https://bitbucket.org/tassev/pycola 36 | .. 
_GPLv3+ License: https://www.gnu.org/licenses/gpl-3.0.html 37 | 38 | 39 | Introduction 40 | ------------ 41 | 42 | pyCOLA is a multithreaded Python/Cython N-body code, implementing the 43 | Comoving Lagrangian Acceleration (COLA) method in the temporal and 44 | spatial domains. pyCOLA also implements a novel method to compute 45 | second-order cosmological initial conditions for given initial 46 | conditions at first-order for arbitrary initial particle configurations 47 | (including glass initial conditions, as well as initial conditions 48 | having refined subregions). 49 | 50 | pyCOLA is based on the following two papers: [temporalCOLA]_, 51 | [spatialCOLA]_. We kindly ask you [#f1]_ to acknowledge them and their 52 | authors in any program or publication in which you use the COLA method 53 | in the temporal and/or spatial domains. 54 | 55 | The new method for calculating second-order cosmological initial 56 | conditions is based on the following paper: (todo: Daniel, let me know 57 | what to cite). Again, we kindly ask you to acknowledge that paper and its 58 | authors in any program or publication in which you use that method. 59 | 60 | 61 | .. rubric:: Footnotes 62 | 63 | .. [#f1] We do not *require* you, however, as we want pyCOLA to be 64 | GPLv3 compatible. 65 | 66 | pyCOLA requires `NumPy `_, `SciPy 67 | `_, `pyFFTW 68 | `_, `h5py 69 | `_, as well as their respective dependencies. 70 | Note that pyFFTW v0.9.2 does not support large arrays, so one needs to 71 | install the development version from `github 72 | `_, where the bug has been fixed. 73 | 74 | 75 | 76 | .. note:: 77 | All lengthscales are in units of comoving :math:`\mathrm{Mpc}/h`, unless 78 | otherwise specified. 79 | 80 | .. todo:: 81 | If there is interest in the code (i.e. not only the algorithm), it 82 | should be converted to use classes as that will enormously reduce 83 | the amount of arguments to be passed around, will make the code more 84 | readable, and reduce the chances for introducing bugs. Some of the 85 | functions are already converted to using classes in a separate 86 | branch, but converting the whole code will take some time. 87 | 88 | pyCOLA modules 89 | ================= 90 | 91 | .. include:: ic.rst 92 | .. include:: growth.rst 93 | .. include:: cic.rst 94 | .. include:: potential.rst 95 | .. include:: acceleration.rst 96 | .. include:: evolve.rst 97 | Auxiliary 98 | ------------------- 99 | .. include:: box_smooth.rst 100 | .. include:: aux.rst 101 | 102 | 103 | 104 | 105 | Worked-out example 106 | ================== 107 | 108 | .. include:: example.rst 109 | 110 | 111 | 112 | .. [MUSIC] `Multi-scale initial conditions for cosmological 113 | simulations`, O. Hahn, T. Abel, Monthly Notices of the Royal 114 | Astronomical Society, 415, 2101 (2011), `arXiv:1103.6031 115 | `_. The code can be found 116 | on `this website `_. 117 | 118 | .. [spatialCOLA] `Extending the N-body Comoving Lagrangian 119 | Acceleration Method to the Spatial Domain`, S. Tassev, D. 120 | J. Eisenstein, B. D. Wandelt, M. Zaldarriaga, (2014), 121 | `arXiv:14??.???? `_ 122 | 123 | .. [temporalCOLA] `Solving Large Scale Structure in Ten Easy Steps with 124 | COLA`, S. Tassev, M. Zaldarriaga, D. J. 
Eisenstein, Journal of 125 | Cosmology and Astroparticle Physics, 06, 036 126 | (2013), `arXiv:1301.0322 `_ 127 | 128 | 129 | 130 | -------------------------------------------------------------------------------- /MUSIC/general.hh: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | general.hh - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | #ifndef __GENERAL_HH 12 | #define __GENERAL_HH 13 | 14 | #include "log.hh" 15 | 16 | #include 17 | #include "omp.h" 18 | 19 | #ifdef WITH_MPI 20 | #ifdef MANNO 21 | #include 22 | #else 23 | #include 24 | #endif 25 | #else 26 | #include 27 | #endif 28 | 29 | #ifdef FFTW3 30 | #include 31 | #if defined(SINGLE_PRECISION) 32 | typedef float fftw_real; 33 | #else 34 | typedef double fftw_real; 35 | #endif 36 | 37 | #else 38 | #if defined(SINGLE_PRECISION) and not defined(SINGLETHREAD_FFTW) 39 | #include 40 | #include 41 | #elif defined(SINGLE_PRECISION) and defined(SINGLETHREAD_FFTW) 42 | #include 43 | #elif not defined(SINGLE_PRECISION) and not defined(SINGLETHREAD_FFTW) 44 | #include 45 | #include 46 | #elif not defined(SINGLE_PRECISION) and defined(SINGLETHREAD_FFTW) 47 | #include 48 | #endif 49 | #endif 50 | 51 | #ifdef SINGLE_PRECISION 52 | typedef float real_t; 53 | #else 54 | typedef double real_t; 55 | #endif 56 | 57 | 58 | #ifdef FFTW3 59 | #define RE(x) ((x)[0]) 60 | #define IM(x) ((x)[1]) 61 | #else 62 | #define RE(x) ((x).re) 63 | #define IM(x) ((x).im) 64 | #endif 65 | 66 | #if defined(FFTW3) && defined(SINGLE_PRECISION) 67 | #define fftw_complex fftwf_complex 68 | #endif 69 | 70 | 71 | 72 | #include 73 | 74 | #include "config_file.hh" 75 | //#include "mesh.hh" 76 | 77 | 78 | 79 | //! compute square of argument 80 | template< typename T > 81 | inline T SQR( T a ){ 82 | return a*a; 83 | } 84 | 85 | //! compute cube of argument 86 | template< typename T > 87 | inline T CUBE( T a ){ 88 | return a*a*a; 89 | } 90 | 91 | //! compute 4th power of argument 92 | template< typename T > 93 | inline T POW4( T a ){ 94 | return SQR(SQR(a)); 95 | //return a*a*a*a; 96 | } 97 | 98 | 99 | //! structure for cosmological parameters 100 | typedef struct cosmology{ 101 | double 102 | Omega_m, //!< baryon+dark matter density 103 | Omega_b, //!< baryon matter density 104 | Omega_DE, //!< dark energy density (cosmological constant or parameterised) 105 | Omega_r, //!< photon + relativistic particle density 106 | Omega_k, //!< curvature density 107 | H0, //!< Hubble constant in km/s/Mpc 108 | nspect, //!< long-wave spectral index (scale free is nspect=1) 109 | sigma8, //!< power spectrum normalization 110 | w_0, //!< dark energy equation of state parameter 1: w = w0 + a * wa 111 | w_a, //!< dark energy equation of state parameter 2: w = w0 + a * wa 112 | 113 | //Gamma, //!< shape parameter (of historical interest, as a free parameter) 114 | //fnl, //!< non-gaussian contribution parameter 115 | //w0, //!< dark energy equation of state parameter (not implemented, i.e. =1 at the moment) 116 | //wa, //!< dark energy equation of state parameter (not implemented, i.e. =1 at the moment) 117 | dplus, //!< linear perturbation growth factor 118 | pnorm, //!< actual power spectrum normalisation factor 119 | vfact, //!< velocity<->displacement conversion factor in Zel'dovich approx. 
120 | WDMmass, //!< Warm DM particle mass 121 | WDMg_x, //!< Warm DM particle degrees of freedom 122 | astart; //!< expansion factor a for which to generate initial conditions 123 | 124 | cosmology( config_file cf ) 125 | { 126 | double zstart = cf.getValue( "setup", "zstart" ); 127 | 128 | astart = 1.0/(1.0+zstart); 129 | Omega_b = cf.getValue( "cosmology", "Omega_b" ); 130 | Omega_m = cf.getValue( "cosmology", "Omega_m" ); 131 | Omega_DE = cf.getValue( "cosmology", "Omega_L" ); 132 | w_0 = cf.getValueSafe( "cosmology", "w0", -1.0 ); 133 | w_a = cf.getValueSafe( "cosmology", "wa", 0.0 ); 134 | 135 | Omega_r = cf.getValueSafe( "cosmology", "Omega_r", 0.0 ); // no longer default to nonzero (8.3e-5) 136 | Omega_k = 1.0 - Omega_m - Omega_DE - Omega_r; 137 | 138 | H0 = cf.getValue( "cosmology", "H0" ); 139 | sigma8 = cf.getValue( "cosmology", "sigma_8" ); 140 | nspect = cf.getValue( "cosmology", "nspec" ); 141 | WDMg_x = cf.getValueSafe( "cosmology", "WDMg_x", 1.5 ); 142 | WDMmass = cf.getValueSafe( "cosmology", "WDMmass", 0.0 ); 143 | 144 | dplus = 0.0; 145 | pnorm = 0.0; 146 | vfact = 0.0; 147 | } 148 | 149 | cosmology( void ) 150 | { 151 | 152 | } 153 | }Cosmology; 154 | 155 | //! basic box/grid/refinement structure parameters 156 | typedef struct { 157 | unsigned levelmin, levelmax; 158 | double boxlength; 159 | std::vector offx,offy,offz,llx,lly,llz; 160 | }Parameters; 161 | 162 | //! measure elapsed wallclock time 163 | inline double time_seconds( void ) 164 | { 165 | #ifdef WITH_MPI 166 | return MPI_Wtime(); 167 | #else 168 | return ((double) clock()) / CLOCKS_PER_SEC; 169 | #endif 170 | } 171 | 172 | 173 | inline bool is_number(const std::string& s) 174 | { 175 | for (unsigned i = 0; i < s.length(); i++) 176 | if (!std::isdigit(s[i])&&s[i]!='-' ) 177 | return false; 178 | 179 | return true; 180 | } 181 | 182 | 183 | #endif 184 | -------------------------------------------------------------------------------- /MUSIC/output.hh: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | output.hh - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | #ifndef __OUTPUT_HH 12 | #define __OUTPUT_HH 13 | 14 | #include 15 | #include 16 | 17 | #include "general.hh" 18 | #include "mesh.hh" 19 | 20 | 21 | /*! 22 | * @class output_plugin 23 | * @brief abstract base class for output plug-ins 24 | * 25 | * This class provides the abstract base class for all output plug-ins. 26 | * All output plug-ins need to derive from it and implement the purely 27 | * virtual member functions. 28 | */ 29 | class output_plugin 30 | { 31 | protected: 32 | 33 | //! reference to the config_file object that holds all configuration options 34 | config_file& cf_; 35 | 36 | //! output file or directory name 37 | std::string fname_; 38 | 39 | //! minimum refinement level 40 | unsigned levelmin_; 41 | 42 | //! maximum refinement level 43 | unsigned levelmax_; 44 | 45 | std::vector 46 | offx_, //!< vector describing the x-offset of each level 47 | offy_, //!< vector describing the y-offset of each level 48 | offz_, //!< vector describing the z-offset of each level 49 | sizex_, //!< vector describing the extent in x of each level 50 | sizey_, //!< vector describing the extent in y of each level 51 | sizez_; //!< vector describing the extent in z of each level 52 | 53 | //! 
quick access function to query properties of the refinement grid from the configuration options 54 | /*! @param name name of the config property 55 | * @param icomp component index (0=x, 1=y, 2=z) 56 | * @param oit output iterator (e.g. std::back_inserter for vectors) 57 | */ 58 | template< typename output_iterator > 59 | void query_grid_prop( std::string name, int icomp, output_iterator oit ) 60 | { 61 | char str[128]; 62 | for( unsigned i=levelmin_; i<=levelmax_; ++i ) 63 | { 64 | sprintf( str, "%s(%u,%d)", name.c_str(), i, icomp ); 65 | *oit = cf_.getValue( "setup", str ); 66 | ++oit; 67 | } 68 | } 69 | 70 | public: 71 | 72 | //! constructor 73 | explicit output_plugin( config_file& cf ) 74 | : cf_(cf) 75 | { 76 | fname_ = cf.getValue("output","filename"); 77 | levelmin_ = cf.getValue( "setup", "levelmin" ); 78 | levelmax_ = cf.getValue( "setup", "levelmax" ); 79 | 80 | query_grid_prop( "offset", 0, std::back_inserter(offx_) ); 81 | query_grid_prop( "offset", 1, std::back_inserter(offy_) ); 82 | query_grid_prop( "offset", 2, std::back_inserter(offz_) ); 83 | 84 | query_grid_prop( "size", 0, std::back_inserter(sizex_) ); 85 | query_grid_prop( "size", 1, std::back_inserter(sizey_) ); 86 | query_grid_prop( "size", 2, std::back_inserter(sizez_) ); 87 | } 88 | 89 | //! destructor 90 | virtual ~output_plugin() 91 | { } 92 | 93 | //! purely virtual prototype to write the masses for each dark matter particle 94 | virtual void write_dm_mass( const grid_hierarchy& gh ) = 0; 95 | 96 | //! purely virtual prototype to write the dark matter density field 97 | virtual void write_dm_density( const grid_hierarchy& gh ) = 0; 98 | 99 | //! purely virtual prototype to write the dark matter gravitational potential (from which displacements are computed in 1LPT) 100 | virtual void write_dm_potential( const grid_hierarchy& gh ) = 0; 101 | 102 | //! purely virtual prototype to write dark matter particle velocities 103 | virtual void write_dm_velocity( int coord, const grid_hierarchy& gh ) = 0; 104 | 105 | //! purely virtual prototype to write dark matter particle positions 106 | virtual void write_dm_position( int coord, const grid_hierarchy& gh ) = 0; 107 | 108 | //! purely virtual prototype to write the baryon velocities 109 | virtual void write_gas_velocity( int coord, const grid_hierarchy& gh ) = 0; 110 | 111 | //! purely virtual prototype to write the baryon coordinates 112 | virtual void write_gas_position( int coord, const grid_hierarchy& gh ) = 0; 113 | 114 | //! purely virtual prototype to write the baryon density field 115 | virtual void write_gas_density( const grid_hierarchy& gh ) = 0; 116 | 117 | //! purely virtual prototype to write the baryon gravitational potential (from which displacements are computed in 1LPT) 118 | virtual void write_gas_potential( const grid_hierarchy& gh ) = 0; 119 | 120 | //! purely virtual prototype for all things to be done at the very end 121 | virtual void finalize( void ) = 0; 122 | }; 123 | 124 | /*! 125 | * @brief implements abstract factory design pattern for output plug-ins 126 | */ 127 | struct output_plugin_creator 128 | { 129 | //! create an instance of a plug-in 130 | virtual output_plugin * create( config_file& cf ) const = 0; 131 | 132 | //! destroy an instance of a plug-in 133 | virtual ~output_plugin_creator() { } 134 | }; 135 | 136 | //! maps the name of a plug-in to a pointer of the factory pattern 137 | std::map< std::string, output_plugin_creator *>& get_output_plugin_map(); 138 | 139 | //! 
print a list of all registered output plug-ins 140 | void print_output_plugins(); 141 | 142 | /*! 143 | * @brief concrete factory pattern for output plug-ins 144 | */ 145 | template< class Derived > 146 | struct output_plugin_creator_concrete : public output_plugin_creator 147 | { 148 | //! register the plug-in by its name 149 | output_plugin_creator_concrete( const std::string& plugin_name ) 150 | { 151 | get_output_plugin_map()[ plugin_name ] = this; 152 | } 153 | 154 | //! create an instance of the plug-in 155 | output_plugin * create( config_file& cf ) const 156 | { 157 | return new Derived( cf ); 158 | } 159 | }; 160 | 161 | //! failsafe version to select the output plug-in 162 | output_plugin *select_output_plugin( config_file& cf ); 163 | 164 | #endif // __OUTPUT_HH 165 | -------------------------------------------------------------------------------- /MUSIC/poisson.hh: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | poisson.cc - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | #ifndef __POISSON_HH 12 | #define __POISSON_HH 13 | 14 | #include 15 | #include 16 | 17 | #include "general.hh" 18 | #include "mesh.hh" 19 | 20 | //! abstract base class for Poisson solvers and gradient calculations 21 | class poisson_plugin 22 | { 23 | protected: 24 | 25 | //! reference to the config_file object that holds all configuration options 26 | config_file& cf_; 27 | 28 | public: 29 | 30 | //! constructor 31 | explicit poisson_plugin( config_file& cf ) 32 | : cf_(cf) 33 | { } 34 | 35 | //! destructor 36 | virtual ~poisson_plugin() 37 | { } 38 | 39 | //! solve Poisson's equation Du=f 40 | virtual double solve( grid_hierarchy& f, grid_hierarchy& u ) = 0; 41 | 42 | //! compute the gradient of u 43 | virtual double gradient( int dir, grid_hierarchy& u, grid_hierarchy& Du ) = 0; 44 | 45 | //! compute the gradient and add 46 | virtual double gradient_add( int dir, grid_hierarchy& u, grid_hierarchy& Du ) = 0; 47 | 48 | }; 49 | 50 | #pragma mark - 51 | 52 | /*! 53 | * @brief implements abstract factory design pattern for poisson solver plug-ins 54 | */ 55 | struct poisson_plugin_creator 56 | { 57 | //! create an instance of a plug-in 58 | virtual poisson_plugin * create( config_file& cf ) const = 0; 59 | 60 | //! destroy an instance of a plug-in 61 | virtual ~poisson_plugin_creator() { } 62 | }; 63 | 64 | //! maps the name of a plug-in to a pointer of the factory pattern 65 | std::map< std::string, poisson_plugin_creator *>& get_poisson_plugin_map(); 66 | 67 | //! print a list of all registered output plug-ins 68 | void print_poisson_plugins(); 69 | 70 | 71 | /*! 72 | * @brief concrete factory pattern for output plug-ins 73 | */ 74 | template< class Derived > 75 | struct poisson_plugin_creator_concrete : public poisson_plugin_creator 76 | { 77 | //! register the plug-in by its name 78 | poisson_plugin_creator_concrete( const std::string& plugin_name ) 79 | { 80 | get_poisson_plugin_map()[ plugin_name ] = this; 81 | } 82 | 83 | //! create an instance of the plug-in 84 | poisson_plugin * create( config_file& cf ) const 85 | { 86 | return new Derived( cf ); 87 | } 88 | }; 89 | 90 | /**************************************************************************************/ 91 | /**************************************************************************************/ 92 | #pragma mark - 93 | 94 | //! 
adaptive FAS multigrid implementation of abstract base class poisson_plugin 95 | class multigrid_poisson_plugin : public poisson_plugin 96 | { 97 | public: 98 | 99 | //! constructor 100 | explicit multigrid_poisson_plugin( config_file& cf ) 101 | : poisson_plugin( cf ) 102 | { } 103 | 104 | //! solve Poisson's equation Du=f 105 | double solve( grid_hierarchy& f, grid_hierarchy& u ); 106 | 107 | //! compute the gradient of u 108 | double gradient( int dir, grid_hierarchy& u, grid_hierarchy& Du ); 109 | 110 | //! compute the gradient and add 111 | double gradient_add( int dir, grid_hierarchy& u, grid_hierarchy& Du ); 112 | 113 | protected: 114 | 115 | //! various FD approximation implementations 116 | struct implementation 117 | { 118 | //! solve 2nd order FD approximation to Poisson's equation 119 | double solve_O2( grid_hierarchy& f, grid_hierarchy& u ); 120 | 121 | //! solve 4th order FD approximation to Poisson's equation 122 | double solve_O4( grid_hierarchy& f, grid_hierarchy& u ); 123 | 124 | //! solve 6th order FD approximation to Poisson's equation 125 | double solve_O6( grid_hierarchy& f, grid_hierarchy& u ); 126 | 127 | //! compute 2nd order FD gradient 128 | void gradient_O2( int dir, grid_hierarchy& u, grid_hierarchy& Du ); 129 | 130 | //! compute and add 2nd order FD gradient 131 | void gradient_add_O2( int dir, grid_hierarchy& u, grid_hierarchy& Du ); 132 | 133 | //! compute 4th order FD gradient 134 | void gradient_O4( int dir, grid_hierarchy& u, grid_hierarchy& Du ); 135 | 136 | //! compute and add 4th order FD gradient 137 | void gradient_add_O4( int dir, grid_hierarchy& u, grid_hierarchy& Du ); 138 | 139 | //! compute 6th order FD gradient 140 | void gradient_O6( int dir, grid_hierarchy& u, grid_hierarchy& Du ); 141 | 142 | //! compute and add 6th order FD gradient 143 | void gradient_add_O6( int dir, grid_hierarchy& u, grid_hierarchy& Du ); 144 | }; 145 | }; 146 | 147 | /**************************************************************************************/ 148 | /**************************************************************************************/ 149 | #pragma mark - 150 | 151 | //! FFT based implementation of abstract base class poisson_plugin 152 | class fft_poisson_plugin : public poisson_plugin 153 | { 154 | public: 155 | 156 | //! constructor 157 | explicit fft_poisson_plugin( config_file& cf ) 158 | : poisson_plugin( cf ) 159 | { } 160 | 161 | //! solve Poisson's equation Du=f 162 | double solve( grid_hierarchy& f, grid_hierarchy& u ); 163 | 164 | //! compute the gradient of u 165 | double gradient( int dir, grid_hierarchy& u, grid_hierarchy& Du ); 166 | 167 | //! 
compute the gradient and add 168 | double gradient_add( int dir, grid_hierarchy& u, grid_hierarchy& Du ){ return 0.0; } 169 | 170 | 171 | }; 172 | 173 | /**************************************************************************************/ 174 | /**************************************************************************************/ 175 | #pragma mark - 176 | 177 | template< typename T > 178 | void poisson_hybrid( T& f, int idir, int order, bool periodic, bool deconvolve_cic ); 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | #endif // __POISSON_HH 189 | 190 | -------------------------------------------------------------------------------- /MUSIC/plugins/region_convex_hull.cc: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | region_convex_hull.cc - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010-13 Oliver Hahn 8 | 9 | */ 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | #include "region_generator.hh" 21 | #include "convex_hull.hh" 22 | #include "point_file_reader.hh" 23 | 24 | 25 | //! Convex hull region plugin 26 | class region_convex_hull_plugin : public region_generator_plugin{ 27 | private: 28 | 29 | convex_hull *phull_; 30 | int shift[3], shift_level, padding_; 31 | double vfac_; 32 | bool do_extra_padding_; 33 | 34 | double anchor_pt_[3]; 35 | 36 | std::vector level_dist_; 37 | 38 | void apply_shift( size_t Np, double *p, int *shift, int levelmin ) 39 | { 40 | double dx = 1.0/(1< pp; 54 | 55 | vfac_ = cf.getValue("cosmology","vfact"); 56 | padding_ = cf.getValue("setup","padding"); 57 | 58 | 59 | std::string point_file = cf.getValue("setup","region_point_file"); 60 | 61 | point_reader pfr; 62 | pfr.read_points_from_file( point_file, vfac_, pp ); 63 | 64 | if( cf.containsKey("setup","region_point_shift") ) 65 | { 66 | std::string point_shift = cf.getValue("setup","region_point_shift"); 67 | if(sscanf( point_shift.c_str(), "%d,%d,%d", &shift[0],&shift[1],&shift[2] )!=3){ 68 | LOGERR("Error parsing triple for region_point_shift"); 69 | throw std::runtime_error("Error parsing triple for region_point_shift"); 70 | } 71 | unsigned point_levelmin = cf.getValue("setup","region_point_levelmin"); 72 | 73 | apply_shift( pp.size()/3, &pp[0], shift, point_levelmin ); 74 | shift_level = point_levelmin; 75 | } 76 | 77 | // take care of possibly cutting across a periodic boundary 78 | anchor_pt_[0] = pp[0]; 79 | anchor_pt_[1] = pp[1]; 80 | anchor_pt_[2] = pp[2]; 81 | 82 | for( size_t i = 0; i < pp.size(); ++i ) 83 | { 84 | double d = pp[i] - anchor_pt_[i%3]; 85 | if( d > 0.5 ) pp[i] -= 1.0; 86 | if( d < -0.5 ) pp[i] += 1.0; 87 | } 88 | 89 | // compute the convex hull 90 | phull_ = new convex_hull( &pp[0], pp.size()/3, anchor_pt_ ); 91 | 92 | //expand the ellipsoid by one grid cell 93 | unsigned levelmax = cf.getValue("setup","levelmax"); 94 | double dx = 1.0/(1ul<expand( sqrt(3.)*dx ); 96 | 97 | // output the center 98 | double c[3] = { phull_->centroid_[0], phull_->centroid_[1], phull_->centroid_[2] }; 99 | LOGINFO("Region center from convex hull centroid determined at\n\t (%f,%f,%f)",c[0],c[1],c[2]); 100 | 101 | //----------------------------------------------------------------- 102 | // when querying the bounding box, do we need extra padding? 
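        // do_extra_padding_ is a per-output-format switch; at present only the
        // 'grafic2' writer (checked in the block just below) requests the extra
        // padding when the bounding box is queried.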
103 | do_extra_padding_ = false; 104 | 105 | // conditions should be added here 106 | { 107 | std::string output_plugin = cf.getValue("output","format"); 108 | if( output_plugin == std::string("grafic2") ) 109 | do_extra_padding_ = true; 110 | } 111 | 112 | level_dist_.assign( levelmax+1, 0.0 ); 113 | // generate the higher level ellipsoids 114 | for( int ilevel = levelmax_-1; ilevel > 0; --ilevel ) 115 | { 116 | dx = 1.0/(1ul<<(ilevel)); 117 | level_dist_[ilevel] = level_dist_[ilevel+1] + padding_ * dx; 118 | } 119 | } 120 | 121 | ~region_convex_hull_plugin() 122 | { 123 | delete phull_; 124 | } 125 | 126 | void get_AABB( double *left, double *right, unsigned level ) 127 | { 128 | for( int i=0; i<3; ++i ) 129 | { 130 | left[i] = phull_->left_[i]; 131 | right[i] = phull_->right_[i]; 132 | } 133 | double dx = 1.0/(1ul<check_point( x, level_dist_[ilevel] ); } 156 | 157 | bool is_grid_dim_forced( size_t* ndims ) 158 | { return false; } 159 | 160 | void get_center( double *xc ) 161 | { 162 | xc[0] = phull_->centroid_[0]; 163 | xc[1] = phull_->centroid_[1]; 164 | xc[2] = phull_->centroid_[2]; 165 | } 166 | 167 | void get_center_unshifted( double *xc ) 168 | { 169 | double dx = 1.0/(1<centroid_[0], phull_->centroid_[1], phull_->centroid_[2] }; 171 | xc[0] = c[0]+shift[0]*dx; 172 | xc[1] = c[1]+shift[1]*dx; 173 | xc[2] = c[2]+shift[2]*dx; 174 | 175 | } 176 | }; 177 | 178 | namespace{ 179 | region_generator_plugin_creator_concrete< region_convex_hull_plugin > creator("convex_hull"); 180 | } 181 | 182 | -------------------------------------------------------------------------------- /pycola/aux.py: -------------------------------------------------------------------------------- 1 | ######################################################################## 2 | ######################################################################## 3 | # Copyright (c) 2013,2014 Svetlin Tassev 4 | # Princeton University,Harvard University 5 | # 6 | # This file is part of pyCOLA. 7 | # 8 | # pyCOLA is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # pyCOLA is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with pyCOLA. If not, see . 20 | # 21 | ######################################################################## 22 | ######################################################################## 23 | 24 | def boundaries(boxsize,level,level_zoom,NPART_zoom,offset_from_code,cut_from_sides, gridscale): 25 | """ 26 | :math:`\\vspace{-1mm}` 27 | 28 | Calculate bounding boxes, fine grid offsets and other 29 | quantities useful when dealing with a MUSIC snapshot. 30 | 31 | **Arguments**: 32 | 33 | * The first three arguments must be set to the following parameters 34 | from the MUSIC configuration file:: 35 | 36 | [boundaries() argument] [parameter from MUSIC .conf] [type] 37 | boxsize = boxlength float 38 | level = levelmin int 39 | level_zoom = levelmax int 40 | 41 | With the included MUSIC configuration file, :download:`ics.conf 42 | <./ics.conf>`, the above three arguments take the values: 43 | 100.0, 9, 10. 
44 | 45 | * ``NPART_zoom`` -- list of three integers, giving the size of the 46 | fine grid. 47 | 48 | * ``offset_from_code`` -- list of three integers, giving the 49 | crude-grid index coordinates of the origin of the fine grid. 50 | 51 | * The last two parameters define the COLA volume and the size of the PM grid: 52 | 53 | - ``cut_from_sides`` -- a list of two integers, call them :math:`a,b`. 54 | Then, the Lagrangian COLA volume in terms of the indices of 55 | the particle displacements arrays, ``s``:sub:`i`, at level ``level`` 56 | (the one covering the full box) is given by the following 57 | array slice:: 58 | 59 | si[a:2**level-b,a:2**level-b,a:2**level-b] 60 | 61 | - ``gridscale`` -- an integer. Sets the size of the PM grid in 62 | each dimension to ``gridscale`` times the particle number at 63 | level ``level`` in that dimension within the COLA volume. 64 | 65 | **Return**: 66 | 67 | * ``BBox_in,offset_zoom,cellsize,cellsize_zoom`` -- the same as in 68 | :func:`ic.ic_2lpt` but the first two give the bounding box and 69 | offset of the refinement region with respect to the COLA volume. 70 | 71 | * ``offset_index`` -- a 3-vector of integers. The same as 72 | ``offset_zoom`` but in units of the crude-particle index. 73 | 74 | * ``BBox_out`` -- a 3x2 array of integers. Gives the bounding box 75 | at level ``level`` that resides in the COLA volume. It equals 76 | ``[[a,2**level-b],[a,2**level-b],[a,2**level-b]]`` (see the 77 | description of the argument ``cut_from_sides`` above). 78 | 79 | * ``BBox_out_zoom`` -- the same as ``BBox_out`` but for the fine 80 | particles (as level ``level_zoom``) included in the COLA volume. 81 | 82 | * ``ngrid_x, ngrid_y, ngrid_z`` -- integers. The size of the PM 83 | grid used in the COLA volume. 84 | 85 | * ``gridcellsize`` -- a float. The PM grid spacing in 86 | :math:`\mathrm{Mpc}/h`. 87 | 88 | **Example**: For example usage, see the worked out example in (todo). 89 | 90 | """ 91 | 92 | from numpy import array 93 | NPART = (2**level,2**level,2**level) 94 | cellsize=boxsize/2.0**level 95 | cellsize_zoom=boxsize/2.0**level_zoom 96 | gridcellsize=(cellsize)/float(gridscale) 97 | BBox_out = array([[cut_from_sides[0], NPART[0]-cut_from_sides[1]], 98 | [cut_from_sides[0], NPART[1]-cut_from_sides[1]], 99 | [cut_from_sides[0], NPART[2]-cut_from_sides[1]]],dtype='int32') 100 | ind0=[0,0,0] 101 | ind1=list(NPART_zoom[:]) 102 | for i in range(3): 103 | if (cut_from_sides[0]>offset_from_code[i]): 104 | ind0[i]=(cut_from_sides[0]-offset_from_code[i])*2**(level_zoom-level) 105 | if (NPART[i]-cut_from_sides[1]-offset_from_code[i])*2**(level_zoom-level) < NPART_zoom[i]: 106 | ind1[i]=(NPART[i]-cut_from_sides[1]-offset_from_code[i])*2**(level_zoom-level) 107 | 108 | BBox_out_zoom = array([[ind0[0], ind1[0]], [ind0[1], ind1[1]], [ind0[2], ind1[2]]],dtype='int32') 109 | 110 | #offset index of small sim box relative to large sim box (NOTE: sim, not IC boxes!!!) 
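    # offset_index: origin of the (possibly trimmed) refinement region in
    # crude-particle index units, measured from the corner of the COLA volume:
    # start from the MUSIC offset (given relative to the full box), add the
    # crude cells corresponding to the part trimmed off the zoom region, and
    # subtract the cut applied to the COLA volume itself.
    # offset_zoom is the same quantity in comoving Mpc/h; the half fine-cell
    # shift appears to align the fine-cell centres with the crude grid.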
111 | offset_index = array(offset_from_code,dtype='int32')+BBox_out_zoom[:,0]//2**(level_zoom-level)-BBox_out[:,0] 112 | offset_zoom = offset_index * cellsize - array([cellsize_zoom,cellsize_zoom,cellsize_zoom],dtype='float32')/2.0 113 | 114 | 115 | 116 | #bbox of small sim box relative to large sim box in large sim coo 117 | BBox_in = array([[offset_index[0], offset_index[0] + (BBox_out_zoom[0,1]-BBox_out_zoom[0,0])//2**(level_zoom-level)], 118 | [offset_index[1], offset_index[1] + (BBox_out_zoom[1,1]-BBox_out_zoom[1,0])//2**(level_zoom-level)], 119 | [offset_index[2], offset_index[2] + (BBox_out_zoom[2,1]-BBox_out_zoom[2,0])//2**(level_zoom-level)]],dtype='int32') 120 | ngrid_x=(BBox_out[0,1]-BBox_out[0,0])*gridscale 121 | ngrid_y=(BBox_out[1,1]-BBox_out[1,0])*gridscale 122 | ngrid_z=(BBox_out[2,1]-BBox_out[2,0])*gridscale 123 | return BBox_in,offset_zoom,cellsize,cellsize_zoom,offset_index,BBox_out,BBox_out_zoom,ngrid_x,ngrid_y,ngrid_z,gridcellsize 124 | 125 | 126 | -------------------------------------------------------------------------------- /pycola/potential.pyx: -------------------------------------------------------------------------------- 1 | ######################################################################## 2 | ######################################################################## 3 | # Copyright (c) 2013,2014 Svetlin Tassev 4 | # Princeton University,Harvard University 5 | # 6 | # This file is part of pyCOLA. 7 | # 8 | # pyCOLA is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # pyCOLA is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with pyCOLA. If not, see . 20 | # 21 | ######################################################################## 22 | ######################################################################## 23 | 24 | import numpy as np 25 | cimport numpy as np 26 | cimport cython 27 | 28 | 29 | 30 | 31 | @cython.boundscheck(False) 32 | @cython.wraparound(False) 33 | @cython.cdivision(True) 34 | @cython.embedsignature(True) 35 | def get_phi(np.ndarray[np.float32_t, ndim=3] denphi, 36 | np.ndarray[np.complex64_t, ndim=3] den_k, 37 | den_fft, phi_ifft, 38 | np.int32_t ngrid_x, 39 | np.int32_t ngrid_y, 40 | np.int32_t ngrid_z, 41 | np.float32_t gridcellsize 42 | ): 43 | """ 44 | :math:`\\vspace{-1mm}` 45 | 46 | Calculate the potential sourced by a given density field. Periodic 47 | boundary conditions are assumed. 48 | 49 | **Arguments**: 50 | 51 | * ``denphi,den_k,den_fft,phi_ifft`` -- these arrays 52 | and classes are the output from a single call to 53 | :func:`potential.initialize_density`:: 54 | 55 | denphi,den_k,den_fft,phi_ifft = initialize_density(ngrid_x,ngrid_y,ngrid_z) 56 | 57 | The array ``denphi`` should then be assigned the values of the density 58 | field, and then fed as an input to this function. It is 59 | overwritten with the values of the potential on exit. 60 | 61 | * ``ngrid_x, ngrid_y, ngrid_z`` -- int32. The size of ``denphi``. 62 | 63 | * ``gridcellsize`` -- float32. Grid spacing of the PM grid in 64 | physical units. 
65 | 66 | **Result**: 67 | 68 | * ``denphi`` contains the potential on exit. 69 | 70 | **Algorithm**: 71 | 72 | Convolve the input density with the :math:`-1/k^2` kernel. 73 | 74 | """ 75 | 76 | 77 | cdef int i,j,x,y,z,nyq_x,nyq_y,nyq_z 78 | cdef np.float32_t k2,delta2_x,delta2_y,delta2_z 79 | 80 | 81 | delta2_x=(2.0*np.pi/(gridcellsize*float(ngrid_x)))**2 82 | delta2_y=(2.0*np.pi/(gridcellsize*float(ngrid_y)))**2 83 | delta2_z=(2.0*np.pi/(gridcellsize*float(ngrid_z)))**2 84 | 85 | nyq_x=ngrid_x//2 86 | nyq_y=ngrid_y//2 87 | nyq_z=ngrid_z//2 88 | 89 | den_fft() # fft the density 90 | del den_fft 91 | 92 | 93 | from multiprocessing import cpu_count 94 | from cython.parallel cimport prange,parallel 95 | cdef int nthreads 96 | 97 | nthreads=cpu_count() 98 | #print 'nthreads = ', nthreads 99 | 100 | chunk=ngrid_x//nthreads 101 | if chunk==0: 102 | chunk=1 103 | 104 | with nogil, parallel(num_threads=nthreads): 105 | for i in prange(ngrid_x,schedule='static',chunksize=chunk): 106 | #for i in range(ngrid): 107 | x=i 108 | if x>nyq_x: 109 | x=ngrid_x-i 110 | for j in range(ngrid_y): 111 | y=j 112 | if y>nyq_y: 113 | y=ngrid_y-j 114 | for z in range(nyq_z+1): 115 | k2 =delta2_x * ((x*x)) + delta2_y * ((y*y))+delta2_z * ((z*z)) 116 | den_k[i,j,z] = - den_k[i,j,z] / k2 # for gaussian smoothing, change the -1/k2 kernel here to exp(-k2*smoothing_scale**2/2.0). 117 | 118 | den_k[0,0,0]=0 119 | 120 | phi_ifft(normalise_idft=True) 121 | del den_k, phi_ifft 122 | 123 | @cython.embedsignature(True) 124 | def initialize_density(ngrid_x,ngrid_y,ngrid_z): 125 | """ 126 | :math:`\\vspace{-1mm}` 127 | 128 | Initialize the PM grid and its forward and inverse in-place Fourier 129 | transforms. We use pyFFTW, which issues calls to the `fftw 130 | library `_ to create the plans for the FFT. 131 | 132 | **Arguments**: 133 | 134 | * ``ngrid_x,ngrid_y,ngrid_z`` -- integers, giving the size of the 135 | PM grid. 136 | 137 | **Return**: 138 | 139 | * ``density`` -- a properly aligned 3-dim float32 array. 140 | 141 | * ``density_k`` -- a `view 142 | `_ 143 | of ``density`` as a 3-dim complex64 array. After executing the 144 | forward fft plan, ``density_k`` contains the in-place fft'd 145 | ``density``. 146 | 147 | * ``den_fft`` -- instance of the `FFTW class 148 | `_ 149 | for computing the forward fft, which fft's ``density`` to give 150 | ``density_k``. Creating the instance is equivalent to creating a 151 | `fftw plan 152 | `_. 153 | Calling the instance, executes the plan. 154 | 155 | * ``den_ifft`` -- instance of the FFTW class for computing the 156 | inverse fft, which ifft's ``density_k`` to give back ``density`` 157 | (up to normalization). 
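
    **Example** (a minimal sketch of the intended call sequence, following the
    snippet quoted in the :func:`potential.get_phi` docstring; the grid size
    and grid spacing below are illustrative)::

        >>> from potential import initialize_density, get_phi
        >>> ngrid_x = ngrid_y = ngrid_z = 64
        >>> density, density_k, den_fft, den_ifft = initialize_density(
        ...     ngrid_x, ngrid_y, ngrid_z)
        >>> density[:] = 0.0            # in a real run: deposit the density field here
        >>> density[32, 32, 32] = 1.0   # toy point source
        >>> get_phi(density, density_k, den_fft, den_ifft,
        ...         ngrid_x, ngrid_y, ngrid_z, 0.5)  # 0.5 Mpc/h grid spacing
        >>> # density now contains the potential sourced by the toy density field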
158 | 159 | 160 | """ 161 | import pyfftw 162 | 163 | nalign=pyfftw.simd_alignment 164 | 165 | from multiprocessing import cpu_count 166 | cdef int nthreads 167 | nthreads=cpu_count() 168 | #print 'nthreads = ', nthreads 169 | 170 | ngrid_pad = 2*(ngrid_z//2 + 1) 171 | 172 | density_pad = pyfftw.n_byte_align_empty((ngrid_x,ngrid_y,ngrid_pad),nalign,'float32') 173 | density = density_pad[:,:,:ngrid_z] 174 | density_k = density_pad.view('complex64') 175 | 176 | if nthreads>ngrid_z*2: 177 | nthreads=ngrid_z*2 178 | 179 | den_fft=pyfftw.FFTW(density,density_k, axes=(0,1,2),direction='FFTW_FORWARD',threads=nthreads,flags=('FFTW_ESTIMATE','FFTW_DESTROY_INPUT')) 180 | den_ifft=pyfftw.FFTW(density_k,density, axes=(0,1,2),direction='FFTW_BACKWARD',threads=nthreads,flags=('FFTW_ESTIMATE','FFTW_DESTROY_INPUT')) 181 | 182 | del density_pad 183 | 184 | return density,density_k,den_fft,den_ifft 185 | 186 | 187 | 188 | 189 | -------------------------------------------------------------------------------- /pycola/growth.py: -------------------------------------------------------------------------------- 1 | ######################################################################## 2 | ######################################################################## 3 | # Copyright (c) 2013,2014 Svetlin Tassev 4 | # Princeton University,Harvard University 5 | # 6 | # This file is part of pyCOLA. 7 | # 8 | # pyCOLA is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # pyCOLA is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with pyCOLA. If not, see . 20 | # 21 | ######################################################################## 22 | ######################################################################## 23 | 24 | 25 | 26 | ################################################## 27 | ################################################## 28 | ################################################## 29 | ################################################## 30 | 31 | # This solves for the linear growth factor and its derivative 32 | # The notation follows eq. (A.1,A.2) of arXiv:1301.0322 33 | 34 | 35 | def _q_factor(a,Om,Ol): # this is Q(a) 36 | from math import sqrt 37 | return a**3*sqrt(Om/a**3 + Ol + (1.0-Om-Ol)/a**2) 38 | 39 | 40 | def _growth_derivs(f,a,Om,Ol): 41 | d=f[0] 42 | y=f[1] 43 | q=_q_factor(a,Om,Ol) 44 | dDda=y/q 45 | dyda=1.5*Om*a*d/q 46 | return [dDda,dyda] 47 | 48 | 49 | def growth_factor_solution(Om,Ol): # returns a,D(a),T[D] = Q(a)D'(a) where Q(a) = _q_factor() 50 | """ 51 | :math:`\\vspace{-1mm}` 52 | 53 | Calculate the linear growth factor evolution for a given cosmology. 54 | 55 | **Arguments**: 56 | 57 | * ``Om`` -- a float, giving the matter density, :math:`\Omega_m`, today. 58 | 59 | * ``Ol`` -- a float, giving the vacuum density, :math:`\Omega_\Lambda`, today. 60 | 61 | **Return**: 62 | 63 | * an :math:`n\\times 3` array containing 64 | :math:`[a_i,D(a_i),T[D](a_i)]` for :math:`i=1\dots n` in 65 | order of increasing scale factor :math:`a`. 
Here, the linear growth 66 | factor is given by :math:`D(a)`, while :math:`T[D](a)` is given by 67 | equation (A.1) of [temporalCOLA]_. These arrays can be further interpolated if needed. 68 | 69 | 70 | 71 | """ 72 | from scipy import integrate 73 | from numpy import append,array 74 | a=[float(x)/1000. for x in range(1,1101)] # go to slightly later times so that no problems with interpolation occur 75 | amin=a[0] 76 | v=integrate.odeint(_growth_derivs, [amin,_q_factor(amin,Om,Ol)], a,args=(Om,Ol)) 77 | 78 | v/=v[1001,0] # divide by growth factor at a=1. index=1000+1 depend on the a=[...] above 79 | 80 | return append(array([a]).transpose(),v,1) 81 | 82 | 83 | def growth_2lpt(a,d,Om): 84 | """ 85 | :math:`\\vspace{-1mm}` 86 | 87 | Return the second order growth factor for a given scale factor and 88 | respective linear growth factor. One needs to precompute the latter. 89 | :math:`\Lambda\mathrm{CDM}` is assumed for this calculation. 90 | 91 | **Arguments**: 92 | 93 | * ``a`` -- a float, giving the scale factor. 94 | 95 | * ``d`` -- a float, giving the linear growth factor at `a`. 96 | 97 | * ``Om`` -- a float, giving the matter density, :math:`\Omega_m`, today. 98 | 99 | **Return**: 100 | 101 | * A float giving the second order growth factor. 102 | 103 | **Example**:: 104 | 105 | >>> Om=0.275 106 | >>> Ol=1.0-Om 107 | >>> from growth import growth_factor_solution,growth_2lpt 108 | >>> darr=growth_factor_solution(Om,Ol) 109 | >>> from scipy import interpolate 110 | >>> growth = interpolate.interp1d(darr[:,0].tolist(),darr[:,1].tolist(), 111 | ... kind='linear') 112 | >>> a=0.3 113 | >>> d=growth(a) 114 | >>> growth_2lpt(a,d,Om)/d/d 115 | 0.99148941733187124 116 | 117 | """ 118 | #omega=Om/(Om+(1.0-Om)*a*a*a) 119 | omega=1.0/(Om+(1.0-Om)*a*a*a) #normalized 120 | return d*d*omega**(-1./143.) 121 | 122 | def d_growth2(a,d,Om,Ol): 123 | """ 124 | :math:`\\vspace{-1mm}` 125 | 126 | Return :math:`T[D_2](a)` for the second order growth factor, :math:`D_2`, for a given scale factor and 127 | respective linear growth factor. Here :math:`T` is given by 128 | equation (A.1) of [temporalCOLA]_. One needs to precompute the linear growth factor. 129 | :math:`\Lambda\mathrm{CDM}` is assumed for this calculation. 130 | 131 | **Arguments**: 132 | 133 | * ``a`` -- a float, giving the scale factor. 134 | 135 | * ``d`` -- a float, giving the linear growth factor at `a`. 136 | 137 | * ``Om`` -- a float, giving the matter density, :math:`\Omega_m`, today. 138 | 139 | **Return**: 140 | 141 | * A float giving :math:`T[D_2](a)`. 142 | 143 | """ 144 | 145 | d2= growth_2lpt(a,d,Om) 146 | omega = Om/(Om+(1.0-Om)*a*a*a) 147 | return _q_factor(a,Om,Ol)*(d2/a)*2.0*omega**(6./11.) 148 | 149 | 150 | 151 | ################################################## 152 | ################################################## 153 | ################################################## 154 | ################################################## 155 | 156 | 157 | # These are routines calculating the displacement and velocity 158 | # coefficients for the COLA timestepping. 159 | # See eq. 
(A.15) of arXiv:1301.0322 160 | 161 | 162 | def _u_ansatz(a,nCola): 163 | return a**nCola 164 | 165 | def _du_ansatz(a,nCola): # this must be _du_ansatz/da 166 | return a**(nCola-1.0)*nCola 167 | 168 | def _vel_coef(ai,af,ac,nCola,Om,Ol): 169 | """ 170 | Needed in implementing (A.15) of http://arxiv.org/pdf/1301.0322v1.pdf 171 | """ 172 | coef = _u_ansatz(af,nCola) - _u_ansatz(ai,nCola) 173 | q=_q_factor(ac,Om,Ol) 174 | coef /= q * _du_ansatz(ac,nCola) 175 | return coef 176 | 177 | def _displ_coef_integral(a,nCola,Om,Ol): 178 | return _u_ansatz(a,nCola) / _q_factor(a,Om,Ol) 179 | 180 | def _displ_coef(ai,af,ac,nCola,Om,Ol): 181 | """ 182 | Needed in implementing (A.15) of http://arxiv.org/pdf/1301.0322v1.pdf 183 | """ 184 | from scipy import integrate 185 | coef = 1.0/_u_ansatz(ac,nCola) 186 | coef *= integrate.quad(_displ_coef_integral,float(ai),float(af),args=(nCola,Om,Ol))[0] 187 | return coef 188 | 189 | 190 | ######################## 191 | ######################## 192 | ######################## 193 | ######################## 194 | -------------------------------------------------------------------------------- /MUSIC/fft_operators.hh: -------------------------------------------------------------------------------- 1 | #ifndef __FFT_OPERATORS_HH 2 | #define __FFT_OPERATORS_HH 3 | struct fft_interp{ 4 | 5 | template< typename m1, typename m2 > 6 | void interpolate( m1& V, m2& v, bool fourier_splice = false ) const 7 | { 8 | int oxc = V.offset(0), oyc = V.offset(1), ozc = V.offset(2); 9 | int oxf = v.offset(0), oyf = v.offset(1), ozf = v.offset(2); 10 | 11 | size_t nxf = v.size(0), nyf = v.size(1), nzf = v.size(2), nzfp = nzf+2; 12 | 13 | // cut out piece of coarse grid that overlaps the fine: 14 | assert( nxf%2==0 && nyf%2==0 && nzf%2==0 ); 15 | 16 | size_t nxc = nxf/2, nyc = nyf/2, nzc = nzf/2, nzcp = nzf/2+2; 17 | 18 | fftw_real *rcoarse = new fftw_real[ nxc * nyc * nzcp ]; 19 | fftw_complex *ccoarse = reinterpret_cast (rcoarse); 20 | 21 | fftw_real *rfine = new fftw_real[ nxf * nyf * nzfp]; 22 | fftw_complex *cfine = reinterpret_cast (rfine); 23 | 24 | #pragma omp parallel for 25 | for( int i=0; i<(int)nxc; ++i ) 26 | for( int j=0; j<(int)nyc; ++j ) 27 | for( int k=0; k<(int)nzc; ++k ) 28 | { 29 | size_t q = ((size_t)i*nyc+(size_t)j)*nzcp+(size_t)k; 30 | rcoarse[q] = V( oxf+i, oyf+j, ozf+k ); 31 | } 32 | 33 | if( fourier_splice ) 34 | { 35 | #pragma omp parallel for 36 | for( int i=0; i<(int)nxf; ++i ) 37 | for( int j=0; j<(int)nyf; ++j ) 38 | for( int k=0; k<(int)nzf; ++k ) 39 | { 40 | size_t q = ((size_t)i*nyf+(size_t)j)*nzfp+(size_t)k; 41 | rfine[q] = v(i,j,k); 42 | } 43 | } 44 | else 45 | { 46 | #pragma omp parallel for 47 | for( size_t i=0; i/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
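# Example (illustrative): the variables above can be overridden from the
# command line, e.g.
#   make html SPHINXOPTS="-W" BUILDDIR=_build_local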
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pyCOLA.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pyCOLA.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/pyCOLA" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pyCOLA" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 178 | -------------------------------------------------------------------------------- /MUSIC/schemes.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * schemes.hh 3 | * GravitySolver 4 | * 5 | * Created by Oliver Hahn on 2/1/10. 6 | * Copyright 2010 KIPAC/Stanford University. All rights reserved. 7 | * 8 | */ 9 | 10 | #ifndef __SCHEME_HH 11 | #define __SCHEME_HH 12 | 13 | #include 14 | #include 15 | 16 | #include "solver.hh" 17 | 18 | //... 
abstract implementation of the Poisson/Force scheme 19 | template< class L, class G, typename real_t=double > 20 | class scheme 21 | { 22 | public: 23 | typedef L laplacian; 24 | typedef G gradient; 25 | 26 | laplacian m_laplacian; 27 | gradient m_gradient; 28 | 29 | template< class C > 30 | inline real_t grad_x( const C&c, const int i, const int j, const int k ) 31 | { return m_gradient.apply_x( c,i,j,k ); } 32 | 33 | template< class C > 34 | inline real_t grad_y( const C&c, const int i, const int j, const int k ) 35 | { return m_gradient.apply_y( c,i,j,k ); } 36 | 37 | template< class C > 38 | inline real_t grad_z( const C&c, const int i, const int j, const int k ) 39 | { return m_gradient.apply_z( c,i,j,k ); } 40 | 41 | template< class C > 42 | inline real_t L_apply( const C&c, const int i, const int j, const int k ) 43 | { return m_laplacian.apply( c,i,j,k ); } 44 | 45 | template< class C > 46 | inline real_t L_rhs( const C&c, const int i, const int j, const int k ) 47 | { return m_laplacian.rhs( c,i,j,k ); } 48 | 49 | inline real_t ccoeff( void ) 50 | { return m_laplacian.ccoeff(); } 51 | 52 | }; 53 | 54 | 55 | template< int nextent, typename T > 56 | class gradient 57 | { 58 | typedef T real_t; 59 | std::vector m_stencil; 60 | const unsigned nl; 61 | public: 62 | 63 | gradient() 64 | : nl( 2*nextent+1 ) 65 | { 66 | m_stencil.assign(nl*nl*nl,(real_t)0.0); 67 | } 68 | 69 | real_t& operator()(int i) 70 | { return m_stencil[i+nextent]; } 71 | 72 | const real_t& operator()(int i) const 73 | { return m_stencil[i+nextent]; } 74 | 75 | template< class C > 76 | inline void apply( const C& c, C& f, int dir ) 77 | { 78 | f = c; 79 | 80 | int nx=c.size(0), ny=c.size(1), nz=c.size(2); 81 | double hx = 1.0/(nx+1.0), hy = 1.0/(ny+1.0), hz = 1.0/(nz+1.0); 82 | 83 | f.zero(); 84 | 85 | if( dir == 0 ) 86 | for( int i=0; i 108 | class base_stencil 109 | { 110 | protected: 111 | std::vector m_stencil; 112 | const unsigned nl; 113 | public: 114 | bool m_modsource; 115 | 116 | public: 117 | base_stencil( bool amodsource = false ) 118 | : nl( 2*nextent+1 ), m_modsource( amodsource ) 119 | { 120 | m_stencil.assign(nl*nl*nl,(real_t)0.0); 121 | } 122 | 123 | real_t& operator()(int i, int j, int k) 124 | { return m_stencil[((i+nextent)*nl+(j+nextent))*nl+(k+nextent)]; } 125 | 126 | const real_t& operator()(unsigned i, unsigned j, unsigned k) const 127 | { return m_stencil[((i+nextent)*nl+(j+nextent))*nl+(k+nextent)]; } 128 | 129 | template< class C > 130 | inline real_t rhs( const C& c, const int i, const int j, const int k ) 131 | { 132 | real_t sum = this->apply( c, i, j, k ); 133 | sum -= (*this)(0,0,0) * c(i,j,k); 134 | return sum; 135 | } 136 | 137 | inline real_t ccoeff( void ) 138 | { 139 | return (*this)(0,0,0); 140 | } 141 | 142 | 143 | template< class C > 144 | inline real_t apply( const C& c, const int i, const int j, const int k ) 145 | { 146 | real_t sum = 0.0; 147 | 148 | for( int ii=-nextent; ii<=nextent; ++ii ) 149 | for( int jj=-nextent; jj<=nextent; ++jj ) 150 | for( int kk=-nextent; kk<=nextent; ++kk ) 151 | sum += (*this)(ii,jj,kk) * c(i+ii,j+jj,k+kk); 152 | 153 | return sum; 154 | } 155 | 156 | template< class C > 157 | inline real_t modsource( const C& c, const int i, const int j, const int k ) 158 | { 159 | return 0.0; 160 | } 161 | 162 | }; 163 | 164 | 165 | /***************************************************************************************/ 166 | /***************************************************************************************/ 167 | 
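//
// A note on how the pieces above fit together (a sketch inferred only from the
// definitions in this header; the multigrid solver in solver.hh is not reproduced
// here): a "scheme" couples one gradient stencil with one Laplacian-type stencil.
// For a stencil S, apply(c,i,j,k) returns sum_{ii,jj,kk} S(ii,jj,kk)*c(i+ii,j+jj,k+kk),
// i.e. the discrete operator without any grid-spacing factor; rhs(c,i,j,k) is the
// same sum with the central term S(0,0,0)*c(i,j,k) removed, and ccoeff() returns
// that central coefficient, which is the split that a Jacobi or Gauss-Seidel type
// relaxation typically needs in order to solve for the central value. The concrete
// stencils implemented below are the standard second-order central difference
// (deriv_2P) and the 7- and 13-point Laplacians (stencil_7P, stencil_13P), which
// are second- and fourth-order accurate and approximate h^2 times the Laplacian.
//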
/***************************************************************************************/ 168 | 169 | 170 | //... Implementation of the Gradient schemes............................................ 171 | 172 | 173 | template< typename real_t > 174 | class deriv_2P : public gradient<1,real_t> 175 | { 176 | 177 | public: 178 | deriv_2P( void ) 179 | { 180 | (*this)( 0 ) = 0.0; 181 | (*this)(-1 ) = -0.5; 182 | (*this)(+1 ) = +0.5; 183 | } 184 | 185 | 186 | }; 187 | 188 | //... Implementation of the Laplacian schemes.......................................... 189 | 190 | 191 | template< typename real_t > 192 | class stencil_7P : public base_stencil<1,real_t> 193 | { 194 | 195 | public: 196 | stencil_7P( void ) 197 | { 198 | (*this)( 0, 0, 0) = -6.0; 199 | (*this)(-1, 0, 0) = +1.0; 200 | (*this)(+1, 0, 0) = +1.0; 201 | (*this)( 0,-1, 0) = +1.0; 202 | (*this)( 0,+1, 0) = +1.0; 203 | (*this)( 0, 0,-1) = +1.0; 204 | (*this)( 0, 0,+1) = +1.0; 205 | } 206 | 207 | template< class C > 208 | inline real_t apply( const C& c, const int i, const int j, const int k ) const 209 | { 210 | return c(i-1,j,k)+c(i+1,j,k)+c(i,j-1,k)+c(i,j+1,k)+c(i,j,k-1)+c(i,j,k+1)-6.0*c(i,j,k); 211 | } 212 | 213 | template< class C > 214 | inline real_t rhs( const C& c, const int i, const int j, const int k ) const 215 | { 216 | return c(i-1,j,k)+c(i+1,j,k)+c(i,j-1,k)+c(i,j+1,k)+c(i,j,k-1)+c(i,j,k+1); 217 | } 218 | 219 | inline real_t ccoeff( void ) 220 | { 221 | return -6.0; 222 | } 223 | }; 224 | 225 | 226 | template< typename real_t > 227 | class stencil_13P : public base_stencil<2,real_t> 228 | { 229 | 230 | public: 231 | stencil_13P( void ) 232 | { 233 | (*this)( 0, 0, 0) = -90.0/12.; 234 | 235 | (*this)(-1, 0, 0) = 236 | (*this)(+1, 0, 0) = 237 | (*this)( 0,-1, 0) = 238 | (*this)( 0,+1, 0) = 239 | (*this)( 0, 0,-1) = 240 | (*this)( 0, 0,+1) = 16./12.; 241 | 242 | (*this)(-2, 0, 0) = 243 | (*this)(+2, 0, 0) = 244 | (*this)( 0,-2, 0) = 245 | (*this)( 0,+2, 0) = 246 | (*this)( 0, 0,-2) = 247 | (*this)( 0, 0,+2) = -1./12.; 248 | } 249 | 250 | template< class C > 251 | inline real_t apply( const C& c, const int i, const int j, const int k ) 252 | { 253 | return 254 | (-1.0*(c(i-2,j,k)+c(i+2,j,k)+c(i,j-2,k)+c(i,j+2,k)+c(i,j,k-2)+c(i,j,k+2)) 255 | +16.0*(c(i-1,j,k)+c(i+1,j,k)+c(i,j-1,k)+c(i,j+1,k)+c(i,j,k-1)+c(i,j,k+1)) 256 | -90.0*c(i,j,k))/12.0; 257 | } 258 | 259 | template< class C > 260 | inline real_t rhs( const C& c, const int i, const int j, const int k ) 261 | { 262 | return 263 | (-1.0*(c(i-2,j,k)+c(i+2,j,k)+c(i,j-2,k)+c(i,j+2,k)+c(i,j,k-2)+c(i,j,k+2)) 264 | +16.0*(c(i-1,j,k)+c(i+1,j,k)+c(i,j-1,k)+c(i,j+1,k)+c(i,j,k-1)+c(i,j,k+1)))/12.0; 265 | } 266 | 267 | inline real_t ccoeff( void ) 268 | { 269 | return -90.0/12.0; 270 | } 271 | }; 272 | 273 | #endif 274 | 275 | 276 | -------------------------------------------------------------------------------- /CosmoFlow/io_Cosmo-3param.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | import hyper_parameters_Cosmo 4 | import os 5 | import itertools 6 | import random 7 | 8 | def _float64_feature(value): 9 | return tf.train.Feature(float_list=tf.train.FloatList(value=[value])) 10 | 11 | def _bytes_feature(value): 12 | return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) 13 | 14 | 15 | class loadNpyData: 16 | def __init__(self,data,label,num): 17 | ### suggestion from James to cast as 32-bit 18 | self.data = data.astype(dtype = np.float32) ##data 19 | self.label = 
label.astype(dtype = np.float32) ##label 20 | self.num = num 21 | 22 | def convert_to(self): 23 | filename = str(self.num)+'.tfrecord' 24 | print('Writing ', filename) 25 | writer = tf.python_io.TFRecordWriter(filename) 26 | for index in range(len(self.data)): 27 | data_raw = self.data[index].tostring() 28 | label_raw = self.label[index].tostring() 29 | example = tf.train.Example(features = tf.train.Features(feature={'label_raw': _bytes_feature(label_raw),'data_raw': _bytes_feature(data_raw)})) 30 | writer.write(example.SerializeToString()) 31 | writer.close() 32 | 33 | class loadTfrecordData: 34 | def __init__(self,fileBuffer,num): 35 | self.fileBuffer = fileBuffer 36 | self.num = num 37 | 38 | def reconstruct_from(self): 39 | for serialized in tf.python_io.tf_record_iterator(self.fileBuffer): 40 | example = tf.train.Example() 41 | example.ParseFromString(serialized) 42 | data_raw = (example.features.feature['data_raw'].bytes_list.value[0]) 43 | data = np.fromstring(data_raw, dtype=np.float32).reshape([-1,128,128,128,1]) 44 | label_raw = (example.features.feature['label_raw'].bytes_list.value[0]) 45 | label = np.fromstring(label_raw,dtype=np.float32).reshape([-1,hyper_parameters_Cosmo.DATAPARAM["output_dim"] ]) 46 | 47 | return data,label 48 | 49 | def read_tfrecord(filename_queue): 50 | reader = tf.TFRecordReader() 51 | _,single_example = reader.read(filename_queue) 52 | parsed_example = tf.parse_single_example( 53 | single_example, 54 | features = { 55 | "data_raw": tf.FixedLenFeature([],tf.string), 56 | "label_raw": tf.FixedLenFeature([],tf.string) 57 | } 58 | ) 59 | 60 | NbodySimuDecode = tf.decode_raw(parsed_example['data_raw'],tf.float32) 61 | labelDecode = tf.decode_raw(parsed_example['label_raw'],tf.float32) 62 | 63 | NbodySimus = tf.reshape(NbodySimuDecode,[128,128,128]) 64 | 65 | #normalize 66 | NbodySimus /= (tf.reduce_sum(NbodySimus)/128**3+0.) 67 | NbodySimuAddDim = tf.expand_dims(NbodySimus,axis = 3) 68 | label = tf.reshape(labelDecode,[hyper_parameters_Cosmo.DATAPARAM["output_dim"] ]) 69 | 70 | 71 | label = (label - tf.constant(hyper_parameters_Cosmo.DATAPARAM['zsAVG'],dtype = tf.float32))/tf.constant(hyper_parameters_Cosmo.DATAPARAM['zsSTD'] 72 | ,dtype = tf.float32) 73 | return NbodySimuAddDim,label 74 | 75 | def readDataSet(filenames): 76 | print "---readDataSet-ioCosmo------" 77 | print filenames 78 | filename_queue = tf.train.string_input_producer(filenames,num_epochs=None,shuffle=True) 79 | NbodySimus,label= read_tfrecord(filename_queue) 80 | 81 | NbodySimus_batch, label_batch = tf.train.shuffle_batch( 82 | [NbodySimus,label], 83 | 84 | batch_size = hyper_parameters_Cosmo.Input["BATCH_SIZE"], 85 | num_threads = hyper_parameters_Cosmo.Input["NUM_THREADS"], 86 | capacity = hyper_parameters_Cosmo.Input["CAPACITY"], 87 | min_after_dequeue = hyper_parameters_Cosmo.Input["MIN_AFTER_DEQUEUE"], 88 | allow_smaller_final_batch=True) 89 | 90 | return NbodySimus_batch, label_batch 91 | 92 | 93 | def read_test_tfrecord(filename_queue): 94 | reader = tf.TFRecordReader() 95 | _,single_example = reader.read(filename_queue) 96 | parsed_example = tf.parse_single_example( 97 | single_example, 98 | features = { 99 | "data_raw": tf.FixedLenFeature([],tf.string), 100 | "label_raw": tf.FixedLenFeature([],tf.string) 101 | } 102 | ) 103 | 104 | NbodySimuDecode = tf.decode_raw(parsed_example['data_raw'],tf.float32) 105 | labelDecode = tf.decode_raw(parsed_example['label_raw'],tf.float32) 106 | NbodySimus = tf.reshape(NbodySimuDecode,[128,128,128]) 107 | NbodySimus /= (tf.reduce_sum(NbodySimus)/128**3+0.)
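# The division above (the same normalisation used in read_tfrecord) rescales each
# 128^3 cube by its mean value, tf.reduce_sum(...)/128**3, so every sample fed to
# the network has mean 1; for a density cube this is just rho/rho_bar = 1 + delta,
# which removes any overall normalisation difference between simulations.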
108 | NbodySimuAddDim = tf.expand_dims(NbodySimus,3) 109 | #label = tf.reshape(labelDecode,[2]) 110 | label = tf.reshape(labelDecode,[hyper_parameters_Cosmo.DATAPARAM["output_dim"] ]) 111 | 112 | labelAddDim = (label - tf.constant(hyper_parameters_Cosmo.DATAPARAM['zsAVG'],dtype = tf.float32))/tf.constant(hyper_parameters_Cosmo.DATAPARAM['zsSTD'] 113 | ,dtype = tf.float32) 114 | 115 | print NbodySimuAddDim.shape 116 | 117 | return NbodySimuAddDim,labelAddDim 118 | 119 | def readTestSet(filenames): 120 | print "----readTestSet-io_cosmo----" 121 | filename_queue = tf.train.string_input_producer(filenames,num_epochs=None,shuffle=False) 122 | NbodySimus,label= read_test_tfrecord(filename_queue) 123 | NbodySimus_batch, label_batch = tf.train.batch( 124 | [NbodySimus,label], 125 | #NbodyList, 126 | batch_size = hyper_parameters_Cosmo.Input_Test["BATCH_SIZE"], 127 | num_threads = hyper_parameters_Cosmo.Input_Test["NUM_THREADS"], 128 | capacity = hyper_parameters_Cosmo.Input_Test["CAPACITY"], 129 | enqueue_many=False, 130 | allow_smaller_final_batch=True) 131 | 132 | return NbodySimus_batch, label_batch 133 | 134 | 135 | 136 | if __name__ == '__main__': 137 | 138 | 139 | 140 | label_path = os.path.join('/global/cscratch1/sd/djbard/MUSIC_pyCola/egpbos-pycola-672c58551ff1/OmSiNs/twothousand-4/','list-2000-noCiC-128from256.txt') 141 | labels = np.loadtxt(label_path,delimiter=',') 142 | 143 | 144 | ### How many tensorflow files do we want to make? 145 | ### Assuming 500 here, with the first 400 a random mix, 146 | ### and the last 100 NOT mixed for val/test sets. 147 | for i in range(1950,2000): 148 | data = [] 149 | label = [] 150 | for j in range(64): 151 | ## for twothousand dir, just make all of them training. 152 | numDirectory = random.randrange(1000,3000) ### 153 | 154 | #if i<1800: 155 | # numDirectory = random.randrange(1000,2800) ### 156 | #else: 157 | # numDirectory = (i)+1000 ## don't want this to be random!!
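# Each output .tfrecord written below therefore bundles 64 randomly chosen
# 128^3 sub-volumes. numDirectory picks one of the simulation directories
# (1000-2999), and further down labels[numDirectory-1000][[1,2,3]] selects the
# three cosmological parameters recorded for that simulation (presumably
# Omega_m, sigma_8 and n_s, given the "OmSiNs" naming used elsewhere in this
# repository).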
158 | 159 | numFile = random.randrange(8) 160 | dirname = numDirectory 161 | 162 | #print i, j, numDirectory 163 | ## pull a sub-volumes from the 2000 dir 164 | data_path = os.path.join('/global/cscratch1/sd/djbard/MUSIC_pyCola/egpbos-pycola-672c58551ff1/OmSiNs/twothousand-4/128from256-2000/',str(dirname).rjust(3,'0'),str(numFile)+'.npy') 165 | #print data_path 166 | data = np.append(data,np.load(data_path)) 167 | label = np.append(label,labels[ (numDirectory-1000)][[1,2,3]]) 168 | 169 | 170 | loadNpyData(data.reshape(-1,128,128,128,1),label.reshape(-1,3),i).convert_to() 171 | 172 | 173 | -------------------------------------------------------------------------------- /MUSIC/tests.hh: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | tests.hh - This file is part of MUSIC - 4 | a code to generate multi-scale initial conditions 5 | for cosmological simulations 6 | 7 | Copyright (C) 2010 Oliver Hahn 8 | 9 | */ 10 | 11 | 12 | #ifndef __TESTS_HH 13 | #define __TESTS_HH 14 | 15 | #include 16 | 17 | 18 | inline double CIC_interp_back( const MeshvarBnd& A, double x, double y, double z ) 19 | { 20 | int 21 | ix = (int)x, 22 | iy = (int)y, 23 | iz = (int)z, 24 | ix1 = (ix+1), 25 | iy1 = (iy+1), 26 | iz1 = (iz+1); 27 | 28 | 29 | double 30 | dx = (double)(x - (double)ix), 31 | dy = (double)(y - (double)iy), 32 | dz = (double)(z - (double)iz), 33 | tx = 1.0-dx, 34 | ty = 1.0-dy, 35 | tz = 1.0-dz; 36 | 37 | double 38 | f_xyz = A(ix,iy,iz)*tx*ty*tz, 39 | f_Xyz = A(ix1,iy,iz)*dx*ty*tz, 40 | f_xYz = A(ix,iy1,iz)*tx*dy*tz, 41 | f_xyZ = A(ix,iy,iz1)*tx*ty*dz, 42 | f_XYz = A(ix1,iy1,iz)*dx*dy*tz, 43 | f_XyZ = A(ix1,iy,iz1)*dx*ty*dz, 44 | f_xYZ = A(ix,iy1,iz1)*tx*dy*dz, 45 | f_XYZ = A(ix1,iy1,iz1)*dx*dy*dz; 46 | 47 | return f_xyz + f_Xyz + f_xYz + f_xyZ + f_XYz + f_XyZ + f_xYZ + f_XYZ; 48 | } 49 | 50 | inline double TSC_interp_back( const MeshvarBnd& A, double x, double y, double z ) 51 | { 52 | double val = 0.0; 53 | int xngp = (int)x, yngp = (int)y, zngp = (int)z; 54 | 55 | for( int xx = xngp-1; xx <= xngp+1; ++xx ) 56 | { 57 | double weightx = 1.0; 58 | double dx = fabs(x-(double)xx); 59 | int axx(xx); 60 | 61 | if( xx==xngp ) 62 | weightx *= 0.75-dx*dx; 63 | else{ 64 | weightx *= 1.125 - 1.5*dx + 0.5*dx*dx; 65 | } 66 | 67 | for( int yy = yngp-1; yy <= yngp+1; ++yy ) 68 | { 69 | double weighty = weightx; 70 | double dy = fabs(y-(double)yy); 71 | int ayy(yy); 72 | 73 | if( yy==yngp ) 74 | weighty *= 0.75-dy*dy; 75 | else{ 76 | weighty *= 1.125 - 1.5*dy + 0.5*dy*dy; 77 | } 78 | 79 | for( int zz = zngp-1; zz <= zngp+1; ++zz ) 80 | { 81 | double weightz = weighty; 82 | double dz = fabs(z-(double)zz); 83 | int azz(zz); 84 | 85 | if( zz==zngp ) 86 | weightz *= 0.75-dz*dz; 87 | else{ 88 | weightz *= 1.125 - 1.5*dz + 0.5*dz*dz; 89 | } 90 | 91 | val += A(axx,ayy,azz) * weightz; 92 | } 93 | } 94 | } 95 | 96 | return val; 97 | } 98 | 99 | class TestProblem{ 100 | public: 101 | MeshvarBnd m_rho, m_uana, m_ubnd, m_xgrad, m_ygrad, m_zgrad; 102 | int m_nb, m_nres; 103 | double m_h; 104 | 105 | TestProblem( int nb, int nres ) 106 | : m_rho( nb, nres ), m_uana( nb, nres ), m_ubnd( nb, nres ), 107 | m_xgrad( nb, nres ), m_ygrad( nb, nres ), m_zgrad( nb, nres ), 108 | m_nb( nb ), m_nres( nres ), m_h( 1.0/((double)nres ) )//m_h( 1.0/((double)nres+1.0 ) ) 109 | { } 110 | 111 | }; 112 | 113 | class TSC_Test : public TestProblem{ 114 | public: 115 | double m_q; 116 | 117 | class TSCcube{ 118 | public: 119 | std::vector m_data; 120 | 121 | 122 | TSCcube() 123 | { 124 | 
m_data.assign(27,0.0); 125 | 126 | //.. center 127 | (*this)( 0, 0, 0) = 27./64.; 128 | 129 | //.. faces 130 | (*this)(-1, 0, 0) = 131 | (*this)(+1, 0, 0) = 132 | (*this)( 0,-1, 0) = 133 | (*this)( 0,+1, 0) = 134 | (*this)( 0, 0,-1) = 135 | (*this)( 0, 0,+1) = 9./128.; 136 | 137 | //.. edges 138 | (*this)(-1,-1, 0) = 139 | (*this)(-1,+1, 0) = 140 | (*this)(+1,-1, 0) = 141 | (*this)(+1,+1, 0) = 142 | (*this)(-1, 0,-1) = 143 | (*this)(-1, 0,+1) = 144 | (*this)(+1, 0,-1) = 145 | (*this)(+1, 0,+1) = 146 | (*this)( 0,-1,-1) = 147 | (*this)( 0,-1,+1) = 148 | (*this)( 0,+1,-1) = 149 | (*this)( 0,+1,+1) = 3./256.; 150 | 151 | //.. corners 152 | (*this)(-1,-1,-1) = 153 | (*this)(-1,+1,-1) = 154 | (*this)(-1,-1,+1) = 155 | (*this)(-1,+1,+1) = 156 | (*this)(+1,-1,-1) = 157 | (*this)(+1,+1,-1) = 158 | (*this)(+1,-1,+1) = 159 | (*this)(+1,+1,+1) = 1./512.; 160 | 161 | } 162 | 163 | double& operator()(int i, int j, int k) 164 | { return m_data[ ((i+1)*3+(j+1))*3 +(k+1)]; } 165 | 166 | const double& operator()(int i, int j, int k) const 167 | { return m_data[ ((i+1)*3+(j+1))*3 +(k+1)]; } 168 | }; 169 | 170 | TSC_Test( int nb, int nres, double q=-1.0 ) 171 | : TestProblem(nb, nres), m_q(q) 172 | { 173 | TSCcube c; 174 | int xm(nres/2-1), ym(nres/2-1), zm(nres/2-1); 175 | double xxm((double)xm*m_h), yym((double)ym*m_h), zzm((double)zm*m_h); 176 | 177 | double fourpi = 4.0*M_PI; 178 | 179 | m_uana.zero(); 180 | m_ubnd.zero(); 181 | m_xgrad.zero(); 182 | m_ygrad.zero(); 183 | m_zgrad.zero(); 184 | 185 | for( int i=-nb; iOmega_m/(a2*a) + cosm->Omega_k/a2 69 | + cosm->Omega_DE * pow(a,-3.*(1.+cosm->w_0+cosm->w_a)) * exp(-3.*(1.0-a)*cosm->w_a) ); 70 | return Ha; 71 | } 72 | 73 | inline static double Hprime_of_a( double a, void *Params ) 74 | { 75 | Cosmology *cosm = (Cosmology*)Params; 76 | double a2 = a*a; 77 | double H = H_of_a( a, Params ); 78 | double Hprime = 1/(a*H) * ( -1.5 * cosm->Omega_m / (a2*a) - cosm->Omega_k / a2 79 | - 1.5 * cosm->Omega_DE * pow( a, -3.*(1.+cosm->w_0+cosm->w_a) ) * exp( -3.*(1.0-a)*cosm->w_a ) 80 | * ( 1. + cosm->w_0 + (1.-a) * cosm->w_a ) ); 81 | return Hprime; 82 | } 83 | 84 | 85 | //! Integrand used by function CalcGrowthFactor to determine the linear growth factor D+ 86 | inline static double GrowthIntegrand( double a, void *Params ) 87 | { 88 | double Ha = a * H_of_a( a, Params ); 89 | return 2.5/( Ha * Ha * Ha ); 90 | } 91 | 92 | //! Computes the linear theory growth factor D+ 93 | /*! Function integrates over member function GrowthIntegrand and computes 94 | * /a 95 | * D+(a) = 5/2 H(a) * | [a'^3 * H(a')^3]^(-1) da' 96 | * /0 97 | */ 98 | real_t CalcGrowthFactor( real_t a ) 99 | { 100 | real_t integral = integrate( &GrowthIntegrand, 0.0, a, (void*)&m_Cosmology ); 101 | return H_of_a( a, (void*)&m_Cosmology ) * integral; 102 | } 103 | 104 | //! Compute the factor relating particle displacement and velocity 105 | /*! Function computes 106 | * 107 | * vfac = a^2 * H(a) * dlogD+ / d log a = a^2 * H'(a) + 5/2 * [ a * D+(a) * H(a) ]^(-1) 108 | * 109 | */ 110 | real_t CalcVFact( real_t a ) 111 | { 112 | real_t Dp = CalcGrowthFactor( a ); 113 | real_t H = H_of_a( a, (void*)&m_Cosmology ); 114 | real_t Hp = Hprime_of_a( a, (void*)&m_Cosmology ); 115 | real_t a2 = a*a; 116 | 117 | return ( a2 * Hp + 2.5 / ( a * Dp * H ) ) * 100.0; 118 | } 119 | 120 | 121 | //! Integrand for the sigma_8 normalization of the power spectrum 122 | /*! 
Returns the value of the primordial power spectrum multiplied with 123 | the transfer function and the window function of 8 Mpc/h at wave number k */ 124 | static double dSigma8( double k, void *Params ) 125 | { 126 | if( k<=0.0 ) 127 | return 0.0f; 128 | 129 | transfer_function *ptf = (transfer_function *)Params; 130 | 131 | double x = k*8.0; 132 | double w = 3.0*(sin(x)-x*cos(x))/(x*x*x); 133 | static double nspect = (double)ptf->cosmo_.nspect; 134 | 135 | double tf = ptf->compute(k, total); 136 | 137 | //... no growth factor since we compute at z=0 and normalize so that D+(z=0)=1 138 | return k*k * w*w * pow((double)k,(double)nspect) * tf*tf; 139 | 140 | } 141 | 142 | //! Integrand for the sigma_8 normalization of the power spectrum 143 | /*! Returns the value of the primordial power spectrum multiplied with 144 | the transfer function and the window function of 8 Mpc/h at wave number k */ 145 | static double dSigma8_0( double k, void *Params ) 146 | { 147 | if( k<=0.0 ) 148 | return 0.0f; 149 | 150 | transfer_function *ptf = (transfer_function *)Params; 151 | 152 | double x = k*8.0; 153 | double w = 3.0*(sin(x)-x*cos(x))/(x*x*x); 154 | static double nspect = (double)ptf->cosmo_.nspect; 155 | 156 | double tf = ptf->compute(k, total0); 157 | 158 | //... no growth factor since we compute at z=0 and normalize so that D+(z=0)=1 159 | return k*k * w*w * pow((double)k,(double)nspect) * tf*tf; 160 | 161 | } 162 | 163 | 164 | //! Computes the square of the transfer function 165 | /*! Function evaluates the supplied transfer function m_pTransferFunction 166 | * and returns the square of its value at wave number k 167 | * @param k wave number at which to evaluate the transfer function 168 | */ 169 | inline real_t TransferSq( real_t k ){ 170 | //.. parameter supplied transfer function 171 | real_t tf1 = m_pTransferFunction->compute(k, total); 172 | return tf1*tf1; 173 | } 174 | 175 | 176 | //! Computes the normalization for the power spectrum 177 | /*! 178 | * integrates the power spectrum to fix the normalization to that given 179 | * by the sigma_8 parameter 180 | */ 181 | real_t ComputePNorm( real_t kmax ) 182 | { 183 | real_t sigma0, kmin; 184 | kmax = m_pTransferFunction->get_kmax();//m_Cosmology.H0/8.0; 185 | kmin = m_pTransferFunction->get_kmin();//0.0; 186 | 187 | if( !m_pTransferFunction->tf_has_total0() ) 188 | sigma0 = 4.0 * M_PI * integrate( &dSigma8, (double)kmin, (double)kmax, (void*)m_pTransferFunction ); 189 | else 190 | sigma0 = 4.0 * M_PI * integrate( &dSigma8_0, (double)kmin, (double)kmax, (void*)m_pTransferFunction ); 191 | 192 | return m_Cosmology.sigma8*m_Cosmology.sigma8/sigma0; 193 | } 194 | 195 | }; 196 | 197 | 198 | //! compute the jeans sound speed 199 | /*! given a density in g/cm^-3 and a mass in g it gives back the sound 200 | * speed in cm/s for which the input mass is equal to the jeans mass 201 | * @param rho density 202 | * @param mass mass scale 203 | * @returns jeans sound speed 204 | */ 205 | inline double jeans_sound_speed( double rho, double mass ) 206 | { 207 | const double G = 6.67e-8; 208 | return pow( 6.0*mass/M_PI*sqrt(rho)*pow(G,1.5), 1.0/3.0 ); 209 | } 210 | 211 | //! computes the density from the potential using the Laplacian 212 | void compute_Lu_density( const grid_hierarchy& u, grid_hierarchy& fnew, unsigned order=4 ); 213 | 214 | //! computes the 2nd order density perturbations using also off-diagonal terms in the potential Hessian 215 | void compute_LLA_density( const grid_hierarchy& u, grid_hierarchy& fnew, unsigned order=4 ); 216 | 217 | //! 
computes the source term for the 2nd order perturbations in the displacements 218 | void compute_2LPT_source( const grid_hierarchy& u, grid_hierarchy& fnew, unsigned order=4 ); 219 | 220 | void compute_2LPT_source_FFT( config_file& cf_, const grid_hierarchy& u, grid_hierarchy& fnew ); 221 | 222 | 223 | #endif // _COSMOLOGY_HH 224 | 225 | -------------------------------------------------------------------------------- /pycola/setup.py: -------------------------------------------------------------------------------- 1 | #This script is based on setup.py included in the pyFFTW 2 | #package under the BSD license. That still requires the copyright 3 | #notice below the gpl. 4 | 5 | ######################################################################## 6 | ######################################################################## 7 | # Copyright (c) 2013,2014 Svetlin Tassev 8 | # Princeton University,Harvard University 9 | # 10 | # This file is part of pyCOLA. 11 | # 12 | # pyCOLA is free software: you can redistribute it and/or modify 13 | # it under the terms of the GNU General Public License as published by 14 | # the Free Software Foundation, either version 3 of the License, or 15 | # (at your option) any later version. 16 | # 17 | # pyCOLA is distributed in the hope that it will be useful, 18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 | # GNU General Public License for more details. 21 | # 22 | # You should have received a copy of the GNU General Public License 23 | # along with pyCOLA. If not, see . 24 | # 25 | ######################################################################## 26 | ######################################################################## 27 | # 28 | # This file incorporates work covered by the following copyright and 29 | # permission notice: 30 | # 31 | # Copyright 2014 Knowledge Economy Developments Ltd 32 | # 33 | # Henry Gomersall 34 | # heng@kedevelopments.co.uk 35 | # 36 | # All rights reserved. 37 | # 38 | # Redistribution and use in source and binary forms, with or without 39 | # modification, are permitted provided that the following conditions are met: 40 | # 41 | # * Redistributions of source code must retain the above copyright notice, this 42 | # list of conditions and the following disclaimer. 43 | # 44 | # * Redistributions in binary form must reproduce the above copyright notice, 45 | # this list of conditions and the following disclaimer in the documentation 46 | # and/or other materials provided with the distribution. 47 | # 48 | # * Neither the name of the copyright holder nor the names of its contributors 49 | # may be used to endorse or promote products derived from this software without 50 | # specific prior written permission. 51 | # 52 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 53 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 54 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 55 | # ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 56 | # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 57 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 58 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 59 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 60 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 61 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 62 | # POSSIBILITY OF SUCH DAMAGE. 63 | # 64 | 65 | try: 66 | from setuptools import setup 67 | except ImportError: 68 | from distutils.core import setup 69 | from distutils.extension import Extension 70 | from distutils.util import get_platform 71 | from distutils.ccompiler import get_default_compiler 72 | 73 | import os 74 | 75 | import numpy 76 | try: 77 | from Cython.Distutils import build_ext as build_ext 78 | 79 | 80 | sources = [os.path.join(os.getcwd(), 'cic.pyx'), 81 | os.path.join(os.getcwd(), 'potential.pyx'), 82 | os.path.join(os.getcwd(), 'acceleration.pyx'), 83 | os.path.join(os.getcwd(), 'box_smooth.pyx') 84 | ] 85 | except ImportError as e: 86 | sources = [os.path.join(os.getcwd(), 'cic.c'), 87 | os.path.join(os.getcwd(), 'potential.c'), 88 | os.path.join(os.getcwd(), 'acceleration.c'), 89 | os.path.join(os.getcwd(), 'box_smooth.c') 90 | ] 91 | for i in sources: 92 | if not os.path.exists(i): 93 | print i 94 | print os.path.exists(i) 95 | raise ImportError(str(e) + '. ' + 96 | 'Cython is required to build the initial .c file.') 97 | 98 | # We can't cythonize, but that's ok as it's been done already. 99 | from distutils.command.build_ext import build_ext 100 | 101 | 102 | 103 | include_dirs = [numpy.get_include()] 104 | 105 | library_dirs = ["/usr/common/software/fftw/3.3.4/hsw/gnu/lib/"] 106 | 107 | package_data = {} 108 | 109 | libraries = [] 110 | 111 | 112 | ext_modules = [Extension( 113 | "cic", 114 | [sources[0]], 115 | extra_compile_args=['-fopenmp','-O3','-pthread' ,'-fPIC' ,'-fwrapv','-fno-strict-aliasing'], 116 | extra_link_args=['-fopenmp','-O3','-pthread' ,'-fPIC' ,'-fwrapv','-fno-strict-aliasing'], 117 | libraries=libraries, 118 | include_dirs=include_dirs 119 | ), 120 | Extension( 121 | "potential", 122 | [sources[1]], 123 | extra_compile_args=['-fopenmp','-O3','-pthread' ,'-fPIC' ,'-fwrapv','-fno-strict-aliasing'], 124 | extra_link_args=['-fopenmp','-O3','-pthread' ,'-fPIC' ,'-fwrapv','-fno-strict-aliasing'], 125 | libraries=libraries, 126 | include_dirs=include_dirs 127 | ), 128 | Extension( 129 | "acceleration", 130 | [sources[2]], 131 | extra_compile_args=['-fopenmp','-O3','-pthread' ,'-fPIC' ,'-fwrapv','-fno-strict-aliasing'], 132 | extra_link_args=['-fopenmp','-O3','-pthread' ,'-fPIC' ,'-fwrapv','-fno-strict-aliasing'], 133 | libraries=libraries, 134 | include_dirs=include_dirs 135 | ), 136 | Extension( 137 | "box_smooth", 138 | [sources[3]], 139 | extra_compile_args=['-fopenmp','-O3','-pthread' ,'-fPIC' ,'-fwrapv','-fno-strict-aliasing'], 140 | extra_link_args=['-fopenmp','-O3','-pthread' ,'-fPIC' ,'-fwrapv','-fno-strict-aliasing'], 141 | libraries=libraries, 142 | include_dirs=include_dirs 143 | ) 144 | ] 145 | 146 | long_description="""pyCOLA is a multithreaded Python/Cython N-body code, 147 | implementing the Comoving Lagrangian Acceleration (COLA) method in the 148 | temporal and spatial domains. 149 | 150 | pyCOLA is based on the following two papers: 151 | (todo) 152 | ... 153 | ... 
154 | Please cite them if you use pyCOLA for scientific research. 155 | 156 | pyCOLA requires NumPy, SciPy, pyFFTW 157 | and h5py. 158 | Note that pyFFTW v0.9.2 does not support 159 | large arrays, so one needs to install the development version from 160 | github, where the bug has 161 | been fixed. 162 | 163 | 164 | The pyCOLA documentation can be found 165 | online, and the source 166 | is on bitbucket. 167 | """ 168 | 169 | 170 | setup_args = { 171 | 'name': 'pyCOLA', 172 | 'version': 1.0, 173 | 'author': 'Svetlin Tassev', 174 | 'author_email': 'stassev@alum.mit.edu', 175 | 'description': 'A Python/Cython N-body code, implementing the Comoving Lagrangian Acceleration (COLA) method in the temporal and spatial domains.', 176 | 'url': '', 177 | 'long_description': long_description, 178 | 'classifiers': [ 179 | 'Programming Language :: Python', 180 | 'Programming Language :: Python :: 2.7', 181 | 'Programming Language :: Cython', 182 | 'Development Status :: 5 - Production/Stable', 183 | 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 184 | 'Intended Audience :: Science/Research', 185 | 'Topic :: Scientific/Engineering', 186 | 'Topic :: Scientific/Engineering :: Astronomy', 187 | 'Topic :: Scientific/Engineering :: Physics' 188 | ], 189 | 'packages':['ic','evolve','acceleration', 'cic','potential','box_smooth'], 190 | 'ext_modules': ext_modules, 191 | 'include_dirs': include_dirs, 192 | 'cmdclass' : {'build_ext': build_ext}, 193 | } 194 | 195 | 196 | 197 | if __name__ == '__main__': 198 | setup(**setup_args) 199 | --------------------------------------------------------------------------------
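As a usage note for the TFRecord format written above: the following is a minimal, self-contained sketch of how one of the files produced by loadNpyData.convert_to in CosmoFlow/io_Cosmo-3param.py could be inspected outside the TensorFlow input queues. It assumes the same TensorFlow 1.x API used in that script; the file name '0.tfrecord' is only an example, and the float32 dtype and (128, 128, 128, 1) cube shape follow from convert_to and read_tfrecord.

from __future__ import print_function

import numpy as np
import tensorflow as tf


def inspect_tfrecord(path='0.tfrecord'):
    """Print basic statistics for every record stored in one TFRecord file."""
    for serialized in tf.python_io.tf_record_iterator(path):
        example = tf.train.Example()
        example.ParseFromString(serialized)
        # convert_to() stored both fields as raw float32 bytes.
        data = np.frombuffer(
            example.features.feature['data_raw'].bytes_list.value[0],
            dtype=np.float32).reshape(128, 128, 128, 1)
        label = np.frombuffer(
            example.features.feature['label_raw'].bytes_list.value[0],
            dtype=np.float32)
        print(data.shape, data.mean(), label)


if __name__ == '__main__':
    inspect_tfrecord()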