├── xars ├── geometries │ ├── __init__.py │ ├── disk.py │ ├── gradientconetorus.py │ ├── spheretorus.py │ ├── clumpyspheretorus.py │ ├── clumpytorus.py │ ├── hydrotorus.py │ └── bntorus.py ├── xsects │ ├── generate │ │ ├── Makefile │ │ ├── README.rst │ │ └── WXSECTS.F90 │ ├── xsects_convert.py │ ├── __init__.py │ └── __main__.py ├── binning │ ├── uniform.py │ ├── bending.py │ ├── __init__.py │ └── bn.py └── coordtrans.py ├── xspecexport ├── binning ├── createblobtoruscutofftable.py ├── createpowtable.py ├── createspheretable.py ├── createdisktable.py ├── createxspecmodel.py ├── createsmoothtorustable.py ├── createwadatoruscutofftable.py ├── createtorustable.py ├── createsmoothtoruscutofftable.py ├── createtoruscutoffdisktable.py └── createtoruscutofftable.py ├── doc ├── blob.png ├── disk.png ├── covmaps.png ├── uxclumpy.png ├── CAT3D-WIND.gif ├── clumpybox.png ├── logo3-mid.png ├── logo3-large.png ├── logo4-small.png ├── uxclumpy_hump.png ├── wadageometry.png ├── warpgeometry.png ├── overview_spectra2.png ├── faq.rst ├── cat3d.rst ├── wada.rst ├── warpeddisk.rst ├── uxclumpy.rst ├── README.rst └── xars.rst ├── conda-requirements.txt ├── MANIFEST.in ├── .gitignore ├── .bumpversion.cfg ├── .coveragerc ├── setup.cfg ├── examples ├── example-blobs │ └── generate_blobs.py ├── torusCcovfrac.py ├── runsphere.sh ├── torusCNHeq.py ├── runtorusBN.sh ├── runtorus.sh ├── disk.py ├── example-grid │ └── generate_warpeddisk.py ├── torusL.py ├── torusBN.py ├── torus2.py ├── torusC.py └── torusG.py ├── pyproject.toml ├── .travis.yml ├── README.rst ├── scripts └── vizfek2.py ├── Makefile ├── CONTRIBUTING.rst └── .github └── workflows └── tests.yml /xars/geometries/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /xspecexport/binning: -------------------------------------------------------------------------------- 1 | ../binning -------------------------------------------------------------------------------- /doc/blob.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/blob.png -------------------------------------------------------------------------------- /doc/disk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/disk.png -------------------------------------------------------------------------------- /conda-requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scipy 3 | matplotlib 4 | h5py 5 | astropy 6 | tqdm 7 | -------------------------------------------------------------------------------- /doc/covmaps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/covmaps.png -------------------------------------------------------------------------------- /doc/uxclumpy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/uxclumpy.png -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include xars/xsects/xsects.dat 2 | include README.rst 3 | include LICENSE 4 | 
-------------------------------------------------------------------------------- /doc/CAT3D-WIND.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/CAT3D-WIND.gif -------------------------------------------------------------------------------- /doc/clumpybox.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/clumpybox.png -------------------------------------------------------------------------------- /doc/logo3-mid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/logo3-mid.png -------------------------------------------------------------------------------- /doc/logo3-large.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/logo3-large.png -------------------------------------------------------------------------------- /doc/logo4-small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/logo4-small.png -------------------------------------------------------------------------------- /doc/uxclumpy_hump.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/uxclumpy_hump.png -------------------------------------------------------------------------------- /doc/wadageometry.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/wadageometry.png -------------------------------------------------------------------------------- /doc/warpgeometry.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/warpgeometry.png -------------------------------------------------------------------------------- /doc/overview_spectra2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JohannesBuchner/xars/HEAD/doc/overview_spectra2.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pdf 2 | *~ 3 | *.pyc 4 | *output*.png 5 | example*/*.png 6 | example*/*/*.png 7 | *.hdf5 8 | *.fits 9 | output 10 | *.so 11 | *.pyx.py 12 | build 13 | 14 | -------------------------------------------------------------------------------- /.bumpversion.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 2.0.0 3 | commit = True 4 | tag = True 5 | 6 | [bumpversion:file:pyproject.toml] 7 | 8 | [bumpversion:file:xars/__init__.py] 9 | -------------------------------------------------------------------------------- /xars/xsects/generate/Makefile: -------------------------------------------------------------------------------- 1 | 2 | all: xsects 3 | 4 | xsects.dat: xsects 5 | ./xsects 6 | 7 | xsects: WXSECTS.F90 PHFIT2.F PHOTO.F 8 | gfortran -Wall -Wextra -Wno-tabs WXSECTS.F90 PHFIT2.F PHOTO.F -o xsects 9 | 10 | clean: 11 | rm xsects 12 | 13 | 14 | 
-------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | # 2 | # .coveragerc to control coverage.py 3 | # 4 | 5 | [run] 6 | branch = True 7 | #include = 8 | # xars/* 9 | # examples/* 10 | 11 | 12 | [report] 13 | exclude_lines = 14 | pragma: no cover 15 | def __repr__ 16 | if __name__ == .__main__.: 17 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = docs 3 | extend-ignore = E501,F401,E128,E231,E124 4 | 5 | [aliases] 6 | test = pytest 7 | 8 | [tool:pytest] 9 | collect_ignore = ['setup.py', 'xars/xsects/xsects_convert.py'] 10 | #addopts = --junitxml=test-reports/junit.xml --html=tests/reports/index.html 11 | 12 | [pycodestyle] 13 | count = False 14 | ignore = W191,W291,W293,E231,E225 15 | max-line-length = 160 16 | statistics = False 17 | 18 | -------------------------------------------------------------------------------- /xars/binning/uniform.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | 3 | nbins = 1100 4 | 5 | 6 | # uniform bins 7 | def energy2bin(energy): 8 | b = numpy.array(((energy - 2) * 80. + 0.00000001), dtype=int) 9 | b[energy > 10] = ((10 - 2) * 80 + (energy[energy > 10] - 10) * 20 + 0.00000001) 10 | return b 11 | 12 | 13 | def bin2energy_lo(binid): 14 | e = numpy.array(binid / 80. + 2) 15 | e[binid > 640] = (binid[binid > 640] - 640) / 20. + 10 16 | return e 17 | -------------------------------------------------------------------------------- /examples/example-blobs/generate_blobs.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import numpy 3 | 4 | # make unit spheres at distance 3 with various densities 5 | 6 | for nh in numpy.linspace(22, 26, 21): 7 | with h5py.File('torusblob%.1f.hdf5' % nh, 'w') as f: 8 | x = numpy.array([0.3]) 9 | y = numpy.array([0.]) 10 | z = numpy.array([0.]) 11 | R = numpy.array([0.1]) 12 | NH = numpy.array([nh]) 13 | f['sigma'] = 0 14 | f['x'] = x 15 | f['y'] = y 16 | f['z'] = z 17 | f['radius'] = R 18 | f['NH'] = NH 19 | 20 | 21 | -------------------------------------------------------------------------------- /xars/binning/bending.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import exp, log 3 | 4 | nbins = 1000 5 | r = 1.5 6 | A = log((8.1 + 0.015) / 8.10)**(-1. / r) 7 | 8 | 9 | def bin2energy_lo(bin): 10 | bin = numpy.asarray(bin) 11 | with numpy.errstate(invalid='ignore'): 12 | return numpy.where(bin < 800, bin * 0.01 + 0.1, 8.1 * exp(((bin - 800) / A)**r)) 13 | 14 | 15 | def energy2bin(energy): 16 | x = numpy.asarray(energy) 17 | with numpy.errstate(invalid='ignore'): 18 | return (numpy.where(x < 8.1, (x - 0.1) / 0.01, A * (log(x / 8.1))**(1. 
/ r) + 800)).astype(int) 19 | -------------------------------------------------------------------------------- /xars/geometries/disk.py: -------------------------------------------------------------------------------- 1 | from xars.coordtrans import to_cartesian, to_spherical 2 | 3 | 4 | class DiskGeometry: 5 | def __init__(self, verbose=False): 6 | self.verbose = verbose 7 | self.NH = 1 8 | 9 | def compute_next_point(self, location, direction): 10 | (xi, yi, zi) = location 11 | (dist, beta, alpha) = direction 12 | d = dist / self.NH # distance in units of nH 13 | 14 | if self.verbose: 15 | print(' .. .. mean in nH units: ', d.mean()) 16 | # compute relative vector traveled 17 | xv, yv, zv = to_cartesian((d, beta, alpha)) 18 | 19 | # compute new position 20 | xf, yf, zf = xi + xv, yi + yv, zi + zv 21 | 22 | # compute spherical coordinates 23 | rad, phi, theta = to_spherical((xf, yf, zf)) 24 | 25 | # are we inside the disk 26 | inside = zf < 0 27 | return inside, (xf,yf,zf), (rad, phi, theta) 28 | -------------------------------------------------------------------------------- /xars/xsects/generate/README.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | Cross-section computation 3 | =========================== 4 | 5 | The xsects program computes cross-sections as a function of energy for 6 | photo-electric absorption and fluorescent emission (including line energies and yields). 7 | 8 | 9 | Compiling and Running 10 | ======================= 11 | 12 | Compile with:: 13 | 14 | $ make 15 | 16 | Run with:: 17 | 18 | $ ./xsects 19 | SYNOPSIS: xsects 20 | 21 | Abundances are relative. Use 1 for local ISM abundances. 22 | 23 | To create a xsects file with standard abundances, use:: 24 | 25 | $ ./xsects 1.0 1.0 26 | 27 | This produces a xsects.dat file, which should be placed in the xsects/ folder. 28 | 29 | Caveats 30 | =========== 31 | 32 | The total photo-electric cross-section has a bad behaviour at E>70keV. This is 33 | corrected in XARS when loading the xsects.dat file. 34 | 35 | See Brightman et al. 2011 for the definition of abundances, cross-sections, 36 | energies and yields. 37 | 38 | Updated and alternative cross-sections are welcome. 39 | 40 | 41 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=42", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "xars" 7 | version = "2.0.0" 8 | description = "XARS simulates X-rays propagating through matter in user-defined geometries." 
9 | readme = "README.rst" 10 | authors = [ 11 | { name="Johannes Buchner", email="buchner.johannes@gmx.at" } 12 | ] 13 | license = { file = "LICENSE" } 14 | requires-python = ">=3.10" 15 | classifiers = [ 16 | "License :: OSI Approved :: GNU Affero General Public License v3", 17 | "Programming Language :: Python :: 3", 18 | "Intended Audience :: Science/Research", 19 | "Topic :: Scientific/Engineering :: Physics", 20 | "Topic :: Scientific/Engineering :: Astronomy", 21 | "Development Status :: 6 - Mature", 22 | ] 23 | dependencies = [ 24 | "numpy", 25 | "scipy", 26 | "tqdm", 27 | "matplotlib", 28 | "h5py", 29 | ] 30 | 31 | [project.urls] 32 | Repository = "https://github.com/JohannesBuchner/LightRayRider" 33 | Homepage = "https://github.com/JohannesBuchner/LightRayRider" 34 | 35 | 36 | [tool.setuptools] 37 | packages = ["xars", "xars.geometries", "xars.binning", "xars.xsects"] 38 | -------------------------------------------------------------------------------- /examples/torusCcovfrac.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import numpy 3 | from numpy import cos, log10 4 | import matplotlib.pyplot as plt 5 | import sys 6 | 7 | plt.figure(figsize=(10, 8)) 8 | for i, sigma in enumerate([0, 5, 20, 60][::-1]): 9 | for j, (f, fCT) in enumerate([(0, 0), (8, 0.25), (6, 0.3), (4, 0.45), (3, 0.6), ]): 10 | Theta_tor = sigma * 1.4 11 | if f == 0 and sigma == 0: 12 | sigmas = 'dring-output/dring_empty' 13 | elif sigma == 0: 14 | sigmas = 'dring-output/dring_25.5_i0.5_n%d' % f 15 | else: 16 | sigmas = '10000_%d_f%d_gexp5core' % (sigma, f) 17 | 18 | geometryfile = '/mnt/data/daten/PostDoc/research/agn/torus/clumpy/%s.hdf5' % (sigmas) 19 | f = h5py.File(geometryfile, 'r') 20 | lognh = numpy.log10(f['NH_samples'][:]) 21 | lognh[lognh<20] = 20 22 | #lognh = 22 + log10(nh) 23 | plt.hist( 24 | lognh, cumulative=True, histtype='step', density=True, 25 | label='TORsigma=%d CTKcover=%.1f' % (Theta_tor, fCT), 26 | bins=numpy.linspace(20, 26, 40) 27 | ) 28 | print('%d %.1f %.2f %.2f' % (Theta_tor, fCT, (lognh>22).mean(), (lognh>24).mean())) 29 | #print(lognh. 
30 | plt.legend(loc='lower right') 31 | plt.savefig("doc/covfracs.pdf", bbox_inches='tight') 32 | plt.savefig("doc/covfracs.png", bbox_inches='tight') 33 | plt.close() 34 | -------------------------------------------------------------------------------- /xars/coordtrans.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import arccos, arctan2, cos, pi, sin 3 | 4 | 5 | def to_spherical(cartesian_location): 6 | (xf, yf, zf) = cartesian_location 7 | xf = numpy.asarray(xf).reshape((-1,)) 8 | yf = numpy.asarray(yf).reshape((-1,)) 9 | zf = numpy.asarray(zf).reshape((-1,)) 10 | rad = (xf**2 + yf**2 + zf**2)**0.5 11 | phi = arctan2(yf, xf) 12 | mask = ~(rad == 0) 13 | theta = numpy.zeros_like(rad) 14 | theta[mask] = arccos(zf[mask] / rad[mask]) 15 | return (rad, theta, phi) 16 | 17 | 18 | def to_cartesian(spherical_location): 19 | (rad, theta, phi) = spherical_location 20 | xv = rad * sin(theta) * cos(phi) 21 | yv = rad * sin(theta) * sin(phi) 22 | zv = rad * cos(theta) 23 | return (xv, yv, zv) 24 | 25 | 26 | def test_random(): 27 | for _ in range(100): 28 | x, y, z = numpy.random.uniform(-1, 1, size=3) 29 | rad, theta, phi = to_spherical((x, y, z)) 30 | assert rad <= 3**0.5, rad 31 | assert rad >= 0 32 | assert theta <= pi 33 | assert theta >= 0 34 | assert phi <= pi, phi 35 | assert phi >= -pi, phi 36 | xv, yv, zv = to_cartesian((rad, theta, phi)) 37 | 38 | assert numpy.allclose(xv, x) 39 | assert numpy.allclose(yv, y) 40 | assert numpy.allclose(zv, z) 41 | -------------------------------------------------------------------------------- /examples/runsphere.sh: -------------------------------------------------------------------------------- 1 | 2 | i=0 3 | for nh in 9.99999978e-03 1.41000003e-02 1.99999996e-02 \ 4 | 2.82000005e-02 3.97999994e-02 5.62000014e-02 \ 5 | 7.94000030e-02 1.12000003e-01 1.58000007e-01 \ 6 | 2.24000007e-01 3.16000015e-01 4.46999997e-01 \ 7 | 6.30999982e-01 8.90999973e-01 1.25999999e+00 \ 8 | 1.77999997e+00 2.50999999e+00 3.54999995e+00 \ 9 | 5.01000023e+00 7.07999992e+00 1.00000000e+01 \ 10 | 1.41000004e+01 2.00000000e+01 2.82000008e+01 \ 11 | 3.97999992e+01 5.62000008e+01 7.94000015e+01 \ 12 | 1.12000000e+02 1.58000000e+02 2.24000000e+02 \ 13 | 3.16000000e+02 4.47000000e+02 6.31000000e+02 \ 14 | 8.91000000e+02 1.26000000e+03 1.78000000e+03 \ 15 | 2.51000000e+03 3.55000000e+03 5.01000000e+03 \ 16 | 7.08000000e+03 1.00000000e+04 17 | do 18 | #if [ -e "output/sphere_${i}_rdata.hdf5" ]; then 19 | # echo "output/sphere_${i}_rdata.hdf5" already there 20 | # ((i++)) 21 | # continue 22 | #fi 23 | #[[ "$i" -gt 35 ]] && 24 | python3 torus2.py --nh=$nh --opening-angle=0 --nevents $1 --output="output/sphere_${i}_" & 25 | sleep 1m 26 | ((i++)) 27 | done 28 | wait 29 | 30 | pushd output 31 | python3 ../xspecexport/createspherecutofftable.py sphere.fits sphere_*_rdata.hdf5 32 | python3 ../xspecexport/createspherecutofftable.py sphere-transmit.fits sphere_*_transmitrdata.hdf5 33 | python3 ../xspecexport/createspherecutofftable.py sphere-reflect.fits sphere_*_reflectrdata.hdf5 34 | -------------------------------------------------------------------------------- /xars/geometries/gradientconetorus.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import numpy 4 | from numpy import cos, log 5 | 6 | from . 
import conetorus 7 | 8 | """ 9 | ⎛ za ⎞ 10 | ⎜ -NH₀ - ── ⎟ 11 | ⎜ zg ⎟ 12 | ⎜10 ⋅log(10)⋅cos(θ) ⎟ 13 | zg⋅log⎜────────────────────────── + 1⎟ 14 | ⎝ d⋅zg ⎠ 15 | ────────────────────────────────────── = dist 16 | log(10)⋅cos(θ) 17 | 18 | NH = 1/dist 19 | """ 20 | 21 | 22 | class GradientConeTorusGeometry(conetorus.ConeTorusGeometry): 23 | def __init__(self, Theta_tor, NH0=21 - 22, z0=0, zg=-1, verbose=False): 24 | self.NH0 = NH0 25 | self.z0 = z0 26 | self.zg = zg 27 | self.verbose = verbose 28 | super(GradientConeTorusGeometry, self).__init__(Theta_tor, 1) 29 | 30 | def compute_next_point(self, location, direction): 31 | # print 'beta, zg', beta, self.zg 32 | (xi, yi, zi) = location 33 | (dist, beta, alpha) = direction 34 | f = log(10) * cos(beta) / self.zg 35 | # print 'f, dist', f, dist 36 | logval = f / dist * 10**(-self.NH0 - (zi + self.z0) / self.zg) + 1 37 | # print 'logval', logval 38 | 39 | self.NH = numpy.where(logval < 0, 1e-100, f / log(logval)) 40 | assert numpy.all(self.NH > 0), [self.NH[~(self.NH > 0)], f, logval] 41 | return super(GradientConeTorusGeometry, self).compute_next_point( 42 | (xi, yi, zi), (dist, beta, alpha)) 43 | -------------------------------------------------------------------------------- /examples/torusCNHeq.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import pi, log10 3 | import matplotlib.pyplot as plt 4 | from xars.geometries.clumpytorus import ClumpyTorusGeometry 5 | 6 | plt.figure(figsize=(10, 8)) 7 | for sigma in [0, 5, 20, 60]: 8 | for f, fCT in [(0, 0), (8, 0.25), (6, 0.3), (4, 0.45), (3, 0.6), ]: 9 | Theta_tor = sigma * 1.4 10 | if f == 0 and sigma == 0: 11 | sigmas = 'dring-output/dring_empty' 12 | elif sigma == 0: 13 | sigmas = 'dring-output/dring_25.5_i0.5_n%d' % f 14 | else: 15 | sigmas = '10000_%d_f%d_gexp5core' % (sigma, f) 16 | 17 | geometryfile = '/mnt/data/daten/PostDoc/research/agn/torus/clumpy/%s.hdf5' % (sigmas) 18 | geometry = ClumpyTorusGeometry(geometryfile, verbose=True) 19 | #photons = PhotonBunch(i=100, nphot=10000, verbose=True, geometry=geometryfile) 20 | #print(photons.beta.mean(), photons.beta.min(), photons.beta.max()) 21 | beta = pi / 2 + numpy.zeros(10000) 22 | alpha = numpy.linspace(0, 2 * pi, 10000 + 1)[:-1] 23 | nh = geometry.compute_los_nh(beta, alpha) 24 | lognh = log10(nh + 0.00001) + 22 25 | lognh[lognh<20] = 20 26 | plt.hist( 27 | lognh, cumulative=True, histtype='step', density=True, 28 | label='TORsigma=%d CTKcover=%.1f' % (Theta_tor, fCT), 29 | bins=numpy.linspace(20, 26, 40) 30 | ) 31 | print('%2d %.1f %.2e %.2f %.2f %.2f' % (Theta_tor, fCT, nh.mean() * 1e22, lognh.mean(), (lognh>22).mean(), (lognh>24).mean())) 32 | #print(lognh. 33 | plt.legend(loc='upper left') 34 | plt.savefig("doc/eqcovfracs.pdf", bbox_inches='tight') 35 | plt.savefig("doc/eqcovfracs.png", bbox_inches='tight') 36 | plt.close() 37 | -------------------------------------------------------------------------------- /doc/faq.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | Frequently asked questions 3 | =========================== 4 | 5 | What does the NH parameter mean? 6 | --------------------------------- 7 | 8 | This is not a property of the geometry, but the line-of-sight (LOS) column density. 9 | 10 | Basically, the sky as seen from the corona is segmented by column density (and, corsely, by viewing angle). 11 | Output spectra are accumulated in these column density bins. 
12 | 13 | This makes spectral fitting less degenerate, 14 | because LOS NH predominantly shapes the spectrum. 15 | 16 | It also allows very similar viewing angles to have very different column densities, 17 | as seen in cloud eclipse events. It enables fitting variable absorption with the exact same geometry. 18 | 19 | How is the photon path solved in XARS? Is it a Monte Carlo integration? 20 | ------------------------------------------------------------------------ 21 | 22 | No. The optical depth a photon travels is a Monte Carlo draw; however, the end point 23 | is always determined analytically. 24 | 25 | In simple geometries (such as spheres, cones), the necessary line integral 26 | can be computed and programmed analytically. 27 | 28 | For geometries with many spheres, there is optimized, parallelised C code 29 | to determine which spheres intersect, and how they are ordered. 30 | This is implemented in https://github.com/JohannesBuchner/LightRayRider 31 | 32 | For general geometries based on grids, there is optimized, parallelised C code 33 | to traverse the grid. 34 | This is implemented in https://github.com/JohannesBuchner/LightRayRider 35 | 36 | 37 | I have a question 38 | --------------------- 39 | 40 | Please open a `Github issue `_ 41 | or, if it should not be public, send me an email. 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /examples/runtorusBN.sh: -------------------------------------------------------------------------------- 1 | 2 | i=0 3 | for nh in 9.99999978e-03 1.41000003e-02 1.99999996e-02 \ 4 | 2.82000005e-02 3.97999994e-02 5.62000014e-02 \ 5 | 7.94000030e-02 1.12000003e-01 1.58000007e-01 \ 6 | 2.24000007e-01 3.16000015e-01 4.46999997e-01 \ 7 | 6.30999982e-01 8.90999973e-01 1.25999999e+00 \ 8 | 1.77999997e+00 2.50999999e+00 3.54999995e+00 \ 9 | 5.01000023e+00 7.07999992e+00 1.00000000e+01 \ 10 | 1.41000004e+01 2.00000000e+01 2.82000008e+01 \ 11 | 3.97999992e+01 5.62000008e+01 7.94000015e+01 \ 12 | 1.12000000e+02 1.58000000e+02 2.24000000e+02 \ 13 | 3.16000000e+02 4.47000000e+02 6.31000000e+02 \ 14 | 8.91000000e+02 1.26000000e+03 1.78000000e+03 \ 15 | 2.51000000e+03 3.55000000e+03 5.01000000e+03 \ 16 | 7.08000000e+03 1.00000000e+04 17 | do 18 | j=0 19 | for o in 25.79999924 36.90000153 45.59999847 53.09999847 60.
66.40000153 72.5 78.5 84.30000305 20 | do 21 | #[ -e $2_${i}_${j}_rdata.hdf5 ] || 22 | python3 torusBN.py --nh=$nh --opening-angle=$o --nevents $1 --output="$2_${i}_${j}_" & 23 | ((j++)) 24 | done 25 | wait 26 | ((i++)) 27 | done 28 | 29 | exit 30 | 31 | # if $2 == output/bntorus 32 | cd output 33 | python3 ../xspecexport/createtorustable.py bntorus.fits bntorus_*_?_rdata.hdf5 34 | python3 ../xspecexport/createtorustable.py bntorus-transmit.fits bntorus_*_?_transmitrdata.hdf5 35 | python3 ../xspecexport/createtorustable.py bntorus-reflect.fits bntorus_*_?_reflectrdata.hdf5 36 | 37 | python3 ../xspecexport/createtoruscutofftable.py bntorus-cutoff.fits bntorus_*_?_rdata.hdf5 38 | python3 ../xspecexport/createtoruscutofftable.py bntorus-cutoff-transmit.fits bntorus_*_?_transmitrdata.hdf5 39 | python3 ../xspecexport/createtoruscutofftable.py bntorus-cutoff-reflect.fits bntorus_*_?_reflectrdata.hdf5 40 | 41 | -------------------------------------------------------------------------------- /xars/xsects/xsects_convert.py: -------------------------------------------------------------------------------- 1 | """ 2 | Cross sections 3 | --------------- 4 | 5 | Convert cross-section input file to right energy grid 6 | """ 7 | 8 | import numpy 9 | import scipy 10 | from numpy import arccos as acos 11 | from numpy import (cos, exp, log, log10, logical_and, logical_or, pi, round, 12 | sin, tan) 13 | 14 | from xars import binning 15 | from xars.binning.bn import bin2energy_lo, nbins 16 | 17 | 18 | def interpolate(etarget, e, y): 19 | ytarget = 10**numpy.interp(x=log10(etarget), xp=log10(e), fp=log10(y)) 20 | ytarget[~numpy.isfinite(ytarget)] = 0 21 | return ytarget 22 | 23 | 24 | energy_lo, energy_hi = binning.bin2energy(numpy.arange(binning.nbins)) 25 | energy = (energy_hi + energy_lo) / 2. 26 | deltae = energy_hi - energy_lo 27 | 28 | 29 | def bin2energy_hi(i): 30 | return bin2energy_lo(i + 1) 31 | 32 | 33 | i = numpy.arange(nbins) 34 | emid = (bin2energy_lo(i) + bin2energy_hi(i)) / 2. 35 | 36 | # photoelectric and line cross-sections 37 | xsectsdata = numpy.loadtxt('xsects_orig.dat') 38 | xlines_energies = xsectsdata[0, 2:] 39 | xlines_yields = xsectsdata[1, 2:] 40 | xsects = xsectsdata[2:,:] 41 | e1 = xsects[:,0] 42 | assert len(e1) == len(emid) 43 | xphot = xsects[:,1] 44 | e70 = e1 > 70. 
45 | lines_max = numpy.max(xsects[:,2:], axis=1) 46 | xphot[e70] = lines_max[e70] * xphot[e70][0] / lines_max[e70][0] 47 | assert (xphot >= 0).all() 48 | xsects[:,1] = xphot 49 | 50 | # now rebin 51 | xsects_orig = xsects 52 | xsects = [energy] 53 | for i in range(1, xsects_orig.shape[1]): 54 | xsects.append(interpolate(energy, emid, xsects_orig[:,i])) 55 | xsects = numpy.transpose(xsects) 56 | 57 | # write out 58 | with open('xsects_orig.dat') as fin: 59 | lines = fin.readlines() 60 | nheader = max([i for i, l in enumerate(lines) if l.startswith('#')]) 61 | with open('xsects.dat', 'w') as f: 62 | f.write(''.join(lines[:nheader + 1])) 63 | numpy.savetxt(f, xsects) 64 | -------------------------------------------------------------------------------- /examples/runtorus.sh: -------------------------------------------------------------------------------- 1 | nphot=$1 2 | prefix=output/torus 3 | mkdir -p output 4 | 5 | i=0 6 | for nh in 9.99999978e-03 1.41000003e-02 1.99999996e-02 \ 7 | 2.82000005e-02 3.97999994e-02 5.62000014e-02 \ 8 | 7.94000030e-02 1.12000003e-01 1.58000007e-01 \ 9 | 2.24000007e-01 3.16000015e-01 4.46999997e-01 \ 10 | 6.30999982e-01 8.90999973e-01 1.25999999e+00 \ 11 | 1.77999997e+00 2.50999999e+00 3.54999995e+00 \ 12 | 5.01000023e+00 7.07999992e+00 1.00000000e+01 \ 13 | 1.41000004e+01 2.00000000e+01 2.82000008e+01 \ 14 | 3.97999992e+01 5.62000008e+01 7.94000015e+01 \ 15 | 1.12000000e+02 1.58000000e+02 2.24000000e+02 \ 16 | 3.16000000e+02 4.47000000e+02 6.31000000e+02 \ 17 | 8.91000000e+02 1.26000000e+03 1.78000000e+03 \ 18 | 2.51000000e+03 3.55000000e+03 5.01000000e+03 \ 19 | 7.08000000e+03 1.00000000e+04 20 | do 21 | j=0 22 | for o in 25.79999924 36.90000153 45.59999847 53.09999847 60. 66.40000153 72.5 78.5 84.30000305 23 | do 24 | if [ -e "${prefix}_${i}_${j}_rdata.hdf5" ] 25 | then 26 | echo $i $j 27 | else 28 | python3 torus2.py --nh=$nh --opening-angle=$o --nevents $nphot --output="${prefix}_${i}_${j}_" & 29 | fi 30 | ((j++)) 31 | done 32 | wait 33 | ((i++)) 34 | done 35 | 36 | cd output 37 | python ../xspecexport/createtorustable.py wedge.fits ${prefix}_*_?_rdata.hdf5 38 | python ../xspecexport/createtorustable.py wedge-transmit.fits ${prefix}_*_?_transmitrdata.hdf5 39 | python ../xspecexport/createtorustable.py wedge-reflect.fits ${prefix}_*_?_reflectrdata.hdf5 40 | 41 | python ../xspecexport/createtoruscutofftable.py wedge-cutoff.fits ${prefix}_*_?_rdata.hdf5 42 | python ../xspecexport/createtoruscutofftable.py wedge-cutoff-transmit.fits ${prefix}_*_?_transmitrdata.hdf5 43 | python ../xspecexport/createtoruscutofftable.py wedge-cutoff-reflect.fits ${prefix}_*_?_reflectrdata.hdf5 44 | 45 | -------------------------------------------------------------------------------- /xspecexport/createblobtoruscutofftable.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import exp 3 | import h5py 4 | import progressbar 5 | from xars.tableexport import write_atable 6 | 7 | table = [] 8 | PhoIndices = [ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 9 | 2. 
, 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 10 | 3., 3.2, 3.4, 3.6, 3.8, 4, 4.2, 4.4, 4.6, 4.8, 5.0 ] 11 | Ecuts = [ 1, 2, 3, 5., 10., 15, 20., 25, 30, 35, 40, 50, 60, 80, 100, 140, 160, 200, 300, 400 ] 12 | 13 | outfilename = 'blobs.fits' 14 | rdataname = '%s_outreflectrdata.hdf5' 15 | blobnhs = numpy.round(numpy.arange(22, 26.1, 0.2), 8) 16 | models = ['torusblob%.1f.hdf5' % blobnh for blobnh in blobnhs] 17 | 18 | widgets = [progressbar.Percentage(), " starting ... ", progressbar.Bar(), progressbar.ETA()] 19 | pbar = progressbar.ProgressBar(widgets=widgets) 20 | 21 | for NHcloud, model in zip(pbar(blobnhs), models): 22 | filename = rdataname % model 23 | f = h5py.File(filename, 'r') 24 | energy_lo = f['energy_lo'][()] 25 | energy_hi = f['energy_hi'][()] 26 | nbins = len(energy_lo) 27 | energy = (energy_hi + energy_lo) / 2 28 | deltae = energy_hi - energy_lo 29 | deltae0 = deltae[energy >= 1][0] 30 | nphot = f.attrs['NPHOT'] 31 | 32 | matrix = f['rdata'] 33 | a, b, nmu = matrix.shape 34 | assert a == nbins, matrix.shape 35 | assert b == nbins, matrix.shape 36 | # sum over the viewing angles, but not the nh bins 37 | matrix_noinc = numpy.sum(matrix, axis=2) 38 | 39 | # go through viewing angles 40 | widgets[1] = '| NHblob=%.1f' % (NHcloud) 41 | matrix_mu = matrix_noinc * 1. / nphot 42 | for PhoIndex in PhoIndices: 43 | for Ecut in Ecuts: 44 | weights = energy**-PhoIndex * exp(-energy / Ecut) * deltae / deltae0 45 | y = weights @ matrix_mu 46 | table.append(((PhoIndex, Ecut, NHcloud), y)) 47 | 48 | energy_info = dict(energy_lo=energy_lo, energy_hi=energy_hi) 49 | 50 | write_atable(outfilename, 51 | parameter_definitions = [ 52 | ('PhoIndex', 0, 2.0, 0.0099999998, 1.2, 2.8, PhoIndices), 53 | ('Ecut', 0, 100.0, 10.0, 40, 400, Ecuts), 54 | ('NH_blob', 0, 25.0, 0.5, 22, 26, blobnhs), 55 | ], table=table, modelname='blobreflect', **energy_info) 56 | 57 | -------------------------------------------------------------------------------- /examples/disk.py: -------------------------------------------------------------------------------- 1 | """ 2 | Monte-Carlo simulator for X-ray obscurer geometries 3 | 4 | Literature: 5 | 6 | * Brightman & Nandra (2011) 7 | * Leahy & Creighton (1993) 8 | """ 9 | 10 | import numpy 11 | from numpy import cos 12 | import matplotlib as mpl 13 | mpl.use('Agg') 14 | 15 | rng = numpy.random 16 | 17 | from xars.geometries.disk import DiskGeometry 18 | from xars import montecarlo 19 | 20 | import argparse 21 | 22 | parser = argparse.ArgumentParser( 23 | description="""Monte-Carlo simulator for X-ray obscurer geometries""", 24 | epilog="""(C) Johannes Buchner, 2013. Based on work by Murray Brightman & Kirpal Nandra (see 2011 publication)""") 25 | 26 | parser.add_argument('--nevents', type=int, default=1000000, help='number of input photons per energy bin') 27 | parser.add_argument('--plot-paths', default=False, help='plot the paths taken?', action='store_true') 28 | parser.add_argument('--plot-interactions', default=False, help='plot the points at each interaction?', action='store_true') 29 | parser.add_argument('--verbose', default=False, help='Be more talkative, show debug statistics on interactions', action='store_true') 30 | parser.add_argument('--output', type=str, default='disk_', help='Prefix for output files. 
') 31 | parser.add_argument('--plot-every', type=int, default=1, help='Plot only for every Nth energy bins') 32 | args = parser.parse_args() 33 | 34 | nmu = 10 # number of viewing angle bins 35 | geometry = DiskGeometry(verbose=args.verbose) 36 | prefix = args.output 37 | 38 | def binmapfunction(beta, alpha): 39 | return (numpy.round(0.5 + nmu * numpy.abs(cos(beta))) - 1).astype(int) 40 | 41 | rdata, nphot = montecarlo.run(prefix, nphot = args.nevents, nmu = nmu, geometry=geometry, 42 | binmapfunction = binmapfunction, 43 | plot_paths=args.plot_paths, plot_interactions=args.plot_interactions, verbose=args.verbose, 44 | plot_every=args.plot_every) 45 | 46 | rdata_transmit, rdata_reflect = rdata 47 | rdata_both = rdata_transmit + rdata_reflect 48 | montecarlo.store(prefix + 'transmit', nphot, rdata_transmit, nmu, plot=True) 49 | montecarlo.store(prefix + 'reflect', nphot, rdata_reflect, nmu, plot=True) 50 | montecarlo.store(prefix, nphot, rdata_both, nmu, plot=True) 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /examples/example-grid/generate_warpeddisk.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import pi, cos, sin 3 | import h5py 4 | 5 | # rho: density [M_sun pc^-3] 6 | # temp: gas temperature [K] 7 | # temp_dust: dust temperature [K] 8 | # u,v,w: three components of velocity [pc/Myr] 9 | # nxh, nyh, nzh: Grid size (x,y,z) 10 | # time: time in Myr 11 | 12 | nxh = 256 13 | nyh = 256 14 | nzh = 256 15 | shape = (nxh, nyh, nzh) 16 | center = numpy.array([nxh/2.+0.5, nyh/2.+0.5, nzh/2.+0.5]) 17 | 18 | pc_cm = 3.0856776e+18 19 | nH_Msun = 1.18803e+57 20 | CTNH = 10**25 21 | CTrho = CTNH / nH_Msun * pc_cm**3 / (32 * pc_cm / 256) 22 | 23 | def gamma(r): 24 | return r**0.5 25 | def beta(r): 26 | return sin(gamma(r))/gamma(r) 27 | 28 | fcov = 1 29 | for diskfraction in 16, 4, 2, 1: 30 | rho = numpy.zeros(shape) 31 | 32 | # paint in the geometric shape 33 | for u in numpy.linspace(0, 2*pi, 401): 34 | v = numpy.linspace(1e-3, pi, 401) 35 | v = v[v < pi/diskfraction] 36 | x = v*( cos(u)*sin(gamma(v)) + sin(u)*cos(gamma(v))*cos(beta(v))) 37 | y = v*(-cos(u)*cos(gamma(v)) + sin(u)*sin(gamma(v))*cos(beta(v))) 38 | z = fcov * -v*sin(u)*sin(beta(v)) 39 | 40 | i = (x / 6.3 * nxh + center[0]).astype(int) 41 | j = (y / 6.3 * nyh + center[1]).astype(int) 42 | k = (z / 6.3 * nzh + center[2]).astype(int) 43 | 44 | k -= 2 45 | rho[i,j,k] = CTrho 46 | k -= 1 47 | rho[i,j,k] = CTrho 48 | print(diskfraction, rho[tuple(center.astype(int))]) 49 | 50 | 51 | with h5py.File('warpeddisk_%s.hdf5' % diskfraction, 'w') as fout: 52 | d = fout.create_dataset('rho', data=rho, compression='gzip', shuffle=True) 53 | d.attrs['description'] = 'Density' 54 | d.attrs['unit'] = 'M_sun/pc^3' 55 | d = fout.create_dataset('nxh', data=nxh) 56 | d.attrs['description'] = 'number of grid cells in x axis' 57 | d = fout.create_dataset('nyh', data=nyh) 58 | d.attrs['description'] = 'number of grid cells in y axis' 59 | d = fout.create_dataset('nzh', data=nzh) 60 | d.attrs['description'] = 'number of grid cells in z axis' 61 | d = fout.create_dataset('center', data=center) 62 | d.attrs['unit'] = 'Myr' 63 | 64 | fout.attrs['CREATOR'] = """Johannes Buchner """ 65 | fout.attrs['ORIGIN'] = """Torqued disk with z-boost of %s""" % fcov 66 | 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /examples/torusL.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Monte-Carlo simulator for X-ray obscurer geometries 3 | 4 | Literature: 5 | 6 | * Brightman & Nandra (2011) 7 | * Leahy & Creighton (1993) 8 | """ 9 | 10 | import numpy 11 | from numpy import pi 12 | import matplotlib as mpl 13 | mpl.use('Agg') 14 | import matplotlib.pyplot as plt 15 | 16 | rng = numpy.random 17 | 18 | from xars.geometries.layeredconetorus import LayeredConeTorusGeometry 19 | from xars import montecarlo 20 | 21 | #rng.seed(0) 22 | 23 | import argparse 24 | 25 | parser = argparse.ArgumentParser( 26 | description="""Monte-Carlo simulator for X-ray obscurer geometries""", 27 | epilog="""(C) Johannes Buchner, 2013-2016. Based on work by Murray Brightman & Kirpal Nandra (see 2011 publication)""") 28 | 29 | parser.add_argument('--geometry', type=str, required=True, help='Geometry file') 30 | parser.add_argument('--nevents', type=int, default=1000000, help='number of input photons per energy bin') 31 | parser.add_argument('--verbose', default=False, help='Be more talkative, show debug statistics on interactions', action='store_true') 32 | args = parser.parse_args() 33 | 34 | 35 | Theta_tors, NHs = numpy.loadtxt(args.geometry).transpose() 36 | nmu = len(Theta_tors) # number of viewing angle bins 37 | prefix = args.geometry + 'layered_' 38 | 39 | geometry = LayeredConeTorusGeometry(Theta_tors = Theta_tors, NHs = NHs, verbose=args.verbose) 40 | geometry.viz() 41 | plt.savefig(prefix + "geometry.pdf") 42 | plt.savefig(prefix + "geometry.png") 43 | plt.close() 44 | 45 | def binmapfunction(beta, alpha): 46 | beta = numpy.where(beta > pi/2, pi - beta, beta) 47 | slot = numpy.zeros(beta.shape, dtype=int) 48 | for i, theta in enumerate(Theta_tors[:-1]): 49 | slot[beta > theta] = i+1 50 | return slot 51 | 52 | rdata, nphot = montecarlo.run(prefix, nphot = args.nevents, nmu = nmu, geometry=geometry, 53 | binmapfunction = binmapfunction, 54 | plot_paths=False, plot_interactions=False, verbose=args.verbose) 55 | 56 | rdata_transmit, rdata_reflect = rdata 57 | header = dict() 58 | montecarlo.store(prefix + 'transmit', nphot, rdata_transmit, nmu, extra_fits_header = header, plot=False) 59 | montecarlo.store(prefix + 'reflect', nphot, rdata_reflect, nmu, extra_fits_header = header, plot=False) 60 | rdata_transmit += rdata_reflect 61 | del rdata_reflect 62 | montecarlo.store(prefix, nphot, rdata_transmit, nmu, extra_fits_header = header, plot=True) 63 | 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | sudo: false 4 | 5 | python: 6 | - "2.7" 7 | - "3.6" 8 | 9 | git: 10 | depth: 10000 11 | 12 | install: 13 | - sudo apt-get install -qq cmake build-essential git 14 | # Fetch and install conda 15 | # ----------------------- 16 | - export CONDA_BASE="http://repo.continuum.io/miniconda/Miniconda" 17 | - if [[ "${TRAVIS_PYTHON_VERSION}" == 2* ]]; then 18 | wget ${CONDA_BASE}2-latest-Linux-x86_64.sh -O miniconda.sh; 19 | else 20 | wget ${CONDA_BASE}3-latest-Linux-x86_64.sh -O miniconda.sh; 21 | fi 22 | - bash miniconda.sh -b -p ${HOME}/miniconda 23 | - export PATH="${HOME}/miniconda/bin:${PATH}" 24 | 25 | # Create the testing environment 26 | # ------------------------------ 27 | - conda config --set always_yes true 28 | - conda config --set changeps1 no 29 | - conda config --set show_channel_urls true 30 | - conda config --add channels 
conda-forge 31 | - conda update --quiet conda 32 | - ENV_NAME="test-environment" 33 | - conda create --quiet -n ${ENV_NAME} python=${TRAVIS_PYTHON_VERSION} 34 | - source activate ${ENV_NAME} 35 | 36 | # Customise the testing environment 37 | # --------------------------------- 38 | - conda install --quiet --file conda-requirements.txt 39 | # - pip install coveralls progressbar2 h5py numpy 40 | 41 | # Summerise environment 42 | # --------------------- 43 | - conda list 44 | - conda info -a 45 | 46 | script: 47 | - python torus2.py --log10nh=24.2 --opening-angle=0 --nevents=100 --output=myoutput 48 | 49 | - git clone https://github.com/JohannesBuchner/LightRayRider 50 | - make -C LightRayRider/ 51 | 52 | - cd example-blobs/ 53 | - 'echo "backend: Agg" > matplotlibrc' 54 | - python generate_blobs.py 55 | - PYTHONPATH=../LightRayRider/ python ../torusC.py --geometry=torusblob23.0.hdf5 --nevents=1000 56 | - OMP_NUM_THREADS=3 PYTHONPATH=../LightRayRider/ python ../torusC.py --geometry=torusblob23.0.hdf5 --nevents=1000 57 | - cd .. 58 | 59 | - cd example-grid 60 | - 'echo "backend: Agg" > matplotlibrc' 61 | - python generate_warpeddisk.py 62 | - PYTHONPATH=../LightRayRider/ python ../torusG.py --geometry=warpeddisk_1.hdf5 --nevents=100 63 | - OMP_NUM_THREADS=3 PYTHONPATH=../LightRayRider/ python ../torusG.py --geometry=warpeddisk_1.hdf5 --nevents=100 64 | - cd .. 65 | 66 | # Run entire BNTorus geometry 67 | #- bash runtorus.sh 100 68 | 69 | #after_success: coveralls 70 | -------------------------------------------------------------------------------- /xars/geometries/spheretorus.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | from numpy import log10 3 | 4 | from xars.coordtrans import to_cartesian, to_spherical 5 | 6 | 7 | class SphereTorusGeometry: 8 | def __init__(self, NH, verbose=False): 9 | self.NH = NH 10 | self.verbose = verbose 11 | 12 | def compute_next_point(self, location, direction): 13 | (xi, yi, zi) = location 14 | (dist, beta, alpha) = direction 15 | d = dist / self.NH # distance in units of nH 16 | 17 | if self.verbose: 18 | print(' .. .. mean in nH units: ', d.mean()) 19 | # compute relative vector traveled 20 | xv, yv, zv = to_cartesian((d, beta, alpha)) 21 | 22 | # compute new position 23 | xf, yf, zf = xi + xv, yi + yv, zi + zv 24 | 25 | # compute spherical coordinates 26 | rad, phi, theta = to_spherical((xf, yf, zf)) 27 | 28 | # are we inside the cone? 29 | inside = rad < 1. 30 | return inside, (xf,yf,zf), (rad, phi, theta) 31 | 32 | def viz(self): 33 | """Visualize the current geometry.""" 34 | nh = log10(self.NH) + 22 35 | 36 | import matplotlib.lines as mlines 37 | import matplotlib.patches as mpatches 38 | plt.figure(figsize=(5,5)) 39 | font = 'sans-serif' 40 | ax = plt.axes([0,0,1,1]) 41 | 42 | thickness = max(0, min(1, (nh - 20.) 
/ 5)) 43 | plt.text(0.35, 0.5, "nH=%2.1f" % nh, ha="right", va='center', 44 | family=font, size=14) 45 | ax.add_line(mlines.Line2D([0,0.9], [0.5,0.5], lw=1.,alpha=0.4, ls='dashed', color='grey')) 46 | ax.add_line(mlines.Line2D([0.4,0.4], [0.5,0.9], lw=1.,alpha=0.4, ls='dashed', color='grey')) 47 | ax.add_patch(mpatches.Arc((0.4,0.5), 0.5, 0.5, theta2=90, theta1=90, 48 | color='black', linewidth=1, alpha=1, fill=False, ls='dashed')) 49 | ax.add_patch(mpatches.Wedge((0.4,0.5), 0.3, -90, 90, color='black', 50 | linewidth=0, alpha=thickness, fill=True)) 51 | ax.add_patch(mpatches.Wedge((0.4,0.5), 0.3, 90, -90, color='black', 52 | linewidth=0, alpha=thickness, fill=True)) 53 | 54 | ax.add_patch(mpatches.Circle((0.4,0.5), 0.02, color='red', 55 | linewidth=0, alpha=1, fill=True)) 56 | 57 | ax.set_xticks([]) 58 | ax.set_yticks([]) 59 | -------------------------------------------------------------------------------- /xars/xsects/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Cross sections 3 | --------------- 4 | 5 | Loading and computation of neutral, solar-abundance cross-sections. 6 | """ 7 | 8 | import os 9 | 10 | import numpy 11 | import scipy 12 | from numpy import arccos as acos 13 | from numpy import (cos, exp, log, log10, logical_and, logical_or, pi, round, 14 | sin, tan) 15 | 16 | from xars import binning 17 | 18 | electmass = 511. # electron rest mass in keV/c^2 19 | 20 | xscatt = numpy.zeros(binning.nbins) 21 | 22 | energy_lo, energy_hi = binning.bin2energy(numpy.arange(binning.nbins)) 23 | energy = (energy_hi + energy_lo) / 2. 24 | deltae = energy_hi - energy_lo 25 | 26 | xthom = 6.7e-4 # Thomson cross section 27 | x = energy / electmass # energy in units of electron rest mass 28 | 29 | # compute scattering cross section 30 | # Thomson regime 31 | xscatt_thomson = xthom * (1. - (2. * x) + (26. * (x**2.) / 5.)) 32 | t1 = 1. + 2. * x 33 | t2 = 1. + x 34 | term1 = t2 / (x**3.) 35 | term2 = (2. * x * t2) / t1 36 | term3 = (1. / (2. * x)) * log(t1) 37 | term4 = (1. + 3. * x) / (t1**2) 38 | xscatt_compton = 0.75 * xthom * (term1 * (term2 - log(t1)) + term3 - term4) 39 | xscatt = numpy.where(x < 0.05, xscatt_thomson, xscatt_compton) 40 | 41 | # convert to units of 1e-22 cm^2 (from 1e-21) 42 | xscatt *= 10 43 | xscatt_thomson *= 10 44 | xscatt_compton *= 10 45 | 46 | # When applied the cross section is 120% larger 47 | xscatt_thomson *= 1.2 48 | xscatt_compton *= 1.2 49 | xscatt *= 1.2 50 | 51 | # photoelectric and line cross-sections 52 | # find xsects.dat file next to this file 53 | xsectsdata = numpy.loadtxt(os.path.join(os.path.dirname(__file__), 'xsects.dat')) 54 | xlines_energies = xsectsdata[0,2:] 55 | xlines_yields = xsectsdata[1,2:] 56 | xsects = xsectsdata[2:,:] 57 | # convert to units of 1e-22 cm^2 (from 1e-21) 58 | xsects[:,1:] *= 10 59 | e1 = xsects[:,0] 60 | xphot = xsects[:,1] 61 | e70 = e1 > 70. 
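# Above ~70 keV the tabulated photo-electric cross-section misbehaves (see xsects/generate/README.rst);
# if such bins exist, the block below replaces it with the shape of the strongest line cross-section,
# rescaled to match xphot at the first bin above 70 keV.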
62 | if e70.any(): 63 | lines_max = numpy.max(xsects[:,2:], axis=1) 64 | xphot[e70] = lines_max[e70] * xphot[e70][0] / lines_max[e70][0] 65 | assert (xphot >= 0).all() 66 | assert (xscatt >= 0).all() 67 | 68 | xlines = xsects[:,2:] * xlines_yields 69 | xlines_relative = xlines / xphot[:,None] 70 | # compute probability to come out as fluorescent line 71 | xlines_cumulative = numpy.cumsum(xlines_relative, axis=1) 72 | assert (xlines >= 0).all() 73 | assert (xlines_relative >= 0).all() 74 | assert (xlines_cumulative >= 0).all() 75 | 76 | xboth = xphot + xscatt 77 | absorption_ratio = xphot / xboth 78 | 79 | 80 | def test(): 81 | assert e1.shape == energy.shape, (e1.shape, energy.shape) 82 | for i in range(len(energy)): 83 | assert (numpy.isclose(e1[i], energy[i])), (e1[i], energy[i], energy_lo[i], energy_hi[i]) 84 | -------------------------------------------------------------------------------- /xars/xsects/__main__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Cross sections 3 | --------------- 4 | 5 | Loading and computation of neutral, solar-abundance cross-sections. 6 | """ 7 | 8 | import matplotlib.pyplot as plt 9 | import numpy 10 | from numpy import exp, log 11 | 12 | from . import (absorption_ratio, e1, energy, xboth, xlines, xlines_energies, 13 | xlines_relative, xphot, xscatt) 14 | 15 | # xscatt[:] = 1e-6 16 | xkfe = xlines.sum(axis=1) 17 | 18 | # energy = energy_lo 19 | plt.figure() 20 | plt.plot(energy, label='energy') 21 | plt.plot(e1, label='xphot.dat') 22 | plt.gca().set_yscale('log') 23 | plt.legend(loc='best', ncol=2, prop=dict(size=6)) 24 | plt.savefig('binning.pdf') 25 | plt.close() 26 | 27 | plt.figure(figsize=(7,18)) 28 | plt.subplot(3, 1, 1) 29 | plt.plot(energy, xphot, label='absorption') 30 | for i in range(xlines.shape[1]): 31 | plt.plot(energy, xlines[:,i], label='Fluorescent Line %.2f keV' % (xlines_energies[i])) 32 | plt.plot(energy, numpy.where(xkfe < 1e-5, 1e-5, xkfe), '--', label=r'sum') 33 | plt.ylim(xscatt.min() / 10000, None) 34 | # print 'absorption cross-section:', xphot 35 | plt.plot(energy, xscatt, label='scattering') 36 | # print 'scattering cross-section:', xscatt 37 | plt.plot(energy, xboth, label='both') 38 | plt.gca().set_yscale('log') 39 | plt.gca().set_xscale('log') 40 | plt.legend(loc='best', ncol=2, prop=dict(size=6)) 41 | plt.ylabel('cross section [${10}^{-22}$cm$^2$]') 42 | plt.xlabel('energy [keV]') 43 | plt.subplot(3, 1, 2) 44 | plt.plot(energy, exp(-0.01 * xboth), label='$N_H=10^{20}/cm^2$') 45 | plt.plot(energy, exp(-0.10 * xboth), label='$N_H=10^{21}/cm^2$') 46 | plt.plot(energy, exp(-1.00 * xboth), label='$N_H=10^{22}/cm^2$') 47 | plt.plot(energy, exp(-10.0 * xboth), label='$N_H=10^{23}/cm^2$') 48 | plt.plot(energy, exp(-100. 
* xboth), label='$N_H=10^{24}/cm^2$') 49 | # def comparison(nH, **kwargs): 50 | # data = numpy.loadtxt('/tmp/%d.qdp' % nH) 51 | # plt.plot(data[:,0], data[:,2], '--', **kwargs) 52 | # comparison(20, color='b') 53 | # comparison(21, color='g') 54 | # comparison(22, color='r') 55 | # comparison(23, color='c') 56 | # comparison(24, color='m') 57 | plt.plot(energy, absorption_ratio, label='vs scattering') 58 | plt.plot(energy, xlines_relative[:,0], label='vs scattering') 59 | plt.legend(loc='best', ncol=1, prop=dict(size=6)) 60 | plt.ylabel('absorption') 61 | plt.xlabel('energy [keV]') 62 | plt.gca().set_xscale('log') 63 | plt.subplot(3, 1, 3) 64 | # e^(-q*N) = prob 65 | # N = -log(prob) / q 66 | plt.plot(energy, -log(0.5) / xboth, label='mean free path') 67 | plt.legend(loc='best', ncol=2, prop=dict(size=6)) 68 | plt.ylabel('distance / $N_H$') 69 | plt.xlabel('energy [keV]') 70 | plt.gca().set_xscale('log') 71 | plt.gca().set_yscale('log') 72 | plt.savefig('xsects.pdf', bbox_inches='tight') 73 | plt.close() 74 | -------------------------------------------------------------------------------- /xars/binning/__init__.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import scipy 3 | from numpy import exp, log, log10 4 | 5 | # pick a binning module 6 | # from bn import nbins, bin2energy_lo, energy2bin 7 | # from uniform import nbins, bin2energy_lo, energy2bin 8 | from .bending import bin2energy_lo, energy2bin, nbins 9 | 10 | 11 | def bin2energy_hi(i): 12 | return bin2energy_lo(i + 1) 13 | 14 | 15 | def bin2energy(i): 16 | j = numpy.asarray(i) 17 | return bin2energy_lo(j), bin2energy_hi(j) 18 | 19 | 20 | def test_bin2energy(): 21 | allbins = numpy.arange(nbins) 22 | elo, ehi = bin2energy(allbins) 23 | edgediff = ehi[:-1] - elo[1:] 24 | assert numpy.allclose(0, edgediff, atol=1e-5, rtol=1), (allbins[edgediff > 0], edgediff[edgediff > 0]) 25 | 26 | 27 | def test_energy2bin(): 28 | E = numpy.random.uniform(0.5, 100, size=100000) 29 | binid = energy2bin(E) 30 | allbins = numpy.arange(nbins) 31 | elo, ehi = bin2energy(allbins) 32 | 33 | for i in allbins: 34 | assert (E[binid == i] >= elo[i]).all(), (i, E[binid == i].min(), E[binid == i].max(), elo[i], ehi[i]) 35 | assert (E[binid == i] < ehi[i]).all(), (i, E[binid == i].min(), E[binid == i].max(), elo[i], ehi[i]) 36 | 37 | 38 | def test_reversible_bins(): 39 | E = numpy.random.uniform(9.5, 12, size=100000) 40 | binid = energy2bin(E) 41 | elo, ehi = bin2energy(binid) 42 | assert (elo <= E).all(), (elo[~(elo <= E)], E[~(elo <= E)]) 43 | assert (ehi > E).all(), (ehi[~(ehi > E)], E[~(ehi > E)]) 44 | erand = numpy.random.uniform(elo, ehi) 45 | bin2 = energy2bin(erand) 46 | assert (bin2 == binid).all() 47 | 48 | 49 | def test_reversible(): 50 | allbins = numpy.arange(nbins) 51 | elo, ehi = bin2energy(allbins) 52 | emid = (elo + ehi) / 2. 
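# the bin mid-points, and the bin edges nudged slightly inward, must all map back to the bin they belong to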
53 | imid = energy2bin(emid) 54 | assert (imid == allbins).all(), (allbins[(imid != allbins)], imid[(imid != allbins)]) 55 | ilo = energy2bin(elo + 0.0001) 56 | assert (ilo == allbins).all(), (allbins[(ilo != allbins)], ilo[(ilo != allbins)]) 57 | ihi = energy2bin(ehi - 0.0001) 58 | assert (ihi == allbins).all(), (allbins[(ihi != allbins)], ihi[(ihi != allbins)]) 59 | 60 | 61 | if __name__ == '__main__': 62 | import matplotlib.pyplot as plt 63 | energy = numpy.logspace(log10(0.1), log10(1100), 10000) 64 | results = energy2bin(energy) 65 | bins = numpy.arange(nbins) 66 | numpy.savetxt('binning_current.txt', numpy.transpose([bin2energy_lo(bins), bin2energy_hi(bins)])) 67 | plt.figure() 68 | plt.plot(bin2energy(bins)[0], bins, '+-') 69 | plt.gca().set_xscale('log') 70 | plt.xlabel('Energy [keV]') 71 | plt.ylabel('Bin number') 72 | plt.savefig('binning.pdf') 73 | 74 | for b in bins: 75 | print(b, bin2energy(b)[0], energy2bin(bin2energy(b)[0])) 76 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ==================================== 2 | XARS X-ray Monte-carlo simulator 3 | ==================================== 4 | 5 | .. image:: doc/logo3-large.png 6 | 7 | XARS simulates X-rays propagating through matter in user-defined geometries. 8 | 9 | Features 10 | --------- 11 | 12 | XARS is ~400 lines of pure Python code. 13 | 14 | * Physical processes: 15 | 16 | * Photo-electric absorption 17 | * Compton scattering 18 | * Fluorescent line emission (self-consistent with absorption above) 19 | 20 | * Geometries: 21 | 22 | * Arbitrary user-defined geometries (included examples: toroid, sphere, disk) 23 | * Arbitrary geometries made from many spherical blobs/clumps (when combined with LightRayRider) 24 | * Arbitrary grid geometries from hydrodynamic simulations (when combined with LightRayRider) 25 | 26 | New contributions are welcome. 27 | 28 | How to cite XARS correctly 29 | --------------------------- 30 | 31 | Please reference `Buchner et al (2019) `_. 32 | 33 | Models 34 | ------------------ 35 | 36 | Go to the `Models `_ page. 37 | 38 | In Buchner et al. (in prep) we irradiated the following geometries, 39 | and you can download xspec table models here. 40 | 41 | * Sphere, Sphere with Bi-conical cut-out 42 | * Clumpy model made from 100,000 spheres: Unified X-ray Clumpy model UXCLUMPY, CAT3D-WIND 43 | * Radiative fountain, from a hydrodynamic grid simulation 44 | * Warped Disk 45 | * ... and many others 46 | 47 | Downloads, movies and more details on the `Models `_ page. 48 | 49 | Installation 50 | ------------------ 51 | 52 | .. image:: https://img.shields.io/badge/docs-published-ok.svg 53 | :target: https://johannesbuchner.github.io/xars/ 54 | :alt: Documentation Status 55 | .. image:: https://github.com/JohannesBuchner/xars/actions/workflows/tests.yml/badge.svg 56 | :target: https://github.com/JohannesBuchner/xars/actions 57 | .. image:: https://coveralls.io/repos/github/JohannesBuchner/xars/badge.svg?branch=master 58 | :target: https://coveralls.io/github/JohannesBuchner/xars?branch=master 59 | 60 | 61 | 62 | XARS requires the following python modules: 63 | 64 | numpy scipy matplotlib h5py astropy progressbar2 65 | 66 | You can install them with pip or conda as usual. 67 | 68 | Once these are installed, you just run the scripts from this directory. 
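For example, a minimal run (the parameter values here are only illustrative; the available flags are defined in ``examples/torus2.py`` and exercised by ``examples/runtorus.sh``) can be launched from the ``examples/`` directory with::

    $ python3 torus2.py --nh=10.0 --opening-angle=45.6 --nevents=100000 --output=mytorus_

This writes HDF5 spectra with the given prefix, which the ``xspecexport`` scripts can then assemble into XSPEC table models.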
69 | 70 | Usage 71 | --------------------------------------------------- 72 | 73 | See the `Code Tutorial `_ to understand the code. 74 | 75 | Questions and Problems 76 | -------------------------------------------- 77 | 78 | If you have any questions or issues or questions, please check the `FAQ `_ or open a `Github issue `_. This helps other people google the same question. 79 | 80 | License 81 | ------------------- 82 | 83 | Free and Open Source software under AGPLv3. 84 | 85 | Contact me if you need a different license. 86 | 87 | 88 | 89 | -------------------------------------------------------------------------------- /xars/binning/bn.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | 3 | """ 4 | nbins = 1000 5 | 6 | def energy2bin(energy): 7 | energy = numpy.asarray(energy).reshape((-1,)) 8 | n = 70. * numpy.log10((energy + 23.3)/23.4) 9 | dn = 0.01 * numpy.ones_like(n) 10 | ibin = numpy.array(n/dn, dtype=int) 11 | m1 = n >= 8.34 12 | dn[m1] = 0.022 13 | ibin[m1] = (n[m1]- 8.340)/dn[m1]+834. 14 | m2 = n >= 9.308 15 | dn[m2] = 0.05 16 | ibin[m2] = (n[m2]- 9.308)/dn[m2]+878. 17 | m3 = n >= 10.258 18 | dn[m3] = 0.1 19 | ibin[m3] = (n[m3]-10.258)/dn[m3]+897. 20 | m4 = n >= 11.158 21 | dn[m4] = 0.22 22 | ibin[m4] = (n[m4]-11.158)/dn[m4]+906. 23 | m5 = n >= 12.918 24 | dn[m5] = 0.5 25 | ibin[m5] = (n[m5]-12.918)/dn[m5]+914. 26 | m6 = n >= 15.418 27 | dn[m6] = 1.0 28 | ibin[m6] = (n[m6]-15.418)/dn[m6]+919. 29 | 30 | return numpy.array(ibin, dtype=int) 31 | 32 | def bin2energy_lo(i): 33 | i = numpy.asarray(i).reshape((-1,)) 34 | dn = 0.01 * numpy.ones_like(i) 35 | n = i * dn 36 | m1 = n >= 8.34 37 | dn[m1] = 0.022 38 | n[m1] = 8.34 + (i[m1] - 834.0) * dn[m1] 39 | m2 = n >= 9.308 40 | dn[m2] = 0.05 41 | n[m2] = 9.308 + (i[m2] - 878.0)*dn[m2] 42 | m3 = n >= 10.258 43 | dn[m3] = 0.1 44 | n[m3] = 10.258 + (i[m3] - 897.0)*dn[m3] 45 | m4 = n >= 11.158 46 | dn[m4] = 0.22 47 | n[m4] = 11.158 + (i[m4] - 906.0)*dn[m4] 48 | m5 = n >= 12.918 49 | dn[m5] = 0.5 50 | n[m5] = 12.918 + (i[m5] - 914.0)*dn[m5] 51 | m6 = n >= 15.418 52 | dn[m6] = 1.0 53 | n[m6] = 15.418 + (i[m6] - 919.0)*dn[m6] 54 | 55 | return 23.4 * 10**(n/70.0)-23.3 56 | 57 | """ 58 | 59 | 60 | nbins = 1250 # number of energy bins 61 | 62 | 63 | def energy2bin(energy): 64 | energy = numpy.asarray(energy).reshape((-1,)) 65 | n = 70. * numpy.log10((energy + 23.3) / 23.4) 66 | dn = 0.01 * numpy.ones_like(n) 67 | ibin = numpy.array(n / dn, dtype=int) 68 | m1 = n > 8.34 69 | dn[m1] = 0.022 70 | ibin[m1] = (n[m1] - 8.34) / dn[m1] + 834. 71 | m2 = n > 9.308 72 | dn[m2] = 0.05 73 | ibin[m2] = (n[m2] - 9.308) / dn[m2] + 878. 74 | m3 = n > 10.258 75 | dn[m3] = 0.1 76 | ibin[m3] = (n[m3] - 10.258) / dn[m3] + 897. 77 | m4 = n > 11.158 78 | dn[m4] = 0.35 79 | ibin[m4] = (n[m4] - 11.158) / dn[m4] + 906. 
80 | 81 | return numpy.array(ibin, dtype=int) 82 | 83 | 84 | def bin2energy_lo(i): 85 | i = numpy.asarray(i).reshape((-1,)) 86 | dn = 0.01 * numpy.ones_like(i) 87 | n = i * dn 88 | m1 = n > 8.34 89 | dn[m1] = 0.022 90 | n[m1] = 8.34 + (i[m1] - 834.0) * dn[m1] 91 | m2 = n > 9.308 92 | dn[m2] = 0.05 93 | n[m2] = 9.308 + (i[m2] - 878.0) * dn[m2] 94 | m3 = n > 10.258 95 | dn[m3] = 0.1 96 | n[m3] = 10.258 + (i[m3] - 897.0) * dn[m3] 97 | m4 = n > 11.158 98 | dn[m4] = 0.35 99 | n[m4] = 11.158 + (i[m4] - 906.0) * dn[m4] 100 | 101 | return 23.4 * 10**(n / 70.0) - 23.3 102 | -------------------------------------------------------------------------------- /xars/geometries/clumpyspheretorus.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import log10 3 | 4 | from xars.coordtrans import to_cartesian, to_spherical 5 | 6 | 7 | class ClumpySphereTorusGeometry: 8 | def __init__(self, NH, nclumps, verbose=False): 9 | self.NH = NH 10 | self.nclumps = nclumps 11 | self.clumpNH = self.NH * 1. / nclumps 12 | self.verbose = verbose 13 | 14 | def compute_next_point(self, location, direction): 15 | # lam = int(dist / self.clumpNH) 16 | # generate random number of clumps to encounter 17 | (xi, yi, zi) = location 18 | (dist, beta, alpha) = direction 19 | k = numpy.random.poisson(self.nclumps) 20 | NHfull = k * self.clumpNH 21 | # d = dist / self.NH 22 | if dist > NHfull: 23 | # we land outside 24 | d = 10 25 | elif dist < NHfull: 26 | # we go as far as we get 27 | _ = int(dist / self.clumpNH) 28 | NHremainder = numpy.fmod(dist, self.clumpNH) 29 | d = NHfull / self.NH 30 | 31 | # how deep do we go into the k-th cloud 32 | d = (NHfull + NHremainder) / self.NH 33 | 34 | if self.verbose: 35 | print(' .. .. mean in nH units: ', d.mean()) 36 | # compute relative vector traveled 37 | xv, yv, zv = to_cartesian((d, beta, alpha)) 38 | 39 | # compute new position 40 | xf, yf, zf = xi + xv, yi + yv, zi + zv 41 | 42 | # compute spherical coordinates 43 | rad, phi, theta = to_spherical((xf, yf, zf)) 44 | 45 | # are we inside the cone? 46 | inside = rad < 1. 47 | return inside, (xf,yf,zf), (rad, phi, theta) 48 | 49 | def viz(self): 50 | """ Visualize the current geometry """ 51 | nh = log10(self.NH) + 22 52 | 53 | import matplotlib.lines as mlines 54 | import matplotlib.patches as mpatches 55 | import matplotlib.pyplot as plt 56 | plt.figure(figsize=(5,5)) 57 | font = 'sans-serif' 58 | ax = plt.axes([0,0,1,1]) 59 | 60 | thickness = max(0, min(1, (nh - 20.) 
/ 5)) 61 | plt.text(0.35, 0.5, "nH=%2.1f" % nh, ha="right", va='center', 62 | family=font, size=14) 63 | ax.add_line(mlines.Line2D([0,0.9], [0.5,0.5], lw=1.,alpha=0.4, ls='dashed', color='grey')) 64 | ax.add_line(mlines.Line2D([0.4,0.4], [0.5,0.9], lw=1.,alpha=0.4, ls='dashed', color='grey')) 65 | ax.add_patch(mpatches.Arc((0.4,0.5), 0.5, 0.5, theta2=90, theta1=90, 66 | color='black', linewidth=1, alpha=1, fill=False, ls='dashed')) 67 | ax.add_patch(mpatches.Wedge((0.4,0.5), 0.3, -90, 90, color='black', 68 | linewidth=0, alpha=thickness, fill=True)) 69 | ax.add_patch(mpatches.Wedge((0.4,0.5), 0.3, 90, -90, color='black', 70 | linewidth=0, alpha=thickness, fill=True)) 71 | 72 | ax.add_patch(mpatches.Circle((0.4,0.5), 0.02, color='red', 73 | linewidth=0, alpha=1, fill=True)) 74 | 75 | ax.set_xticks([]) 76 | ax.set_yticks([]) 77 | -------------------------------------------------------------------------------- /scripts/vizfek2.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script demonstrates tracking photons through the simulation, and 3 | picking up the outcoming FeK emission. 4 | """ 5 | import numpy 6 | 7 | from xars.binning import bin2energy, energy2bin, nbins 8 | from xars.coordtrans import to_cartesian 9 | from xars.photons import PhotonBunch 10 | 11 | 12 | def run( 13 | nphot, geometry, 14 | verbose=False, emin=0.5, PhoIndex=2 15 | ): 16 | 17 | energy_lo, energy_hi = bin2energy(range(nbins)) 18 | energy = (energy_hi + energy_lo) / 2. 19 | # deltae = energy_hi - energy_lo 20 | 21 | photons = PhotonBunch(i=100, nphot=nphot, verbose=verbose, geometry=geometry) 22 | photons.energy = emin / numpy.random.power(PhoIndex, size=nphot) 23 | photons.energy[photons.energy > energy.max()] = energy.max() 24 | photons.binid = energy2bin(photons.energy) 25 | 26 | for n_interactions in range(1000): 27 | prev_location = photons.rad.copy(), photons.theta.copy(), photons.phi.copy() 28 | emission, more = photons.pump() 29 | if emission is None and not more: 30 | break 31 | if emission is None: 32 | continue 33 | if len(emission['energy']) == 0: 34 | if not more: 35 | break 36 | continue 37 | if verbose: 38 | print(' received %d emitted photons (after %d interactions)' % (len(emission['energy']), n_interactions)) 39 | beta = emission['beta'] 40 | alpha = emission['alpha'] 41 | # bins = emission['binid'] 42 | energy = emission['energy'] 43 | mask_escaped = emission['mask_escaped'] 44 | 45 | # get previous location 46 | prev_rad, prev_theta, prev_phi = prev_location 47 | rad, theta, phi = prev_rad[mask_escaped], prev_theta[mask_escaped], prev_phi[mask_escaped] 48 | assert rad.shape == energy.shape 49 | if n_interactions == 0: 50 | continue # we do not consider direct emission 51 | 52 | # filter to FeKa band 53 | mask = numpy.logical_and(emission['energy'] > 6.1, emission['energy'] < 6.5) 54 | 55 | if mask.any(): 56 | # convert last scattering position to xyz 57 | x, y, z = to_cartesian((rad[mask], theta[mask], phi[mask])) 58 | 59 | yield x, y, z, alpha[mask], beta[mask], energy[mask] 60 | 61 | if not more: 62 | break 63 | 64 | 65 | if __name__ == '__main__': 66 | nh = 1e24 / 1e22 67 | 68 | from xars.geometries.conetorus import ConeTorusGeometry 69 | geometry = ConeTorusGeometry(Theta_tor=80 / 180. 
* numpy.pi, NH=nh, verbose=False) 70 | 71 | from astropy.table import Table 72 | 73 | events = None 74 | # numpy.random.seed(101) 75 | 76 | for _ in range(10): 77 | for events_info in run(1000000, geometry, emin=6, PhoIndex=2): 78 | new_events = numpy.transpose(events_info) 79 | if events is None: 80 | events = new_events 81 | else: 82 | events = numpy.concatenate((events, new_events)) 83 | 84 | print("events collected:", events.shape) 85 | # numpy.savetxt('vizfek.txt.gz', events) 86 | Table(events, names=['x', 'y', 'z', 'alpha', 'beta', 'energy']).write('vizfek.fits', format='fits', overwrite=True) 87 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: clean clean-test clean-pyc clean-build docs help 2 | .DEFAULT_GOAL := help 3 | 4 | define BROWSER_PYSCRIPT 5 | import os, webbrowser, sys 6 | 7 | try: 8 | from urllib import pathname2url 9 | except: 10 | from urllib.request import pathname2url 11 | 12 | webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1]))) 13 | endef 14 | export BROWSER_PYSCRIPT 15 | 16 | define PRINT_HELP_PYSCRIPT 17 | import re, sys 18 | 19 | for line in sys.stdin: 20 | match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line) 21 | if match: 22 | target, help = match.groups() 23 | print("%-20s %s" % (target, help)) 24 | endef 25 | export PRINT_HELP_PYSCRIPT 26 | 27 | PYTHON := python3 28 | 29 | BROWSER := $(PYTHON) -c "$$BROWSER_PYSCRIPT" 30 | 31 | help: 32 | @$(PYTHON) -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) 33 | 34 | clean: clean-build clean-pyc clean-test clean-doc ## remove all build, test, coverage and Python artifacts 35 | 36 | clean-build: ## remove build artifacts 37 | rm -fr build/ 38 | rm -fr dist/ 39 | rm -fr .eggs/ 40 | find . -name '*.egg-info' -exec rm -fr {} + 41 | find . -name '*.egg' -exec rm -f {} + 42 | 43 | clean-pyc: ## remove Python file artifacts 44 | find . -name '*.pyc' -exec rm -f {} + 45 | find . -name '*.pyo' -exec rm -f {} + 46 | find . -name '*~' -exec rm -f {} + 47 | find . -name '__pycache__' -exec rm -fr {} + 48 | find . -name '*.so' -exec rm -f {} + 49 | find . -name '*.c' -exec rm -f {} + 50 | 51 | clean-test: ## remove test and coverage artifacts 52 | rm -fr .tox/ 53 | rm -f .coverage 54 | rm -fr htmlcov/ 55 | rm -fr .pytest_cache 56 | 57 | clean-doc: 58 | rm -rf docs/build 59 | 60 | lint: ## check style with flake8 61 | flake8 . 62 | 63 | lint-fix: ## fix style with autopep8 and isort; ignores to not autofix tabs to spaces, but still warn when mixed 64 | autopep8 . --in-place --aggressive --aggressive --aggressive --recursive --ignore=W191,E101,E111,E122 65 | isort . 
66 | 67 | test: ## run tests quickly with the default Python 68 | pytest 69 | 70 | test-all: ## run tests on every Python version with tox 71 | tox 72 | 73 | coverage: ## check code coverage quickly with the default Python 74 | coverage run --source xars -m pytest 75 | coverage report -m 76 | coverage html 77 | $(BROWSER) htmlcov/index.html 78 | 79 | docs: ## generate Sphinx HTML documentation, including API docs 80 | rm -f docs/xars.rst 81 | rm -f docs/modules.rst 82 | #nbstripout docs/*.ipynb 83 | sphinx-apidoc -H API -o docs/ xars 84 | $(MAKE) -C docs clean 85 | $(MAKE) -C docs html 86 | sed --in-place '/href="lightrayrider\/raytrace.html"/d' docs/build/html/_modules/index.html 87 | sed --in-place '/href="lightrayrider\/parallel.html"/d' docs/build/html/_modules/index.html 88 | $(BROWSER) docs/build/html/index.html 89 | 90 | servedocs: docs ## compile the docs watching for changes 91 | watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D . 92 | 93 | release: dist ## package and upload a release 94 | twine upload --verbose dist/*.tar.gz 95 | 96 | dist: clean ## builds source and wheel package 97 | $(PYTHON) setup.py sdist 98 | $(PYTHON) setup.py bdist_wheel 99 | ls -l dist 100 | 101 | install: clean ## install the package to the active Python's site-packages 102 | $(PYTHON) setup.py install 103 | -------------------------------------------------------------------------------- /xars/geometries/clumpytorus.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import numpy 3 | from lightrayrider import sphere_raytrace, sphere_raytrace_finite 4 | from numpy import log10 5 | 6 | from xars.coordtrans import to_cartesian, to_spherical 7 | 8 | 9 | def sigma_convert(sigma): 10 | return round((sigma * 1.4) / 5) * 5 11 | 12 | 13 | class ClumpyTorusGeometry: 14 | def __init__(self, filename, verbose=False): 15 | f = h5py.File(filename, 'r') 16 | self.sigma = f.get('sigma') 17 | # load spheres 18 | self.x, self.y, self.z = f['x'][()], f['y'][()], f['z'][()] 19 | self.r = f['radius'][()] 20 | self.NH = 10**(f['NH'][()] - 22) 21 | # density = NH of crossing / distance of crossing 22 | self.rho = self.NH / (2 * self.r) 23 | self.verbose = verbose 24 | 25 | def compute_next_point(self, location, direction): 26 | (xi, yi, zi) = location 27 | (dist, beta, alpha) = direction 28 | a, b, c = to_cartesian((1, beta, alpha)) 29 | t = sphere_raytrace_finite(self.x, self.y, self.z, self.r, self.rho, xi, yi, zi, a, b, c, dist) 30 | 31 | # are we still inside? 
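        # sphere_raytrace_finite returns the geometric path length after which
        # the requested column density `dist` has been accumulated inside the
        # clumps; a negative value means that column was never reached, i.e.
        # the photon escaped the clump distribution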
32 | inside = t >= 0 33 | 34 | # compute new position 35 | xf, yf, zf = xi + a * t, yi + b * t, zi + c * t 36 | 37 | # compute spherical coordinates 38 | rad, phi, theta = to_spherical((xf, yf, zf)) 39 | 40 | return inside, (xf,yf,zf), (rad, phi, theta) 41 | 42 | def compute_los_nh(self, beta, alpha): 43 | a, b, c = to_cartesian((1, beta, alpha)) 44 | mindistances = numpy.zeros(1) 45 | NH = sphere_raytrace(self.x, self.y, self.z, self.r, self.rho, a, b, c, mindistances)[0,:] 46 | return NH 47 | 48 | def viz(self): 49 | """Visualize the current geometry.""" 50 | 51 | import matplotlib.pyplot as plt 52 | from matplotlib.collections import PatchCollection 53 | 54 | x, y, z = self.x, self.y, self.z 55 | NH = log10(self.NH) + 22 56 | r = self.r 57 | intersecting = numpy.abs(y) < r 58 | 59 | plt.figure(figsize=(5,5), frameon=False) 60 | plt.axis('off') 61 | if self.sigma is not None: 62 | plt.title(r'$\sigma=%d^\circ$' % (sigma_convert(self.sigma[()]))) 63 | patches = [] 64 | colors = [] 65 | for xi, yi, zi, ri, NHi in zip(x[intersecting], y[intersecting], z[intersecting], r[intersecting], NH[intersecting]): 66 | ri2 = ri - numpy.abs(yi) 67 | circle = plt.Circle((xi, zi), ri2) 68 | patches.append(circle) 69 | colors.append((min(26, max(20, NHi)))) 70 | 71 | collection = PatchCollection(patches, cmap=plt.cm.gray_r, edgecolors="none") 72 | collection.set_array(numpy.array(colors)) 73 | collection.set_clim(20, 26) 74 | coll = plt.gca().add_collection(collection) 75 | 76 | plt.plot(0, 0, 'x ', color='r', ms=4, mew=2) 77 | plt.ylim(-1, 1) 78 | plt.xlim(-1, 1) 79 | 80 | if self.sigma is None or self.sigma[()] < 30: 81 | # add colorbar 82 | ax = plt.axes([0.1, 0.1, 0.8, 0.02], frameon=False) 83 | cbar = plt.colorbar(coll, cax=ax, ticks=[20, 21, 22, 23, 24, 25, 26], 84 | cmap=plt.cm.gray_r, orientation='horizontal') 85 | cbar.solids.set_edgecolor("face") 86 | cbar.outline.set_linewidth(0) 87 | cbar.set_label('Cloud column density') 88 | -------------------------------------------------------------------------------- /doc/cat3d.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | CAT3D-WIND clumpy model 3 | =========================== 4 | 5 | .. image:: CAT3D-WIND.gif 6 | :align: right 7 | 8 | Contact: Johannes Buchner 9 | 10 | Geometry information: http://www.sungrazer.org/cat3d.html 11 | 12 | If you have any issues or questions, please check the `FAQ `_ or open a `Github issue `_! 13 | 14 | To go to the corresponding infrared model go back to `Models `_. 15 | 16 | (This geometry is the one of `Leftley et al. 2018`_, matching e.g., ESO323-G77) 17 | 18 | Components 19 | -------------- 20 | 21 | Download: https://doi.org/10.5281/zenodo.2211262 22 | 23 | ``atable{CAT3D-WIND.fits}``: 24 | 25 | Clumpy model transmitted and reflected component with fluorescent lines 26 | 27 | Incident radiation parameters: 28 | 29 | - PhoIndex: Photon Index 30 | - Ecut: Energy cutoff [keV] 31 | - norm: Photon Flux normalisation at 1keV 32 | 33 | Viewing angle parameters: 34 | 35 | - NHLOS: Total LOS column density 36 | - Theta_inc: Viewing angle, relative to the inner (flat) disk portion. 37 | 38 | Geometry parameters: 39 | 40 | - NH_cloud: Column density of each cloud. In log 41 | 42 | Model setup 43 | ------------- 44 | 45 | ``atable{CAT3D-WIND.fits} + zpow*const`` 46 | 47 | Initially, freeze Ecut=400, Theta_inc=90. 48 | 49 | Link the powerlaw parameters to the clumpy model parameters, const should be free between 1e-5 and 0.1. 
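As a rough illustration only, the setup above could be scripted with PyXspec along the following lines. The component and parameter names are assumptions (the table component takes the name stored in the FITS ``MODLNAME`` keyword, assumed here to be ``torus``, and the parameter number used in the link is only a placeholder), so check ``m.componentNames`` and ``m.show()`` on your installation first::

    import xspec

    m = xspec.Model("atable{CAT3D-WIND.fits} + zpowerlw*constant")
    print(m.componentNames)                 # e.g. ['torus', 'zpowerlw', 'constant']
    tbl = getattr(m, m.componentNames[0])   # the table model component

    # freeze Ecut=400 and Theta_inc=90 as recommended above
    tbl.Ecut.values = 400
    tbl.Ecut.frozen = True
    tbl.Theta_inc.values = 90
    tbl.Theta_inc.frozen = True

    # tie the scattered powerlaw to the table model; "1" stands for the
    # parameter number of the table PhoIndex -- verify it with m.show()
    # (link Redshift and norm in the same way)
    m.zpowerlw.PhoIndex.link = "1"
    # keep the scattering fraction free between 1e-5 and 0.1
    m.constant.factor.values = [0.01, 0.001, 1e-5, 1e-5, 0.1, 0.1]

For a global exploration of the degenerate parameter space, the same model expression can also be handed to BXA, as recommended in the Fitting section below.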
50 | 
51 | Fitting
52 | -------------
53 | 
54 | AGN obscurer models, including MYTORUS and BNTORUS, have highly degenerate parameter spaces.
55 | These are not easy to explore with simple fitting methods or MCMC.
56 | I generally recommend global search algorithms, such as Multinest (through BXA).
57 | 
58 | If you are stuck in a situation without such algorithms, here are some strategies to escape local minima.
59 | 
60 | 
61 | 1) Freeze some parameters that are less influential. For this model, freeze Ecut=400, Theta_inc=90, NH_cloud=24.
62 | 2) Limit the data. Local minima are difficult to escape because they are surrounded by steep walls. Use fewer spectra, and start with high-energy data (e.g. 20-50 keV). Freeze the powerlaw constant to 1e-10. Fit and gradually add more data (15 keV, 8 keV, 5 keV, etc.). Then allow the powerlaw constant to vary.
63 | 3) Use the error command to explore more. This is most helpful for NH and the geometry parameters.
64 | 4) Plot the model -- if the model component is not present, it is because this combination of viewing angle and NHLOS does not exist in this geometry. You need to change the geometry or viewing angle until you get back the model.
65 | 
66 | Additionally, some parameter regions are simply discontinuous.
67 | 
68 | 1) Try freezing NH to Compton-thin (e.g. 30) and refit using the above steps, then thaw and fit.
69 | 2) Try freezing NH to Compton-thick (e.g. 300) and refit using the above steps, then thaw and fit.
70 | 
71 | 
72 | Failure states
73 | ---------------
74 | 
75 | - If you get a very low photon index (<1.5), you are probably in a bad local minimum. Start from scratch. Maybe freeze PhoIndex=2 and see how far you get.
76 | 
77 | - Plot the model. If the AGN obscurer component is not present, it is because this combination of viewing angle and NH does not exist in this geometry (zero photons). You need to change the geometry or viewing angle until you get back the model.
78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /xars/geometries/hydrotorus.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import numpy 3 | from lightrayrider import grid_raytrace_finite_flat, grid_raytrace_flat 4 | from numpy import log10 5 | 6 | from xars.coordtrans import to_cartesian, to_spherical 7 | 8 | 9 | class HydroTorusGeometry: 10 | """ 11 | A uniform grid of densities is provided as a input file 12 | """ 13 | 14 | def __init__(self, filename, verbose=False): 15 | f = h5py.File(filename, 'r') 16 | rho = numpy.array(f['rho'][()], dtype=numpy.float64) 17 | pc_cm = 3.0856776e+18 18 | nH_Msun = 1.18803e+57 19 | 20 | # Compute total mass by multiplying with 21 | # convert from pc to grid cells 22 | # convert from pc to cm 23 | # density in Msun/pc^3 24 | # 32 pc side length of grid 25 | # convert density to n(H)/cm^3 26 | # so multiply by m(H)/Msun, and by (pc/cm)^3 27 | # in units of 1e22 28 | self.rho = rho * nH_Msun / pc_cm**3 * (32 * pc_cm / 256) / 1e22 29 | self.rho_flat = numpy.array(self.rho.flatten()) 30 | self.rho = self.rho_flat.reshape(self.rho.shape) 31 | assert (self.rho >= 0).all() 32 | # print self.rho.sum(axis=1).max() 33 | self.center = f['center'][()] 34 | self.verbose = verbose 35 | 36 | def compute_next_point(self, location, direction): 37 | (xi, yi, zi) = location 38 | (dist, beta, alpha) = direction 39 | a, b, c = to_cartesian((1, beta, alpha)) 40 | x = xi + self.center[0] 41 | y = yi + self.center[1] 42 | z = zi + self.center[2] 43 | t = grid_raytrace_finite_flat(self.rho_flat, self.rho.shape[0], x, y, z, a, b, c, dist) 44 | 45 | # are we still inside? 46 | inside = t >= 0 47 | 48 | # compute new position 49 | t = numpy.where(t < 0, dist, t) 50 | xf, yf, zf = xi + a * t, yi + b * t, zi + c * t 51 | 52 | # compute spherical coordinates 53 | rad, phi, theta = to_spherical((xf, yf, zf)) 54 | 55 | return inside, (xf,yf,zf), (rad, phi, theta) 56 | 57 | def compute_los_nh(self, beta, alpha): 58 | a, b, c = to_cartesian((1, beta, alpha)) 59 | x = a * 0 + self.center[0] 60 | y = b * 0 + self.center[1] 61 | z = c * 0 + self.center[2] 62 | NH = grid_raytrace_flat(self.rho_flat, self.rho.shape[0], x, y, z, a, b, c) 63 | return NH 64 | 65 | def viz(self): 66 | """Visualize the current geometry.""" 67 | import matplotlib.pyplot as plt 68 | 69 | plt.figure(figsize=(5,5), frameon=False) 70 | plt.axis('off') 71 | ax = plt.gca() 72 | ax.get_xaxis().set_visible(False) 73 | ax.get_yaxis().set_visible(False) 74 | cmap = plt.cm.gray_r 75 | cmap = 'Greens' 76 | logrho = log10(self.rho[:,127,:] + 1e-3) 77 | plt.imshow(logrho.transpose(), cmap=cmap, vmin=-3, vmax=+3) 78 | plt.plot(self.center[2], self.center[0], 'x', color='r', ms=4, mew=0.5) 79 | plt.xlim(0, len(logrho)) 80 | plt.ylim(0, len(logrho)) 81 | """ 82 | for i in numpy.linspace(1, 179, 100): 83 | zero = numpy.zeros(100) + 0. 84 | dist = 100 + zero 85 | beta = i / 180. 
* pi + zero 86 | alpha = numpy.linspace(0, pi, 100) 87 | inside1, _, _ = self.compute_next_point((zero, zero, zero), (dist, beta, alpha)) 88 | inside2, _, _ = self.compute_next_point((zero, zero, zero), (dist, pi-beta, alpha)) 89 | NH = self.compute_los_nh(beta, alpha) 90 | print '%.1f %.1f%% %.1f%% NH=%.2f' % (i, inside1.mean()*100, inside2.mean()*100, NH.mean()) 91 | """ 92 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: shell 2 | 3 | ============ 4 | Contributing 5 | ============ 6 | 7 | Contributions are welcome, and they are greatly appreciated! Every little bit 8 | helps, and credit will always be given. 9 | 10 | You can contribute in many ways: 11 | 12 | Types of Contributions 13 | ---------------------- 14 | 15 | Report Bugs 16 | ~~~~~~~~~~~ 17 | 18 | Report bugs at https://github.com/JohannesBuchner/xars/issues. 19 | 20 | If you are reporting a bug, please include: 21 | 22 | * Your operating system name and version. 23 | * Any details about your local setup that might be helpful in troubleshooting. 24 | * Detailed steps to reproduce the bug. 25 | 26 | Fix Bugs 27 | ~~~~~~~~ 28 | 29 | Look through the GitHub issues for bugs. Anything tagged with "bug" and "help 30 | wanted" is open to whoever wants to implement it. 31 | 32 | Implement Features 33 | ~~~~~~~~~~~~~~~~~~ 34 | 35 | Look through the GitHub issues for features. Anything tagged with "enhancement" 36 | and "help wanted" is open to whoever wants to implement it. 37 | 38 | Write Documentation 39 | ~~~~~~~~~~~~~~~~~~~ 40 | 41 | xars could always use more documentation, whether as part of the 42 | official xars docs, in docstrings, or even on the web in blog posts, 43 | articles, and such. 44 | 45 | Submit Feedback 46 | ~~~~~~~~~~~~~~~ 47 | 48 | The best way to send feedback is to file an issue at https://github.com/JohannesBuchner/xars/issues. 49 | 50 | If you are proposing a feature: 51 | 52 | * Explain in detail how it would work. 53 | * Keep the scope as narrow as possible, to make it easier to implement. 54 | * Remember that this is a volunteer-driven project, and that contributions 55 | are welcome :) 56 | 57 | Get Started! 58 | ------------ 59 | 60 | Ready to contribute? Here's how to set up `xars` for local development. 61 | 62 | 1. Fork the `xars` repo on GitHub. 63 | 2. Clone your fork locally:: 64 | 65 | $ git clone git@github.com:JohannesBuchner/xars.git 66 | 67 | 3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:: 68 | 69 | $ mkvirtualenv xars 70 | $ cd xars/ 71 | $ python setup.py develop 72 | 73 | 4. Create a branch for local development:: 74 | 75 | $ git checkout -b name-of-your-bugfix-or-feature 76 | 77 | Now you can make your changes locally. 78 | 79 | 5. When you're done making changes, check that your changes pass flake8 and the 80 | tests, including testing other Python versions with tox:: 81 | 82 | $ flake8 xars tests 83 | $ python setup.py test # or pytest 84 | $ tox 85 | 86 | To get flake8 and tox, just pip install them into your virtualenv. 87 | 88 | 6. Commit your changes and push your branch to GitHub:: 89 | 90 | $ git add . 91 | $ git commit -m "Your detailed description of your changes." 92 | $ git push origin name-of-your-bugfix-or-feature 93 | 94 | 7. Submit a pull request through the GitHub website. 
95 | 96 | Pull Request Guidelines 97 | ----------------------- 98 | 99 | Before you submit a pull request, check that it meets these guidelines: 100 | 101 | 1. The pull request should include tests. 102 | 2. If the pull request adds functionality, the docs should be updated. Put 103 | your new functionality into a function with a docstring, and add the 104 | feature to the list in README.rst. 105 | 3. The pull request should work for Python 3.8, 3.10, 3.12, and for PyPy. Check 106 | https://travis-ci.org/JohannesBuchner/xars/pull_requests 107 | and make sure that the tests pass for all supported Python versions. 108 | 109 | Tips 110 | ---- 111 | 112 | To run a subset of tests:: 113 | 114 | $ pytest tests.test_snowline 115 | 116 | 117 | Deploying 118 | --------- 119 | 120 | A reminder for the maintainers on how to deploy. 121 | Make sure all your changes are committed (including an entry in HISTORY.rst). 122 | Then run:: 123 | 124 | $ bump2version patch # possible: major / minor / patch 125 | $ git push 126 | $ git push --tags 127 | 128 | Travis will then deploy to PyPI if tests pass. 129 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: build 5 | 6 | on: 7 | push: 8 | pull_request: 9 | schedule: 10 | - cron: '42 4 5,20 * *' 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@main 19 | - name: Set up Python ${{ matrix.python-version }} 20 | uses: actions/setup-python@main 21 | with: 22 | python-version: 3 23 | 24 | - name: Install test dependencies 25 | run: python -m pip install matplotlib "numpy<2" setuptools scipy astropy lightrayrider flake8 pycodestyle pydocstyle pytest coverage coverage_lcov[extras] 26 | 27 | - name: Install package 28 | run: python -m pip install -e . 
29 | 30 | - name: Lint with flake8 31 | run: flake8 xars/*.py xars/*/*.py 32 | 33 | - name: Check code style 34 | run: pycodestyle xars/*.py xars/*/*.py 35 | 36 | #- name: Check doc style 37 | # run: pydocstyle lightrayrider/*.py lightrayrider/*.pyx 38 | 39 | - name: Test with pytest 40 | run: | 41 | python3 -m coverage run -p -m pytest xars/binning/__init__.py xars/xsects/__init__.py xars/geometries/layeredconetorus.py xars/geometries/conetorus.py xars/geometries/wedgetorus.py xars/coordtrans.py 42 | 43 | - name: Test visualisation scripts 44 | run: | 45 | echo "backend: Agg" > matplotlibrc 46 | python3 -m coverage run -p scripts/vizfek2.py 47 | python3 -m coverage run -p -m xars.xsects 48 | 49 | - name: Test package 50 | run: | 51 | python3 -m coverage run -p examples/torus2.py --log10nh=24.2 --opening-angle=0 --nevents=100 --output=examples/myoutput 52 | python3 -m coverage run -p examples/disk.py --nevents=3 --output=examples/output-disk --plot-interactions --plot-paths --plot-every=40 --verbose 53 | 54 | - name: Create blob test data 55 | run: | 56 | cd examples/example-blobs 57 | echo "backend: Agg" > matplotlibrc 58 | python3 -m coverage run -p generate_blobs.py 59 | 60 | - name: Test on blob 61 | run: | 62 | python3 -m coverage run -p examples/torusC.py --geometry=examples/example-blobs/torusblob23.0.hdf5 --nevents=1000 63 | OMP_NUM_THREADS=3 python3 examples/torusC.py --geometry=examples/example-blobs/torusblob23.0.hdf5 --nevents=1000 64 | 65 | - name: Create grid test data 66 | run: | 67 | cd examples/example-grid 68 | echo "backend: Agg" > matplotlibrc 69 | python3 -m coverage run -p generate_warpeddisk.py 70 | 71 | - name: Test on grid 72 | run: | 73 | python3 -m coverage run -p examples/torusG.py --geometry=examples/example-grid/warpeddisk_1.hdf5 --nevents=100 74 | OMP_NUM_THREADS=3 python3 examples/torusG.py --geometry=examples/example-grid/warpeddisk_1.hdf5 --nevents=100 75 | 76 | #- name: Run entire BNTorus geometry 77 | # run: | 78 | # cd examples 79 | # bash runtorus.sh 100 80 | 81 | - name: Convert coverage output to lcov for coveralls 82 | run: | 83 | coverage combine . 
# examples/example-*/ 84 | coverage lcov -o lcov.info 85 | # make paths relative 86 | sed -i s,$PWD/,,g lcov.info 87 | grep SF lcov.info 88 | - name: prepare coveralls partial upload 89 | uses: coverallsapp/github-action@master 90 | with: 91 | github-token: ${{ secrets.github_token }} 92 | path-to-lcov: lcov.info 93 | flag-name: run-${{ matrix.python-version }} 94 | parallel: true 95 | 96 | finish: 97 | needs: build 98 | if: ${{ always() }} 99 | runs-on: ubuntu-latest 100 | steps: 101 | - name: Coveralls Finished 102 | uses: coverallsapp/github-action@master 103 | with: 104 | github-token: ${{ secrets.github_token }} 105 | parallel-finished: true 106 | carryforward: "run-1,run-2" 107 | -------------------------------------------------------------------------------- /examples/torusBN.py: -------------------------------------------------------------------------------- 1 | """ 2 | Monte-Carlo simulator for X-ray obscurer geometries 3 | 4 | Literature: 5 | 6 | * Brightman & Nandra (2011) 7 | * Leahy & Creighton (1993) 8 | """ 9 | 10 | import numpy 11 | from numpy import pi, cos, log10, arccos as acos 12 | import matplotlib as mpl 13 | mpl.use('Agg') 14 | import matplotlib.pyplot as plt 15 | 16 | rng = numpy.random 17 | 18 | from xars.geometries.bntorus import BNTorusGeometry 19 | from xars.geometries.spheretorus import SphereTorusGeometry 20 | from xars import montecarlo 21 | 22 | import argparse 23 | import sys 24 | 25 | parser = argparse.ArgumentParser( 26 | description="""Monte-Carlo simulator for X-ray obscurer geometries""", 27 | epilog="""(C) Johannes Buchner, 2013-2016. Based on work by Murray Brightman & Kirpal Nandra (see 2011 publication)""") 28 | 29 | parser.add_argument('--log10nh', type=float, help='column density (10^X cm^-1)', default=None) 30 | parser.add_argument('--nh', type=float, help='column density (X/10^22 cm^-1)', default=None) 31 | parser.add_argument('--covering-fraction', type=float, help='cosine of opening angle', default=None) 32 | parser.add_argument('--opening-angle', type=float, help='opening angle in degrees', default=None) 33 | parser.add_argument('--nevents', type=int, default=1000000, help='number of input photons per energy bin') 34 | parser.add_argument('--plot-paths', default=False, help='plot the paths taken?', action='store_true') 35 | parser.add_argument('--plot-interactions', default=False, help='plot the points at each interaction?', action='store_true') 36 | parser.add_argument('--verbose', default=False, help='Be more talkative, show debug statistics on interactions', action='store_true') 37 | parser.add_argument('--output', type=str, default=None, help='Prefix for output files. Chosen based on log10nh and opening angle if not specifified') 38 | args = parser.parse_args() 39 | 40 | nmu = 10 # number of viewing angle bins 41 | 42 | if args.log10nh is not None: 43 | nh_in = args.log10nh 44 | nh = 10**(nh_in-22) # current NH value 45 | else: 46 | if args.nh is None: 47 | sys.stderr.write("ERROR: NH not given.\n\n") 48 | parser.print_help() 49 | sys.exit(-1) 50 | nh = args.nh 51 | nh_in = log10(nh)+22 52 | print(' NH : 10^%.1f (%.3fe22)' % (nh_in, nh)) 53 | if args.covering_fraction is not None: 54 | cone = acos(args.covering_fraction) # current torus opening angle 55 | cone_in = args.covering_fraction 56 | else: 57 | if args.opening_angle is None: 58 | sys.stderr.write("ERROR: opening angle not given.\n\n") 59 | parser.print_help() 60 | sys.exit(-1) 61 | cone = args.opening_angle / 180. 
* pi # current torus opening angle 62 | cone_in = cos(cone) 63 | print(' opening angle: %.1f degrees (covering fraction %.1f)' % (cone * 180. / pi, cone_in)) 64 | assert cone * 180. / pi >= 0, 'Opening angle should be above 0 degrees' 65 | assert cone * 180. / pi <= 90, 'Opening angle should be below 90 degrees' 66 | assert nh_in >= 19, 'NH should be above 20' 67 | assert nh_in <= 27, 'NH should be below 27' 68 | 69 | prefix = '%.1f_%.1f_' % (nh_in, cone_in) if args.output is None else args.output 70 | # total number of photons to send in 71 | 72 | if cone == 0: 73 | geometry = SphereTorusGeometry(NH = nh, verbose=args.verbose) 74 | else: 75 | geometry = BNTorusGeometry(Theta_tor = cone, NH = nh, verbose=args.verbose) 76 | geometry.viz() 77 | plt.savefig(prefix + "geometry.pdf") 78 | plt.savefig(prefix + "geometry.png") 79 | plt.close() 80 | 81 | def binmapfunction(beta, alpha): 82 | mu = ((0.5 + nmu * numpy.abs(cos(beta))) - 1).astype(int) 83 | mu[mu >= nmu] = nmu - 1 84 | return mu 85 | 86 | rdata, nphot = montecarlo.run(prefix, nphot = args.nevents, nmu = nmu, geometry=geometry, 87 | binmapfunction = binmapfunction, 88 | plot_paths=args.plot_paths, plot_interactions=args.plot_interactions, verbose=args.verbose) 89 | 90 | rdata_transmit, rdata_reflect = rdata 91 | header = dict(NH=nh, OPENING=cone) 92 | montecarlo.store(prefix + 'transmit', nphot, rdata_transmit, nmu, extra_fits_header = header, plot=False) 93 | montecarlo.store(prefix + 'reflect', nphot, rdata_reflect, nmu, extra_fits_header = header, plot=False) 94 | rdata_transmit += rdata_reflect 95 | del rdata_reflect 96 | montecarlo.store(prefix, nphot, rdata_transmit, nmu, extra_fits_header = header, plot=True) 97 | 98 | 99 | 100 | 101 | -------------------------------------------------------------------------------- /doc/wada.rst: -------------------------------------------------------------------------------- 1 | ================================= 2 | Radiative Fountain model 3 | ================================= 4 | 5 | .. image:: wadageometry.png 6 | :target: wada.rst 7 | :align: right 8 | 9 | This model was presented in `Buchner et al. (2021) `_. 10 | If you have any issues or questions, please check the `FAQ `_ or open a `Github issue `_! 11 | 12 | To go to the corresponding infrared model go back to `Models `_. 13 | 14 | Visualisation 15 | --------------- 16 | 17 | - Simulation movie: http://astrophysics.jp/Circinus/ 18 | 19 | - Reference: `Wada (2012) `_ 20 | 21 | - Infrared model: http://astrophysics.jp/Circinus/movie_theta_phi720.mov 22 | 23 | - Reference: `Wada, Schartmann, & Meijerink (2016) `_ 24 | 25 | 26 | Components 27 | -------------- 28 | 29 | Download: https://doi.org/10.5281/zenodo.2235504 30 | 31 | ``atable{wada-cutoff.fits}``: 32 | 33 | Radiative fountain transmitted and reflected component with fluorescent lines 34 | 35 | Incident radiation parameters: 36 | 37 | - PhoIndex: Photon Index 38 | - Ecut: Energy cutoff [keV] 39 | - norm: Photon Flux normalisation at 1keV 40 | 41 | Viewing angle parameters: 42 | 43 | - NHLOS: Total LOS column density 44 | - Theta_inc: Viewing angle, relative to the inner (flat) disk portion. 45 | 46 | Geometry parameters: 47 | 48 | These are variations of the model: 49 | 50 | - wadac-cutoff.fits: pmc0012 simulation, corresponds to Circinus, Wada, Schartmann, & Meijerink (2016) 51 | - wada-cutoff.fits: pma0129 simulation, Wada (2012) 52 | - wada+ring-cutoff.fits: same as wada-cutoff.fits, but with a Compton-thick ring in the innermost grid elements. 
53 | 
54 | 
55 | ``atable{wada-cutoff-omni.fits}``:
56 | 
57 | Warm mirror emission. This is the angle-averaged (omni-directional) spectrum,
58 | containing mostly the incident powerlaw from unobscured sightlines.
59 | 
60 | Given space-filling ionised gas (e.g. in the narrow-line region),
61 | Thomson scattering can mirror this emission into otherwise obscured LOS.
62 | 
63 | The parameters are the same as for the main component, and should always
64 | be linked. A fraction (with const) should be multiplied onto this component,
65 | with a maximum of 0.1.
66 | 
67 | Model setup
68 | -------------
69 | 
70 | ``atable{wada-cutoff.fits} + atable{wada-cutoff-omni.fits}*const``
71 | 
72 | Initially, freeze Ecut=400, Theta_inc=90.
73 | 
74 | Link the omni component parameters to the main model; const should be free between 1e-5 and 0.1.
75 | 
76 | 
77 | Fitting
78 | -------------
79 | 
80 | 
81 | AGN obscurer models, including MYTORUS and BNTORUS, have highly degenerate parameter spaces.
82 | These are not easy to explore with simple fitting methods or MCMC.
83 | I generally recommend global search algorithms, such as Multinest (through BXA).
84 | 
85 | If you are stuck in a situation without such algorithms, here are some strategies to escape local minima.
86 | 
87 | 
88 | 1) Freeze some parameters that are less influential. For this model, freeze Ecut=400 and Theta_inc=90.
89 | 2) Limit the data. Local minima are difficult to escape because they are surrounded by steep walls. Use fewer spectra, and start with high-energy data (e.g. 20-50 keV). Freeze the omni constant to 1e-10. Fit and gradually add more data (15 keV, 8 keV, 5 keV, etc.). Then allow the omni constant to vary.
90 | 3) Use the error command to explore more. This is most helpful for NH and the geometry parameters.
91 | 4) Plot the model -- if the model component is not present, it is because this combination of viewing angle and NHLOS does not exist in this geometry. You need to change the geometry or viewing angle until you get back the model.
92 | 
93 | Additionally, some parameter regions are simply discontinuous.
94 | 
95 | 1) Try freezing NH to Compton-thin (e.g. 30) and refit using the above steps, then thaw and fit.
96 | 2) Try freezing NH to Compton-thick (e.g. 300) and refit using the above steps, then thaw and fit.
97 | 3) Try freezing NH to 0 and use a LOS absorber (see Model setup).
98 | 
99 | 
100 | 
101 | Failure states
102 | ---------------
103 | 
104 | - If you get a very low photon index (<1.5), you are probably in a bad local minimum. Start from scratch. Maybe freeze PhoIndex=2 and see how far you get.
105 | 
106 | - Plot the model. If the AGN obscurer component is not present, it is because this combination of viewing angle and NHLOS does not exist in this geometry (zero photons). You need to change the geometry or viewing angle until you get back the model.
107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | -------------------------------------------------------------------------------- /examples/torus2.py: -------------------------------------------------------------------------------- 1 | """ 2 | Monte-Carlo simulator for X-ray obscurer geometries 3 | 4 | Literature: 5 | 6 | * Brightman & Nandra (2011) 7 | * Leahy & Creighton (1993) 8 | """ 9 | 10 | import numpy 11 | from numpy import pi, arccos as acos, log10, cos 12 | import matplotlib as mpl 13 | mpl.use('Agg') 14 | import matplotlib.pyplot as plt 15 | 16 | rng = numpy.random 17 | 18 | from xars.geometries.conetorus import ConeTorusGeometry 19 | from xars.geometries.spheretorus import SphereTorusGeometry 20 | from xars import montecarlo 21 | 22 | import argparse 23 | import sys 24 | 25 | parser = argparse.ArgumentParser( 26 | description="""Monte-Carlo simulator for X-ray obscurer geometries""", 27 | epilog="""(C) Johannes Buchner, 2013-2016. Based on work by Murray Brightman & Kirpal Nandra (see 2011 publication)""") 28 | 29 | parser.add_argument('--log10nh', type=float, help='column density (10^X cm^-1)', default=None) 30 | parser.add_argument('--nh', type=float, help='column density (X/10^22 cm^-1)', default=None) 31 | parser.add_argument('--covering-fraction', type=float, help='cosine of opening angle', default=None) 32 | parser.add_argument('--opening-angle', type=float, help='opening angle in degrees', default=None) 33 | parser.add_argument('--nevents', type=int, default=1000000, help='number of input photons per energy bin') 34 | parser.add_argument('--plot-paths', default=False, help='plot the paths taken?', action='store_true') 35 | parser.add_argument('--plot-interactions', default=False, help='plot the points at each interaction?', action='store_true') 36 | parser.add_argument('--verbose', default=False, help='Be more talkative, show debug statistics on interactions', action='store_true') 37 | parser.add_argument('--output', type=str, default=None, help='Prefix for output files. Chosen based on log10nh and opening angle if not specifified') 38 | args = parser.parse_args() 39 | 40 | nmu = 10 # number of viewing angle bins 41 | 42 | if args.log10nh is not None: 43 | nh_in = args.log10nh 44 | nh = 10**(nh_in-22) # current NH value 45 | else: 46 | if args.nh is None: 47 | sys.stderr.write("ERROR: NH not given.\n\n") 48 | parser.print_help() 49 | sys.exit(-1) 50 | nh = args.nh 51 | nh_in = log10(nh)+22 52 | print(' NH : 10^%.1f (%.3fe22)' % (nh_in, nh)) 53 | if args.covering_fraction is not None: 54 | cone = acos(args.covering_fraction) # current torus opening angle 55 | cone_in = args.covering_fraction 56 | else: 57 | if args.opening_angle is None: 58 | sys.stderr.write("ERROR: opening angle not given.\n\n") 59 | parser.print_help() 60 | sys.exit(-1) 61 | cone = args.opening_angle / 180. * pi # current torus opening angle 62 | cone_in = cos(cone) 63 | print(' opening angle: %.1f degrees (covering fraction %.1f)' % (cone * 180. / pi, cone_in)) 64 | assert cone * 180. / pi >= 0, 'Opening angle should be above 0 degrees' 65 | assert cone * 180. 
/ pi <= 90, 'Opening angle should be below 90 degrees' 66 | assert nh_in >= 19, 'NH should be above 20' 67 | assert nh_in <= 27, 'NH should be below 27' 68 | 69 | prefix = '%.1f_%.1f_' % (nh_in, cone_in) if args.output is None else args.output 70 | # total number of photons to send in 71 | 72 | if cone == 0: 73 | geometry = SphereTorusGeometry(NH = nh, verbose=args.verbose) 74 | else: 75 | geometry = ConeTorusGeometry(Theta_tor = cone, NH = nh, verbose=args.verbose) 76 | geometry.viz() 77 | plt.savefig(prefix + "geometry.pdf") 78 | plt.savefig(prefix + "geometry.png") 79 | plt.close() 80 | 81 | def mapper(beta, alpha): 82 | #beta[beta > pi/2] = pi - beta[beta > pi/2] 83 | beta0 = numpy.where(beta > pi/2, pi - beta, beta) 84 | slot = numpy.floor(nmu * beta0 / pi) 85 | #print beta * 180 / pi, slot 86 | return slot 87 | 88 | #binmapfunction = lambda beta, alpha: numpy.floor((nmu - 2) * beta / pi) 89 | #binmapfunction = mapper 90 | #binmapfunction = lambda beta, alpha: (numpy.round(0.5 + nmu * numpy.abs(cos(beta))) - 1).astype(int) 91 | def binmapfunction(beta, alpha): 92 | mu = ((0.5 + nmu * numpy.abs(cos(beta))) - 1).astype(int) 93 | mu[mu >= nmu] = nmu - 1 94 | return mu 95 | 96 | rdata, nphot = montecarlo.run(prefix, nphot = args.nevents, nmu = nmu, geometry=geometry, 97 | binmapfunction = binmapfunction, 98 | plot_paths=args.plot_paths, plot_interactions=args.plot_interactions, verbose=args.verbose) 99 | 100 | rdata_transmit, rdata_reflect = rdata 101 | header = dict(NH=nh, OPENING=cone) 102 | montecarlo.store(prefix + 'transmit', nphot, rdata_transmit, nmu, extra_fits_header = header, plot=False) 103 | montecarlo.store(prefix + 'reflect', nphot, rdata_reflect, nmu, extra_fits_header = header, plot=False) 104 | rdata_transmit += rdata_reflect 105 | del rdata_reflect 106 | montecarlo.store(prefix, nphot, rdata_transmit, nmu, extra_fits_header = header, plot=True) 107 | 108 | 109 | 110 | 111 | -------------------------------------------------------------------------------- /xars/geometries/bntorus.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import log10, logical_and, pi, tan 3 | 4 | from xars.coordtrans import to_cartesian, to_spherical 5 | 6 | 7 | class BNTorusGeometry: 8 | def __init__(self, Theta_tor, NH, verbose=False): 9 | self.Theta_tor = Theta_tor 10 | self.NH = NH 11 | self.verbose = verbose 12 | 13 | def compute_next_point(self, location, direction): 14 | (xi, yi, zi) = location 15 | (dist, beta, alpha) = direction 16 | d = dist / self.NH # distance in units of nH 17 | 18 | if self.verbose: 19 | print(' .. .. mean in nH units: ', d.mean()) 20 | 21 | # compute relative vector traveled 22 | xv, yv, zv = to_cartesian((d, beta, alpha)) 23 | 24 | # compute new position 25 | xf, yf, zf = xi + xv, yi + yv, zi + zv 26 | 27 | # compute intersection with cone border 28 | a = zv**2 - (xv**2 + yv**2) * tan(0.5 * pi - self.Theta_tor)**2 29 | b = 2. * zi * zv - (2. * xi * xv + 2. * yi * yv) * tan(0.5 * pi - self.Theta_tor)**2 30 | c = zi**2 - (xi**2 + yi**2) * tan(0.5 * pi - self.Theta_tor)**2 31 | quad = b**2 - 4. * a * c 32 | 33 | # compute the two solutions 34 | e1 = (-b - quad**0.5) / (2. * a) 35 | e2 = (-b + quad**0.5) / (2. * a) 36 | # if both are positive and e1<1 37 | twosolmask = logical_and( 38 | logical_and(quad > 0., e1 > 0.), 39 | logical_and(e1 < 1., e2 > 0.)) 40 | # if self.verbose: print ' .. 
%d of %d have 2 solutions' % (twosolmask.sum(), len(twosolmask)) 41 | # compute the two possible new positions 42 | # print 'twosol:', twosolmask 43 | 44 | x1 = xi[twosolmask] + e1[twosolmask] * xv[twosolmask] 45 | x2 = xi[twosolmask] + e2[twosolmask] * xv[twosolmask] 46 | y1 = yi[twosolmask] + e1[twosolmask] * yv[twosolmask] 47 | y2 = yi[twosolmask] + e2[twosolmask] * yv[twosolmask] 48 | z1 = zi[twosolmask] + e1[twosolmask] * zv[twosolmask] 49 | z2 = zi[twosolmask] + e2[twosolmask] * zv[twosolmask] 50 | 51 | # print 'e2', e2 52 | ltsol = e2[twosolmask] < 1. 53 | gtsol = ~ltsol 54 | 55 | asol = twosolmask.copy() 56 | asol[twosolmask] = ltsol 57 | bsol = twosolmask.copy() 58 | bsol[twosolmask] = gtsol 59 | xf[asol] = x2[ltsol] + xv[asol] - (x1[ltsol] - xi[asol]) 60 | yf[asol] = y2[ltsol] + yv[asol] - (y1[ltsol] - yi[asol]) 61 | zf[asol] = z2[ltsol] + zv[asol] - (z1[ltsol] - zi[asol]) 62 | 63 | xf[bsol] += (x2[gtsol] - x1[gtsol]) 64 | yf[bsol] += (y2[gtsol] - y1[gtsol]) 65 | zf[bsol] += (z2[gtsol] - z1[gtsol]) 66 | 67 | # use symmetries 68 | # bring to upper side of torus 69 | zf = numpy.abs(zf) 70 | # compute spherical coordinates 71 | rad, theta, phi = to_spherical((xf, yf, zf)) 72 | assert not numpy.isnan(rad).any() 73 | assert not numpy.isnan(theta).any() 74 | assert not numpy.isnan(phi).any() 75 | # if self.verbose: print ' .. checking if left cone' 76 | # are we inside the cone? 77 | inside = numpy.logical_and(rad < 1., theta > self.Theta_tor) 78 | return inside, (xf,yf,zf), (rad, phi, theta) 79 | 80 | def viz(self): 81 | """ Visualize the current geometry """ 82 | Theta_tor = self.Theta_tor * 180 / pi 83 | nh = log10(self.NH) + 22 84 | 85 | import matplotlib.lines as mlines 86 | import matplotlib.patches as mpatches 87 | import matplotlib.pyplot as plt 88 | plt.figure(figsize=(5,5)) 89 | font = 'sans-serif' 90 | ax = plt.axes([0,0,1,1]) 91 | 92 | thickness = max(0, min(1, (nh - 20.) / 5)) 93 | plt.text( 94 | 0.35, 0.5, "nH=%2.1f" % nh, ha="right", va='center', 95 | family=font, size=14) 96 | ax.add_line(mlines.Line2D([0,0.9], [0.5,0.5], lw=1.,alpha=0.4, ls='dashed', color='grey')) 97 | ax.add_line(mlines.Line2D([0.4,0.4], [0.5,0.9], lw=1.,alpha=0.4, ls='dashed', color='grey')) 98 | ax.add_patch( 99 | mpatches.Arc((0.4,0.5), 0.5, 0.5, theta2=90, theta1=90 - Theta_tor, 100 | color='black', linewidth=1, alpha=1, fill=False, ls='dashed')) 101 | plt.text( 102 | 0.4 + 0.02, 0.5 + 0.25 + 0.02, "%2.0f" % Theta_tor, ha="left", va='bottom', 103 | family=font, size=14) 104 | ax.add_patch( 105 | mpatches.Wedge((0.4,0.5), 0.3, -90 + Theta_tor, 90 - Theta_tor, color='black', 106 | linewidth=0, alpha=thickness, fill=True)) 107 | ax.add_patch( 108 | mpatches.Wedge((0.4,0.5), 0.3, 90 + Theta_tor, -90 - Theta_tor, color='black', 109 | linewidth=0, alpha=thickness, fill=True)) 110 | 111 | ax.add_patch( 112 | mpatches.Circle((0.4,0.5), 0.02, color='red', 113 | linewidth=0, alpha=1, fill=True)) 114 | 115 | ax.set_xticks([]) 116 | ax.set_yticks([]) 117 | -------------------------------------------------------------------------------- /doc/warpeddisk.rst: -------------------------------------------------------------------------------- 1 | ================== 2 | Warped disk model 3 | ================== 4 | 5 | .. image:: warpgeometry.png 6 | :target: warpeddisk.rst 7 | :align: right 8 | 9 | This model was presented in `Buchner et al. (2021) `_. If you have any issues or questions, please check the `FAQ `_ or open a `Github issue `_! 10 | 11 | To go to the corresponding infrared model go back to `Models `_. 
12 | 13 | Visualisation: https://zenodo.org/record/1193939/files/warpgeometry.pdf 14 | 15 | Components 16 | -------------- 17 | 18 | Download: https://doi.org/10.5281/zenodo.823728 19 | 20 | ``atable{warpeddisk.fits}``: 21 | 22 | Warped disk transmitted and reflected component with fluorescent lines 23 | 24 | Incident radiation parameters: 25 | 26 | - PhoIndex: Photon Index 27 | - Ecut: Energy cutoff [keV] 28 | - norm: Photon Flux normalisation at 1keV 29 | 30 | Viewing angle parameters: 31 | 32 | - NHLOS: LOS column density 33 | - Theta_inc: Viewing angle, relative to the inner (flat) disk portion. 34 | 35 | Geometry parameters: 36 | 37 | - NHdisk: Column density through the disk (in particular at the center). In log 38 | - diskfrac: Warp extent, i.e. how far the disk reaches. 0.1 means a relatively flat disk, 1 is a stronger warp. 39 | 40 | ``atable{warpeddisk-omni.fits}``: 41 | 42 | Warm mirror emission. This is the angle-averaged (omni-directional) spectrum, 43 | containing mostly the incident powerlaw from unobscured sightlines. 44 | 45 | Given space-filling ionised gas (e.g. in the narrow-line region), 46 | Thomson scattering can mirror this emission into otherwise obscured LOS. 47 | 48 | The parameters are the same as for the main component, and should always 49 | be linked. A fraction (with const) should be multiplied onto this component, 50 | with a maximum of 0.1. 51 | 52 | Model setup 53 | ------------- 54 | 55 | 1) **Heavily Obscured configuration**:: 56 | Here you view Compton-thick through the disk, edge-on to the inner disk. 57 | 58 | ``atable{warpeddisk.fits} + atable{warpeddisk-omni.fits}*const`` 59 | 60 | NHLOS=1000, PhoIndex=2, Ecut=400, diskfrac=0.5, Theta_inc=90 61 | 62 | Freeze Ecut=400, Theta_inc=90. Link the omni component parameters to the main model, const should be free between 1e-5 and 0.1. 63 | 64 | 2) **Mildly Obscured configuration**:: 65 | The warped disk can not produce Compton-thin obscured AGN by itself. 66 | Clouds in the host ISM (in particular the inner 100s of pc), can provide 67 | this obscuration. 68 | Here you view the disk unobscured (e.g. perpendicular), with some polar absorber. 69 | 70 | ``tbabs * atable{warpeddisk.fits} + atable{warpeddisk-omni.fits} * const`` 71 | 72 | NHTBABS=1, NHLOS=0, PhoIndex=2, Ecut=400, diskfrac=0.5 Theta_inc=0 73 | 74 | Freeze Ecut=400, NHLOS=0. Link the omni component parameters to the main model, const should be free between 1e-5 and 0.1. 75 | 76 | Also try 1) with a different viewing angle (e.g. polar, Theta_inc=0). 77 | 78 | 79 | Fitting 80 | ------------- 81 | 82 | 83 | AGN obscurer models, including MYTORUS and BNTORUS have highly degenerate parameter spaces. 84 | These are not easy to explore with simple fitting methods, or MCMC. 85 | I generally recommend a global search algorithms. These include Multinest (through BXA). 86 | 87 | If you are stuck in a situation without such algorithms, here is some strategies to escape local minima. 88 | 89 | 90 | 1) Freeze some parameters that are less influential. For the warpeddisk model, freeze Ecut=400, NHdisk=25 and diskfrac=0.5. 91 | 2) Limit the data. Local minima are difficult to escape because they are surrounded by steep walls. Use fewer spectra, and start with a high-energy data (e.g. 20-50keV). Freeze the omni constant to 1e-10. Fit and gradually add more data (15keV, 8keV, 5keV, etc). Then allow the omni constant to vary. 92 | 3) Use the error command to explore more. This is most helpful on NH and geometry parameters. 
93 | 4) Plot the model -- if the model component is not present, it is because this viewing angle and this NHLOS does not exist in this geometry. You need to change the geometry or viewing angle until you get back the model. 94 | 95 | Additionally, some parameter regions are simply discontinuous. 96 | 97 | 1) Try freezing NH to Compton-thin (e.g. 30) and refit using the above steps, then thaw and fit. 98 | 2) Try freezing NH to Compton-thick (e.g. 300) and refit using the above steps, then thaw and fit. 99 | 3) Try freezing NH to 0 and use a LOS absorber (see Model setup). 100 | 101 | 102 | 103 | Failure states 104 | --------------- 105 | 106 | - If you get a very low photon index (<1.5), you are probably in a bad local minimum. Start from scratch. Maybe freeze PhoIndex=2 and see how far you get. 107 | 108 | - Plot the model. If the AGN obscurer component is not present, it is because this viewing angle and this NHLOS does not exist in this geometry (zero photons). You need to change the geometry or viewing angle until you get back the model. 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | -------------------------------------------------------------------------------- /xspecexport/createpowtable.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import exp, log 3 | import astropy.io.fits as pyfits 4 | 5 | outfilename = 'myatable.fits' 6 | 7 | # create some energy grid --- START 8 | # you can replace this with your own grid 9 | 10 | nbins = 1000 11 | def bin2energy_lo(i): 12 | """Get lower energy edge of bin with index i""" 13 | # get lower edge of bin, in keV 14 | r = 1.5 15 | A = log((8.1 + 0.015)/8.10)**(-1./r) 16 | with numpy.errstate(invalid='ignore'): 17 | return numpy.where(i < 800, i * 0.01 + 0.1, 8.1*exp(((i-800)/A)**r)) 18 | 19 | energy_lo = bin2energy_lo(numpy.arange(nbins)) 20 | energy_hi = bin2energy_lo(numpy.arange(nbins) + 1) 21 | # create some energy grid --- END 22 | 23 | # central energy of bin 24 | energy = (energy_hi + energy_lo) / 2 25 | # width of bin 26 | deltae = energy_hi - energy_lo 27 | 28 | table = [] 29 | PhoIndices = [ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 30 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 31 | 3. ] 32 | Ecuts = [20., 30, 40, 60, 100, 140, 200, 400] 33 | 34 | # width of bin at 1keV 35 | deltae0 = deltae[energy >= 1][0] 36 | print(deltae, deltae0) 37 | 38 | for PhoIndex in PhoIndices: 39 | for Ecut in Ecuts: 40 | # compute model: here a powerlaw with an exponential cut-off. 
41 | # We need to integrate over the energy bin, which we do here by 42 | # evaluating at the centre and multiplying by the width 43 | mymodel = energy**-PhoIndex * exp(-energy / Ecut) * deltae 44 | # add parameters and spectrum: 45 | table.append(((PhoIndex, Ecut), mymodel)) 46 | 47 | hdus = [] 48 | hdu = pyfits.PrimaryHDU() 49 | import datetime, time 50 | now = datetime.datetime.fromtimestamp(time.time()) 51 | nowstr = now.isoformat() 52 | hdu.header['CREATOR'] = """Johannes Buchner """ 53 | hdu.header['DATE'] = nowstr[:nowstr.rfind('.')] 54 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 55 | hdu.header['MODLNAME'] = 'torus' 56 | hdu.header['ADDMODEL'] = True 57 | hdu.header['MODLUNIT'] = 'photons/cm^2/s' 58 | hdu.header['EXTEND'] = True 59 | hdu.header['REDSHIFT'] = True 60 | hdu.header['SIMPLE'] = True 61 | hdu.header['HDUDOC'] = 'OGIP/92-009' 62 | hdu.header['HDUVERS1'] = '1.0.0' 63 | hdu.header['HDUCLASS'] = 'OGIP' 64 | hdus.append(hdu) 65 | 66 | # parameters of this model (must agree with the loops above!) 67 | 68 | # NAME, METHOD, INITIAL, DELTA, MINIMUM, BOTTOM, TOP, MAXIMUM, NUMBVALS, VALUE (41) 69 | dtype = [('NAME', 'S12'), ('METHOD', '>i4'), ('INITIAL', '>f4'), ('DELTA', '>f4'), ('MINIMUM', '>f4'), ('BOTTOM', '>f4'), ('TOP', '>f4'), ('MAXIMUM', '>f4'), ('NUMBVALS', '>i4'), ('VALUE', '>f4', (41,))] 70 | 71 | parameters = numpy.array([ 72 | ('PhoIndex', 0, 2.0, 0.0099999998, 1.0, 1.2, 2.8, 3.0, 11, numpy.array([ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 73 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 74 | 3. , 0. , 0. , 0. , 0. , 75 | 0. , 0. , 0. , 0. , 0. , 76 | 0. , 0. , 0. , 0. , 0. , 77 | 0. , 0. , 0. , 0. , 0. , 78 | 0. , 0. , 0. , 0. , 0. , 79 | 0. , 0. , 0. , 0. , 0. , 0. ])), 80 | ('Ecut', 0, 100.0, 10.0, 20, 20, 400, 400, 8, numpy.array([ 20. , 30, 40, 60, 100, 81 | 140 , 200, 400 , 0 , 0, 82 | 0. , 0. , 0. , 0. , 0. , 83 | 0. , 0. , 0. , 0. , 0. , 84 | 0. , 0. , 0. , 0. , 0. , 85 | 0. , 0. , 0. , 0. , 0. , 86 | 0. , 0. , 0. , 0. , 0. , 87 | 0. , 0. , 0. , 0. , 0. , 0. 
])), 88 | ], dtype=dtype) 89 | # check that parameter definition agrees with computed models 90 | assert numpy.product(parameters['NUMBVALS']) == len(table), ('parameter definition does not match spectra table', parameters['NUMBVALS'], numpy.product(parameters['NUMBVALS']), len(table)) 91 | hdu = pyfits.BinTableHDU(data=parameters) 92 | hdu.header['DATE'] = nowstr 93 | hdu.header['EXTNAME'] = 'PARAMETERS' 94 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 95 | hdu.header['HDUVERS1'] = '1.0.0' 96 | hdu.header['NINTPARM'] = len(parameters) 97 | hdu.header['NADDPARM'] = 0 98 | hdu.header['HDUCLAS2'] = 'PARAMETERS' 99 | hdus.append(hdu) 100 | 101 | # ENERG_LO, ENERG_HI 102 | dtype = [('ENERG_LO', '>f4'), ('ENERG_HI', '>f4')] 103 | energies = numpy.array(list(zip(energy_lo, energy_hi)), dtype=dtype) 104 | hdu = pyfits.BinTableHDU(data=energies) 105 | hdu.header['DATE'] = nowstr 106 | hdu.header['EXTNAME'] = 'ENERGIES' 107 | hdu.header['TUNIT2'] = 'keV' 108 | hdu.header['TUNIT1'] = 'keV' 109 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 110 | hdu.header['HDUCLAS2'] = 'ENERGIES' 111 | hdu.header['HDUVERS1'] = '1.0.0' 112 | hdus.append(hdu) 113 | 114 | # PARAMVAL (4), INTPSPEC 115 | dtype = [('PARAMVAL', '>f4', (len(parameters),)), ('INTPSPEC', '>f4', (nbins,))] 116 | table.sort() 117 | table = numpy.array(table, dtype=dtype) 118 | hdu = pyfits.BinTableHDU(data=table) 119 | hdu.header['DATE'] = nowstr 120 | hdu.header['EXTNAME'] = 'SPECTRA' 121 | hdu.header['TUNIT2'] = 'photons/cm^2/s' 122 | hdu.header['TUNIT1'] = 'none' 123 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 124 | hdu.header['HDUCLAS2'] = 'SPECTRA' 125 | hdu.header['HDUVERS1'] = '1.0.0' 126 | 127 | hdus.append(hdu) 128 | hdus = pyfits.HDUList(hdus) 129 | 130 | hdus.writeto(outfilename, overwrite=True) 131 | -------------------------------------------------------------------------------- /xspecexport/createspheretable.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import h5py 3 | import astropy.io.fits as pyfits 4 | import sys 5 | import progressbar 6 | from binning import nbins, energy2bin, bin2energy 7 | 8 | energy_lo, energy_hi = bin2energy(numpy.arange(nbins)) 9 | energy = (energy_hi + energy_lo) / 2 10 | deltae = energy_hi - energy_lo 11 | deltae0 = deltae[energy >= 1][0] 12 | 13 | table = [] 14 | PhoIndices = [ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 15 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 16 | 3. ] 17 | data = {} 18 | 19 | outfilename = sys.argv[1] 20 | 21 | widgets = [progressbar.Percentage(), " starting ... ", progressbar.Bar(), progressbar.ETA()] 22 | pbar = progressbar.ProgressBar(widgets=widgets) 23 | 24 | for filename in pbar(sys.argv[2:]): 25 | f = h5py.File(filename) 26 | nphot = f.attrs['NPHOT'] 27 | nh = float(f.attrs['NH']) 28 | widgets[1] = ' NH=%.3f ' % nh 29 | 30 | matrix = f['rdata'] 31 | 32 | a, b, nmu = matrix.shape 33 | assert a == nbins, matrix.shape 34 | assert b == nbins, matrix.shape 35 | 36 | # go through viewing angles 37 | matrix_mu = matrix[()].sum(axis=2) 38 | for PhoIndex in PhoIndices: 39 | spectrum = energy**-PhoIndex 40 | spectrum[1150:] = 0 41 | weights = (spectrum * deltae / deltae0).reshape((-1,1)) 42 | y = (weights * matrix_mu).sum(axis=0) / (nphot / 10.) 
43 | table.append(((nh, PhoIndex), y)) 44 | 45 | hdus = [] 46 | hdu = pyfits.PrimaryHDU() 47 | import datetime, time 48 | now = datetime.datetime.fromtimestamp(time.time()) 49 | nowstr = now.isoformat() 50 | nowstr = nowstr[:nowstr.rfind('.')] 51 | hdu.header['CREATOR'] = """Johannes Buchner """ 52 | hdu.header['DATE'] = nowstr 53 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 54 | hdu.header['MODLNAME'] = 'torus' 55 | hdu.header['ADDMODEL'] = True 56 | hdu.header['MODLUNIT'] = 'photons/cm^2/s' 57 | hdu.header['EXTEND'] = True 58 | hdu.header['REDSHIFT'] = True 59 | hdu.header['SIMPLE'] = True 60 | hdu.header['HDUDOC'] = 'OGIP/92-009' 61 | hdu.header['HDUVERS1'] = '1.0.0' 62 | hdu.header['HDUCLASS'] = 'OGIP' 63 | hdus.append(hdu) 64 | 65 | # NAME, METHOD, INITIAL, DELTA, MINIMUM, BOTTOM, TOP, MAXIMUM, NUMBVALS, VALUE (41) 66 | dtype = [('NAME', 'S12'), ('METHOD', '>i4'), ('INITIAL', '>f4'), ('DELTA', '>f4'), ('MINIMUM', '>f4'), ('BOTTOM', '>f4'), ('TOP', '>f4'), ('MAXIMUM', '>f4'), ('NUMBVALS', '>i4'), ('VALUE', '>f4', (41,))] 67 | 68 | parameters = numpy.array([ 69 | ('nH', 1, 10.0, 1.0, 0.0099999998, 0.0099999998, 10000.0, 10000.0, 41, numpy.array([ 9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 70 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 71 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 72 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 73 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 74 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 75 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 76 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 77 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 78 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 79 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 80 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 81 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 82 | 7.08000000e+03, 1.00000000e+04])), 83 | ('PhoIndex', 0, 2.0, 0.0099999998, 1.0, 1.2, 2.8, 3.0, 11, numpy.array([ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 84 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 85 | 3. , 0. , 0. , 0. , 0. , 86 | 0. , 0. , 0. , 0. , 0. , 87 | 0. , 0. , 0. , 0. , 0. , 88 | 0. , 0. , 0. , 0. , 0. , 89 | 0. , 0. , 0. , 0. , 0. , 90 | 0. , 0. , 0. , 0. , 0. , 0. 
])), 91 | ], dtype=dtype) 92 | assert numpy.product(parameters['NUMBVALS']) == len(table), ('parameter definition does not match spectra table', parameters['NUMBVALS'], numpy.product(parameters['NUMBVALS']), len(table)) 93 | hdu = pyfits.BinTableHDU(data=parameters) 94 | hdu.header['DATE'] = nowstr 95 | hdu.header['EXTNAME'] = 'PARAMETERS' 96 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 97 | hdu.header['HDUVERS1'] = '1.0.0' 98 | hdu.header['NINTPARM'] = len(parameters) 99 | hdu.header['NADDPARM'] = 0 100 | hdu.header['HDUCLAS2'] = 'PARAMETERS' 101 | hdus.append(hdu) 102 | 103 | # ENERG_LO, ENERG_HI 104 | dtype = [('ENERG_LO', '>f4'), ('ENERG_HI', '>f4')] 105 | energies = numpy.array(list(zip(energy_lo, energy_hi)), dtype=dtype) 106 | hdu = pyfits.BinTableHDU(data=energies) 107 | hdu.header['DATE'] = nowstr 108 | hdu.header['EXTNAME'] = 'ENERGIES' 109 | hdu.header['TUNIT2'] = 'keV' 110 | hdu.header['TUNIT1'] = 'keV' 111 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 112 | hdu.header['HDUCLAS2'] = 'ENERGIES' 113 | hdu.header['HDUVERS1'] = '1.0.0' 114 | hdus.append(hdu) 115 | 116 | # PARAMVAL (4), INTPSPEC 117 | dtype = [('PARAMVAL', '>f4', (len(parameters),)), ('INTPSPEC', '>f4', (nbins,))] 118 | table.sort() 119 | table = numpy.array(table, dtype=dtype) 120 | hdu = pyfits.BinTableHDU(data=table) 121 | hdu.header['DATE'] = nowstr 122 | hdu.header['EXTNAME'] = 'SPECTRA' 123 | hdu.header['TUNIT2'] = 'photons/cm^2/s' 124 | hdu.header['TUNIT1'] = 'none' 125 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 126 | hdu.header['HDUCLAS2'] = 'SPECTRA' 127 | hdu.header['HDUVERS1'] = '1.0.0' 128 | 129 | hdus.append(hdu) 130 | hdus = pyfits.HDUList(hdus) 131 | 132 | hdus.writeto(outfilename, overwrite=True) 133 | 134 | 135 | -------------------------------------------------------------------------------- /doc/uxclumpy.rst: -------------------------------------------------------------------------------- 1 | ================================= 2 | UXCLUMPY - Unified Clumpy model 3 | ================================= 4 | 5 | .. image:: uxclumpy.png 6 | :target: https://vimeo.com/218031864 7 | :align: right 8 | 9 | This model was presented in `Buchner et al (2019) `_. If you have any issues or questions, please check the `FAQ `_ or open a `Github issue `_! 10 | 11 | To go to the corresponding infrared model go back to `Models `_. 12 | 13 | Visualisation 14 | --------------- 15 | 16 | - 360° VR video: https://vimeo.com/253036759 17 | - normal video: https://vimeo.com/218031864 18 | 19 | .. figure:: covmaps.png 20 | 21 | Projections of the column densities as seen from the central X-ray corona. 22 | Column densities vary from 20 to 26 in logarithmic units of inverse square cm. 23 | 24 | Parameters TORSigma and CTKcover are different in the panels. 25 | 26 | These line-of-sight column densities are used to divide the sky into bins 27 | of similar NH. 28 | The spectrum collected in the direction of such bin is the model spectrum 29 | when setting NHLOS. 30 | Additional to binning by NH, the binning is further subdivided into inclination angles. 31 | This allows selection LOS NH and inclination angle independently. 32 | An exception is of course when there is no obscuration in a chosen inclination angle 33 | (e.g., no CTK columns exist face-on). In that case, the spectrum is empty. 34 | 35 | .. figure:: uxclumpy_hump.png 36 | 37 | The Comptom-hump near 20keV is highly diverse in UXCLUMPY! 38 | The plot shows the reflection-dominated model spectrum under a Compton-thick line-of-sight. 
39 | The Compton hump strength and shape depend on the geometry parameters 40 | (TORSigma and CTKcover, illustrated above). 41 | This has implications for the cosmic X-ray background and the inference and modelling of 42 | heavily obscured AGN. 43 | 44 | Components 45 | -------------- 46 | 47 | Download: https://doi.org/10.5281/zenodo.602282 48 | 49 | ``atable{uxclumpy.fits}``: 50 | 51 | Clumpy torus transmitted and reflected component with fluorescent lines 52 | 53 | Incident radiation parameters: 54 | 55 | - PhoIndex: Photon Index 56 | - Ecut: Energy cutoff [keV] 57 | - norm: Photon Flux normalisation at 1keV 58 | 59 | Viewing angle parameters: 60 | 61 | - NHLOS: Total LOS column density 62 | - Theta_inc: Viewing angle, relative to the inner (flat) disk portion. 63 | 64 | Geometry parameters: 65 | 66 | - TORsigma: vertical extent of the cloud population. sigma is the width of a Gaussian distribution (see `CLUMPY model `_). The number of clouds remains constant, so low sigmas yield slightly higher covering factors (2%-5%). 67 | - CTKcover: covering factor of the inner Compton-thick ring of clouds. If low, many small clouds form a thin ring. If high, few large clouds are used. The column densities of these clouds are logNH=25+-0.5. 68 | 69 | ``atable{uxclumpy-omni.fits}``: 70 | 71 | Warm mirror emission. This is the angle-averaged (omni-directional) spectrum, 72 | containing mostly the incident powerlaw from unobscured sightlines. 73 | 74 | Given space-filling ionised gas (e.g. in the narrow-line region), 75 | Thomson scattering can mirror this emission into otherwise obscured LOS. 76 | 77 | The parameters are the same as for the main component, and should always 78 | be linked. A fraction (with const) should be multiplied onto this component, 79 | with a maximum of 0.1. 80 | 81 | Model setup 82 | ------------- 83 | 84 | ``atable{uxclumpy.fits} + atable{uxclumpy-omni.fits}*const`` 85 | 86 | Initially, freeze Ecut=400, Theta_inc=90. 87 | 88 | Link the omni component parameters to the main model parameters; const should be free between 1e-5 and 0.1. 89 | 90 | 91 | Fitting 92 | ------------- 93 | 94 | 95 | AGN obscurer models, including MYTORUS and BNTORUS, have highly degenerate parameter spaces. 96 | These are not easy to explore with simple fitting methods or MCMC. 97 | I generally recommend global search algorithms. These include Multinest (through BXA). 98 | 99 | If you are stuck in a situation without such algorithms, here are some strategies to escape local minima. 100 | 101 | 102 | 1) Freeze some parameters that are less influential. For the uxclumpy model, freeze Ecut=400, and maybe CTKcov=0.4. 103 | 2) Limit the data. Local minima are difficult to escape because they are surrounded by steep walls. Use fewer spectra, and start with high-energy data (e.g. 20-50keV). Freeze the omni constant to 1e-10. Fit and gradually add more data (15keV, 8keV, 5keV, etc). Then allow the omni constant to vary. 104 | 3) Use the error command to explore more. This is most helpful on NH and geometry parameters. 105 | 4) Plot the model -- if the model component is not present, it is because this combination of viewing angle and NHLOS does not exist in this geometry. You need to change the geometry or viewing angle until you get back the model. 106 | 107 | Additionally, some parameter regions are simply discontinuous. 108 | 109 | 1) Try freezing NH to Compton-thin (e.g. 30) and refit using the above steps, then thaw and fit. 110 | 2) Try freezing NH to Compton-thick (e.g.
300) and refit using the above steps, then thaw and fit. 111 | 3) Try freezing NH to 0 and use a LOS absorber (see Model setup). 112 | 113 | 114 | 115 | Failure states 116 | --------------- 117 | 118 | - If you get a very low photon index (<1.5), you are probably in a bad local minimum. Start from scratch. Maybe freeze PhoIndex=2 and see how far you get. 119 | 120 | - Plot the model. If the AGN obscurer component is not present, it is because this viewing angle and this NHLOS does not exist in this geometry (zero photons). You need to change the geometry or viewing angle until you get back the model. 121 | -------------------------------------------------------------------------------- /xspecexport/createdisktable.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import pi, exp 3 | import astropy.io.fits as pyfits 4 | import h5py 5 | import sys 6 | from binning import nbins, energy2bin, bin2energy 7 | 8 | energy_lo, energy_hi = bin2energy(numpy.arange(nbins)) 9 | energy = (energy_hi + energy_lo) / 2 10 | deltae = energy_hi - energy_lo 11 | 12 | table = [] 13 | PhoIndices = [ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 14 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 15 | 3. ] 16 | ThetaIncs = [ 18.20000076, 31.79999924, 41.40000153, 49.5 , 17 | 56.59999847, 63.29999924, 69.5 , 75.5 , 18 | 81.40000153, 87.09999847] 19 | Ecuts = [ 20., 30, 40, 60, 100, 140, 200, 400 ] 20 | data = {} 21 | 22 | outfilename = sys.argv[1] 23 | 24 | for filename in sys.argv[2:]: 25 | print('loading', filename) 26 | #f = pyfits.open(filename) 27 | #nphot = int(f[0].header['NPHOT']) 28 | #matrix = f[0].data 29 | f = h5py.File(filename) 30 | nphot = f.attrs['NPHOT'] 31 | matrix = f['rdata'] 32 | a, b, nmu = matrix.shape 33 | assert a == nbins, f[0].data.shape 34 | assert b == nbins, f[0].data.shape 35 | #data[(nh, opening)] = [(nphot, f[0].data)] 36 | 37 | for mu, ThetaInc in enumerate(ThetaIncs[::-1]): 38 | # go through viewing angles 39 | matrix_mu = matrix[:,:,mu] 40 | print(ThetaInc) 41 | for PhoIndex in PhoIndices: 42 | for Ecut in Ecuts: 43 | weights = (energy**-PhoIndex * exp(-energy / Ecut) * deltae).reshape((-1,1)) 44 | y = (weights * matrix_mu).sum(axis=0) / (nphot / 10.) 45 | #print PhoIndex, ThetaInc #, (y/deltae)[energy_lo >= 1][0] 46 | #print ' ', (weights * matrix[:,:,mu]).sum(axis=0), deltae, (nphot / 1000000.) 
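# Added explanation: the weights above approximate the integral of the cutoff
# power-law over each input energy bin (value at the bin centre times the bin
# width deltae); multiplying the response matrix for this viewing angle and
# summing over input bins gives the emergent spectrum for this
# (PhoIndex, Ecut, Theta_inc) grid point, which is appended to the table below.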
47 | #assert numpy.any(y > 0), y 48 | table.append(((PhoIndex, Ecut, ThetaInc), y)) 49 | 50 | hdus = [] 51 | hdu = pyfits.PrimaryHDU() 52 | import datetime, time 53 | now = datetime.datetime.fromtimestamp(time.time()) 54 | nowstr = now.isoformat() 55 | nowstr = nowstr[:nowstr.rfind('.')] 56 | hdu.header['CREATOR'] = """Johannes Buchner """ 57 | hdu.header['DATE'] = nowstr 58 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 59 | hdu.header['MODLNAME'] = 'disk' 60 | hdu.header['ADDMODEL'] = True 61 | hdu.header['MODLUNIT'] = 'photons/cm^2/s' 62 | hdu.header['EXTEND'] = True 63 | hdu.header['REDSHIFT'] = True 64 | hdu.header['SIMPLE'] = True 65 | hdu.header['HDUDOC'] = 'OGIP/92-009' 66 | hdu.header['HDUVERS1'] = '1.0.0' 67 | hdu.header['HDUCLASS'] = 'OGIP' 68 | hdus.append(hdu) 69 | 70 | # NAME, METHOD, INITIAL, DELTA, MINIMUM, BOTTOM, TOP, MAXIMUM, NUMBVALS, VALUE (41) 71 | dtype = [('NAME', 'S12'), ('METHOD', '>i4'), ('INITIAL', '>f4'), ('DELTA', '>f4'), ('MINIMUM', '>f4'), ('BOTTOM', '>f4'), ('TOP', '>f4'), ('MAXIMUM', '>f4'), ('NUMBVALS', '>i4'), ('VALUE', '>f4', (41,))] 72 | 73 | parameters = numpy.array([ 74 | ('PhoIndex', 0, 2.0, 0.0099999998, 1.0, 1.2, 2.8, 3.0, 11, numpy.array([ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 75 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 76 | 3. , 0. , 0. , 0. , 0. , 77 | 0. , 0. , 0. , 0. , 0. , 78 | 0. , 0. , 0. , 0. , 0. , 79 | 0. , 0. , 0. , 0. , 0. , 80 | 0. , 0. , 0. , 0. , 0. , 81 | 0. , 0. , 0. , 0. , 0. , 0. ])), 82 | ('Ecut', 0, 100.0, 10.0, 20, 20, 400, 400, 8, numpy.array([ 20. , 30, 40, 60, 100, 83 | 140 , 200, 400 , 0 , 0, 84 | 0. , 0. , 0. , 0. , 0. , 85 | 0. , 0. , 0. , 0. , 0. , 86 | 0. , 0. , 0. , 0. , 0. , 87 | 0. , 0. , 0. , 0. , 0. , 88 | 0. , 0. , 0. , 0. , 0. , 89 | 0. , 0. , 0. , 0. , 0. , 0. ])), 90 | ('Theta_inc', 0, 18.200001, 5.0, 0.0, 18.200001, 87.099998, 90.0, 10, numpy.array([ 18.20000076, 31.79999924, 41.40000153, 49.5 , 91 | 56.59999847, 63.29999924, 69.5 , 75.5 , 92 | 81.40000153, 87.09999847, 0. , 0. , 93 | 0. , 0. , 0. , 0. , 94 | 0. , 0. , 0. , 0. , 95 | 0. , 0. , 0. , 0. , 96 | 0. , 0. , 0. , 0. , 97 | 0. , 0. , 0. , 0. , 98 | 0. , 0. , 0. , 0. , 99 | 0. , 0. , 0. , 0. , 0. 
])), 100 | ], dtype=dtype) 101 | hdu = pyfits.BinTableHDU(data=parameters) 102 | hdu.header['DATE'] = nowstr 103 | hdu.header['EXTNAME'] = 'PARAMETERS' 104 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 105 | hdu.header['HDUVERS1'] = '1.0.0' 106 | hdu.header['NINTPARM'] = len(parameters) 107 | hdu.header['NADDPARM'] = 0 108 | hdu.header['HDUCLAS2'] = 'PARAMETERS' 109 | hdus.append(hdu) 110 | 111 | # ENERG_LO, ENERG_HI 112 | dtype = [('ENERG_LO', '>f4'), ('ENERG_HI', '>f4')] 113 | energies = numpy.array(list(zip(energy_lo, energy_hi)), dtype=dtype) 114 | hdu = pyfits.BinTableHDU(data=energies) 115 | hdu.header['DATE'] = nowstr 116 | hdu.header['EXTNAME'] = 'ENERGIES' 117 | hdu.header['TUNIT2'] = 'keV' 118 | hdu.header['TUNIT1'] = 'keV' 119 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 120 | hdu.header['HDUCLAS2'] = 'ENERGIES' 121 | hdu.header['HDUVERS1'] = '1.0.0' 122 | hdus.append(hdu) 123 | 124 | # PARAMVAL (4), INTPSPEC 125 | dtype = [('PARAMVAL', '>f4', (len(parameters),)), ('INTPSPEC', '>f4', (nbins,))] 126 | table.sort() 127 | table = numpy.array(table, dtype=dtype) 128 | hdu = pyfits.BinTableHDU(data=table) 129 | hdu.header['DATE'] = nowstr 130 | hdu.header['EXTNAME'] = 'SPECTRA' 131 | hdu.header['TUNIT2'] = 'photons/cm^2/s' 132 | hdu.header['TUNIT1'] = 'none' 133 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 134 | hdu.header['HDUCLAS2'] = 'SPECTRA' 135 | hdu.header['HDUVERS1'] = '1.0.0' 136 | 137 | hdus.append(hdu) 138 | hdus = pyfits.HDUList(hdus) 139 | 140 | hdus.writeto(outfilename, overwrite=True) 141 | 142 | 143 | -------------------------------------------------------------------------------- /doc/README.rst: -------------------------------------------------------------------------------- 1 | ==================================== 2 | XARS X-ray Monte-carlo simulator 3 | ==================================== 4 | 5 | XARS simulates X-rays passing through matter in user-defined geometries. 6 | 7 | Photo-electric absorption, compton scattering and fluorescent line processes are 8 | modelled. 9 | 10 | 11 | Usage 12 | -------------------------- 13 | see `Code Documentation `_ 14 | 15 | How to cite XARS correctly 16 | --------------------------- 17 | 18 | Please reference `Buchner et al (2019), A&A, 629, A16, 14pp. 19 | `_ 20 | 21 | 22 | Models 23 | ================== 24 | 25 | In `Buchner+19 `_ we irradiated the following geometries, 26 | and you can **download xspec table models here** for them. 27 | See the paper for description of the parameters and model assumptions. 28 | 29 | **Each of these models also has a infrared model associated with it.** 30 | 31 | 32 | UXCLUMPY 33 | -------------------- 34 | 35 | .. image:: uxclumpy.png 36 | :target: https://vimeo.com/218031864 37 | :align: right 38 | 39 | The Unified X-ray Clumpy model (UXCLUMPY) features: 40 | 41 | * Unification: Can produce unobscured, obscured and Compton-thick AGN in the right proportions. 42 | * Eclipse events: The frequency and sizes of eclipses are reproduced by clumps on Keplerian orbits. 43 | * X-ray spectra can fit observations well 44 | * Compatible with CLUMPY infrared models 45 | 46 | Here you can access: 47 | 48 | * Geometry movies: https://vimeo.com/218031864 and 360 VR https://vimeo.com/253036759 49 | * X-ray table model available at: `UXCLUMPY page `_ 50 | * Infrared model available at: http://clumpy.org 51 | * More information: `Buchner et al. 2019 `_ (or send me an email) 52 | 53 | Warped disk 54 | -------------------- 55 | 56 | .. 
image:: warpgeometry.png 57 | :target: warpeddisk.rst 58 | :align: right 59 | 60 | A simple warped disk geometry 61 | 62 | * Geometry images: `Warped Disk page `_ 63 | * X-ray table model available at: `Warped Disk page `_ 64 | * Infrared model: see `Jud et al (2017) `_ 65 | * More information: `Buchner et al. (2021) `_ 66 | 67 | 68 | Radiative fountain (Wada 2012) 69 | ------------------------------- 70 | 71 | .. image:: wadageometry.png 72 | :target: wada.rst 73 | :align: right 74 | 75 | * Geometry images: `Radiative fountain page `_ 76 | * X-ray table model available at: `Radiative fountain page `_ 77 | * Infrared model: `Radiative fountain page `_ 78 | * More information: `Buchner et al. (2021) `_ 79 | 80 | CAT3D-WIND 81 | --------------------------- 82 | 83 | .. image:: CAT3D-WIND.gif 84 | :align: right 85 | 86 | * Infrared model: http://www.sungrazer.org/cat3d.html 87 | * X-ray model: `CAT3D+WIND page `_ 88 | 89 | 90 | Response of a single spherical blob 91 | ------------------------------------- 92 | 93 | .. image:: blob.png 94 | :align: right 95 | 96 | Reflection from a single sphere with 97 | 98 | * Isotropic density 99 | * Exponential density profile 100 | * Gaussian density profile 101 | 102 | The spectrum gives 103 | 104 | * Angle-averaged reflection off a blob with uniform, Gaussian or exponential density profile. 105 | * Parameters: Photon index, Energy cut-off, NH across the blob. 106 | * Files: blob_uniform.fits, blob_gaussian.fits, blob_exponential.fits 107 | 108 | Download: 109 | 110 | * X-ray table model: https://doi.org/10.5281/zenodo.2235456 111 | * Infrared model: https://en.wikipedia.org/wiki/Planck%27s_law 112 | 113 | More information in the appendix of `Buchner et al. 2019 `_. 114 | 115 | Wedge model 116 | ---------------- 117 | 118 | .. image:: overview_spectra2.png 119 | :align: right 120 | 121 | Download from: https://doi.org/10.5281/zenodo.2224650 122 | 123 | * Single wedge 124 | 125 | * Sphere with bi-conical cut-out (see also `BORUS02 `_; Buchner et al., submitted) 126 | * Parameters: Photon index, Energy cut-off, Torus Opening angle, Viewing angle. 127 | 128 | * Gradient wedge model 129 | 130 | * Sphere with bi-conical cut-out, but gradually increasing density (see Buchner et al., submitted) 131 | * Parameters: Photon index, Energy cut-off, Torus Opening angle, Viewing angle. 132 | * Files: gradientwedge.fits gradientwedge-reflect.fits gradientwedge-reflect.fits 133 | 134 | Disk 135 | ----------------------- 136 | 137 | .. image:: disk.png 138 | :align: right 139 | 140 | 141 | * Infinitely thick disk (similar to pexrav or pexmon, but self-consistently computed with XARS) 142 | * Parameters: Photon index, Energy cut-off, viewing angle. 143 | * Download link: https://doi.org/10.5281/zenodo.2224471 144 | * Files: disk.fits diskreflect.fits disktransmit.fits 145 | 146 | Presented in `Buchner et al. 2019 `_. 147 | 148 | Clumpybox3 149 | ---------------- 150 | 151 | .. image:: clumpybox.png 152 | :align: right 153 | 154 | * A crate of spheres, just touching each other 155 | * With many free parameters; useful for exploring clumpy geometries 156 | * Geometry parameters: 157 | 158 | * NH through the spheres (mean and variance across the spheres), 159 | * filling: 1 if spheres are just touching (like in the illustration), otherwise radii are smaller by that factor.
160 | 161 | * Parameters: Photon index, Energy cut-off, LOS NH, Inclination angle 162 | * Download link: https://doi.org/10.5281/zenodo.2245188 163 | * clumpy/clumpybox3.fits 164 | 165 | Other torus models 166 | -------------------- 167 | 168 | * a constant-density donut-shaped geometry, 60° opening angle: `MYTORUS `_ and also `RXTorus `_. 169 | * Another cone-cutout geometry `e-torus `_ (logNH=22-25 range only) 170 | * Clumps in a donut outline `Ctorus `_ (logNH=22-25, E=1-450keV only) 171 | 172 | 173 | 174 | -------------------------------------------------------------------------------- /examples/torusC.py: -------------------------------------------------------------------------------- 1 | """ 2 | Monte-Carlo simulator for X-ray obscurer geometries 3 | 4 | Literature: 5 | 6 | * Brightman & Nandra (2011) 7 | * Leahy & Creighton (1993) 8 | """ 9 | 10 | import numpy 11 | from numpy import pi, cos, log10 12 | import tqdm 13 | from xars.binning import nbins, energy2bin, bin2energy 14 | import matplotlib as mpl 15 | mpl.use('Agg') 16 | import matplotlib.pyplot as plt 17 | 18 | rng = numpy.random 19 | 20 | from xars.geometries.clumpytorus import ClumpyTorusGeometry 21 | from xars import montecarlo 22 | from xars.photons import PhotonBunch 23 | 24 | #rng.seed(0) 25 | 26 | import argparse 27 | import os 28 | 29 | parser = argparse.ArgumentParser( 30 | description="""Monte-Carlo simulator for X-ray obscurer geometries""", 31 | epilog="""(C) Johannes Buchner, 2013-2016. Based on work by Murray Brightman & Kirpal Nandra (see 2011 publication)""") 32 | 33 | parser.add_argument('--geometry', type=str, required=True, help='Geometry file') 34 | parser.add_argument('--nevents', type=int, default=1000000, help='number of input photons per energy bin') 35 | parser.add_argument('--verbose', default=False, help='Be more talkative, show debug statistics on interactions', action='store_true') 36 | args = parser.parse_args() 37 | 38 | nmu = 3 # number of viewing angle bins 39 | nh_bins = numpy.array([9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 40 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 41 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 42 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 43 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 44 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 45 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 46 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 47 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 48 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 49 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 50 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 51 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 52 | 7.08000000e+03, 1.00000000e+04]) 53 | 54 | n_nh_bins = len(nh_bins) 55 | binmapfunction = lambda beta, alpha: (numpy.round(0.5 + nmu * numpy.abs(cos(beta))) - 1).astype(int) 56 | 57 | prefix = args.geometry + '_out' 58 | 59 | geometry = ClumpyTorusGeometry(args.geometry, verbose=args.verbose) 60 | geometry.viz() 61 | plt.savefig(prefix + "geometry.pdf") 62 | plt.savefig(prefix + "geometry.png") 63 | plt.close() 64 | 65 | def compute_normalisation(prefix, binmapfunction, verbose=False, nphot=1000000): 66 | # use 40000 rays in random directions 67 | import astropy.io.fits as pyfits 68 | if verbose: 69 | print('computing normalisation ...') 70 | photons = PhotonBunch(i=100, nphot=nphot, verbose=verbose, geometry=geometry) 71 | # vertical bin, i.e. which viewing angle can see this photon? 
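# Added explanation: because the input photons are launched isotropically, the
# fraction of photons falling into each (NH, viewing-angle) bin below measures the
# solid-angle fraction of the sky covered by that bin; the table-export scripts that
# read normalisation.fits (e.g. createwadatoruscutofftable.py) divide by this fraction.
# Only the launch directions matter here, so a single input energy bin is used.
# binmapfunction maps |cos(beta)| onto the nmu viewing-angle bins: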
72 | mbin = numpy.asarray(binmapfunction(beta=photons.beta, alpha=photons.alpha)).astype(numpy.uint) 73 | # highest bin exceeded due to rounding 74 | mbin[mbin == nmu] = nmu - 1 75 | 76 | # bin in NH 77 | if verbose: 78 | print(' computing LOS NH ...') 79 | nh = geometry.compute_los_nh(photons.beta, photons.alpha) 80 | if verbose: 81 | print(' computing LOS NH ... done') 82 | nh[nh<1e-2] = 1e-2 83 | # make the bins from 0 to 6 spread out over n_nh_bins 84 | kbin = ((log10(nh) + 2) * n_nh_bins / (4 + 2)).astype(int) 85 | kbin[kbin == n_nh_bins] = n_nh_bins - 1 86 | 87 | mkbin = kbin * nmu + mbin 88 | 89 | # compute fraction in the given NH/mu bins 90 | counts, xedges = numpy.histogram(mkbin, bins=list(range(nmu*n_nh_bins+1))) 91 | normalisation = counts * 1. / len(mkbin) 92 | print(normalisation, normalisation.shape) 93 | hdu = pyfits.PrimaryHDU(normalisation) 94 | import datetime, time 95 | now = datetime.datetime.fromtimestamp(time.time()) 96 | nowstr = now.isoformat() 97 | nowstr = nowstr[:nowstr.rfind('.')] 98 | hdu.header['CREATOR'] = """Johannes Buchner """ 99 | hdu.header['DATE'] = nowstr 100 | hdu.header['METHOD'] = 'Monte-Carlo simulation code' 101 | hdu.header['NPHOT'] = nphot 102 | if verbose: 103 | print(' saving ...') 104 | hdu.writeto(prefix + "normalisation.fits", overwrite=True) 105 | if verbose: 106 | print(' saving ... done') 107 | return normalisation 108 | 109 | if not os.path.exists(prefix + "normalisation.fits"): 110 | compute_normalisation(prefix, binmapfunction=binmapfunction, verbose=True) 111 | 112 | def run(prefix, nphot, nmu, n_nh_bins, geometry, binmapfunction, verbose=False): 113 | rdata_transmit = numpy.zeros((nbins, nbins, nmu*n_nh_bins)) 114 | rdata_reflect = numpy.zeros((nbins, nbins, nmu*n_nh_bins)) 115 | #rdata = [0] * nbins 116 | energy_lo, energy_hi = bin2energy(list(range(nbins))) 117 | 118 | binrange = [list(range(nbins+1)), list(range(nmu*n_nh_bins+1))] 119 | for i in tqdm.trange(nbins-1, -1, -1): 120 | photons = PhotonBunch(i=i, nphot=nphot, verbose=verbose, geometry=geometry) 121 | for n_interactions in range(1000): 122 | emission, more = photons.pump() 123 | if emission is None and not more: 124 | break 125 | if emission is None: 126 | continue 127 | if len(emission['energy']) == 0: 128 | if not more: 129 | break 130 | continue 131 | if verbose: print(' received %d emitted photons (after %d interactions)' % (len(emission['energy']), n_interactions)) 132 | beta = emission['beta'] 133 | alpha = emission['alpha'] 134 | assert (beta <= pi).all(), beta 135 | assert (beta >= 0).all(), beta 136 | 137 | # vertical bin, i.e. which viewing angle can see this photon? 
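# Added explanation: escaped photons are binned by escape direction
# (|cos(beta)| -> nmu bins) and, a few lines below, by the line-of-sight NH in that
# direction; photons that escape without any interaction (n_interactions < 1) are
# later accumulated in rdata_transmit, all others in rdata_reflect.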
138 | mbin = numpy.asarray(binmapfunction(beta=beta, alpha=alpha)).astype(numpy.uint) 139 | # highest bin exceeded due to rounding 140 | mbin[mbin == nmu] = nmu - 1 141 | 142 | # bin in NH 143 | nh = geometry.compute_los_nh(beta, alpha) 144 | nh[nh<1e-2] = 1e-2 145 | kbin = ((log10(nh) + 2) * n_nh_bins / (4 + 2)).astype(int) 146 | kbin[kbin == n_nh_bins] = n_nh_bins - 1 147 | 148 | mkbin = kbin * nmu + mbin 149 | 150 | bins = emission['binid'] 151 | # produce unique array bins, mbin which contains counts 152 | counts, xedges, yedges = numpy.histogram2d(bins, mkbin, bins=binrange) 153 | # record into histogram if it landed within relevant range 154 | if n_interactions < 1: 155 | rdata_transmit[i] += counts 156 | else: 157 | rdata_reflect[i] += counts 158 | del counts, emission, bins 159 | if not more: 160 | break 161 | del photons 162 | 163 | return (rdata_transmit, rdata_reflect), nphot 164 | 165 | rdata, nphot = run(prefix, nphot = args.nevents, nmu = nmu, n_nh_bins = n_nh_bins, geometry=geometry, 166 | binmapfunction = binmapfunction, verbose=args.verbose) 167 | 168 | rdata_transmit, rdata_reflect = rdata 169 | montecarlo.store(prefix + 'transmit', nphot, rdata_transmit, nmu*n_nh_bins, plot=False) 170 | montecarlo.store(prefix + 'reflect', nphot, rdata_reflect, nmu*n_nh_bins, plot=False) 171 | rdata_transmit += rdata_reflect 172 | del rdata_reflect 173 | montecarlo.store(prefix, nphot, rdata_transmit, nmu*n_nh_bins, plot=False) 174 | 175 | -------------------------------------------------------------------------------- /examples/torusG.py: -------------------------------------------------------------------------------- 1 | """ 2 | Monte-Carlo simulator for X-ray obscurer geometries 3 | 4 | Literature: 5 | 6 | * Brightman & Nandra (2011) 7 | * Leahy & Creighton (1993) 8 | """ 9 | import numpy 10 | from numpy import pi, cos, log10 11 | import tqdm 12 | from xars.binning import nbins, energy2bin, bin2energy 13 | import matplotlib as mpl 14 | mpl.use('Agg') 15 | import matplotlib.pyplot as plt 16 | 17 | rng = numpy.random 18 | 19 | from xars.geometries.hydrotorus import HydroTorusGeometry 20 | from xars import montecarlo 21 | from xars.photons import PhotonBunch 22 | 23 | #rng.seed(0) 24 | 25 | import argparse 26 | import os 27 | 28 | parser = argparse.ArgumentParser( 29 | description="""Monte-Carlo simulator for X-ray obscurer geometries""", 30 | epilog="""(C) Johannes Buchner, 2013-2016. 
Based on work by Murray Brightman & Kirpal Nandra (see 2011 publication)""") 31 | 32 | parser.add_argument('--geometry', type=str, required=True, help='Geometry file') 33 | parser.add_argument('--nevents', type=int, default=1000000, help='number of input photons per energy bin') 34 | parser.add_argument('--verbose', default=False, help='Be more talkative, show debug statistics on interactions', action='store_true') 35 | args = parser.parse_args() 36 | 37 | nmu = 3 # number of viewing angle bins 38 | nh_bins = numpy.array([9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 39 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 40 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 41 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 42 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 43 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 44 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 45 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 46 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 47 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 48 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 49 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 50 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 51 | 7.08000000e+03, 1.00000000e+04]) 52 | 53 | n_nh_bins = len(nh_bins) 54 | binmapfunction = lambda beta, alpha: (numpy.round(0.5 + nmu * numpy.abs(cos(beta))) - 1).astype(int) 55 | 56 | prefix = args.geometry + '_out' 57 | 58 | geometry = HydroTorusGeometry(args.geometry, verbose=args.verbose) 59 | geometry.viz() 60 | plt.savefig(prefix + "geometry.pdf", bbox_inches='tight', pad_inches = 0) 61 | plt.savefig(prefix + "geometry.png", bbox_inches='tight', pad_inches = 0) 62 | plt.close() 63 | 64 | def compute_normalisation(prefix, binmapfunction, verbose=False, nphot=1000000): 65 | # use 40000 rays in random directions 66 | import astropy.io.fits as pyfits 67 | if verbose: 68 | print('computing normalisation ...') 69 | photons = PhotonBunch(i=100, nphot=nphot, verbose=verbose, geometry=geometry) 70 | # vertical bin, i.e. which viewing angle can see this photon? 71 | mbin = numpy.asarray(binmapfunction(beta=photons.beta, alpha=photons.alpha)).astype(numpy.uint) 72 | # highest bin exceeded due to rounding 73 | mbin[mbin == nmu] = nmu - 1 74 | 75 | # bin in NH 76 | if verbose: 77 | print(' computing LOS NH ...') 78 | nh = geometry.compute_los_nh(photons.beta, photons.alpha) 79 | if verbose: 80 | print(' computing LOS NH ... done') 81 | nh[nh<1e-2] = 1e-2 82 | kbin = ((log10(nh) + 2) * n_nh_bins / (4 + 2)).astype(int) 83 | kbin[kbin == n_nh_bins] = n_nh_bins - 1 84 | 85 | mkbin = kbin * nmu + mbin 86 | 87 | # compute fraction in the given NH/mu bins 88 | counts, xedges = numpy.histogram(mkbin, bins=list(range(nmu*n_nh_bins+1))) 89 | normalisation = counts * 1. / len(mkbin) 90 | hdu = pyfits.PrimaryHDU(normalisation) 91 | import datetime, time 92 | now = datetime.datetime.fromtimestamp(time.time()) 93 | nowstr = now.isoformat() 94 | nowstr = nowstr[:nowstr.rfind('.')] 95 | hdu.header['CREATOR'] = """Johannes Buchner """ 96 | hdu.header['DATE'] = nowstr 97 | hdu.header['METHOD'] = 'Monte-Carlo simulation code' 98 | hdu.header['NPHOT'] = nphot 99 | if verbose: 100 | print(' saving ...') 101 | hdu.writeto(prefix + "normalisation.fits", overwrite=True) 102 | if verbose: 103 | print(' saving ... 
done') 104 | return normalisation 105 | 106 | if not os.path.exists(prefix + "normalisation.fits"): 107 | compute_normalisation(prefix, binmapfunction=binmapfunction, verbose=True) 108 | 109 | #outphotons = open(prefix + 'photons.txt', 'a') 110 | 111 | def run(prefix, nphot, nmu, n_nh_bins, geometry, binmapfunction, verbose=False): 112 | rdata_transmit = numpy.zeros((nbins, nbins, nmu*n_nh_bins)) 113 | rdata_reflect = numpy.zeros((nbins, nbins, nmu*n_nh_bins)) 114 | #rdata = [0] * nbins 115 | energy_lo, energy_hi = bin2energy(list(range(nbins))) 116 | 117 | binrange = [list(range(nbins+1)), list(range(nmu*n_nh_bins+1))] 118 | for i in tqdm.trange(nbins - 1, -1, -1): 119 | photons = PhotonBunch(i=i, nphot=nphot, verbose=verbose, geometry=geometry) 120 | for n_interactions in range(1000): 121 | emission, more = photons.pump() 122 | if emission is None and not more: 123 | break 124 | if emission is None: 125 | continue 126 | if len(emission['energy']) == 0: 127 | if not more: 128 | break 129 | continue 130 | if verbose: print(' received %d emitted photons (after %d interactions)' % (len(emission['energy']), n_interactions)) 131 | beta = emission['beta'] 132 | alpha = emission['alpha'] 133 | assert (beta <= pi).all(), beta 134 | assert (beta >= 0).all(), beta 135 | 136 | # vertical bin, i.e. which viewing angle can see this photon? 137 | mbin = numpy.asarray(binmapfunction(beta=beta, alpha=alpha)).astype(numpy.uint) 138 | # highest bin exceeded due to rounding 139 | mbin[mbin == nmu] = nmu - 1 140 | 141 | #if False and n_interactions > 0: 142 | # numpy.savetxt(outphotons, numpy.transpose([ 143 | # energy[i] * numpy.ones_like(emission['x']), 144 | # emission['energy'], 145 | # emission['x'], emission['y'], emission['z'], 146 | # emission['beta'], emission['alpha']])) 147 | # outphotons.flush() 148 | 149 | # bin in NH 150 | nh = geometry.compute_los_nh(beta, alpha) 151 | nh[nh<1e-2] = 1e-2 152 | kbin = ((log10(nh) + 2) * n_nh_bins / (4 + 2)).astype(int) 153 | kbin[kbin == n_nh_bins] = n_nh_bins - 1 154 | 155 | mkbin = kbin * nmu + mbin 156 | 157 | bins = emission['binid'] 158 | # produce unique array bins, mbin which contains counts 159 | counts, xedges, yedges = numpy.histogram2d(bins, mkbin, bins=binrange) 160 | # record into histogram if it landed within relevant range 161 | if n_interactions < 1: 162 | rdata_transmit[i] += counts 163 | else: 164 | rdata_reflect[i] += counts 165 | del counts, emission, bins 166 | if not more: 167 | break 168 | del photons 169 | 170 | return (rdata_transmit, rdata_reflect), nphot 171 | 172 | (rdata_transmit, rdata_reflect), nphot = run(prefix, nphot = args.nevents, nmu = nmu, n_nh_bins = n_nh_bins, geometry=geometry, 173 | binmapfunction = binmapfunction, verbose=args.verbose) 174 | 175 | montecarlo.store(prefix + 'transmit', nphot, rdata_transmit, nmu*n_nh_bins, plot=False) 176 | montecarlo.store(prefix + 'reflect', nphot, rdata_reflect, nmu*n_nh_bins, plot=False) 177 | rdata_transmit += rdata_reflect 178 | del rdata_reflect 179 | montecarlo.store(prefix, nphot, rdata_transmit, nmu*n_nh_bins, plot=True) 180 | 181 | -------------------------------------------------------------------------------- /xspecexport/createxspecmodel.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import pi 3 | import astropy.io.fits as pyfits 4 | import h5py 5 | import sys 6 | from binning import nbins, energy2bin, bin2energy 7 | 8 | energy_lo, energy_hi = bin2energy(numpy.arange(nbins)) 9 | energy = (energy_hi + 
energy_lo) / 2 10 | deltae = energy_hi - energy_lo 11 | 12 | table = [] 13 | PhoIndices = [ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 14 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 15 | 3. ] 16 | ThetaIncs = [ 18.20000076, 31.79999924, 41.40000153, 49.5 , 17 | 56.59999847, 63.29999924, 69.5 , 75.5 , 18 | 81.40000153, 87.09999847] 19 | ThetaTors = [25.79999924, 36.90000153, 45.59999847, 53.09999847, 20 | 60. , 66.40000153, 72.5 , 78.5 , 21 | 84.30000305] 22 | data = {} 23 | 24 | def readfile(filename): 25 | if filename.endswith('.hdf5') or filename.endswith('.h5'): 26 | f = h5py.File(filename, 'r') 27 | matrix = f['rdata'] 28 | header = f.attrs 29 | else: 30 | f = pyfits.open(filename) 31 | header = f[0].header 32 | matrix = f[0].data 33 | return header, matrix 34 | 35 | for filename in sys.argv[1:]: 36 | print('loading', filename) 37 | header, matrix = readfile(filename) 38 | #f = pyfits.open(filename) 39 | nh = float(header['NH']) 40 | opening = float(header['OPENING']) * 180 / pi 41 | nphot = int(header['NPHOT']) 42 | opening = [thetator for thetator in ThetaTors if numpy.abs(opening - thetator) < 0.1][0] 43 | a, b, nmu = matrix.shape 44 | assert a == nbins, matrix.shape 45 | assert b == nbins, matrix.shape 46 | #data[(nh, opening)] = [(nphot, f[0].data)] 47 | 48 | for PhoIndex in PhoIndices: 49 | weights = (energy**-PhoIndex * deltae).reshape((-1,1)) 50 | # go through viewing angles 51 | for mu, ThetaInc in enumerate(ThetaIncs[::-1]): 52 | y = (weights * matrix[:,:,mu]).sum(axis=0) / (nphot / 10.) 53 | print(nh, PhoIndex, opening, ThetaInc) #, (y/deltae)[energy_lo >= 1][0] 54 | #print ' ', (weights * matrix[:,:,mu]).sum(axis=0), deltae, (nphot / 1000000.) 55 | #assert numpy.any(y > 0), y 56 | table.append(((nh, PhoIndex, opening, ThetaInc), y)) 57 | 58 | hdus = [] 59 | hdu = pyfits.PrimaryHDU() 60 | import datetime, time 61 | now = datetime.datetime.fromtimestamp(time.time()) 62 | nowstr = now.isoformat() 63 | nowstr = nowstr[:nowstr.rfind('.')] 64 | hdu.header['CREATOR'] = """Johannes Buchner """ 65 | hdu.header['DATE'] = nowstr 66 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 67 | hdu.header['MODLNAME'] = 'torus' 68 | hdu.header['ADDMODEL'] = True 69 | hdu.header['MODLUNIT'] = 'photons/cm^2/s' 70 | hdu.header['EXTEND'] = True 71 | hdu.header['REDSHIFT'] = True 72 | hdu.header['SIMPLE'] = True 73 | hdu.header['HDUDOC'] = 'OGIP/92-009' 74 | hdu.header['HDUVERS1'] = '1.0.0' 75 | hdu.header['HDUCLASS'] = 'OGIP' 76 | hdus.append(hdu) 77 | 78 | # NAME, METHOD, INITIAL, DELTA, MINIMUM, BOTTOM, TOP, MAXIMUM, NUMBVALS, VALUE (41) 79 | dtype = [('NAME', 'S12'), ('METHOD', '>i4'), ('INITIAL', '>f4'), ('DELTA', '>f4'), ('MINIMUM', '>f4'), ('BOTTOM', '>f4'), ('TOP', '>f4'), ('MAXIMUM', '>f4'), ('NUMBVALS', '>i4'), ('VALUE', '>f4', (41,))] 80 | 81 | parameters = numpy.array([ 82 | ('nH', 1, 10.0, 1.0, 0.0099999998, 0.0099999998, 10000.0, 10000.0, 41, numpy.array([ 9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 83 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 84 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 85 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 86 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 87 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 88 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 89 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 90 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 91 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 92 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 93 | 8.91000000e+02, 1.26000000e+03, 
1.78000000e+03, 94 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 95 | 7.08000000e+03, 1.00000000e+04])), 96 | ('PhoIndex', 0, 2.0, 0.0099999998, 1.0, 1.2, 2.8, 3.0, 11, numpy.array([ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 97 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 98 | 3. , 0. , 0. , 0. , 0. , 99 | 0. , 0. , 0. , 0. , 0. , 100 | 0. , 0. , 0. , 0. , 0. , 101 | 0. , 0. , 0. , 0. , 0. , 102 | 0. , 0. , 0. , 0. , 0. , 103 | 0. , 0. , 0. , 0. , 0. , 0. ])), 104 | ('Theta_tor', 0, 60.0, 5.0, 0.0, 25.799999, 84.300003, 90.0, 9, numpy.array([ 25.79999924, 36.90000153, 45.59999847, 53.09999847, 105 | 60. , 66.40000153, 72.5 , 78.5 , 106 | 84.30000305, 0. , 0. , 0. , 107 | 0. , 0. , 0. , 0. , 108 | 0. , 0. , 0. , 0. , 109 | 0. , 0. , 0. , 0. , 110 | 0. , 0. , 0. , 0. , 111 | 0. , 0. , 0. , 0. , 112 | 0. , 0. , 0. , 0. , 113 | 0. , 0. , 0. , 0. , 0. ])), 114 | ('Theta_inc', 0, 18.200001, 5.0, 0.0, 18.200001, 87.099998, 90.0, 10, numpy.array([ 18.20000076, 31.79999924, 41.40000153, 49.5 , 115 | 56.59999847, 63.29999924, 69.5 , 75.5 , 116 | 81.40000153, 87.09999847, 0. , 0. , 117 | 0. , 0. , 0. , 0. , 118 | 0. , 0. , 0. , 0. , 119 | 0. , 0. , 0. , 0. , 120 | 0. , 0. , 0. , 0. , 121 | 0. , 0. , 0. , 0. , 122 | 0. , 0. , 0. , 0. , 123 | 0. , 0. , 0. , 0. , 0. ])), 124 | ], dtype=dtype) 125 | hdu = pyfits.BinTableHDU(data=parameters) 126 | hdu.header['DATE'] = nowstr 127 | hdu.header['EXTNAME'] = 'PARAMETERS' 128 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 129 | hdu.header['HDUVERS1'] = '1.0.0' 130 | hdu.header['NINTPARM'] = len(parameters) 131 | hdu.header['NADDPARM'] = 0 132 | hdu.header['HDUCLAS2'] = 'PARAMETERS' 133 | hdus.append(hdu) 134 | 135 | # ENERG_LO, ENERG_HI 136 | dtype = [('ENERG_LO', '>f4'), ('ENERG_HI', '>f4')] 137 | energies = numpy.array(list(zip(energy_lo, energy_hi)), dtype=dtype) 138 | hdu = pyfits.BinTableHDU(data=energies) 139 | hdu.header['DATE'] = nowstr 140 | hdu.header['EXTNAME'] = 'ENERGIES' 141 | hdu.header['TUNIT2'] = 'keV' 142 | hdu.header['TUNIT1'] = 'keV' 143 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 144 | hdu.header['HDUCLAS2'] = 'ENERGIES' 145 | hdu.header['HDUVERS1'] = '1.0.0' 146 | hdus.append(hdu) 147 | 148 | # PARAMVAL (4), INTPSPEC 149 | dtype = [('PARAMVAL', '>f4', (4,)), ('INTPSPEC', '>f4', (nbins,))] 150 | table.sort() 151 | table = numpy.array(table, dtype=dtype) 152 | hdu = pyfits.BinTableHDU(data=table) 153 | hdu.header['DATE'] = nowstr 154 | hdu.header['EXTNAME'] = 'SPECTRA' 155 | hdu.header['TUNIT2'] = 'photons/cm^2/s' 156 | hdu.header['TUNIT1'] = 'none' 157 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 158 | hdu.header['HDUCLAS2'] = 'SPECTRA' 159 | hdu.header['HDUVERS1'] = '1.0.0' 160 | 161 | hdus.append(hdu) 162 | hdus = pyfits.HDUList(hdus) 163 | 164 | hdus.writeto("xspecmodel.fits", overwrite=True) 165 | 166 | 167 | -------------------------------------------------------------------------------- /doc/xars.rst: -------------------------------------------------------------------------------- 1 | XARS X-ray Monte-carlo simulator 2 | ------------------------------------ 3 | 4 | .. image:: logo3-mid.png 5 | :align: right 6 | 7 | XARS simulates X-rays propagating through matter in user-defined geometries. 8 | 9 | This code tutorial explains how to use and modify XARS. 10 | 11 | To find existing models, go back to `Models `_. 12 | 13 | Tutorial: Part I: Irradiating a programmed geometry 14 | --------------------------------------------------- 15 | 16 | In part I we look at a irradiating a user-specified geometry. 
Part II will look at 17 | a geometry made up of spherical clumps/blobs. 18 | 19 | The bash script runsphere.sh simulates a spherical obscurer with various 20 | column densities. To run a single one, for example:: 21 | 22 | $ python torus2.py --log10nh=24.2 --opening-angle=0 --nevents=1000000 --output=myoutput 23 | 24 | You can study torus2.py, and in particular its geometry definition geometries/spheretorus.py, 25 | to understand how the code works. See below for a detailed description. 26 | 27 | 28 | Tutorial: Part II: Irradiating a geometry made up of spheres 29 | --------------------------------------------------------------- 30 | 31 | In part II, we assume that your geometry can be expressed as many spheres. 32 | 33 | The example-blobs/generate_blobs.py demonstrates how to generate an hdf5 input 34 | file which describes the blobs, with their x/y/z positions and column densities. 35 | In this simple case, there is only one; in general, just enlarge the arrays. 36 | 37 | To irradiate such models, you need to install the LightRayRider library, 38 | which performs fast photon propagation via optimized C functions. 39 | Download from https://github.com/JohannesBuchner/LightRayRider, for example to 40 | $HOME/Downloads/LightRayRider/. Then compile with:: 41 | 42 | $ make -C $HOME/Downloads/LightRayRider/ 43 | 44 | This will create a ray.so object. 45 | 46 | To irradiate the output files, e.g. torusblob23.0.hdf5, run:: 47 | 48 | $ PYTHONPATH=$HOME/Downloads/LightRayRider/ python torusC.py --geometry=torusblob23.0.hdf5 --nevents=1000000 49 | 50 | To parallelise over 10 CPUs, run with:: 51 | 52 | $ OMP_NUM_THREADS=10 PYTHONPATH=$HOME/Downloads/LightRayRider/ python torusC.py --geometry=torusblob23.0.hdf5 --nevents=1000000 53 | 54 | Tutorial: Part III: Irradiating a simulation grid 55 | ------------------------------------------------------------- 56 | 57 | In part III we assume that you have created a hydrodynamic simulation on a 58 | 3D uniform grid, and want to irradiate this with X-rays. 59 | 60 | The example-grid/generate_warpeddisk.py demonstrates how to generate an hdf5 input 61 | file which describes the grid and its density, as well as the irradiation 62 | location. 63 | 64 | To irradiate such models, you need to install the LightRayRider library, 65 | which performs fast photon propagation via optimized C functions. 66 | Download from https://github.com/JohannesBuchner/LightRayRider, for example to 67 | $HOME/Downloads/LightRayRider/. Then compile with:: 68 | 69 | $ make -C $HOME/Downloads/LightRayRider/ 70 | 71 | This will create a ray.so object. 72 | 73 | To irradiate the output files, e.g. warpeddisk_1.hdf5, run:: 74 | 75 | $ PYTHONPATH=$HOME/Downloads/LightRayRider/ python torusG.py --geometry=warpeddisk_1.hdf5 --nevents=1000000 76 | 77 | To parallelise over 10 CPUs, run with:: 78 | 79 | $ OMP_NUM_THREADS=10 PYTHONPATH=$HOME/Downloads/LightRayRider/ python torusG.py --geometry=warpeddisk_1.hdf5 --nevents=1000000 80 | 81 | 82 | 83 | Outline of the code 84 | ---------------------- 85 | 86 | binning: Defines the energy binning used. 87 | 88 | xsects/__init__.py: computes Compton scattering cross-sections, loads cross-sections for lines from xsects.dat 89 | 90 | xsects/xsects.dat contains for each outgoing line its energy, yield, and a table of cross-sections leading to its emission over the energy binning. 91 | 92 | If you change the binning, xsects_convert.py can help you rebin xsects_orig.dat into xsects.dat.
93 | If you need different abundances, the Fortran code in xsects/generate/ can help you create a new xsects.dat file. 94 | You will need to insert the yields and line energies in the file header. 95 | 96 | To see an example usage, look at disk.py. 97 | Most of it specifies that the user can run this code e.g. with "python disk.py --nevents 600000 --output=output/disk", 98 | which stores the output files in output/disk* and uses 600000 events in each energy bin for estimating the energy response. 99 | 100 | The geometry is loaded "from geometries.disk import DiskGeometry". Several geometries are already defined in geometries. 101 | 102 | The binmapfunction defines how output spectra are binned across the sky. 103 | 104 | montecarlo.py: montecarlo.run runs the Monte-Carlo code and returns Green's function response matrices (these can be stored efficiently into HDF5 files with montecarlo.store). 105 | montecarlo.run goes through each energy bin and creates a package of many photons. The package is pumped through the geometry. 106 | Two things can happen in a pumping step: either the package is still scattering around at some location in the matter of the geometry, 107 | or it escapes to infinity (at which point montecarlo.py records its output energy and direction). 108 | Optionally you can also plot and print what the photons are doing. If you need something more specialised, 109 | you should write your own version of montecarlo.run. 110 | 111 | photons.py: All interactions are modelled here. This is actually fast because we are dealing with large numpy arrays of photon packages at once. 112 | PhotonBunch.__init__ sets up the photon package at the origin in random, isotropic directions. 113 | PhotonBunch.pump asks the geometry to compute how far the photon may travel, then computes the interaction taking place. 114 | If Compton scattered (marked "stuck"), a new direction is computed in the lower half of PhotonBunch.pump. 115 | PhotonBunch.pump returns photons that escaped to infinity, and should be run in a loop until no photons are left. 116 | 117 | Geometries 118 | --------------- 119 | 120 | It is easy to define geometries in XARS. geometries/disk.py shows an example. 121 | A class needs to be defined with a function compute_next_point. 122 | It receives the photon location (xi, yi, zi) and its direction (dist, beta, alpha). 123 | Dist is how far, in units of column density NH [1e22cm^-2], it should travel. 124 | Given this information, compute_next_point must compute where it ends up, 125 | in linear (xf,yf,zf) and spherical coordinates (rad, phi, theta). 126 | It also returns whether the photon has left the geometry to infinity (inside). 127 | All operations work on (large) arrays. (A minimal sketch of this interface is shown after the Parallelisation section below.) 128 | 129 | Another example is the sphere geometry in geometries/spheretorus.py. It is 130 | good practice to visualise the geometry as well. 131 | 132 | torus2.py shows how the visualisation is stored in this more elaborate example. 133 | 134 | Parallelisation 135 | ------------------- 136 | 137 | runtorus.sh shows how an array of simulations is run, exploring a grid of 138 | geometry configurations. 139 | 140 | * Irradiating different geometries is embarrassingly parallel. 141 | * For irradiating the same geometry, XARS can take advantage of multiple CPUs (see OMP_NUM_THREADS). 142 | * To parallelise over multiple machines, make sure the output files are named differently. You can combine the rdata output files with the rdataaddmultiple.py script.
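For orientation, here is a minimal sketch of the geometry interface described in the Geometries section above: a homogeneous sphere of unit radius with radial column density NH (in units of 1e22 cm^-2), irradiated from the centre. The class name, the angle conventions and the exact return signature are illustrative assumptions only; check geometries/disk.py and geometries/spheretorus.py for what XARS actually expects::

    import numpy
    from numpy import sin, cos, arccos, arctan2

    class UniformSphereGeometry(object):
        # illustrative toy geometry, not part of XARS
        def __init__(self, NH):
            self.NH = NH  # radial column density from centre to surface, in 1e22 cm^-2 units

        def compute_next_point(self, location, direction):
            (xi, yi, zi) = location
            (dist, beta, alpha) = direction
            # dist is a column density; in a homogeneous medium the geometric
            # path length is simply proportional to it
            d = dist / self.NH
            # step along the unit direction vector given by polar angle beta and azimuth alpha
            xf = xi + d * sin(beta) * cos(alpha)
            yf = yi + d * sin(beta) * sin(alpha)
            zf = zi + d * cos(beta)
            rad = numpy.sqrt(xf**2 + yf**2 + zf**2)
            phi = arccos(zf / rad)   # polar angle of the end point (assumed convention)
            theta = arctan2(yf, xf)  # azimuth of the end point (assumed convention)
            # end points beyond the unit sphere mean the photon did not find
            # enough material and has escaped
            inside = rad < 1.0
            return inside, (xf, yf, zf), (rad, phi, theta)

All quantities can be numpy arrays with one entry per photon, as noted above.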
143 | 144 | Xspec table models 145 | ------------------- 146 | 147 | At the bottom of runtorus.sh, commands are shown how to transform rdata output 148 | arrays into fits model tables that xspec can read. 149 | These scripts (in the xspecexport folder, e.g. createtorustable.py) assume a input 150 | photon spectrum (e.g. a powerlaw) and store the output spectrum into a fits file. 151 | Adjust to additional parameters and input spectra as needed. 152 | 153 | Questions and Problems 154 | -------------------------------------------- 155 | 156 | For any questions or problems with the software, please open an issue. 157 | This helps other people google the same question. 158 | 159 | 160 | 161 | 162 | -------------------------------------------------------------------------------- /xspecexport/createsmoothtorustable.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import astropy.io.fits as pyfits 3 | import h5py 4 | import sys 5 | import progressbar 6 | from binning import nbins, energy2bin, bin2energy 7 | 8 | energy_lo, energy_hi = bin2energy(numpy.arange(nbins)) 9 | energy = (energy_hi + energy_lo) / 2 10 | deltae = energy_hi - energy_lo 11 | deltae0 = deltae[energy >= 1][0] 12 | 13 | table = [] 14 | PhoIndices = [ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 15 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 16 | 3. ] 17 | nh_bins = numpy.array([9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 18 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 19 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 20 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 21 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 22 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 23 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 24 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 25 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 26 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 27 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 28 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 29 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 30 | 7.08000000e+03, 1.00000000e+04]) 31 | 32 | data = {} 33 | 34 | outfilename = sys.argv[1] 35 | prefix = sys.argv[2] 36 | sigmas = ['5_gexp2','10_gexp2','20_gexp2','30_gexp2','sphere'] 37 | sigmav = [5,10,20,30,90] 38 | opening = [90-s for s in sigmav] 39 | filenames = [prefix % o for o in sigmas] 40 | 41 | widgets = [progressbar.Percentage(), " starting ... 
", progressbar.Bar(), progressbar.ETA()] 42 | pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(filenames)*len(nh_bins)).start() 43 | 44 | 45 | for Theta_tor, filename in zip(opening, filenames): 46 | #print 'loading', filename 47 | f = h5py.File(filename) 48 | nphot = f.attrs['NPHOT'] 49 | 50 | geometry = numpy.loadtxt(filename.replace('_rdata.hdf5', '').replace('_transmitrdata.hdf5', '').replace('_reflectrdata.hdf5', '').replace('layered','')) 51 | 52 | matrix = f['rdata'] 53 | a, b, nmu = matrix.shape 54 | assert a == nbins, matrix.shape 55 | assert b == nbins, matrix.shape 56 | #data[(nh, opening)] = [(nphot, f[0].data)] 57 | 58 | last_angle = 0 59 | # go through viewing angles 60 | for mu, nh in enumerate(nh_bins): 61 | angle = geometry[mu,0] 62 | # theta = arccos(1-x) 63 | deltac = numpy.cos(last_angle) - numpy.cos(angle) 64 | last_angle = angle 65 | 66 | matrix_mu = matrix[:,:,mu] 67 | widgets[1] = '| op=%d nh=%.3f ' % (Theta_tor, nh) 68 | pbar.update(getattr(pbar, 'currval', getattr(pbar, 'value')) + 1) 69 | for PhoIndex in PhoIndices: 70 | weights = (energy**-PhoIndex * deltae).reshape((-1,1)) 71 | y = (weights * matrix_mu).sum(axis=0) / nphot * 1. / deltac 72 | #print nh, PhoIndex, Theta_tor #, (y/deltae)[energy_lo >= 1][0] 73 | #print ' ', (weights * matrix[:,:,mu]).sum(axis=0), deltae, (nphot / 1000000.) 74 | #assert numpy.any(y > 0), y 75 | table.append(((nh, PhoIndex, Theta_tor), y)) 76 | pbar.finish() 77 | 78 | hdus = [] 79 | hdu = pyfits.PrimaryHDU() 80 | import datetime, time 81 | now = datetime.datetime.fromtimestamp(time.time()) 82 | nowstr = now.isoformat() 83 | nowstr = nowstr[:nowstr.rfind('.')] 84 | hdu.header['CREATOR'] = """Johannes Buchner """ 85 | hdu.header['DATE'] = nowstr 86 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 87 | hdu.header['MODLNAME'] = 'torus' 88 | hdu.header['ADDMODEL'] = True 89 | hdu.header['MODLUNIT'] = 'photons/cm^2/s' 90 | hdu.header['EXTEND'] = True 91 | hdu.header['REDSHIFT'] = True 92 | hdu.header['SIMPLE'] = True 93 | hdu.header['HDUDOC'] = 'OGIP/92-009' 94 | hdu.header['HDUVERS1'] = '1.0.0' 95 | hdu.header['HDUCLASS'] = 'OGIP' 96 | hdus.append(hdu) 97 | 98 | # NAME, METHOD, INITIAL, DELTA, MINIMUM, BOTTOM, TOP, MAXIMUM, NUMBVALS, VALUE (41) 99 | dtype = [('NAME', 'S12'), ('METHOD', '>i4'), ('INITIAL', '>f4'), ('DELTA', '>f4'), ('MINIMUM', '>f4'), ('BOTTOM', '>f4'), ('TOP', '>f4'), ('MAXIMUM', '>f4'), ('NUMBVALS', '>i4'), ('VALUE', '>f4', (41,))] 100 | 101 | parameters = numpy.array([ 102 | ('nH', 1, 10.0, 1.0, 0.0099999998, 0.0099999998, 10000.0, 10000.0, 41, numpy.array([ 9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 103 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 104 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 105 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 106 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 107 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 108 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 109 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 110 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 111 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 112 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 113 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 114 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 115 | 7.08000000e+03, 1.00000000e+04])), 116 | ('PhoIndex', 0, 2.0, 0.0099999998, 1.0, 1.2, 2.8, 3.0, 11, numpy.array([ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 117 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 118 | 3. , 0. , 0. , 0. , 0. , 119 | 0. 
, 0. , 0. , 0. , 0. , 120 | 0. , 0. , 0. , 0. , 0. , 121 | 0. , 0. , 0. , 0. , 0. , 122 | 0. , 0. , 0. , 0. , 0. , 123 | 0. , 0. , 0. , 0. , 0. , 0. ])), 124 | ('Theta_tor', 0, 60.0, 5.0, 0.0, 0.0, 85.0, 85.0, 5, numpy.array([ 0,60,70,80,85, 125 | 0. , 0. , 0. , 126 | 0. , 0. , 0. , 0. , 127 | 0. , 0. , 0. , 0. , 128 | 0. , 0. , 0. , 0. , 129 | 0. , 0. , 0. , 0. , 130 | 0. , 0. , 0. , 0. , 131 | 0. , 0. , 0. , 0. , 132 | 0. , 0. , 0. , 0. , 133 | 0. , 0. , 0. , 0. , 0. ])), 134 | ], dtype=dtype) 135 | assert numpy.product(parameters['NUMBVALS']) == len(table), ('parameter definition does not match spectra table', parameters['NUMBVALS'], numpy.product(parameters['NUMBVALS']), len(table)) 136 | hdu = pyfits.BinTableHDU(data=parameters) 137 | hdu.header['DATE'] = nowstr 138 | hdu.header['EXTNAME'] = 'PARAMETERS' 139 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 140 | hdu.header['HDUVERS1'] = '1.0.0' 141 | hdu.header['NINTPARM'] = len(parameters) 142 | hdu.header['NADDPARM'] = 0 143 | hdu.header['HDUCLAS2'] = 'PARAMETERS' 144 | hdus.append(hdu) 145 | 146 | # ENERG_LO, ENERG_HI 147 | dtype = [('ENERG_LO', '>f4'), ('ENERG_HI', '>f4')] 148 | energies = numpy.array(list(zip(energy_lo, energy_hi)), dtype=dtype) 149 | hdu = pyfits.BinTableHDU(data=energies) 150 | hdu.header['DATE'] = nowstr 151 | hdu.header['EXTNAME'] = 'ENERGIES' 152 | hdu.header['TUNIT2'] = 'keV' 153 | hdu.header['TUNIT1'] = 'keV' 154 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 155 | hdu.header['HDUCLAS2'] = 'ENERGIES' 156 | hdu.header['HDUVERS1'] = '1.0.0' 157 | hdus.append(hdu) 158 | 159 | # PARAMVAL (4), INTPSPEC 160 | dtype = [('PARAMVAL', '>f4', (len(parameters),)), ('INTPSPEC', '>f4', (nbins,))] 161 | table.sort() 162 | table = numpy.array(table, dtype=dtype) 163 | hdu = pyfits.BinTableHDU(data=table) 164 | hdu.header['DATE'] = nowstr 165 | hdu.header['EXTNAME'] = 'SPECTRA' 166 | hdu.header['TUNIT2'] = 'photons/cm^2/s' 167 | hdu.header['TUNIT1'] = 'none' 168 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 169 | hdu.header['HDUCLAS2'] = 'SPECTRA' 170 | hdu.header['HDUVERS1'] = '1.0.0' 171 | 172 | hdus.append(hdu) 173 | hdus = pyfits.HDUList(hdus) 174 | 175 | hdus.writeto(outfilename, overwrite=True) 176 | 177 | 178 | -------------------------------------------------------------------------------- /xspecexport/createwadatoruscutofftable.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import exp 3 | import h5py 4 | import astropy.io.fits as pyfits 5 | import sys 6 | import tqdm 7 | from binning import nbins, energy2bin, bin2energy 8 | 9 | energy_lo, energy_hi = bin2energy(numpy.arange(nbins)) 10 | energy = (energy_hi + energy_lo) / 2 11 | deltae = energy_hi - energy_lo 12 | 13 | table = [] 14 | PhoIndices = [ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 15 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 16 | 3. 
] 17 | Ecuts = [ 20., 30, 40, 60, 100, 140, 200, 400 ] 18 | 19 | nh_bins = numpy.array([9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 20 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 21 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 22 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 23 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 24 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 25 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 26 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 27 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 28 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 29 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 30 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 31 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 32 | 7.08000000e+03, 1.00000000e+04]) 33 | 34 | data = {} 35 | 36 | outfilename = sys.argv[1] 37 | filename = sys.argv[2] 38 | norm_filename = sys.argv[3] 39 | nh_bins_ThetaInc = [(nh, ThetaInc) for nh in nh_bins for ThetaInc in [90,60,0]] 40 | nmu = len(nh_bins_ThetaInc) 41 | deltae0 = deltae[energy >= 1][0] 42 | 43 | f = h5py.File(filename) 44 | normalisations = pyfits.open(norm_filename)[0].data 45 | nphot = f.attrs['NPHOT'] 46 | 47 | matrix = f['rdata'] 48 | a, b, nmu = matrix.shape 49 | assert a == nbins, matrix.shape 50 | assert b == nbins, matrix.shape 51 | 52 | pbar = tqdm.tqdm(list(enumerate(zip(nh_bins_ThetaInc, normalisations)))) 53 | for mu, ((nh, ThetaInc), norm) in pbar: 54 | # go through viewing angles 55 | matrix_mu = matrix[:,:,mu] 56 | #print ' ', nh, ThetaInc 57 | pbar.set_description('| nh=%.3f inc=%02d ' % (nh, ThetaInc)) 58 | for PhoIndex in PhoIndices: 59 | for Ecut in Ecuts: 60 | weights = (energy**-PhoIndex * exp(-energy / Ecut) * deltae / deltae0).reshape((-1,1)) 61 | y = (weights * matrix_mu).sum(axis=0) / (nphot * max(norm, 1e-6) * nmu) 62 | #print ' ', (weights * matrix[:,:,mu]).sum(axis=0), deltae, (nphot / 1000000.) 
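            # weights: the intrinsic cutoff power law (E**-PhoIndex * exp(-E/Ecut))
            # integrated over each input energy bin, relative to deltae0, the width
            # of the first bin above 1 keV.
            # y: that input spectrum folded through the photon redistribution matrix
            # for this viewing bin, normalised by the number of simulated photons,
            # the per-bin normalisation from the norm file (floored at 1e-6 to avoid
            # division by zero) and the number of viewing bins (matrix.shape[2]).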
63 | #assert numpy.any(y > 0), y 64 | table.append(((nh, PhoIndex, Ecut, ThetaInc), y)) 65 | 66 | hdus = [] 67 | hdu = pyfits.PrimaryHDU() 68 | import datetime, time 69 | now = datetime.datetime.fromtimestamp(time.time()) 70 | nowstr = now.isoformat() 71 | nowstr = nowstr[:nowstr.rfind('.')] 72 | hdu.header['CREATOR'] = """Johannes Buchner """ 73 | hdu.header['DATE'] = nowstr 74 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 75 | hdu.header['MODLNAME'] = 'torus' 76 | hdu.header['ADDMODEL'] = True 77 | hdu.header['MODLUNIT'] = 'photons/cm^2/s' 78 | hdu.header['EXTEND'] = True 79 | hdu.header['REDSHIFT'] = True 80 | hdu.header['SIMPLE'] = True 81 | hdu.header['HDUDOC'] = 'OGIP/92-009' 82 | hdu.header['HDUVERS1'] = '1.0.0' 83 | hdu.header['HDUCLASS'] = 'OGIP' 84 | hdus.append(hdu) 85 | 86 | # NAME, METHOD, INITIAL, DELTA, MINIMUM, BOTTOM, TOP, MAXIMUM, NUMBVALS, VALUE (41) 87 | dtype = [('NAME', 'S12'), ('METHOD', '>i4'), ('INITIAL', '>f4'), ('DELTA', '>f4'), ('MINIMUM', '>f4'), ('BOTTOM', '>f4'), ('TOP', '>f4'), ('MAXIMUM', '>f4'), ('NUMBVALS', '>i4'), ('VALUE', '>f4', (41,))] 88 | 89 | parameters = numpy.array([ 90 | ('nH', 1, 10.0, 1.0, 0.0099999998, 0.0099999998, 10000.0, 10000.0, 41, numpy.array([ 9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 91 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 92 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 93 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 94 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 95 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 96 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 97 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 98 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 99 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 100 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 101 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 102 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 103 | 7.08000000e+03, 1.00000000e+04])), 104 | ('PhoIndex', 0, 2.0, 0.0099999998, 1.0, 1.2, 2.8, 3.0, 11, numpy.array([ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 105 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 106 | 3. , 0. , 0. , 0. , 0. , 107 | 0. , 0. , 0. , 0. , 0. , 108 | 0. , 0. , 0. , 0. , 0. , 109 | 0. , 0. , 0. , 0. , 0. , 110 | 0. , 0. , 0. , 0. , 0. , 111 | 0. , 0. , 0. , 0. , 0. , 0. ])), 112 | ('Ecut', 0, 100.0, 10.0, 20, 20, 400, 400, 8, numpy.array([ 20. , 30, 40, 60, 100, 113 | 140 , 200, 400 , 0 , 0, 114 | 0. , 0. , 0. , 0. , 0. , 115 | 0. , 0. , 0. , 0. , 0. , 116 | 0. , 0. , 0. , 0. , 0. , 117 | 0. , 0. , 0. , 0. , 0. , 118 | 0. , 0. , 0. , 0. , 0. , 119 | 0. , 0. , 0. , 0. , 0. , 0. ])), 120 | ('Theta_inc', 0, 18.200001, 5.0, 0.0, 18.200001, 87.099998, 90.0, 3, numpy.array([ 0, 60, 90, 0.0 , 121 | 0. , 0. , 0. , 0. , 122 | 0. , 0. , 0. , 0. , 123 | 0. , 0. , 0. , 0. , 124 | 0. , 0. , 0. , 0. , 125 | 0. , 0. , 0. , 0. , 126 | 0. , 0. , 0. , 0. , 127 | 0. , 0. , 0. , 0. , 128 | 0. , 0. , 0. , 0. , 129 | 0. , 0. , 0. , 0. , 0. 
])), 130 | ], dtype=dtype) 131 | assert numpy.product(parameters['NUMBVALS']) == len(table), ('parameter definition does not match spectra table', parameters['NUMBVALS'], numpy.product(parameters['NUMBVALS']), len(table)) 132 | hdu = pyfits.BinTableHDU(data=parameters) 133 | hdu.header['DATE'] = nowstr 134 | hdu.header['EXTNAME'] = 'PARAMETERS' 135 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 136 | hdu.header['HDUVERS1'] = '1.0.0' 137 | hdu.header['NINTPARM'] = len(parameters) 138 | hdu.header['NADDPARM'] = 0 139 | hdu.header['HDUCLAS2'] = 'PARAMETERS' 140 | hdus.append(hdu) 141 | 142 | # ENERG_LO, ENERG_HI 143 | dtype = [('ENERG_LO', '>f4'), ('ENERG_HI', '>f4')] 144 | energies = numpy.array(list(zip(energy_lo, energy_hi)), dtype=dtype) 145 | hdu = pyfits.BinTableHDU(data=energies) 146 | hdu.header['DATE'] = nowstr 147 | hdu.header['EXTNAME'] = 'ENERGIES' 148 | hdu.header['TUNIT2'] = 'keV' 149 | hdu.header['TUNIT1'] = 'keV' 150 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 151 | hdu.header['HDUCLAS2'] = 'ENERGIES' 152 | hdu.header['HDUVERS1'] = '1.0.0' 153 | hdus.append(hdu) 154 | 155 | # PARAMVAL (4), INTPSPEC 156 | dtype = [('PARAMVAL', '>f4', (len(parameters),)), ('INTPSPEC', '>f4', (nbins,))] 157 | table.sort() 158 | table = numpy.array(table, dtype=dtype) 159 | hdu = pyfits.BinTableHDU(data=table) 160 | hdu.header['DATE'] = nowstr 161 | hdu.header['EXTNAME'] = 'SPECTRA' 162 | hdu.header['TUNIT2'] = 'photons/cm^2/s' 163 | hdu.header['TUNIT1'] = 'none' 164 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 165 | hdu.header['HDUCLAS2'] = 'SPECTRA' 166 | hdu.header['HDUVERS1'] = '1.0.0' 167 | 168 | hdus.append(hdu) 169 | hdus = pyfits.HDUList(hdus) 170 | 171 | hdus.writeto(outfilename, overwrite=True) 172 | 173 | 174 | -------------------------------------------------------------------------------- /xspecexport/createtorustable.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import pi 3 | import astropy.io.fits as pyfits 4 | import h5py 5 | import sys 6 | import progressbar 7 | from binning import nbins, energy2bin, bin2energy 8 | 9 | energy_lo, energy_hi = bin2energy(numpy.arange(nbins)) 10 | energy = (energy_hi + energy_lo) / 2 11 | deltae = energy_hi - energy_lo 12 | deltae0 = deltae[energy >= 1][0] 13 | 14 | table = [] 15 | PhoIndices = [ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 16 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 17 | 3. ] 18 | ThetaIncs = [ 18.20000076, 31.79999924, 41.40000153, 49.5 , 19 | 56.59999847, 63.29999924, 69.5 , 75.5 , 20 | 81.40000153, 87.09999847] 21 | ThetaTors = [25.79999924, 36.90000153, 45.59999847, 53.09999847, 22 | 60. , 66.40000153, 72.5 , 78.5 , 23 | 84.30000305] 24 | data = {} 25 | 26 | outfilename = sys.argv[1] 27 | models = sys.argv[2:] 28 | 29 | widgets = [progressbar.Percentage(), " starting ... 
", progressbar.Bar(), progressbar.ETA()] 30 | pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(models)*len(ThetaIncs)).start() 31 | 32 | for filename in models: 33 | #print 'loading', filename 34 | f = h5py.File(filename) 35 | nphot = f.attrs['NPHOT'] 36 | nh = float(f.attrs['NH']) 37 | opening = float(f.attrs['OPENING']) * 180 / pi 38 | 39 | matrix = f['rdata'] 40 | 41 | #nh = float(f[0].header['NH']) 42 | #opening = float(f[0].header['OPENING']) * 180 / pi 43 | opening = [thetator for thetator in ThetaTors if numpy.abs(opening - thetator) < 0.1][0] 44 | #nphot = int(f[0].header['NPHOT']) 45 | #matrix = f[0].data 46 | a, b, nmu = matrix.shape 47 | assert a == nbins, matrix.shape 48 | assert b == nbins, matrix.shape 49 | #data[(nh, opening)] = [(nphot, f[0].data)] 50 | 51 | # go through viewing angles 52 | for mu, ThetaInc in enumerate(ThetaIncs[::-1]): 53 | matrix_mu = matrix[:,:,mu] 54 | widgets[1] = '| op=%d nh=%.3f inc=%02d ' % (opening, nh, ThetaInc) 55 | pbar.update(getattr(pbar, 'currval', getattr(pbar, 'value')) + 1) 56 | for PhoIndex in PhoIndices: 57 | spectrum = energy**-PhoIndex 58 | spectrum[1150:] = 0 59 | weights = (spectrum * deltae / deltae0).reshape((-1,1)) 60 | y = (weights * matrix_mu).sum(axis=0) / nphot / 10. 61 | #print nh, PhoIndex, opening, ThetaInc #, (y/deltae)[energy_lo >= 1][0] 62 | #print ' ', (weights * matrix[:,:,mu]).sum(axis=0), deltae, (nphot / 1000000.) 63 | #assert numpy.any(y > 0), y 64 | table.append(((nh, PhoIndex, opening, ThetaInc), y)) 65 | pbar.finish() 66 | 67 | hdus = [] 68 | hdu = pyfits.PrimaryHDU() 69 | import datetime, time 70 | now = datetime.datetime.fromtimestamp(time.time()) 71 | nowstr = now.isoformat() 72 | nowstr = nowstr[:nowstr.rfind('.')] 73 | hdu.header['CREATOR'] = """Johannes Buchner """ 74 | hdu.header['DATE'] = nowstr 75 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 76 | hdu.header['MODLNAME'] = 'torus' 77 | hdu.header['ADDMODEL'] = True 78 | hdu.header['MODLUNIT'] = 'photons/cm^2/s' 79 | hdu.header['EXTEND'] = True 80 | hdu.header['REDSHIFT'] = True 81 | hdu.header['SIMPLE'] = True 82 | hdu.header['HDUDOC'] = 'OGIP/92-009' 83 | hdu.header['HDUVERS1'] = '1.0.0' 84 | hdu.header['HDUCLASS'] = 'OGIP' 85 | hdus.append(hdu) 86 | 87 | # NAME, METHOD, INITIAL, DELTA, MINIMUM, BOTTOM, TOP, MAXIMUM, NUMBVALS, VALUE (41) 88 | dtype = [('NAME', 'S12'), ('METHOD', '>i4'), ('INITIAL', '>f4'), ('DELTA', '>f4'), ('MINIMUM', '>f4'), ('BOTTOM', '>f4'), ('TOP', '>f4'), ('MAXIMUM', '>f4'), ('NUMBVALS', '>i4'), ('VALUE', '>f4', (41,))] 89 | 90 | parameters = numpy.array([ 91 | ('nH', 1, 10.0, 1.0, 0.0099999998, 0.0099999998, 10000.0, 10000.0, 41, numpy.array([ 9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 92 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 93 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 94 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 95 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 96 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 97 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 98 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 99 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 100 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 101 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 102 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 103 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 104 | 7.08000000e+03, 1.00000000e+04])), 105 | ('PhoIndex', 0, 2.0, 0.0099999998, 1.0, 1.2, 2.8, 3.0, 11, numpy.array([ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 106 | 2. 
, 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 107 | 3. , 0. , 0. , 0. , 0. , 108 | 0. , 0. , 0. , 0. , 0. , 109 | 0. , 0. , 0. , 0. , 0. , 110 | 0. , 0. , 0. , 0. , 0. , 111 | 0. , 0. , 0. , 0. , 0. , 112 | 0. , 0. , 0. , 0. , 0. , 0. ])), 113 | ('Theta_tor', 0, 60.0, 5.0, 0.0, 25.799999, 84.300003, 90.0, 9, numpy.array([ 25.79999924, 36.90000153, 45.59999847, 53.09999847, 114 | 60. , 66.40000153, 72.5 , 78.5 , 115 | 84.30000305, 0. , 0. , 0. , 116 | 0. , 0. , 0. , 0. , 117 | 0. , 0. , 0. , 0. , 118 | 0. , 0. , 0. , 0. , 119 | 0. , 0. , 0. , 0. , 120 | 0. , 0. , 0. , 0. , 121 | 0. , 0. , 0. , 0. , 122 | 0. , 0. , 0. , 0. , 0. ])), 123 | ('Theta_inc', 0, 18.200001, 5.0, 0.0, 18.200001, 87.099998, 90.0, 10, numpy.array([ 18.20000076, 31.79999924, 41.40000153, 49.5 , 124 | 56.59999847, 63.29999924, 69.5 , 75.5 , 125 | 81.40000153, 87.09999847, 0. , 0. , 126 | 0. , 0. , 0. , 0. , 127 | 0. , 0. , 0. , 0. , 128 | 0. , 0. , 0. , 0. , 129 | 0. , 0. , 0. , 0. , 130 | 0. , 0. , 0. , 0. , 131 | 0. , 0. , 0. , 0. , 132 | 0. , 0. , 0. , 0. , 0. ])), 133 | ], dtype=dtype) 134 | assert numpy.product(parameters['NUMBVALS']) == len(table), ('parameter definition does not match spectra table', parameters['NUMBVALS'], numpy.product(parameters['NUMBVALS']), len(table)) 135 | hdu = pyfits.BinTableHDU(data=parameters) 136 | hdu.header['DATE'] = nowstr 137 | hdu.header['EXTNAME'] = 'PARAMETERS' 138 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 139 | hdu.header['HDUVERS1'] = '1.0.0' 140 | hdu.header['NINTPARM'] = len(parameters) 141 | hdu.header['NADDPARM'] = 0 142 | hdu.header['HDUCLAS2'] = 'PARAMETERS' 143 | hdus.append(hdu) 144 | 145 | # ENERG_LO, ENERG_HI 146 | dtype = [('ENERG_LO', '>f4'), ('ENERG_HI', '>f4')] 147 | energies = numpy.array(list(zip(energy_lo, energy_hi)), dtype=dtype) 148 | hdu = pyfits.BinTableHDU(data=energies) 149 | hdu.header['DATE'] = nowstr 150 | hdu.header['EXTNAME'] = 'ENERGIES' 151 | hdu.header['TUNIT2'] = 'keV' 152 | hdu.header['TUNIT1'] = 'keV' 153 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 154 | hdu.header['HDUCLAS2'] = 'ENERGIES' 155 | hdu.header['HDUVERS1'] = '1.0.0' 156 | hdus.append(hdu) 157 | 158 | # PARAMVAL (4), INTPSPEC 159 | dtype = [('PARAMVAL', '>f4', (len(parameters),)), ('INTPSPEC', '>f4', (nbins,))] 160 | table.sort() 161 | table = numpy.array(table, dtype=dtype) 162 | hdu = pyfits.BinTableHDU(data=table) 163 | hdu.header['DATE'] = nowstr 164 | hdu.header['EXTNAME'] = 'SPECTRA' 165 | hdu.header['TUNIT2'] = 'photons/cm^2/s' 166 | hdu.header['TUNIT1'] = 'none' 167 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 168 | hdu.header['HDUCLAS2'] = 'SPECTRA' 169 | hdu.header['HDUVERS1'] = '1.0.0' 170 | 171 | hdus.append(hdu) 172 | hdus = pyfits.HDUList(hdus) 173 | 174 | hdus.writeto(outfilename, overwrite=True) 175 | 176 | 177 | -------------------------------------------------------------------------------- /xars/xsects/generate/WXSECTS.F90: -------------------------------------------------------------------------------- 1 | PROGRAM WXSECTS 2 | 3 | 4 | IMPLICIT NONE 5 | 6 | 7 | INTEGER I,K ! Do loop variables 8 | INTEGER Z(17) ! Atomic numbers 9 | INTEGER NPAR2 ! Number of values per parameter 10 | INTEGER FEABX 11 | INTEGER NBINS 12 | 13 | INTEGER PHOTO_STATUS 14 | 15 | DOUBLE PRECISION, ALLOCATABLE :: E(:) ! Energy 16 | DOUBLE PRECISION, ALLOCATABLE :: DELTAE(:) ! Bin width 17 | DOUBLE PRECISION, ALLOCATABLE :: XPHOT(:,:) ! Absorption cross section 18 | DOUBLE PRECISION, ALLOCATABLE :: XKN(:) ! 
Klein Nishina scattering cross section 19 | DOUBLE PRECISION, ALLOCATABLE :: XKFE(:,:) ! Iron-K cross section 20 | DOUBLE PRECISION, ALLOCATABLE :: XKC(:) ! Carbon-K cross section 21 | DOUBLE PRECISION, ALLOCATABLE :: XKO(:) ! Oxygen-K cross section 22 | DOUBLE PRECISION, ALLOCATABLE :: XKNE(:) ! Neon-K cross section 23 | DOUBLE PRECISION, ALLOCATABLE :: XKMG(:) ! Magnesium-K cross section 24 | DOUBLE PRECISION, ALLOCATABLE :: XKSI(:) ! Silicon-K cross section 25 | DOUBLE PRECISION, ALLOCATABLE :: XKAR(:) ! Argon-K cross section 26 | DOUBLE PRECISION, ALLOCATABLE :: XKCA(:) ! Calcium-K cross section 27 | DOUBLE PRECISION, ALLOCATABLE :: XKCR(:) ! Cromium-K cross section 28 | DOUBLE PRECISION, ALLOCATABLE :: XKNI(:) ! Nickel-K cross section 29 | 30 | DOUBLE PRECISION FEABUND ! Iron abundance 31 | DOUBLE PRECISION RELABUNDFE ! Input Iron abundance, relative 32 | DOUBLE PRECISION RELABUNDZ ! Input Metal abundance, relative 33 | DOUBLE PRECISION PH 34 | DOUBLE PRECISION ABUND(17) 35 | DOUBLE PRECISION LENERG(11) 36 | DOUBLE PRECISION LYIELD(11) 37 | DOUBLE PRECISION ABUND0 38 | 39 | DOUBLE PRECISION DBLE_PHOTO 40 | 41 | DOUBLE PRECISION BINNINGA 42 | DOUBLE PRECISION BINNINGR 43 | INTEGER NUM_ARGS 44 | CHARACTER(len=100) BUFFER 45 | 46 | REAL S ! Output cross section from PHFIT2 47 | 48 | REAL, EXTERNAL:: PHOTO 49 | 50 | ABUND0=1 51 | NPAR2=7 52 | NBINS=1000 53 | RELABUNDZ=1 54 | RELABUNDFE=1 55 | 56 | BINNINGR=1.5 57 | BINNINGA=LOG((8.1D0 + 0.015D0)/8.10D0)**(-1./BINNINGR) 58 | 59 | ALLOCATE(E(0:NBINS)) 60 | ALLOCATE(DELTAE(0:NBINS-1)) 61 | ALLOCATE(XPHOT(0:NBINS-1,1:9)) 62 | ALLOCATE(XKN(0:NBINS-1)) 63 | ALLOCATE(XKFE(0:NBINS-1,1:9)) 64 | ALLOCATE(XKC(0:NBINS-1)) 65 | ALLOCATE(XKO(0:NBINS-1)) 66 | ALLOCATE(XKNE(0:NBINS-1)) 67 | ALLOCATE(XKMG(0:NBINS-1)) 68 | ALLOCATE(XKSI(0:NBINS-1)) 69 | ALLOCATE(XKAR(0:NBINS-1)) 70 | ALLOCATE(XKCA(0:NBINS-1)) 71 | ALLOCATE(XKCR(0:NBINS-1)) 72 | ALLOCATE(XKNI(0:NBINS-1)) 73 | 74 | NUM_ARGS=command_argument_count() 75 | IF (NUM_ARGS.EQ.2) THEN 76 | CALL get_command_argument(1,BUFFER) 77 | READ(BUFFER,*) RELABUNDFE 78 | CALL get_command_argument(2,BUFFER) 79 | READ(BUFFER,*) RELABUNDZ 80 | WRITE(*,*) " Fe:",RELABUNDFE, " Z:",RELABUNDZ 81 | ELSE 82 | WRITE(*,*) "SYNOPSIS: xsects " 83 | WRITE(*,*) "" 84 | WRITE(*,*) "Abundances are relative. Use 1 for local ISM abundances." 85 | CALL EXIT(1) 86 | END IF 87 | 88 | Z=(/1,2,6,7,8,10,11,12,13,14,16,17,18,20,24,26,28/) 89 | ABUND=(/1.D0,9.77D-2,3.63D-4,1.12D-4,8.51D-4,1.23D-4,2.14D-6,3.80D-5,2.95D-6,3.55D-5,1.62D-5,& 90 | 1.88D-7,3.63D-6,2.29D-6,4.84D-7,4.68D-5,1.78D-6/) 91 | ! Fe Kalpha yield fraction: 0.866D0, Kbeta yield 92 | ! XKFEa XKFEb XKC XKO XKNE XKMG XKSI XKAR XKCA XKCR XKNI 93 | LENERG=(/6.40D0,7.06D0,0.277D0,0.525D0,0.849D0,1.25D0,1.74D0,2.96D0,3.69D0,5.41D0,7.48D0/) 94 | ! 0.342D0 is the yield of Fe fluorescence and for alpha/beta, the fractions are 0.866 vs 0.134 95 | ! multiplying these together we get the first two numbers here: 96 | LYIELD=(/0.296172D0,0.045828D0,0.0025D0,0.0086D0,0.0183D0,0.0303D0,0.042914D0,0.121024D0,0.146496D0,0.245361D0,0.365895D0/) 97 | 98 | 99 | DO K=1,17 100 | ABUND(K)=ABUND0*ABUND(K) 101 | END DO 102 | 103 | 104 | DO I=0,NBINS-1 105 | IF (I.LT.800) THEN 106 | E(I)=I * 0.01D0 + 0.1D0 107 | ELSE 108 | E(I)=8.1D0 * EXP(((I-800)/BINNINGA)**BINNINGR) 109 | END IF 110 | IF ((I+1).LT.800) THEN 111 | E(I+1)=(I+1) * 0.01D0 + 0.1D0 112 | ELSE 113 | E(I+1)=8.1D0 * EXP(((I+1-800)/BINNINGA)**BINNINGR) 114 | END IF 115 | DELTAE(I)=E(I+1)-E(I) 116 | 117 | ! 
Scattering cross section array 118 | XKN(I)=(E(I)+0.5D0*DELTAE(I)) 119 | 120 | DO FEABX=1,NPAR2 121 | PH=0.D0 122 | FEABUND=4.68D-5*10.D0**(-1.D0+(2.D0*(DBLE(FEABX)-1.D0)/(NPAR2-1.D0))) 123 | ! Photoelectric absorption cross section array 124 | ABUND(16)=FEABUND 125 | PH=0.D0 126 | DO K=1,17 127 | DBLE_PHOTO=DBLE(PHOTO(REAL(E(I)),REAL(E(I+1)),Z(K),2,PHOTO_STATUS)) 128 | PH=PH+ABUND(K)*DBLE_PHOTO 129 | END DO 130 | XPHOT(I,FEABX)=PH*1.D21 131 | ! Iron-K cross section array 132 | IF (E(I).GE.7.124D0) THEN 133 | CALL PHFIT2(26,26,1,REAL((E(I)+0.5D0*DELTAE(I))*1000.D0),S) 134 | XKFE(I,FEABX)=DBLE(S)*1.D3*FEABUND 135 | ELSE 136 | XKFE(I,FEABX)=0.D0 137 | END IF 138 | 139 | END DO 140 | 141 | ! Carbon-K cross section array 142 | IF (E(I).GE.0.2910D0) THEN 143 | CALL PHFIT2(6,6,1,REAL((E(I)+0.5D0*DELTAE(I))*1000.D0),S) 144 | XKC(I)=DBLE(S)*1.D3*ABUND(3) 145 | ELSE 146 | XKC(I)=0.D0 147 | END IF 148 | 149 | ! Oxygen-K cross section array 150 | IF (E(I).GE.0.5380D0) THEN 151 | CALL PHFIT2(8,8,1,REAL((E(I)+0.5D0*DELTAE(I))*1000.D0),S) 152 | XKO(I)=DBLE(S)*1.D3*ABUND(5) 153 | ELSE 154 | XKO(I)=0.D0 155 | END IF 156 | 157 | ! Neon-K cross section array 158 | IF (E(I).GE.0.8701D0) THEN 159 | CALL PHFIT2(10,10,1,REAL((E(I)+0.5D0*DELTAE(I))*1000.D0),S) 160 | XKNE(I)=DBLE(S)*1.D3*ABUND(6) 161 | ELSE 162 | XKNE(I)=0.D0 163 | END IF 164 | 165 | ! Magnesium-K cross section array 166 | IF (E(I).GE.1.311D0) THEN 167 | CALL PHFIT2(12,12,1,REAL((E(I)+0.5D0*DELTAE(I))*1000.D0),S) 168 | XKMG(I)=DBLE(S)*1.D3*ABUND(8) 169 | ELSE 170 | XKMG(I)=0.D0 171 | END IF 172 | 173 | ! Silicon-K cross section array 174 | IF (E(I).GE.1.846D0) THEN 175 | CALL PHFIT2(14,14,1,REAL((E(I)+0.5D0*DELTAE(I))*1000.D0),S) 176 | XKSI(I)=DBLE(S)*1.D3*ABUND(10) 177 | ELSE 178 | XKSI(I)=0.D0 179 | END IF 180 | 181 | ! Argon-K cross section array 182 | IF (E(I).GE.3.203D0) THEN 183 | CALL PHFIT2(18,18,1,REAL((E(I)+0.5D0*DELTAE(I))*1000.D0),S) 184 | XKAR(I)=DBLE(S)*1.D3*ABUND(13) 185 | ELSE 186 | XKAR(I)=0.D0 187 | END IF 188 | 189 | ! Calcium-K cross section array 190 | IF (E(I).GE.4.043D0) THEN 191 | CALL PHFIT2(20,20,1,REAL((E(I)+0.5D0*DELTAE(I))*1000.D0),S) 192 | XKCA(I)=DBLE(S)*1.D3*ABUND(14) 193 | ELSE 194 | XKCA(I)=0.D0 195 | END IF 196 | 197 | ! Chromium-K cross section array 198 | IF (E(I).GE.5.996D0) THEN 199 | CALL PHFIT2(24,24,1,REAL((E(I)+0.5D0*DELTAE(I))*1000.D0),S) 200 | XKCR(I)=DBLE(S)*1.D3*ABUND(15) 201 | ELSE 202 | XKCR(I)=0.D0 203 | END IF 204 | 205 | ! 
Nickel-K cross section array 206 | IF (E(I).GE.8.348D0) THEN 207 | CALL PHFIT2(28,28,1,REAL((E(I)+0.5D0*DELTAE(I))*1000.D0),S) 208 | XKNI(I)=DBLE(S)*1.D3*ABUND(17) 209 | ELSE 210 | XKNI(I)=0.D0 211 | END IF 212 | END DO 213 | 214 | OPEN(UNIT=1,FILE='xphot.dat') 215 | OPEN(UNIT=2,FILE='xkfe.dat') 216 | OPEN(UNIT=3,FILE='xsects.dat') 217 | 218 | WRITE(3,'(a)')"# cross-sections for absorption and fluorescence" 219 | WRITE(3,'(a)')"# E XPHOT XKFE XKC XKO XKNE XKMG XKSI XKAR XKCA XKCR XKNI" 220 | WRITE(3,'(a)')"# but first: line energies" 221 | !WRITE(3,*)"0 0 6.40 7.06 0.277 0.525 0.849 1.25 1.74 2.96 3.69 5.41 7.48" 222 | WRITE(3,"(13F8.4)")0.D0,0.D0,LENERG(1),LENERG(2),LENERG(3),LENERG(4),LENERG(5), & 223 | LENERG(6),LENERG(7),LENERG(8),LENERG(9),LENERG(10),LENERG(11) 224 | WRITE(3,'(a)')"# and second: line yields (fractions of cross-sections)" 225 | !WRITE(3,*)"0 1 0.296172 0.045828 0.0025 0.0086 0.0183 0.0303 0.042914 0.121024 0.146496 0.245361 0.365895" 226 | WRITE(3,"(13F10.6)")0.D0,1.D0,LYIELD(1),LYIELD(2),LYIELD(3),LYIELD(4),LYIELD(5), & 227 | LYIELD(6),LYIELD(7),LYIELD(8),LYIELD(9),LYIELD(10),LYIELD(11) 228 | WRITE(3,'(a)')"# XKFE,XKC,XKO,XKNE,XKMG,XKSI,XKAR,XKCA,XKCR,XKNI" 229 | WRITE(3,'(a)')"# now comes the table" 230 | WRITE(3,'(a)')"# E XPHOT XKFEa XKFEb XKC XKO XKNE XKMG XKSI XKAR XKCA XKCR XKNI" 231 | 232 | 233 | DO I=0,NBINS-1 234 | WRITE(1,*)E(I),XPHOT(I,4) 235 | WRITE(2,*)E(I),XKFE(I,4) 236 | WRITE(3,*)(E(I)+E(I+1))/2,XPHOT(I,4),XKFE(I,4),XKFE(I,4),XKC(I),XKO(I),XKNE(I), & 237 | XKMG(I),XKSI(I),XKAR(I),XKCA(I),XKCR(I),XKNI(I) 238 | END DO 239 | 240 | 241 | STOP 242 | END PROGRAM 243 | 244 | 245 | -------------------------------------------------------------------------------- /xspecexport/createsmoothtoruscutofftable.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import exp 3 | import astropy.io.fits as pyfits 4 | import h5py 5 | import sys 6 | import progressbar 7 | from binning import nbins, energy2bin, bin2energy 8 | 9 | energy_lo, energy_hi = bin2energy(numpy.arange(nbins)) 10 | energy = (energy_hi + energy_lo) / 2 11 | deltae = energy_hi - energy_lo 12 | deltae0 = deltae[energy >= 1][0] 13 | 14 | table = [] 15 | PhoIndices = [ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 16 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 17 | 3. ] 18 | nh_bins = numpy.array([9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 19 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 20 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 21 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 22 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 23 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 24 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 25 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 26 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 27 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 28 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 29 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 30 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 31 | 7.08000000e+03, 1.00000000e+04]) 32 | 33 | Ecuts = [ 20., 30, 40, 60, 100, 140, 200, 400 ] 34 | data = {} 35 | 36 | outfilename = sys.argv[1] 37 | prefix = sys.argv[2] 38 | sigmas = ['5_gexp2','10_gexp2','20_gexp2','30_gexp2','sphere'] 39 | sigmav = [5,10,20,30,90] 40 | opening = [90-s for s in sigmav] 41 | filenames = [prefix % o for o in sigmas] 42 | 43 | widgets = [progressbar.Percentage(), " starting ... 
", progressbar.Bar(), progressbar.ETA()] 44 | pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(filenames)*len(nh_bins)).start() 45 | 46 | 47 | for Theta_tor, filename in zip(opening, filenames): 48 | #print 'loading', filename 49 | f = h5py.File(filename) 50 | nphot = f.attrs['NPHOT'] 51 | 52 | geometry = numpy.loadtxt(filename.replace('_rdata.hdf5', '').replace('_transmitrdata.hdf5', '').replace('_reflectrdata.hdf5', '').replace('layered','')) 53 | 54 | matrix = f['rdata'] 55 | a, b, nmu = matrix.shape 56 | assert a == nbins, matrix.shape 57 | assert b == nbins, matrix.shape 58 | #data[(nh, opening)] = [(nphot, f[0].data)] 59 | 60 | last_angle = 0 61 | # go through viewing angles 62 | for mu, nh in enumerate(nh_bins): 63 | angle = geometry[mu,0] 64 | # theta = arccos(1-x) 65 | deltac = numpy.cos(last_angle) - numpy.cos(angle) 66 | last_angle = angle 67 | 68 | matrix_mu = matrix[:,:,mu] 69 | widgets[1] = '| op=%d nh=%.3f ' % (Theta_tor, nh) 70 | pbar.update(getattr(pbar, 'currval', getattr(pbar, 'value')) + 1) 71 | for PhoIndex in PhoIndices: 72 | for Ecut in Ecuts: 73 | weights = (energy**-PhoIndex * exp(-energy / Ecut) * deltae / deltae0).reshape((-1,1)) 74 | y = (weights * matrix_mu).sum(axis=0) / nphot * 1. / deltac 75 | #print nh, PhoIndex, Theta_tor #, (y/deltae)[energy_lo >= 1][0] 76 | #print ' ', (weights * matrix[:,:,mu]).sum(axis=0), deltae, (nphot / 1000000.) 77 | #assert numpy.any(y > 0), y 78 | table.append(((nh, PhoIndex, Ecut, Theta_tor), y)) 79 | pbar.finish() 80 | 81 | hdus = [] 82 | hdu = pyfits.PrimaryHDU() 83 | import datetime, time 84 | now = datetime.datetime.fromtimestamp(time.time()) 85 | nowstr = now.isoformat() 86 | nowstr = nowstr[:nowstr.rfind('.')] 87 | hdu.header['CREATOR'] = """Johannes Buchner """ 88 | hdu.header['DATE'] = nowstr 89 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 90 | hdu.header['MODLNAME'] = 'torus' 91 | hdu.header['ADDMODEL'] = True 92 | hdu.header['MODLUNIT'] = 'photons/cm^2/s' 93 | hdu.header['EXTEND'] = True 94 | hdu.header['REDSHIFT'] = True 95 | hdu.header['SIMPLE'] = True 96 | hdu.header['HDUDOC'] = 'OGIP/92-009' 97 | hdu.header['HDUVERS1'] = '1.0.0' 98 | hdu.header['HDUCLASS'] = 'OGIP' 99 | hdus.append(hdu) 100 | 101 | # NAME, METHOD, INITIAL, DELTA, MINIMUM, BOTTOM, TOP, MAXIMUM, NUMBVALS, VALUE (41) 102 | dtype = [('NAME', 'S12'), ('METHOD', '>i4'), ('INITIAL', '>f4'), ('DELTA', '>f4'), ('MINIMUM', '>f4'), ('BOTTOM', '>f4'), ('TOP', '>f4'), ('MAXIMUM', '>f4'), ('NUMBVALS', '>i4'), ('VALUE', '>f4', (41,))] 103 | 104 | parameters = numpy.array([ 105 | ('nH', 1, 10.0, 1.0, 0.0099999998, 0.0099999998, 10000.0, 10000.0, 41, numpy.array([ 9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 106 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 107 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 108 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 109 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 110 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 111 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 112 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 113 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 114 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 115 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 116 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 117 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 118 | 7.08000000e+03, 1.00000000e+04])), 119 | ('PhoIndex', 0, 2.0, 0.0099999998, 1.0, 1.2, 2.8, 3.0, 11, numpy.array([ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 120 | 2. 
, 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 121 | 3. , 0. , 0. , 0. , 0. , 122 | 0. , 0. , 0. , 0. , 0. , 123 | 0. , 0. , 0. , 0. , 0. , 124 | 0. , 0. , 0. , 0. , 0. , 125 | 0. , 0. , 0. , 0. , 0. , 126 | 0. , 0. , 0. , 0. , 0. , 0. ])), 127 | ('Ecut', 0, 100.0, 10.0, 20, 20, 400, 400, 8, numpy.array([ 20. , 30, 40, 60, 100, 128 | 140 , 200, 400 , 0 , 0, 129 | 0. , 0. , 0. , 0. , 0. , 130 | 0. , 0. , 0. , 0. , 0. , 131 | 0. , 0. , 0. , 0. , 0. , 132 | 0. , 0. , 0. , 0. , 0. , 133 | 0. , 0. , 0. , 0. , 0. , 134 | 0. , 0. , 0. , 0. , 0. , 0. ])), 135 | ('Theta_tor', 0, 60.0, 5.0, 0.0, 0.0, 85.0, 85.0, 5, numpy.array([ 0,60,70,80,85, 136 | 0. , 0. , 0. , 137 | 0. , 0. , 0. , 0. , 138 | 0. , 0. , 0. , 0. , 139 | 0. , 0. , 0. , 0. , 140 | 0. , 0. , 0. , 0. , 141 | 0. , 0. , 0. , 0. , 142 | 0. , 0. , 0. , 0. , 143 | 0. , 0. , 0. , 0. , 144 | 0. , 0. , 0. , 0. , 0. ])), 145 | ], dtype=dtype) 146 | assert numpy.product(parameters['NUMBVALS']) == len(table), ('parameter definition does not match spectra table', parameters['NUMBVALS'], numpy.product(parameters['NUMBVALS']), len(table)) 147 | hdu = pyfits.BinTableHDU(data=parameters) 148 | hdu.header['DATE'] = nowstr 149 | hdu.header['EXTNAME'] = 'PARAMETERS' 150 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 151 | hdu.header['HDUVERS1'] = '1.0.0' 152 | hdu.header['NINTPARM'] = len(parameters) 153 | hdu.header['NADDPARM'] = 0 154 | hdu.header['HDUCLAS2'] = 'PARAMETERS' 155 | hdus.append(hdu) 156 | 157 | # ENERG_LO, ENERG_HI 158 | dtype = [('ENERG_LO', '>f4'), ('ENERG_HI', '>f4')] 159 | energies = numpy.array(list(zip(energy_lo, energy_hi)), dtype=dtype) 160 | hdu = pyfits.BinTableHDU(data=energies) 161 | hdu.header['DATE'] = nowstr 162 | hdu.header['EXTNAME'] = 'ENERGIES' 163 | hdu.header['TUNIT2'] = 'keV' 164 | hdu.header['TUNIT1'] = 'keV' 165 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 166 | hdu.header['HDUCLAS2'] = 'ENERGIES' 167 | hdu.header['HDUVERS1'] = '1.0.0' 168 | hdus.append(hdu) 169 | 170 | # PARAMVAL (4), INTPSPEC 171 | dtype = [('PARAMVAL', '>f4', (len(parameters),)), ('INTPSPEC', '>f4', (nbins,))] 172 | table.sort() 173 | table = numpy.array(table, dtype=dtype) 174 | hdu = pyfits.BinTableHDU(data=table) 175 | hdu.header['DATE'] = nowstr 176 | hdu.header['EXTNAME'] = 'SPECTRA' 177 | hdu.header['TUNIT2'] = 'photons/cm^2/s' 178 | hdu.header['TUNIT1'] = 'none' 179 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 180 | hdu.header['HDUCLAS2'] = 'SPECTRA' 181 | hdu.header['HDUVERS1'] = '1.0.0' 182 | 183 | hdus.append(hdu) 184 | hdus = pyfits.HDUList(hdus) 185 | 186 | hdus.writeto(outfilename, overwrite=True) 187 | 188 | 189 | -------------------------------------------------------------------------------- /xspecexport/createtoruscutoffdisktable.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import pi, exp 3 | import astropy.io.fits as pyfits 4 | import h5py 5 | import sys 6 | import progressbar 7 | from binning import nbins, energy2bin, bin2energy 8 | 9 | energy_lo, energy_hi = bin2energy(numpy.arange(nbins)) 10 | energy = (energy_hi + energy_lo) / 2 11 | deltae = energy_hi - energy_lo 12 | deltae0 = deltae[energy >= 1][0] 13 | 14 | table = [] 15 | PhoIndices = [ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 16 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 17 | 3. 
] 18 | ThetaIncs = [ 18.20000076, 31.79999924, 41.40000153, 49.5 , 19 | 56.59999847, 63.29999924, 69.5 , 75.5 , 20 | 81.40000153, 87.09999847] 21 | ThetaTors = [25.79999924, 36.90000153, 45.59999847, 53.09999847, 22 | 60. , 66.40000153, 72.5 , 78.5 , 23 | 84.30000305] 24 | Ecuts = [ 20., 30, 40, 60, 100, 140, 200, 400 ] 25 | data = {} 26 | 27 | outfilename = sys.argv[1] 28 | diskfilename = sys.argv[2] 29 | models = sys.argv[3:] 30 | 31 | f = h5py.File(diskfilename, 'r') 32 | nphot = f.attrs['NPHOT'] 33 | matrix = f['rdata'][()] 34 | # response of disk reflection 35 | # 2x -- from top and bottom 36 | matrix_disk = matrix.sum(axis=2) * 1. / nphot * 2 37 | 38 | widgets = [progressbar.Percentage(), " starting ... ", progressbar.Bar(), progressbar.ETA()] 39 | pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(models)*len(ThetaIncs)).start() 40 | 41 | for filename in models: 42 | #print 'loading', filename 43 | f = h5py.File(filename) 44 | nphot = f.attrs['NPHOT'] 45 | nh = float(f.attrs['NH']) 46 | opening = float(f.attrs['OPENING']) * 180 / pi 47 | 48 | matrix = f['rdata'] 49 | 50 | #nh = float(f[0].header['NH']) 51 | #opening = float(f[0].header['OPENING']) * 180 / pi 52 | opening = [thetator for thetator in ThetaTors if numpy.abs(opening - thetator) < 0.1][0] 53 | #nphot = int(f[0].header['NPHOT']) 54 | #matrix = f[0].data 55 | a, b, nmu = matrix.shape 56 | assert a == nbins, matrix.shape 57 | assert b == nbins, matrix.shape 58 | #data[(nh, opening)] = [(nphot, f[0].data)] 59 | 60 | # go through viewing angles 61 | for mu, ThetaInc in enumerate(ThetaIncs[::-1]): 62 | matrix_mu = matrix[:,:,mu] 63 | widgets[1] = '| op=%d nh=%.3f inc=%02d ' % (opening, nh, ThetaInc) 64 | pbar.update(getattr(pbar, 'currval', getattr(pbar, 'value')) + 1) 65 | for PhoIndex in PhoIndices: 66 | for Ecut in Ecuts: 67 | weights = (energy**-PhoIndex * exp(-energy / Ecut) * deltae / deltae0).reshape((-1,1)) 68 | y_disk = (weights * matrix_disk).sum(axis=0) 69 | y = (y_disk * matrix_mu).sum(axis=0) / nphot / 10. 
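                    # y_disk: the cutoff power law folded through the viewing-angle-summed
                    # disk reflection response (matrix_disk above, already per simulated
                    # photon and doubled for the two disk faces).
                    # y: that disk-reflected spectrum folded through the torus response
                    # for this viewing angle, divided by the torus run's photon count and
                    # a constant factor of 10, as in the other torus table scripts.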
70 | table.append(((nh, PhoIndex, Ecut, opening, ThetaInc), y)) 71 | pbar.finish() 72 | 73 | hdus = [] 74 | hdu = pyfits.PrimaryHDU() 75 | import datetime, time 76 | now = datetime.datetime.fromtimestamp(time.time()) 77 | nowstr = now.isoformat() 78 | nowstr = nowstr[:nowstr.rfind('.')] 79 | hdu.header['CREATOR'] = """Johannes Buchner """ 80 | hdu.header['DATE'] = nowstr 81 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 82 | hdu.header['MODLNAME'] = 'torus' 83 | hdu.header['ADDMODEL'] = True 84 | hdu.header['MODLUNIT'] = 'photons/cm^2/s' 85 | hdu.header['EXTEND'] = True 86 | hdu.header['REDSHIFT'] = True 87 | hdu.header['SIMPLE'] = True 88 | hdu.header['HDUDOC'] = 'OGIP/92-009' 89 | hdu.header['HDUVERS1'] = '1.0.0' 90 | hdu.header['HDUCLASS'] = 'OGIP' 91 | hdus.append(hdu) 92 | 93 | # NAME, METHOD, INITIAL, DELTA, MINIMUM, BOTTOM, TOP, MAXIMUM, NUMBVALS, VALUE (41) 94 | dtype = [('NAME', 'S12'), ('METHOD', '>i4'), ('INITIAL', '>f4'), ('DELTA', '>f4'), ('MINIMUM', '>f4'), ('BOTTOM', '>f4'), ('TOP', '>f4'), ('MAXIMUM', '>f4'), ('NUMBVALS', '>i4'), ('VALUE', '>f4', (41,))] 95 | 96 | parameters = numpy.array([ 97 | ('nH', 1, 10.0, 1.0, 0.0099999998, 0.0099999998, 10000.0, 10000.0, 41, numpy.array([ 9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 98 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 99 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 100 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 101 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 102 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 103 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 104 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 105 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 106 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 107 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 108 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 109 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 110 | 7.08000000e+03, 1.00000000e+04])), 111 | ('PhoIndex', 0, 2.0, 0.0099999998, 1.0, 1.2, 2.8, 3.0, 11, numpy.array([ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 112 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 113 | 3. , 0. , 0. , 0. , 0. , 114 | 0. , 0. , 0. , 0. , 0. , 115 | 0. , 0. , 0. , 0. , 0. , 116 | 0. , 0. , 0. , 0. , 0. , 117 | 0. , 0. , 0. , 0. , 0. , 118 | 0. , 0. , 0. , 0. , 0. , 0. ])), 119 | ('Ecut', 0, 100.0, 10.0, 20, 20, 400, 400, 8, numpy.array([ 20. , 30, 40, 60, 100, 120 | 140 , 200, 400 , 0 , 0, 121 | 0. , 0. , 0. , 0. , 0. , 122 | 0. , 0. , 0. , 0. , 0. , 123 | 0. , 0. , 0. , 0. , 0. , 124 | 0. , 0. , 0. , 0. , 0. , 125 | 0. , 0. , 0. , 0. , 0. , 126 | 0. , 0. , 0. , 0. , 0. , 0. ])), 127 | ('Theta_tor', 0, 60.0, 5.0, 0.0, 25.799999, 84.300003, 90.0, 9, numpy.array([ 25.79999924, 36.90000153, 45.59999847, 53.09999847, 128 | 60. , 66.40000153, 72.5 , 78.5 , 129 | 84.30000305, 0. , 0. , 0. , 130 | 0. , 0. , 0. , 0. , 131 | 0. , 0. , 0. , 0. , 132 | 0. , 0. , 0. , 0. , 133 | 0. , 0. , 0. , 0. , 134 | 0. , 0. , 0. , 0. , 135 | 0. , 0. , 0. , 0. , 136 | 0. , 0. , 0. , 0. , 0. ])), 137 | ('Theta_inc', 0, 18.200001, 5.0, 0.0, 18.200001, 87.099998, 90.0, 10, numpy.array([ 18.20000076, 31.79999924, 41.40000153, 49.5 , 138 | 56.59999847, 63.29999924, 69.5 , 75.5 , 139 | 81.40000153, 87.09999847, 0. , 0. , 140 | 0. , 0. , 0. , 0. , 141 | 0. , 0. , 0. , 0. , 142 | 0. , 0. , 0. , 0. , 143 | 0. , 0. , 0. , 0. , 144 | 0. , 0. , 0. , 0. , 145 | 0. , 0. , 0. , 0. , 146 | 0. , 0. , 0. , 0. , 0. 
])), 147 | ], dtype=dtype) 148 | hdu = pyfits.BinTableHDU(data=parameters) 149 | hdu.header['DATE'] = nowstr 150 | hdu.header['EXTNAME'] = 'PARAMETERS' 151 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 152 | hdu.header['HDUVERS1'] = '1.0.0' 153 | hdu.header['NINTPARM'] = len(parameters) 154 | hdu.header['NADDPARM'] = 0 155 | hdu.header['HDUCLAS2'] = 'PARAMETERS' 156 | hdus.append(hdu) 157 | 158 | # ENERG_LO, ENERG_HI 159 | dtype = [('ENERG_LO', '>f4'), ('ENERG_HI', '>f4')] 160 | energies = numpy.array(list(zip(energy_lo, energy_hi)), dtype=dtype) 161 | hdu = pyfits.BinTableHDU(data=energies) 162 | hdu.header['DATE'] = nowstr 163 | hdu.header['EXTNAME'] = 'ENERGIES' 164 | hdu.header['TUNIT2'] = 'keV' 165 | hdu.header['TUNIT1'] = 'keV' 166 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 167 | hdu.header['HDUCLAS2'] = 'ENERGIES' 168 | hdu.header['HDUVERS1'] = '1.0.0' 169 | hdus.append(hdu) 170 | 171 | # PARAMVAL (4), INTPSPEC 172 | dtype = [('PARAMVAL', '>f4', (len(parameters),)), ('INTPSPEC', '>f4', (nbins,))] 173 | table.sort() 174 | table = numpy.array(table, dtype=dtype) 175 | hdu = pyfits.BinTableHDU(data=table) 176 | hdu.header['DATE'] = nowstr 177 | hdu.header['EXTNAME'] = 'SPECTRA' 178 | hdu.header['TUNIT2'] = 'photons/cm^2/s' 179 | hdu.header['TUNIT1'] = 'none' 180 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 181 | hdu.header['HDUCLAS2'] = 'SPECTRA' 182 | hdu.header['HDUVERS1'] = '1.0.0' 183 | 184 | hdus.append(hdu) 185 | hdus = pyfits.HDUList(hdus) 186 | 187 | hdus.writeto(outfilename, overwrite=True) 188 | 189 | 190 | -------------------------------------------------------------------------------- /xspecexport/createtoruscutofftable.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import pi, exp 3 | import astropy.io.fits as pyfits 4 | import h5py 5 | import sys 6 | import progressbar 7 | from binning import nbins, energy2bin, bin2energy 8 | 9 | energy_lo, energy_hi = bin2energy(numpy.arange(nbins)) 10 | energy = (energy_hi + energy_lo) / 2 11 | deltae = energy_hi - energy_lo 12 | deltae0 = deltae[energy >= 1][0] 13 | 14 | table = [] 15 | PhoIndices = [ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 16 | 2. , 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 17 | 3. ] 18 | ThetaIncs = [ 18.20000076, 31.79999924, 41.40000153, 49.5 , 19 | 56.59999847, 63.29999924, 69.5 , 75.5 , 20 | 81.40000153, 87.09999847] 21 | ThetaTors = [0, 25.79999924, 36.90000153, 45.59999847, 53.09999847, 22 | 60. , 66.40000153, 72.5 , 78.5 , 23 | 84.30000305] 24 | Ecuts = [ 20., 30, 40, 60, 100, 140, 200, 400 ] 25 | data = {} 26 | 27 | outfilename = sys.argv[1] 28 | models = sys.argv[2:] 29 | 30 | widgets = [progressbar.Percentage(), " starting ... 
", progressbar.Bar(), progressbar.ETA()] 31 | pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(models)*len(ThetaIncs)).start() 32 | 33 | for filename in models: 34 | #print 'loading', filename 35 | f = h5py.File(filename) 36 | nphot = f.attrs['NPHOT'] 37 | nh = float(f.attrs['NH']) 38 | opening = float(f.attrs['OPENING']) * 180 / pi 39 | 40 | matrix = f['rdata'] 41 | 42 | #nh = float(f[0].header['NH']) 43 | #opening = float(f[0].header['OPENING']) * 180 / pi 44 | opening = [thetator for thetator in ThetaTors if numpy.abs(opening - thetator) < 0.1][0] 45 | #nphot = int(f[0].header['NPHOT']) 46 | #matrix = f[0].data 47 | a, b, nmu = matrix.shape 48 | assert a == nbins, matrix.shape 49 | assert b == nbins, matrix.shape 50 | #data[(nh, opening)] = [(nphot, f[0].data)] 51 | 52 | # go through viewing angles 53 | for mu, ThetaInc in enumerate(ThetaIncs[::-1]): 54 | matrix_mu = matrix[:,:,mu] 55 | widgets[1] = '| op=%d nh=%.3f inc=%02d ' % (opening, nh, ThetaInc) 56 | pbar.update(getattr(pbar, 'currval', getattr(pbar, 'value')) + 1) 57 | for PhoIndex in PhoIndices: 58 | for Ecut in Ecuts: 59 | weights = (energy**-PhoIndex * exp(-energy / Ecut) * deltae / deltae0).reshape((-1,1)) 60 | y = (weights * matrix_mu).sum(axis=0) / nphot / 10. 61 | #print nh, PhoIndex, opening, ThetaInc #, (y/deltae)[energy_lo >= 1][0] 62 | #print ' ', (weights * matrix[:,:,mu]).sum(axis=0), deltae, (nphot / 1000000.) 63 | #assert numpy.any(y > 0), y 64 | table.append(((nh, PhoIndex, Ecut, opening, ThetaInc), y)) 65 | pbar.finish() 66 | 67 | hdus = [] 68 | hdu = pyfits.PrimaryHDU() 69 | import datetime, time 70 | now = datetime.datetime.fromtimestamp(time.time()) 71 | nowstr = now.isoformat() 72 | nowstr = nowstr[:nowstr.rfind('.')] 73 | hdu.header['CREATOR'] = """Johannes Buchner """ 74 | hdu.header['DATE'] = nowstr 75 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 76 | hdu.header['MODLNAME'] = 'torus' 77 | hdu.header['ADDMODEL'] = True 78 | hdu.header['MODLUNIT'] = 'photons/cm^2/s' 79 | hdu.header['EXTEND'] = True 80 | hdu.header['REDSHIFT'] = True 81 | hdu.header['SIMPLE'] = True 82 | hdu.header['HDUDOC'] = 'OGIP/92-009' 83 | hdu.header['HDUVERS1'] = '1.0.0' 84 | hdu.header['HDUCLASS'] = 'OGIP' 85 | hdus.append(hdu) 86 | 87 | # NAME, METHOD, INITIAL, DELTA, MINIMUM, BOTTOM, TOP, MAXIMUM, NUMBVALS, VALUE (41) 88 | dtype = [('NAME', 'S12'), ('METHOD', '>i4'), ('INITIAL', '>f4'), ('DELTA', '>f4'), ('MINIMUM', '>f4'), ('BOTTOM', '>f4'), ('TOP', '>f4'), ('MAXIMUM', '>f4'), ('NUMBVALS', '>i4'), ('VALUE', '>f4', (41,))] 89 | 90 | parameters = numpy.array([ 91 | ('nH', 1, 10.0, 1.0, 0.0099999998, 0.0099999998, 10000.0, 10000.0, 41, numpy.array([ 9.99999978e-03, 1.41000003e-02, 1.99999996e-02, 92 | 2.82000005e-02, 3.97999994e-02, 5.62000014e-02, 93 | 7.94000030e-02, 1.12000003e-01, 1.58000007e-01, 94 | 2.24000007e-01, 3.16000015e-01, 4.46999997e-01, 95 | 6.30999982e-01, 8.90999973e-01, 1.25999999e+00, 96 | 1.77999997e+00, 2.50999999e+00, 3.54999995e+00, 97 | 5.01000023e+00, 7.07999992e+00, 1.00000000e+01, 98 | 1.41000004e+01, 2.00000000e+01, 2.82000008e+01, 99 | 3.97999992e+01, 5.62000008e+01, 7.94000015e+01, 100 | 1.12000000e+02, 1.58000000e+02, 2.24000000e+02, 101 | 3.16000000e+02, 4.47000000e+02, 6.31000000e+02, 102 | 8.91000000e+02, 1.26000000e+03, 1.78000000e+03, 103 | 2.51000000e+03, 3.55000000e+03, 5.01000000e+03, 104 | 7.08000000e+03, 1.00000000e+04])), 105 | ('PhoIndex', 0, 2.0, 0.0099999998, 1.0, 1.2, 2.8, 3.0, 11, numpy.array([ 1. , 1.20000005, 1.39999998, 1.60000002, 1.79999995, 106 | 2. 
, 2.20000005, 2.4000001 , 2.5999999 , 2.79999995, 107 | 3. , 0. , 0. , 0. , 0. , 108 | 0. , 0. , 0. , 0. , 0. , 109 | 0. , 0. , 0. , 0. , 0. , 110 | 0. , 0. , 0. , 0. , 0. , 111 | 0. , 0. , 0. , 0. , 0. , 112 | 0. , 0. , 0. , 0. , 0. , 0. ])), 113 | ('Ecut', 0, 100.0, 10.0, 20, 20, 400, 400, 8, numpy.array([ 20. , 30, 40, 60, 100, 114 | 140 , 200, 400 , 0 , 0, 115 | 0. , 0. , 0. , 0. , 0. , 116 | 0. , 0. , 0. , 0. , 0. , 117 | 0. , 0. , 0. , 0. , 0. , 118 | 0. , 0. , 0. , 0. , 0. , 119 | 0. , 0. , 0. , 0. , 0. , 120 | 0. , 0. , 0. , 0. , 0. , 0. ])), 121 | ('Theta_tor', 0, 60.0, 5.0, 0.0, 0.0, 84.300003, 84.300003, 10, numpy.array([ 0, 25.79999924, 36.90000153, 45.59999847, 53.09999847, 122 | 60. , 66.40000153, 72.5 , 78.5 , 123 | 84.30000305, 0. , 0. , 124 | 0. , 0. , 0. , 0. , 125 | 0. , 0. , 0. , 0. , 126 | 0. , 0. , 0. , 0. , 127 | 0. , 0. , 0. , 0. , 128 | 0. , 0. , 0. , 0. , 129 | 0. , 0. , 0. , 0. , 130 | 0. , 0. , 0. , 0. , 0. ])), 131 | ('Theta_inc', 0, 18.200001, 5.0, 0.0, 18.200001, 87.099998, 90.0, 10, numpy.array([ 18.20000076, 31.79999924, 41.40000153, 49.5 , 132 | 56.59999847, 63.29999924, 69.5 , 75.5 , 133 | 81.40000153, 87.09999847, 0. , 0. , 134 | 0. , 0. , 0. , 0. , 135 | 0. , 0. , 0. , 0. , 136 | 0. , 0. , 0. , 0. , 137 | 0. , 0. , 0. , 0. , 138 | 0. , 0. , 0. , 0. , 139 | 0. , 0. , 0. , 0. , 140 | 0. , 0. , 0. , 0. , 0. ])), 141 | ], dtype=dtype) 142 | assert numpy.product(parameters['NUMBVALS']) == len(table), ('parameter definition does not match spectra table', parameters['NUMBVALS'], numpy.product(parameters['NUMBVALS']), len(table)) 143 | hdu = pyfits.BinTableHDU(data=parameters) 144 | hdu.header['DATE'] = nowstr 145 | hdu.header['EXTNAME'] = 'PARAMETERS' 146 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 147 | hdu.header['HDUVERS1'] = '1.0.0' 148 | hdu.header['NINTPARM'] = len(parameters) 149 | hdu.header['NADDPARM'] = 0 150 | hdu.header['HDUCLAS2'] = 'PARAMETERS' 151 | hdus.append(hdu) 152 | 153 | # ENERG_LO, ENERG_HI 154 | dtype = [('ENERG_LO', '>f4'), ('ENERG_HI', '>f4')] 155 | energies = numpy.array(list(zip(energy_lo, energy_hi)), dtype=dtype) 156 | hdu = pyfits.BinTableHDU(data=energies) 157 | hdu.header['DATE'] = nowstr 158 | hdu.header['EXTNAME'] = 'ENERGIES' 159 | hdu.header['TUNIT2'] = 'keV' 160 | hdu.header['TUNIT1'] = 'keV' 161 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 162 | hdu.header['HDUCLAS2'] = 'ENERGIES' 163 | hdu.header['HDUVERS1'] = '1.0.0' 164 | hdus.append(hdu) 165 | 166 | # PARAMVAL (4), INTPSPEC 167 | dtype = [('PARAMVAL', '>f4', (len(parameters),)), ('INTPSPEC', '>f4', (nbins,))] 168 | table.sort() 169 | table = numpy.array(table, dtype=dtype) 170 | hdu = pyfits.BinTableHDU(data=table) 171 | hdu.header['DATE'] = nowstr 172 | hdu.header['EXTNAME'] = 'SPECTRA' 173 | hdu.header['TUNIT2'] = 'photons/cm^2/s' 174 | hdu.header['TUNIT1'] = 'none' 175 | hdu.header['HDUCLAS1'] = 'XSPEC TABLE MODEL' 176 | hdu.header['HDUCLAS2'] = 'SPECTRA' 177 | hdu.header['HDUVERS1'] = '1.0.0' 178 | 179 | hdus.append(hdu) 180 | hdus = pyfits.HDUList(hdus) 181 | 182 | hdus.writeto(outfilename, overwrite=True) 183 | 184 | 185 | --------------------------------------------------------------------------------
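All of the export scripts above write the same OGIP-style XSPEC table model layout: a PARAMETERS extension (with NUMBVALS and VALUE per parameter), an ENERGIES extension (ENERG_LO/ENERG_HI in keV) and a SPECTRA extension (PARAMVAL plus one INTPSPEC vector per grid point). The snippet below is a minimal sketch for sanity-checking such a file after it has been written; it relies only on astropy.io.fits and numpy, which the scripts already use, and the default file name torus_table.fits is a placeholder for whatever output path was passed as the first command-line argument.

    # check_table.py -- minimal consistency check for a generated XSPEC table model
    # (sketch; the default file name is a placeholder, not produced by this repository)
    import sys
    import numpy
    import astropy.io.fits as pyfits

    filename = sys.argv[1] if len(sys.argv) > 1 else 'torus_table.fits'
    with pyfits.open(filename) as hdus:
        parameters = hdus['PARAMETERS'].data
        energies = hdus['ENERGIES'].data
        spectra = hdus['SPECTRA'].data

        # one stored spectrum per point of the interpolation grid
        assert numpy.prod(parameters['NUMBVALS']) == len(spectra), \
            (parameters['NUMBVALS'], len(spectra))
        # each spectrum covers the full energy grid
        assert spectra['INTPSPEC'].shape[1] == len(energies), spectra['INTPSPEC'].shape

        # list the tabulated parameter values and the energy range
        for name, nvals, values in zip(parameters['NAME'], parameters['NUMBVALS'], parameters['VALUE']):
            print(name, values[:nvals])
        print('energy range: %g - %g keV' % (energies['ENERG_LO'][0], energies['ENERG_HI'][-1]))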