├── __init__.py ├── BSSN ├── __init__.py ├── tests │ └── __init__.py ├── Psi4Cartesianvalidation │ ├── BSSN │ │ ├── __init__.py │ │ ├── Psi4_tetrads.py │ │ ├── BSSN_quantities.py │ │ └── ADM_in_terms_of_BSSN.py │ ├── SIMD.py │ ├── grid.py │ ├── WeylScal4NRPy │ │ └── __init__.py │ ├── outputC.py │ ├── indexedexp.py │ ├── NRPy_param_funcs.py │ ├── reference_metric.py │ ├── finite_difference.py │ └── latex_nrpy_style.tplx └── BSSN_T4UUmunu_vars.py ├── GRFFE ├── __init__.py └── tests │ └── test_GRFFE.py ├── GRHD ├── __init__.py └── tests │ └── test_GRHD.py ├── GRMHD ├── __init__.py └── tests │ └── test_GRMHD.py ├── NRPyPN ├── __init__.py ├── nbconvert_latex_settings ├── indexedexpNRPyPN.py └── PN_Hamiltonian_SSS.py ├── SEOBNR ├── __init__.py ├── Hamstring_constants.txt └── Hamstring_variables.txt ├── TOV ├── __init__.py ├── tov_interp └── ETK_TOVSolver_src_tov_c_diff_3f18340d8cc602c8ba9005242e64c5e184767288.patch ├── tests └── __init__.py ├── ScalarWave ├── __init__.py ├── tests │ ├── __init__.py │ ├── trusted_values_dict.py │ └── test_ScalarWave.py └── CommonParams.py ├── UnitTesting ├── __init__.py ├── Test_UnitTesting │ ├── __init__.py │ ├── module_for_testing.py │ ├── trusted_values_dict.py │ └── test_module.py ├── standard_constants.py ├── fix_cruft__remove_trailing_whitespace_in_notebook.sh ├── fix_cruft__remove_trailing_whitespace__add_newline_at_end_of_files.sh ├── cleanup_repo__remove_carriage_return_chars.sh ├── RepeatedTimer.py ├── all_notebooks_lint_in_pyconv.sh ├── bench_all_notebooks.sh ├── test_skeleton.py ├── setup_trusted_values_dict.py ├── create_dict_string.py ├── run_all_notebooks.sh ├── first_time_print.py ├── evaluate_globals.py ├── core_Jupyter_notebook_testsuite_GiRonly.sh └── core_Jupyter_notebook_testsuite.sh ├── IllinoisGRMHD ├── __init__.py ├── doc │ ├── __init__.py │ └── generate_IllinoisGRMHD_from_ipynb_files.sh ├── ID_converter_ILGRMHD │ ├── src │ │ └── make.code.defn │ ├── README │ └── LICENSE └── Convert_to_HydroBase │ ├── src │ └── make.code.defn │ ├── README │ └── LICENSE ├── MoLtimestepping └── __init__.py ├── WeylScal4NRPy ├── __init__.py └── tests │ ├── __init__.py │ ├── trusted_values_dict.py │ └── test_WeylScal4NRPy.py ├── FishboneMoncriefID ├── __init__.py ├── tests │ ├── __init__.py │ ├── test_FishboneMoncriefID.py │ └── trusted_values_dict.py ├── LICENSE └── README ├── diagnostics_generic ├── __init__.py └── process_2D_data.py ├── CurviBoundaryConditions ├── __init__.py ├── bdrycond_general_algorithm.gif └── SENR.patch-83cf8f5f4e0ec046253cc0017fd4b217d126e68b ├── _config.yml ├── in_progress-SEOBNR └── SEOBNR │ ├── __init__.py │ ├── Hamstring_constants.txt │ ├── Hamstring_variables.txt │ └── constant_coeffs.py ├── SpinWeight_minus2_SphHarmonics └── __init__.py ├── in_progress-Maxwell ├── Maxwell │ ├── __init__.py │ ├── tests │ │ └── __init__.py │ ├── CommonParams.py │ └── MaxwellCartesian_ID.py └── MaxwellVacuum │ └── example_parfiles │ ├── Ax-convergence.png │ ├── Ay-convergence.png │ ├── Ex-convergence.png │ ├── Ey-convergence.png │ ├── constraintviolation.png │ ├── maxwell_toroidaldipole-0.25_OB8.par │ └── maxwell_toroidaldipole-0.125_OB4.par ├── u0_smallb_Poynting__Cartesian ├── __init__.py └── tests │ ├── __init__.py │ ├── test_u0_smallb_Poynting__Cartesian.py │ └── trusted_values_dict.py ├── in_progress-NRPyCritCol └── ScalarField │ ├── __init__.py │ ├── ScalarField_declare_gridfunctions.py │ ├── ScalarField_output_central_values.h │ └── ScalarField_Tmunu.py ├── in_progress-GiRaFFE_NRPy ├── GiRaFFE_NRPy │ ├── __init__.py │ ├── example_par_files │ 
│ └── figure1_GiRaFFE_paper.png │ ├── GiRaFFE_Ccode_library │ │ └── compute_conservatives_FFE.C │ └── GiRaFFE_NRPy_Characteristic_Speeds.py └── GiRaFFEfood_NRPy │ ├── __init__.py │ ├── GiRaFFEfood_NRPy_Magnetospheric_Wald.py │ ├── GiRaFFEfood_NRPy_FFE_Breakdown.py │ ├── BasisTransform.py │ ├── GiRaFFEfood_NRPy_Aligned_Rotator.py │ ├── GiRaFFEfood_NRPy_Fast_Wave.py │ └── GiRaFFEfood_NRPy_Exact_Wald.py ├── in_progress ├── latex_nrpy_style.tplx ├── 2021_ETK_School │ ├── logos │ │ ├── ETK.png │ │ ├── NSF.jpg │ │ ├── WVU.png │ │ ├── Idaho.png │ │ └── Nerpy.png │ ├── test_results │ │ ├── Ax-convergence.png │ │ ├── Ex-convergence.png │ │ └── constraintviolation.png │ ├── generate_MaxwellVacuum_thorns.sh │ └── run_Jupyter_notebook.sh └── tabulatedEOS │ ├── AUTHORS │ ├── NRPyEOS_minimal.c │ └── latex_nrpy_style.tplx ├── requirements.txt ├── .gitignore ├── .github ├── codeql.yml ├── workflows │ ├── codeql.yml │ ├── github-actions-windows2022.yml │ └── github-actions-MacOS12.yml ├── logs_for_broken-github-actions-core_Jupyter_notebooks_latestsympy-MacOS12.yml └── broken-github-actions-core_Jupyter_notebooks_latestsympy-MacOS12.yml ├── param.txt ├── convert_jupyter_to_python_and_run.sh ├── .deepsource.toml ├── Deprecated ├── SommerfeldBoundaryCondition │ ├── E_rel_diagonal.png │ ├── E_rel_x-axis.png │ ├── NRPy_vs_ETK_x-axis.png │ └── NRPy_vs_ETK_diagonal.png ├── CurviBoundaryConditions │ └── boundary_conditions │ │ ├── bcstruct_freemem.h │ │ ├── CurviBC_include_Cfunctions.h │ │ ├── driver_bcstruct.h │ │ └── BCs_data_structs.h └── BSSN │ └── BSSN_ID_function_string.py ├── .lgtm.yml ├── nbconvert_latex_settings ├── conf.json ├── style_bw_python.tex.j2 ├── index.tex.j2 ├── style_python.tex.j2 ├── report.tex.j2 ├── display_priority.j2 ├── style_bw_ipython.tex.j2 ├── style_ipython.tex.j2 ├── document_contents.tex.j2 └── LICENSE ├── defines_dict.py ├── WaveToyNRPy └── example_parfiles │ ├── 8thOrder_ConvergenceTests │ ├── wavetimes.png │ ├── convergence-RK8-FD8-3D.png │ ├── FD8-RK8__test_output_plot.png │ ├── convergence-RK8-FD8-spherical_gaussian.png │ ├── convert_IOASCII_1D_to_gnuplot.sh │ ├── runscript.sh │ ├── gnuplot_script │ ├── planewave_along_3D_diagonal-dx_0.4__FD8-RK8.par │ ├── planewave_along_3D_diagonal-dx_0.2__FD8-RK8.par │ ├── sphericalgaussian-dx_0.2__FD8-RK8.par │ └── sphericalgaussian-dx_0.4__FD8-RK8.par │ └── 4thOrder_ConvergenceTests │ ├── out-RK4-FD4-3D.png │ ├── convergence-RK4-FD4-3D.png │ ├── convergence-RK4-FD4-spherical_gaussian.png │ ├── convert_IOASCII_1D_to_gnuplot.sh │ ├── runscript.sh │ ├── gnuplot_script │ ├── planewave_along_3D_diagonal-dx_0.2__FD4-RK4.par │ └── sphericalgaussian-dx_0.2__FD4-RK4.par ├── BaikalETK └── Makefile ├── .coveragerc ├── suffixes.py ├── lint_Jupyter_notebook.sh ├── NRPyEOS ├── NRPyEOS_free_memory.c ├── Makefile └── NRPyEOS_validation.c ├── NRPy_logo.py ├── here.py ├── colored.py ├── var_access.py ├── CarpetX └── tests │ └── test_WaveToy.py ├── latex_nrpy_style.tplx ├── LICENSE ├── jupyter_notebook_config.py ├── fstr.py ├── Min_Max_and_Piecewise_Expressions.py ├── safewrite.py ├── tensor_rotation.py ├── run_Jupyter_notebook.sh ├── README.md ├── benchmark-z4cnrpy.par ├── Tutorial-Loop_Generation_Cache_Blocking_soln.ipynb └── functional.py /__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /BSSN/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /GRFFE/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /GRHD/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /GRMHD/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /NRPyPN/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SEOBNR/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /TOV/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /BSSN/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ScalarWave/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /UnitTesting/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /IllinoisGRMHD/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /MoLtimestepping/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ScalarWave/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /WeylScal4NRPy/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /FishboneMoncriefID/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /IllinoisGRMHD/doc/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /WeylScal4NRPy/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /diagnostics_generic/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/CurviBoundaryConditions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /FishboneMoncriefID/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-hacker -------------------------------------------------------------------------------- /in_progress-SEOBNR/SEOBNR/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SpinWeight_minus2_SphHarmonics/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /UnitTesting/Test_UnitTesting/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /in_progress-Maxwell/Maxwell/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /u0_smallb_Poynting__Cartesian/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/BSSN/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /in_progress-Maxwell/Maxwell/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /in_progress-NRPyCritCol/ScalarField/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /u0_smallb_Poynting__Cartesian/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/SIMD.py: -------------------------------------------------------------------------------- 1 | ../../SIMD.py -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/grid.py: -------------------------------------------------------------------------------- 1 | ../../grid.py -------------------------------------------------------------------------------- /in_progress-GiRaFFE_NRPy/GiRaFFE_NRPy/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /in_progress-GiRaFFE_NRPy/GiRaFFEfood_NRPy/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/WeylScal4NRPy/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/outputC.py: -------------------------------------------------------------------------------- 1 | ../../outputC.py -------------------------------------------------------------------------------- /NRPyPN/nbconvert_latex_settings: -------------------------------------------------------------------------------- 1 | ../nbconvert_latex_settings -------------------------------------------------------------------------------- /in_progress/latex_nrpy_style.tplx: -------------------------------------------------------------------------------- 1 | ../latex_nrpy_style.tplx -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/indexedexp.py: -------------------------------------------------------------------------------- 1 | ../../indexedexp.py -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | sympy 2 | matplotlib 3 | nrpylatex>=1.0.7 4 | -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/BSSN/Psi4_tetrads.py: -------------------------------------------------------------------------------- 1 | ../../Psi4_tetrads.py -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/NRPy_param_funcs.py: -------------------------------------------------------------------------------- 1 | ../../NRPy_param_funcs.py -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/reference_metric.py: -------------------------------------------------------------------------------- 1 | ../../reference_metric.py -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/BSSN/BSSN_quantities.py: -------------------------------------------------------------------------------- 1 | ../../BSSN_quantities.py -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/finite_difference.py: -------------------------------------------------------------------------------- 1 | ../../finite_difference.py -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/latex_nrpy_style.tplx: -------------------------------------------------------------------------------- 1 | ../../latex_nrpy_style.tplx -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | out*.txt 3 | htmlcov/ 4 | .coverage 5 | .ipynb_checkpoints 6 | -------------------------------------------------------------------------------- /BSSN/Psi4Cartesianvalidation/BSSN/ADM_in_terms_of_BSSN.py: -------------------------------------------------------------------------------- 1 | ../../ADM_in_terms_of_BSSN.py -------------------------------------------------------------------------------- /TOV/tov_interp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/TOV/tov_interp -------------------------------------------------------------------------------- /.github/codeql.yml: 
-------------------------------------------------------------------------------- 1 | query-filters: 2 | - exclude: 3 | id: py/clear-text-logging-sensitive-data 4 | -------------------------------------------------------------------------------- /SEOBNR/Hamstring_constants.txt: -------------------------------------------------------------------------------- 1 | m1 2 | m2 3 | tortoise 4 | eta 5 | KK 6 | k0 7 | k1 8 | EMgamma 9 | d1v2 10 | dheffSSv2 11 | -------------------------------------------------------------------------------- /SEOBNR/Hamstring_variables.txt: -------------------------------------------------------------------------------- 1 | x 2 | y 3 | z 4 | px 5 | py 6 | pz 7 | s1x 8 | s1y 9 | s1z 10 | s2x 11 | s2y 12 | s2z 13 | -------------------------------------------------------------------------------- /param.txt: -------------------------------------------------------------------------------- 1 | BSSN_RHSs::AdvectShift = True # Just a comment! = :: ### 2 | BSSN_RHSs::AdvectLapse = True 3 | finite_difference::FDORDER=8 4 | -------------------------------------------------------------------------------- /convert_jupyter_to_python_and_run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | jupyter nbconvert --to python $1 --output=blah 4 | ipython blah.py && rm -f blah.py 5 | -------------------------------------------------------------------------------- /in_progress/2021_ETK_School/logos/ETK.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress/2021_ETK_School/logos/ETK.png -------------------------------------------------------------------------------- /in_progress/2021_ETK_School/logos/NSF.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress/2021_ETK_School/logos/NSF.jpg -------------------------------------------------------------------------------- /in_progress/2021_ETK_School/logos/WVU.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress/2021_ETK_School/logos/WVU.png -------------------------------------------------------------------------------- /in_progress-SEOBNR/SEOBNR/Hamstring_constants.txt: -------------------------------------------------------------------------------- 1 | m1 2 | m2 3 | tortoise 4 | eta 5 | KK 6 | k0 7 | k1 8 | EMgamma 9 | d1v2 10 | dheffSSv2 11 | -------------------------------------------------------------------------------- /in_progress-SEOBNR/SEOBNR/Hamstring_variables.txt: -------------------------------------------------------------------------------- 1 | x 2 | y 3 | z 4 | px 5 | py 6 | pz 7 | s1x 8 | s1y 9 | s1z 10 | s2x 11 | s2y 12 | s2z 13 | -------------------------------------------------------------------------------- /in_progress/2021_ETK_School/logos/Idaho.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress/2021_ETK_School/logos/Idaho.png -------------------------------------------------------------------------------- /in_progress/2021_ETK_School/logos/Nerpy.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress/2021_ETK_School/logos/Nerpy.png -------------------------------------------------------------------------------- /.deepsource.toml: -------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | [[analyzers]] 4 | name = "python" 5 | enabled = true 6 | 7 | [analyzers.meta] 8 | runtime_version = "3.x.x" 9 | -------------------------------------------------------------------------------- /UnitTesting/standard_constants.py: -------------------------------------------------------------------------------- 1 | # Contains the constants to be shared throughout all unittests. 2 | # Typical value for precision is 30 3 | 4 | precision = 30 5 | -------------------------------------------------------------------------------- /CurviBoundaryConditions/bdrycond_general_algorithm.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/CurviBoundaryConditions/bdrycond_general_algorithm.gif -------------------------------------------------------------------------------- /Deprecated/SommerfeldBoundaryCondition/E_rel_diagonal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/Deprecated/SommerfeldBoundaryCondition/E_rel_diagonal.png -------------------------------------------------------------------------------- /Deprecated/SommerfeldBoundaryCondition/E_rel_x-axis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/Deprecated/SommerfeldBoundaryCondition/E_rel_x-axis.png -------------------------------------------------------------------------------- /.lgtm.yml: -------------------------------------------------------------------------------- 1 | extraction: 2 | python: 3 | python_setup: 4 | setup_py: false 5 | version: 3 6 | 7 | queries: 8 | - exclude: py/clear-text-logging-sensitive-data 9 | -------------------------------------------------------------------------------- /in_progress/2021_ETK_School/test_results/Ax-convergence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress/2021_ETK_School/test_results/Ax-convergence.png -------------------------------------------------------------------------------- /in_progress/2021_ETK_School/test_results/Ex-convergence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress/2021_ETK_School/test_results/Ex-convergence.png -------------------------------------------------------------------------------- /Deprecated/SommerfeldBoundaryCondition/NRPy_vs_ETK_x-axis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/Deprecated/SommerfeldBoundaryCondition/NRPy_vs_ETK_x-axis.png -------------------------------------------------------------------------------- /Deprecated/SommerfeldBoundaryCondition/NRPy_vs_ETK_diagonal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/Deprecated/SommerfeldBoundaryCondition/NRPy_vs_ETK_diagonal.png 
-------------------------------------------------------------------------------- /in_progress/2021_ETK_School/test_results/constraintviolation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress/2021_ETK_School/test_results/constraintviolation.png -------------------------------------------------------------------------------- /nbconvert_latex_settings/conf.json: -------------------------------------------------------------------------------- 1 | { 2 | "base_template": "base", 3 | "mimetypes": { 4 | "text/latex": true, 5 | "text/tex": true, 6 | "application/pdf": true 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /UnitTesting/fix_cruft__remove_trailing_whitespace_in_notebook.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sed -i "s/\" \\\n\",/\"\\\n\",/g;s/\" \\\n\",/\"\\\n\",/g;s/\" \\\n\",/\"\\\n\",/g" $1 4 | -------------------------------------------------------------------------------- /defines_dict.py: -------------------------------------------------------------------------------- 1 | outC_NRPy_basic_defines_h_dict = {} 2 | 3 | def set_outC_NRPy_basic_defines_h_dict(x): 4 | global outC_NRPy_basic_defines_h_dict 5 | outC_NRPy_basic_defines_h_dict = x 6 | -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTests/wavetimes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTests/wavetimes.png -------------------------------------------------------------------------------- /in_progress-Maxwell/MaxwellVacuum/example_parfiles/Ax-convergence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress-Maxwell/MaxwellVacuum/example_parfiles/Ax-convergence.png -------------------------------------------------------------------------------- /in_progress-Maxwell/MaxwellVacuum/example_parfiles/Ay-convergence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress-Maxwell/MaxwellVacuum/example_parfiles/Ay-convergence.png -------------------------------------------------------------------------------- /in_progress-Maxwell/MaxwellVacuum/example_parfiles/Ex-convergence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress-Maxwell/MaxwellVacuum/example_parfiles/Ex-convergence.png -------------------------------------------------------------------------------- /in_progress-Maxwell/MaxwellVacuum/example_parfiles/Ey-convergence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress-Maxwell/MaxwellVacuum/example_parfiles/Ey-convergence.png -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/4thOrder_ConvergenceTests/out-RK4-FD4-3D.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/WaveToyNRPy/example_parfiles/4thOrder_ConvergenceTests/out-RK4-FD4-3D.png -------------------------------------------------------------------------------- /in_progress-Maxwell/MaxwellVacuum/example_parfiles/constraintviolation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stevenrbrandt/nrpytutorial/master/in_progress-Maxwell/MaxwellVacuum/example_parfiles/constraintviolation.png -------------------------------------------------------------------------------- /UnitTesting/fix_cruft__remove_trailing_whitespace__add_newline_at_end_of_files.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Remove trailing whitespace 4 | sed -i 's/[ \t]*$//' $1 5 | 6 | # Add newline at end of file 7 | sed -i '$a\' $1 8 | -------------------------------------------------------------------------------- /Deprecated/CurviBoundaryConditions/boundary_conditions/bcstruct_freemem.h: -------------------------------------------------------------------------------- 1 | 2 | for(int i=0;i $1/uuGF.gnuplot 4 | -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTests/convert_IOASCII_1D_to_gnuplot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cat $1/uuGF.xl | awk '{if($1=="#Time") printf("\n"); print $0}' | awk '{if(NF==3) { printf("%s %.15e %.15e\n",$0,sin($2/sqrt(3.)-$1),sin($2-$1)); } else { print $0 }}' > $1/uuGF.gnuplot 4 | -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/4thOrder_ConvergenceTests/runscript.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | for i in planewave_along_3D_diagonal-dx_0.4__FD4-RK4.par planewave_along_3D_diagonal-dx_0.2__FD4-RK4.par ; do 3 | taskset -c 0,1,2,3 ../cactus_etilgrmhdgcc $i 4 | DIRNAME=`echo $i|sed "s/.par//g"` 5 | ./convert_IOASCII_1D_to_gnuplot.sh $DIRNAME 6 | done 7 | -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTests/runscript.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | for i in planewave_along_3D_diagonal-dx_0.4__FD8-RK8.par planewave_along_3D_diagonal-dx_0.2__FD8-RK8.par ; do 3 | taskset -c 0,1,2,3 ../cactus_etilgrmhdgcc $i 4 | DIRNAME=`echo $i|sed "s/.par//g"` 5 | ./convert_IOASCII_1D_to_gnuplot.sh $DIRNAME 6 | done 7 | -------------------------------------------------------------------------------- /in_progress/2021_ETK_School/generate_MaxwellVacuum_thorns.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Generating MaxwellVacuum ETK thorn..." 4 | ./run_Jupyter_notebook.sh ETK_Workshop_2021-NRPy_tutorial.ipynb 5 | 6 | echo "Generating MaxwellVacuumID ETK thorn..." 7 | ./run_Jupyter_notebook.sh ../../Tutorial-ETK_thorn-MaxwellVacuumID.ipynb 8 | mv ../../MaxwellVacuumID . 
9 | -------------------------------------------------------------------------------- /suffixes.py: -------------------------------------------------------------------------------- 1 | import sympy as sp 2 | 3 | subtable = {} 4 | 5 | def setsuffix(f,t): 6 | assert type(f) == str 7 | assert type(t) == str 8 | subtable[f] = t 9 | 10 | def getsuffix(f): 11 | assert type(f) == str 12 | return subtable.get(f,"") 13 | 14 | def dosubs(expr): 15 | for sym in [s for s in expr.free_symbols]: 16 | ss = str(sym) 17 | if ss in subtable: 18 | expr = expr.subs(sym, sp.symbols(ss+subtable[ss])) 19 | return expr 20 | -------------------------------------------------------------------------------- /in_progress/tabulatedEOS/AUTHORS: -------------------------------------------------------------------------------- 1 | Below is a list of authors that have contributed to NRPyEOS 2 | and the codes NRPyEOS is based on: 3 | 4 | * Leo Werneck 5 | - Lead author of NRPyEOS. 6 | 7 | * Christian Ott 8 | - Author of original source code and its adaptation to the Einstein 9 | Toolkit thorn EOS_Omni. 10 | 11 | * Erik Schnetter 12 | - Co-author of EOS_Omni. 13 | 14 | * Lorenzo Sala 15 | - Added modifications to EOS to improve, among other things, errors 16 | in temperature recovery. 17 | -------------------------------------------------------------------------------- /UnitTesting/cleanup_repo__remove_carriage_return_chars.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | 5 | # Replace \r\n with \n: 6 | for file in os.listdir("."): 7 | # `file` can be a file or a directory. Check that it's in fact a file: 8 | if os.path.isfile(file): 9 | with open(file,"r") as readfile: 10 | with open("Tut.ipynb","w") as writefile: 11 | for line in readfile.readlines(): 12 | writefile.write(line.replace(r'\r\n',r'\n')) 13 | os.rename("Tut.ipynb", file) 14 | -------------------------------------------------------------------------------- /lint_Jupyter_notebook.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z "$1" ]; then 4 | echo "Correct usage of this script:" 5 | echo "./lint_Jupyter_notebook.sh [Jupyter notebook (a file with .ipynb extension)]" 6 | exit 7 | fi 8 | 9 | if [ ! -f $1 ]; then 10 | echo "You input: ./lint_Jupyter_notebook.sh" $1 11 | echo "Jupyter notebook" \"$1\" "not found!" 12 | exit 13 | fi 14 | 15 | jupyter nbconvert --to python $1 --stdout |grep -v "^\# " > $1.py # ignore lines that start with #. 
16 | pylint --disable=trailing-newlines,reimported,ungrouped-imports $1.py 17 | -------------------------------------------------------------------------------- /NRPyEOS/NRPyEOS_free_memory.c: -------------------------------------------------------------------------------- 1 | #include "NRPyEOS.h" 2 | 3 | void NRPyEOS_free_memory(NRPyEOS_params_tabulated *restrict eos_params) { 4 | 5 | printf("(NRPyEOS) *******************************\n"); 6 | printf("(NRPyEOS) Freeing up memory.\n"); 7 | 8 | // Free memory allocated for the table 9 | free(eos_params->logrho); 10 | free(eos_params->logtemp); 11 | free(eos_params->yes); 12 | free(eos_params->alltables); 13 | free(eos_params->epstable); 14 | 15 | printf("(NRPyEOS) All done!\n"); 16 | printf("(NRPyEOS) *******************************\n"); 17 | 18 | } 19 | -------------------------------------------------------------------------------- /nbconvert_latex_settings/style_bw_python.tex.j2: -------------------------------------------------------------------------------- 1 | ((= Black&white Python input/output style =)) 2 | 3 | ((*- extends 'base.tex.j2' -*)) 4 | 5 | %=============================================================================== 6 | % Input 7 | %=============================================================================== 8 | 9 | ((* block input scoped *)) 10 | \begin{verbatim} 11 | ((*- if resources.global_content_filter.include_input_prompt *)) 12 | ((( cell.source | add_prompts ))) 13 | ((* else *)) 14 | ((( cell.source ))) 15 | ((* endif *)) 16 | \end{verbatim} 17 | ((* endblock input *)) 18 | -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/4thOrder_ConvergenceTests/gnuplot_script: -------------------------------------------------------------------------------- 1 | # GNUPLOT SCRIPTS 2 | 3 | 4 | # RK4 + FD4 3D run: 5 | set term post enh color fontscale 1.5 6 | set out "out-RK4-FD4-3D.ps" 7 | set title "4th-order Convergence:\n om=k=c=1 Sine Plane Wave along Grid Diagonal, t=3 / om";set xlabel "x (units of 1/k)";set ylabel "|Numerical - Exact|";a=15;p [-4:4] "planewave_along_3D_diagonal-dx_0.4__FD4-RK4/uuGF.gnuplot" u 2:(abs($3-$4)) i a ti "dx=0.4" w l,"planewave_along_3D_diagonal-dx_0.2__FD4-RK4/uuGF.gnuplot" u 2:(abs($3-$4)*16) i a ti "rescaled (by 2**4; 4th order conv) dx = 0.2" w lp 8 | !ps2pdf out-RK4-FD4-3D.ps -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTests/gnuplot_script: -------------------------------------------------------------------------------- 1 | # GNUPLOT SCRIPTS 2 | 3 | 4 | # RK8 + FD8 3D run: 5 | set term post enh color fontscale 1.5 6 | set out "out-RK8-FD8-3D.ps" 7 | set title "8th-order Convergence:\n om=k=c=1 Sine Plane Wave along Grid Diagonal, t=3 / om";set xlabel "x (units of 1/k)";set ylabel "|Numerical - Exact|";a=15;p [-4:4] "planewave_along_3D_diagonal-dx_0.4__FD8-RK8/uuGF.gnuplot" u 2:(abs($3-$4)) i a ti "dx=0.4" w l,"planewave_along_3D_diagonal-dx_0.2__FD8-RK8/uuGF.gnuplot" u 2:(abs($3-$4)*256) i a ti "rescaled (by 2**8; 8th order conv) dx = 0.2" w lp 8 | !ps2pdf out-RK8-FD8-3D.ps 9 | -------------------------------------------------------------------------------- /nbconvert_latex_settings/index.tex.j2: -------------------------------------------------------------------------------- 1 | 2 | ((=- Default to the notebook output style -=)) 3 | ((*- if not cell_style is defined -*)) 4 | ((* set cell_style = 'style_jupyter.tex.j2' *)) 5 | ((*- endif
-*)) 6 | 7 | ((=- Inherit from the specified cell style. -=)) 8 | ((* extends cell_style *)) 9 | 10 | 11 | %=============================================================================== 12 | % Latex Article 13 | %=============================================================================== 14 | 15 | ((*- block docclass -*)) 16 | \documentclass[landscape,letterpaper,10pt,english]{article} 17 | ((*- endblock docclass -*)) 18 | -------------------------------------------------------------------------------- /NRPy_logo.py: -------------------------------------------------------------------------------- 1 | def print_logo(print_to_stdout=True): 2 | 3 | logo_str = """ 4 | ooooo ooo ooooooooo. ooooooooo. 88 5 | `888b. `8' `888 `Y88. `888 `Y88. 888888 6 | 8 `88b. 8 888 .d88' 888 .d88' oooo ooo 88 7 | 8 `88b. 8 888ooo88P' 888ooo88P' `88. .8' 8 | 8 `88b.8 888`88b. 888 `88..8' 9 | 8 `888 888 `88b. 888 `888' 10 | o8o `8 o888o o888o o888o .8' 11 | .o..P' 12 | NRPy+: Python-based Code Generation `Y8P' 13 | for Numerical Relativity... and Beyond! 14 | - homepage: http://blackholesathome.net 15 | - download: https://github.com/zachetienne/nrpytutorial 16 | """ 17 | if print_to_stdout==True: 18 | print(logo_str) 19 | else: 20 | return logo_str 21 | -------------------------------------------------------------------------------- /here.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from colored import colored 3 | import os 4 | import re 5 | 6 | _here = os.path.realpath(os.getcwd()) 7 | 8 | def here(*args): 9 | herell(False,*args) 10 | 11 | def herecc(*args): 12 | herell(True,*args) 13 | 14 | def herell(usecc,*args): 15 | import inspect 16 | stack = inspect.stack() 17 | frame = stack[2] 18 | if usecc: 19 | herestr = re.sub(r"^herecc\((.*)\)$",r"HERE: \1:",frame.code_context[0].strip()) 20 | else: 21 | herestr = "HERE:" 22 | fname = os.path.realpath(frame.filename) 23 | if fname.startswith(_here): 24 | fname = fname[len(_here)+1:] 25 | print(colored(herestr,"cyan"),fname+":"+colored(frame.lineno,"yellow"), *args, flush=True) 26 | frame = None 27 | stack = None 28 | 29 | if __name__ == "__main__": 30 | here(_here) 31 | herecc(_here) 32 | -------------------------------------------------------------------------------- /TOV/ETK_TOVSolver_src_tov_c_diff_3f18340d8cc602c8ba9005242e64c5e184767288.patch: -------------------------------------------------------------------------------- 1 | @@ -343,3 +346,12 @@ void TOV_C_Integrate_RHS(CCTK_ARGUMENTS) 2 | TOV_m_1d[i] = Surface_Mass; 3 | TOV_phi_1d[i] = 0.5 * log( 1.0 - 2.0 * Surface_Mass / TOV_r_1d[i]); 4 | TOV_mbary_1d[i] = TOV_mbary_1d[TOV_Surface_Index]; 5 | + 6 | + 7 | + } 8 | + if(i < (int)(TOV_Surface_Index*1.2) && i%5==0) { 9 | + // rho = pow(press / K, 1.0 / Gamma); 10 | + // eps = press / (Gamma - 1.0) / rho; 11 | + // mu = rho * (1.0 + eps); // -> mu = rho + press/(Gamma - 1.0) = total mass-energy density 12 | + printf("%.15e %.15e %.15e %.15e %.15e %.15e %.15e TOVV\n",TOV_r_1d[i],pow(TOV_press_1d[i+1]/TOV_K,1.0 / TOV_Gamma) + TOV_press_1d[i+1]/(TOV_Gamma - 1.0), 13 | + TOV_press_1d[i], TOV_m_1d[i], TOV_phi_1d[i],TOV_r_1d[i]/TOV_rbar_1d[i], TOV_rbar_1d[i]); 14 | -------------------------------------------------------------------------------- /colored.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | def not_colored(a,_): 4 | return repr(a) 5 | 6 | colors = { 7 | "red":"\033[31m", 8 | "green":"\033[32m", 9 | 
"yellow":"\033[33m", 10 | "blue":"\033[34m", 11 | "magenta":"\033[35m", 12 | "cyan":"\033[36m", 13 | } 14 | reset = "\033[0m" 15 | 16 | def colored(arg,c): 17 | assert type(c) == str 18 | assert c in colors 19 | s = str(arg) 20 | return colors[c] + s + reset 21 | 22 | if hasattr(sys.stdout,"isatty"): 23 | is_tty = sys.stdout.isatty() 24 | else: 25 | is_tty = False 26 | 27 | is_jupyter = type(sys.stdout).__name__ == 'OutStream' and type(sys.stdout).__module__ == 'ipykernel.iostream' 28 | if (not is_tty) and (not is_jupyter): 29 | colored = not_colored 30 | 31 | if __name__ == "__main__": 32 | if installed: 33 | print(colored("Colored was installed","green")) 34 | else: 35 | print("Colored was NOT installed") 36 | -------------------------------------------------------------------------------- /nbconvert_latex_settings/style_python.tex.j2: -------------------------------------------------------------------------------- 1 | ((= Python input/output style =)) 2 | 3 | ((*- extends 'base.tex.j2' -*)) 4 | 5 | % Custom definitions 6 | ((* block definitions *)) 7 | ((( super() ))) 8 | 9 | % Pygments definitions 10 | ((( resources.latex.pygments_definitions ))) 11 | ((* endblock definitions *)) 12 | 13 | %=============================================================================== 14 | % Input 15 | %=============================================================================== 16 | 17 | ((* block input scoped *)) 18 | \begin{Verbatim}[commandchars=\\\{\}] 19 | ((*- if resources.global_content_filter.include_input_prompt *)) 20 | ((( cell.source | highlight_code(strip_verbatim=True, metadata=cell.metadata) | add_prompts ))) 21 | ((* else *)) 22 | ((( cell.source | highlight_code(strip_verbatim=True, metadata=cell.metadata) ))) 23 | ((* endif *)) 24 | \end{Verbatim} 25 | ((* endblock input *)) 26 | -------------------------------------------------------------------------------- /ScalarWave/CommonParams.py: -------------------------------------------------------------------------------- 1 | # Common parameters for scalar wave evolutions 2 | # 3 | # Author: Zachariah B. Etienne 4 | # zachetie **at** gmail **dot* com 5 | # 6 | # License: BSD 2-Clause 7 | 8 | # COMPLETE DOCUMENTATION (JUPYTER NOTEBOOKS): 9 | # START PAGE (start here!): ../NRPy+_Tutorial.ipynb 10 | # THIS MODULE: ../Tutorial-Scalarwave.ipynb 11 | 12 | # Step P1: Import needed NRPy+ core modules: 13 | import NRPy_param_funcs as par # NRPy+: Parameter interface 14 | 15 | thismodule = __name__ 16 | 17 | # Parameters common to/needed by all ScalarWave Python modules 18 | 19 | # Step P2: Define the C parameter wavespeed. The `wavespeed` 20 | # variable is a proper SymPy variable, so it can be 21 | # used in below expressions. In the C code, it acts 22 | # just like a usual parameter, whose value is 23 | # specified in the parameter file. 
24 | wavespeed = par.Cparameters("REAL", thismodule, "wavespeed", 1.0) 25 | -------------------------------------------------------------------------------- /UnitTesting/RepeatedTimer.py: -------------------------------------------------------------------------------- 1 | # https://stackoverflow.com/questions/3393612/run-certain-code-every-n-seconds/13151299 2 | 3 | from threading import Timer 4 | 5 | 6 | class RepeatedTimer(object): 7 | def __init__(self, interval, function, *args, **kwargs): 8 | self._timer = None 9 | self.interval = interval 10 | self.function = function 11 | self.args = args 12 | self.kwargs = kwargs 13 | self.is_running = False 14 | self.start() 15 | 16 | def _run(self): 17 | self.is_running = False 18 | self.start() 19 | self.function(*self.args, **self.kwargs) 20 | 21 | def start(self): 22 | if not self.is_running: 23 | self._timer = Timer(self.interval, self._run) 24 | self._timer.start() 25 | self.is_running = True 26 | 27 | def stop(self): 28 | self._timer.cancel() 29 | self.is_running = False 30 | -------------------------------------------------------------------------------- /UnitTesting/Test_UnitTesting/module_for_testing.py: -------------------------------------------------------------------------------- 1 | import sympy as sp 2 | 3 | 4 | # Generic module that can be used for testing, specifically test_functions 5 | # function() can be called in isolation. init_function2() must be called before function2() is called. 6 | def function(create_gamma=False): 7 | global alpha, betaU 8 | 9 | a, b, c = sp.symbols('a b c') 10 | 11 | alpha = a + b + c 12 | betaU = [0, a**2 + 2*b**2 + c**2, sp.sqrt(a + b)] 13 | 14 | if create_gamma: 15 | global gamma 16 | 17 | gamma = sp.atan2(b, a) 18 | 19 | 20 | def function2(create_gamma=False): 21 | 22 | global alpha2, betaU2 23 | 24 | alpha2 = a2 + b2 + c2 25 | 26 | betaU2 = [0, a2**2 + 2*b2**2 + c2**2, sp.sqrt(a2 + b2)] 27 | 28 | if create_gamma: 29 | global gamma2 30 | 31 | gamma2 = sp.atan2(b2, a2) 32 | 33 | 34 | def init_function2(): 35 | global a2, b2, c2 36 | a2, b2, c2 = sp.symbols('a2 b2 c2') 37 | -------------------------------------------------------------------------------- /var_access.py: -------------------------------------------------------------------------------- 1 | import re 2 | from fstr import f 3 | 4 | from_access = {} 5 | 6 | def set_access(ret, varname): 7 | from_access[ret] = varname 8 | 9 | def var_from_access(access): 10 | access = access.strip() # This really shouldn't be necessary 11 | v = from_access.get(access, None) 12 | if v is not None: 13 | return v 14 | g = re.match(r'^(\w+)(\[\w+\])*$', access) 15 | if g: 16 | return g.group(1) 17 | g = re.match(r'in_gfs\w*\[IDX4S\((\w+),i0,i1,i2\)\]', access) 18 | if g: 19 | return g.group(1) 20 | g = re.match(r'^const\s+(\w+)\s+(\w+)', access) 21 | if g: 22 | return g.group(2) 23 | g = re.match(r'^\*?([\w.]+?)[DU]*\d*$', access) 24 | if g: 25 | return g.group(1) 26 | g = re.match(r'^(\w+)\[CCTK_GFINDEX3D\(cctkGH,i0,i1,i2\)\]', access) 27 | if g: 28 | return g.group(1) 29 | return "?" 30 | #raise Exception("Could not identify a variable name from the access string '"+access+"'") 31 | -------------------------------------------------------------------------------- /in_progress-NRPyCritCol/ScalarField/ScalarField_declare_gridfunctions.py: -------------------------------------------------------------------------------- 1 | # Gridfunction registration for a massless scalar field 2 | 3 | # Author: Leonardo R. 
Werneck 4 | # wernecklr **at** gmail **dot** com 5 | 6 | # This NRPy+ module is used internally by the other ScalarField NRPy+ modules 7 | 8 | import sympy as sp 9 | import grid as gri 10 | 11 | def declare_scalar_field_gridfunctions_if_not_declared_already(): 12 | # Step 2: Register all needed BSSN gridfunctions. 13 | 14 | global sf, sfM 15 | 16 | # Step 2.a: First check to see if this function has already been called. 17 | # If so, do not register the gridfunctions again! 18 | for i in range(len(gri.glb_gridfcs_list)): 19 | if "sf" in gri.glb_gridfcs_list[i].name: 20 | sf, sfM = sp.symbols('sf sfM', real=True) 21 | return sf, sfM 22 | 23 | # Step 2.b: Register indexed quantities, using ixp.register_... functions 24 | sf, sfM = gri.register_gridfunctions("EVOL", ["sf", "sfM"]) 25 | return sf, sfM 26 | -------------------------------------------------------------------------------- /nbconvert_latex_settings/report.tex.j2: -------------------------------------------------------------------------------- 1 | 2 | % Default to the notebook output style 3 | ((* if not cell_style is defined *)) 4 | ((* set cell_style = 'style_ipython.tex.j2' *)) 5 | ((* endif *)) 6 | 7 | % Inherit from the specified cell style. 8 | ((* extends cell_style *)) 9 | 10 | 11 | %=============================================================================== 12 | % Latex Book 13 | %=============================================================================== 14 | 15 | ((* block predoc *)) 16 | ((( super() ))) 17 | ((* block tableofcontents *))\tableofcontents((* endblock tableofcontents *)) 18 | ((* endblock predoc *)) 19 | 20 | ((* block docclass *)) 21 | \documentclass{report} 22 | ((* endblock docclass *)) 23 | 24 | ((* block markdowncell scoped *)) 25 | ((( cell.source | citation2latex | strip_files_prefix | convert_pandoc('markdown+tex_math_double_backslash', 'json',extra_args=[]) | resolve_references | convert_pandoc('json','latex', extra_args=["--top-level-division=chapter"]) ))) 26 | ((* endblock markdowncell *)) 27 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | 3 | on: 4 | push: 5 | branches: [ "master" ] 6 | pull_request: 7 | branches: [ "master" ] 8 | schedule: 9 | - cron: "12 12 * * 2" 10 | 11 | jobs: 12 | analyze: 13 | name: Analyze 14 | runs-on: ubuntu-latest 15 | permissions: 16 | actions: read 17 | contents: read 18 | security-events: write 19 | 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | language: [ python ] 24 | 25 | steps: 26 | - name: Checkout 27 | uses: actions/checkout@v3 28 | 29 | - name: Initialize CodeQL 30 | uses: github/codeql-action/init@v2 31 | with: 32 | languages: ${{ matrix.language }} 33 | config-file: ./.github/codeql.yml 34 | queries: +security-and-quality 35 | 36 | - name: Autobuild 37 | uses: github/codeql-action/autobuild@v2 38 | 39 | - name: Perform CodeQL Analysis 40 | uses: github/codeql-action/analyze@v2 41 | with: 42 | category: "/language:${{ matrix.language }}" 43 | -------------------------------------------------------------------------------- /in_progress/tabulatedEOS/NRPyEOS_minimal.c: -------------------------------------------------------------------------------- 1 | #include <stdio.h> 2 | #include <stdlib.h> 3 | #include "NRPyEOS.h" 4 | 5 | int main(int argc, char **argv) { 6 | 7 | // Step 0: Check for correct usage 8 | if( argc != 2 ) { 9 | fprintf(stderr,"(NRPyEOS - minimal) Correct usage is ./minimal
eos_file_path\n"); 10 | exit(1); 11 | } 12 | 13 | // Step 1: Initialize the EOS struct 14 | NRPyEOS_params eos_params; 15 | NRPyEOS_readtable_set_EOS_params(argv[1],&eos_params); 16 | 17 | // Step 2: Perform one interpolation 18 | const double rho = 1e-6; 19 | const double Y_e = 0.4; 20 | const double T = 1.1; 21 | double P, eps; 22 | NRPyEOS_P_and_eps_from_rho_Ye_T(&eos_params,rho,Y_e,T,&P,&eps); 23 | 24 | // Step 3: Print information 25 | printf("(NRPyEOS) Density : %.15e\n",rho); 26 | printf("(NRPyEOS) e- fraction: %.15e\n",Y_e); 27 | printf("(NRPyEOS) Temperature: %.15e\n",T); 28 | printf("(NRPyEOS) Pressure : %.15e\n",P); 29 | printf("(NRPyEOS) Energy : %.15e\n",eps); 30 | 31 | // Step 4: Free memory 32 | NRPyEOS_free_memory(&eos_params); 33 | 34 | // All done! 35 | return 0; 36 | } 37 | -------------------------------------------------------------------------------- /.github/logs_for_broken-github-actions-core_Jupyter_notebooks_latestsympy-MacOS12.yml: -------------------------------------------------------------------------------- 1 | (EXEC): Executing `gcc -std=gnu99 -Ofast -fopenmp -funroll-loops MaxwellEvolCart_Playground_Ccodes/Maxwell_Playground.c -o MaxwellEvolCart_Playground_Ccodes/output/Maxwell_Playground -lm`... 2 | clang: error: unsupported option '-fopenmp' 3 | clang: error: unsupported option '-fopenmp' 4 | (BENCH): Finished executing in 0.36935901641845703 seconds. 5 | Next-to-most optimized compilation failed. Moving to maximally-compatible gcc compile option: 6 | (EXEC): Executing `gcc -std=gnu99 -O2 MaxwellEvolCart_Playground_Ccodes/Maxwell_Playground.c -o MaxwellEvolCart_Playground_Ccodes/output/Maxwell_Playground -lm`... 7 | MaxwellEvolCart_Playground_Ccodes/Maxwell_Playground.c:200:9: error: called object type 'double' is not a function or function pointer 8 | time(&start_timer); // Resolution of one second... 9 | ~~~~^ 10 | MaxwellEvolCart_Playground_Ccodes/Maxwell_Playground.c:278:13: error: called object type 'double' is not a function or function pointer 11 | time(&end_timer); // Resolution of one second... 12 | ~~~~^ 13 | 2 errors generated. 
14 | -------------------------------------------------------------------------------- /.github/workflows/github-actions-windows2022.yml: -------------------------------------------------------------------------------- 1 | name: Windows 2022 2 | 3 | on: 4 | push: 5 | branches: master 6 | pull_request: 7 | branches: master 8 | 9 | jobs: 10 | TwoBHsCollidelatestSymPy: 11 | 12 | runs-on: windows-2022 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | python-version: ["3.11.0"] 17 | 18 | steps: 19 | - uses: actions/checkout@v3 20 | - name: Set up Python ${{ matrix.python-version }} 21 | uses: actions/setup-python@v3 22 | with: 23 | python-version: ${{ matrix.python-version }} 24 | # - name: Brew install needed TeX packages (for pdflatex) 25 | # run: | 26 | # choco install texlive 27 | # choco install ffmpeg pandoc 28 | - name: Install dependencies 29 | run: | 30 | python -m pip install --upgrade pip setuptools 31 | python -m pip install --upgrade nbconvert 32 | python -m pip install testfixtures sympy mpmath jupyter matplotlib scipy nrpylatex clang_format 33 | - name: Core Jupyter notebook testsuite 34 | run: | 35 | bash ./UnitTesting/run_NRPy_UnitTests.sh python3 36 | -------------------------------------------------------------------------------- /UnitTesting/all_notebooks_lint_in_pyconv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf pyconv 4 | mkdir pyconv 5 | 6 | rm -f /tmp/joblist.txt /tmp/joblist2.txt 7 | for i in *.ipynb; do 8 | # FIXME: Don't know why Tutorial-TOV-Piecewise_Polytrope_EOSs.ipynb.py causes pylint to choke... 9 | # FIXME: Don't know why Tutorial-WeylScalarsInvariants-Cartesian.ipynb causes pylint to choke... 10 | if [ $i != "Tutorial-BaikalETK.ipynb" ] && [ $i != "NRPyPlus_Tutorial.ipynb" ] && [ $i != "Tutorial-How_NRPy_Computes_Finite_Difference_Coeffs.ipynb" ] && [ $i != "Tutorial-TOV-Piecewise_Polytrope_EOSs.ipynb" ] && [ $i != "Tutorial-WeylScalarsInvariants-Cartesian.ipynb" ] ; then 11 | echo "jupyter nbconvert --to python $i --stdout |grep -v \"^\# \" > pyconv/$i.py" >> /tmp/joblist.txt # ignore lines that start with #. 
12 | echo "echo \"Linting complete: $i\" ; pylint --disable=trailing-newlines,reimported,ungrouped-imports --output=pyconv/$i.py.txt pyconv/$i.py" >> /tmp/joblist2.txt 13 | fi 14 | done 15 | 16 | parallel --jobs 16 < /tmp/joblist.txt 17 | parallel --jobs 16 < /tmp/joblist2.txt 18 | 19 | echo "TOP OFFENDERS:" 20 | for i in pyconv/*.txt; do echo `grep rated $i` $i;done |sort -k7 -g|head -n20 21 | -------------------------------------------------------------------------------- /WeylScal4NRPy/tests/trusted_values_dict.py: -------------------------------------------------------------------------------- 1 | from mpmath import mpf, mp, mpc 2 | from UnitTesting.standard_constants import precision 3 | 4 | mp.dps = precision 5 | trusted_values_dict = {} 6 | 7 | # Generated on: 2019-08-09 8 | trusted_values_dict['WeylScalars_Cartesian__WeylScalars_Cartesian__globals'] = {'psi4r': mpc(real='6.71936568881256768293042114237323', imag='3.50716640525663647665055577817839'), 'psi4i': mpc(real='-11.7102358497456435770800453610718', imag='-2.61371968686489353217439202126116'), 'psi3r': mpc(real='0.884973387269943123634163839597022', imag='0.205327296474456050257018091542704'), 'psi3i': mpc(real='-3.16957376509790744734118561609648', imag='6.88196288136694445114471818669699'), 'psi2r': mpf('1.76923151768067781302894394962632'), 'psi2i': mpc(real='0.0', imag='3.6603433359376409406138463964453'), 'psi1r': mpc(real='0.884973387269943123634163839597022', imag='-0.205327296474456050257018091542704'), 'psi1i': mpc(real='3.16957376509790744734118561609648', imag='6.88196288136694445114471818669699'), 'psi0r': mpc(real='6.71936568881256768293042114237323', imag='-3.50716640525663647665055577817839'), 'psi0i': mpc(real='11.7102358497456435770800453610718', imag='-2.61371968686489353217439202126116')} 9 | -------------------------------------------------------------------------------- /ScalarWave/tests/trusted_values_dict.py: -------------------------------------------------------------------------------- 1 | from mpmath import mpf, mp, mpc 2 | from UnitTesting.standard_constants import precision 3 | 4 | mp.dps = precision 5 | trusted_values_dict = {} 6 | 7 | # Generated on: 2019-08-09 8 | trusted_values_dict['ScalarWave_RHSs__ScalarWave_RHSs__globals'] = {'wavespeed': mpf('0.668616460564278702882745619717753'), 'uu_rhs': mpf('0.65261108714780824424650518267299'), 'vv_rhs': mpf('0.368531803639439351490746662269146')} 9 | 10 | # Generated on: 2019-08-09 11 | trusted_values_dict['ScalarWaveCurvilinear_RHSs__ScalarWaveCurvilinear_RHSs__globals'] = {'uu_rhs': mpf('0.65261108714780824424650518267299'), 'vv_rhs': mpf('7.91102076566947763349564936041703')} 12 | 13 | # Generated on: 2020-05-23 14 | trusted_values_dict['InitialData__InitialData__WaveType__PlaneWave___globals'] = {'uu_ID': mpf('2.52358067004083477110217386122'), 'vv_ID': mpf('-0.569645247209539485189673979502')} 15 | 16 | # Generated on: 2022-11-11 17 | # Notes: Added 2 to uu, so that relative error is well-defined everywhere. 
18 | trusted_values_dict['InitialData__InitialData__WaveType__SphericalGaussian___globals'] = {'uu_ID': mpf('2.36513405713036977689022106748'), 'vv_ID': mpf('0.0563512459589975646763098472872')} 19 | -------------------------------------------------------------------------------- /CarpetX/tests/test_WaveToy.py: -------------------------------------------------------------------------------- 1 | from UnitTesting.create_test import create_test 2 | 3 | 4 | def test_wavetoy_carpetx_module(): 5 | 6 | module = 'CarpetX.WaveToy' 7 | 8 | module_name = 'WaveToy' 9 | 10 | function_and_global_dict = {'run_all()': ['evol','anal','init']} 11 | 12 | create_test(module, module_name, function_and_global_dict) 13 | 14 | 15 | # Ignore this -- it's to ensure bash functionality 16 | if __name__ == '__main__': 17 | import sys 18 | 19 | if len(sys.argv) <= 3: 20 | failed_functions = [] 21 | for fun in dir(): 22 | if fun[0:5] == 'test_': 23 | print('\nTesting ' + str(fun) + '...\n') 24 | try: 25 | exec(fun + '()') 26 | except SystemExit: 27 | failed_functions.append(fun) 28 | 29 | if failed_functions != []: 30 | import sys, os 31 | with open(os.path.join('UnitTesting', 'failed_tests.txt'), 'a') as file: 32 | for function in failed_functions: 33 | file.write(sys.argv[0] + ': ' + str(function) + '\n') 34 | sys.exit(1) 35 | 36 | else: 37 | globals()[sys.argv[4]]() 38 | -------------------------------------------------------------------------------- /UnitTesting/bench_all_notebooks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -f /tmp/outNRPybench.txt 4 | time for i in *.ipynb; do 5 | 6 | # First clean up any mess made by notebook. 7 | git clean -fdq 8 | echo Working on $i now ... 9 | 10 | # NRPy+ Jupyter notebooks are completely Python 2/3 cross-compatible. 11 | # However `jupyter nbconvert` will refuse to run if the notebook 12 | # was generated using a different kernel. Here we fool Jupyter 13 | # to think the notebook was written using the native python kernel. 
14 | PYTHONMAJORVERSION=`python -c "import sys;print(sys.version_info[0])"` 15 | if (( $PYTHONMAJORVERSION == 3 )); then 16 | cat $i | sed "s/ \"name\": \"python2\"/ \"name\": \"python3\"/g" > $i-tmp ; mv $i-tmp $i 17 | else 18 | cat $i | sed "s/ \"name\": \"python3\"/ \"name\": \"python2\"/g" > $i-tmp ; mv $i-tmp $i 19 | fi 20 | 21 | BENCH=$(/usr/bin/time -f %e jupyter nbconvert --log-level=0 --to notebook --inplace --execute --ExecutePreprocessor.timeout=-1 $i 2>&1) 22 | echo $BENCH $i | tee -a /tmp/outNRPybench.txt 23 | 24 | done 25 | 26 | sort -k1 -g /tmp/outNRPybench.txt 27 | # Clean up any mess made by last notebook run 28 | git clean -fdq 29 | -------------------------------------------------------------------------------- /GRFFE/tests/test_GRFFE.py: -------------------------------------------------------------------------------- 1 | from UnitTesting.create_test import create_test 2 | 3 | 4 | def test_equations(): 5 | 6 | module = 'GRFFE.equations' 7 | 8 | module_name = 'GRFFE' 9 | 10 | function_and_global_dict = {'generate_everything_for_UnitTesting()': ['B_notildeU','smallb4U','smallbsquared','TEM4UU','TEM4UD','S_tildeD','S_tilde_fluxUD','S_tilde_source_termD']} 11 | 12 | create_test(module, module_name, function_and_global_dict) 13 | 14 | 15 | if __name__ == '__main__': 16 | import sys 17 | 18 | if len(sys.argv) <= 3: 19 | failed_functions = [] 20 | for fun in dir(): 21 | if fun[0:5] == 'test_': 22 | print('\nTesting ' + str(fun) + '...\n') 23 | try: 24 | exec(fun + '()') 25 | except SystemExit: 26 | failed_functions.append(fun) 27 | 28 | if failed_functions != []: 29 | import sys, os 30 | with open(os.path.join('UnitTesting', 'failed_tests.txt'), 'a') as file: 31 | for function in failed_functions: 32 | file.write(sys.argv[0] + ': ' + str(function) + '\n') 33 | sys.exit(1) 34 | 35 | else: 36 | globals()[sys.argv[4]]() 37 | -------------------------------------------------------------------------------- /latex_nrpy_style.tplx: -------------------------------------------------------------------------------- 1 | % Based on http://nbviewer.jupyter.org/github/ipython/nbconvert-examples/blob/master/citations/Tutorial.ipynb , authored by Brian E. 
Granger 2 | ((*- extends 'style_jupyter.tex.j2' -*)) 3 | ((* block docclass *)) 4 | % Declare the document class 5 | \documentclass[landscape,letterpaper,10pt,english]{article} 6 | ((* endblock docclass *)) 7 | 8 | % ((* block Xauthor *)) 9 | % \author{Zachariah Etienne} 10 | % ((* endblock Xauthor *)) 11 | 12 | % ((* block Xbibliography *)) 13 | % \bibliographystyle{unsrt} 14 | % \bibliography{ipython} 15 | % ((* endblock Xbibliography *)) 16 | 17 | ((* block commands *)) 18 | % Start the section counter at -1, so the Table of Contents is Section 0 19 | \setcounter{section}{-2} 20 | % Prevent overflowing lines due to hard-to-break entities 21 | \sloppy 22 | % Setup hyperref package 23 | \hypersetup{ 24 | breaklinks=true, % so long urls are correctly broken across lines 25 | colorlinks=true, 26 | urlcolor=urlcolor, 27 | linkcolor=linkcolor, 28 | citecolor=citecolor, 29 | } 30 | 31 | % Slightly bigger margins than the latex defaults 32 | \geometry{verbose,tmargin=0.5in,bmargin=0.5in,lmargin=0.5in,rmargin=0.5in} 33 | ((* endblock commands *)) 34 | -------------------------------------------------------------------------------- /.github/broken-github-actions-core_Jupyter_notebooks_latestsympy-MacOS12.yml: -------------------------------------------------------------------------------- 1 | name: Core Jupyter notebook testsuite--MacOS 12, Latest SymPy 2 | 3 | on: 4 | push: 5 | branches: master 6 | pull_request: 7 | branches: master 8 | 9 | jobs: 10 | build: 11 | 12 | runs-on: macos-12 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | python-version: ["3.7.15", "3.8.14", "3.10.8", "3.11.0"] 17 | 18 | steps: 19 | - uses: actions/checkout@v3 20 | - name: Set up Python ${{ matrix.python-version }} 21 | uses: actions/setup-python@v3 22 | with: 23 | python-version: ${{ matrix.python-version }} 24 | - name: Brew install needed TeX packages (for pdflatex) 25 | run: | 26 | brew install texlive 27 | brew install ffmpeg colordiff pandoc 28 | - name: Install dependencies 29 | run: | 30 | python -m pip install --upgrade pip setuptools 31 | python -m pip install --upgrade nbconvert 32 | python -m pip install testfixtures sympy mpmath jupyter matplotlib scipy nrpylatex 33 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 34 | - name: UnitTests 35 | run: | 36 | ./UnitTesting/core_Jupyter_notebook_testsuite.sh 37 | -------------------------------------------------------------------------------- /UnitTesting/test_skeleton.py: -------------------------------------------------------------------------------- 1 | from UnitTesting.create_test import create_test 2 | 3 | 4 | def test_your_module(): 5 | 6 | module = 'your_module_location.your_module' 7 | 8 | module_name = 'name_of_your_module' 9 | 10 | function_and_global_dict = {'function_call': ['your global', 'your next global', 'your next next global', 'etc']} 11 | 12 | create_test(module, module_name, function_and_global_dict) 13 | 14 | 15 | # Ignore this -- it's to ensure bash functionality 16 | if __name__ == '__main__': 17 | import sys 18 | 19 | if len(sys.argv) <= 3: 20 | failed_functions = [] 21 | for fun in dir(): 22 | if fun[0:5] == 'test_': 23 | print('\nTesting ' + str(fun) + '...\n') 24 | try: 25 | exec(fun + '()') 26 | except SystemExit: 27 | failed_functions.append(fun) 28 | 29 | if failed_functions != []: 30 | import sys, os 31 | with open(os.path.join('UnitTesting', 'failed_tests.txt'), 'a') as file: 32 | for function in failed_functions: 33 | file.write(sys.argv[0] + ': ' + str(function) + '\n') 34 | sys.exit(1) 35 | 36 | 
else: 37 | globals()[sys.argv[4]]() 38 | -------------------------------------------------------------------------------- /WeylScal4NRPy/tests/test_WeylScal4NRPy.py: -------------------------------------------------------------------------------- 1 | from UnitTesting.create_test import create_test 2 | 3 | 4 | def test_WeylScalars_Cartesian(): 5 | 6 | module = 'WeylScal4NRPy.WeylScalars_Cartesian' 7 | 8 | module_name = 'WeylScalars_Cartesian' 9 | 10 | function_and_global_dict = {'WeylScalars_Cartesian()': ['psi4r', 'psi4i', 'psi3r', 'psi3i', 'psi2r', 'psi2i', 'psi1r', 'psi1i', 'psi0r', 'psi0i']} 11 | 12 | create_test(module, module_name, function_and_global_dict) 13 | 14 | 15 | if __name__ == '__main__': 16 | import sys 17 | 18 | if len(sys.argv) <= 3: 19 | failed_functions = [] 20 | for fun in dir(): 21 | if fun[0:5] == 'test_': 22 | print('\nTesting ' + str(fun) + '...\n') 23 | try: 24 | exec(fun + '()') 25 | except SystemExit: 26 | failed_functions.append(fun) 27 | 28 | if failed_functions != []: 29 | import sys, os 30 | with open(os.path.join('UnitTesting', 'failed_tests.txt'), 'a') as file: 31 | for function in failed_functions: 32 | file.write(sys.argv[0] + ': ' + str(function) + '\n') 33 | sys.exit(1) 34 | 35 | else: 36 | globals()[sys.argv[4]]() 37 | -------------------------------------------------------------------------------- /in_progress/tabulatedEOS/latex_nrpy_style.tplx: -------------------------------------------------------------------------------- 1 | % Based on http://nbviewer.jupyter.org/github/ipython/nbconvert-examples/blob/master/citations/Tutorial.ipynb , authored by Brian E. Granger 2 | ((*- extends 'style_jupyter.tex.j2' -*)) 3 | ((* block docclass *)) 4 | % Declare the document class 5 | \documentclass[landscape,letterpaper,10pt,english]{article} 6 | ((* endblock docclass *)) 7 | 8 | % ((* block Xauthor *)) 9 | % \author{Zachariah Etienne} 10 | % ((* endblock Xauthor *)) 11 | 12 | % ((* block Xbibliography *)) 13 | % \bibliographystyle{unsrt} 14 | % \bibliography{ipython} 15 | % ((* endblock Xbibliography *)) 16 | 17 | ((* block commands *)) 18 | % Start the section counter at -1, so the Table of Contents is Section 0 19 | \setcounter{section}{-2} 20 | % Prevent overflowing lines due to hard-to-break entities 21 | \sloppy 22 | % Setup hyperref package 23 | \hypersetup{ 24 | breaklinks=true, % so long urls are correctly broken across lines 25 | colorlinks=true, 26 | urlcolor=urlcolor, 27 | linkcolor=linkcolor, 28 | citecolor=citecolor, 29 | } 30 | 31 | % Slightly bigger margins than the latex defaults 32 | \geometry{verbose,tmargin=0.5in,bmargin=0.5in,lmargin=0.5in,rmargin=0.5in} 33 | ((* endblock commands *)) 34 | -------------------------------------------------------------------------------- /UnitTesting/setup_trusted_values_dict.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | 5 | # [setup_trusted_values_dict] takes in a path [path], and creates the file [trusted_values_dict.py] in the 6 | # directory specified in [path]. If [trusted_values_dict.py] already exists within this directory, 7 | # nothing happens. 8 | 9 | # Called by NRPyUnitTests_(Anything)_Globals 10 | 11 | def setup_trusted_values_dict(path): 12 | 13 | # Try opening [trusted_values_dict.py] in [directory]. 
14 | try: 15 | logging.debug(' Trying to open {}/trusted_values_dict.py...'.format(path)) 16 | fr = open(os.path.join(path, 'trusted_values_dict.py'), 'r') 17 | logging.debug(' ...Success, file already exists.') 18 | fr.close() 19 | # If [trusted_values_dict.py] does not exist in [directory], create it with default content. 20 | except IOError: 21 | logging.info(' ...trusted_values_dict.py does not exist. Creating it...') 22 | fw = open(os.path.join(path, 'trusted_values_dict.py'), 'w+') 23 | fw.write('from mpmath import mpf, mp, mpc\nfrom UnitTesting.standard_constants import precision\n\n' 24 | 'mp.dps = precision\ntrusted_values_dict = {}\n') 25 | fw.close() 26 | logging.info(' ...Success: trusted_values_dict.py created.\n') 27 | -------------------------------------------------------------------------------- /UnitTesting/create_dict_string.py: -------------------------------------------------------------------------------- 1 | from mpmath import mp, mpf, mpc 2 | from UnitTesting.standard_constants import precision 3 | 4 | # [create_dict_string] takes in a value dictionary [value_dict] and returns a string representation of that dictionary 5 | # that can be easily printed to the console or to a file. 6 | 7 | # Called by calc_error, first_time_print 8 | 9 | 10 | def create_dict_string(value_dict): 11 | 12 | # Setting proper precision 13 | mp.dps = precision 14 | 15 | # Initializing return_string 16 | return_string = '' 17 | 18 | # For each entry in the sorted dictionary, add properly formatted dictionary entry to return_string based on 19 | # type of variable (mpf, mpc, other) 20 | for var, num in sorted(value_dict.items(), key=lambda s: s[0].lower()): 21 | if isinstance(num, mpf): 22 | return_string += "'" + var + "': mpf('" + str(num) + "'), " 23 | elif isinstance(num, mpc): 24 | return_string += "'" + var + "': mpc(real='" + str(num.real) + "', imag='" + str(num.imag) + "'), " 25 | else: 26 | return_string += "'" + var + "': " + str(num) + ", " 27 | 28 | # Add dictionary brackets and remove extra ", " at the end 29 | return_string = '{' + return_string[0:-2] + '}' 30 | 31 | return return_string 32 | -------------------------------------------------------------------------------- /FishboneMoncriefID/tests/test_FishboneMoncriefID.py: -------------------------------------------------------------------------------- 1 | from UnitTesting.create_test import create_test 2 | 3 | 4 | def test_FishboneMoncriefID(): 5 | 6 | module = 'FishboneMoncriefID.FishboneMoncriefID' 7 | 8 | module_name = 'FishBoneMoncriefID' 9 | 10 | function_and_global_dict = {'FishboneMoncriefID()': ['hm1', 'rho_initial', 'uBL4D', 'uBL4U', 'uKS4U', 'IDalpha', 'IDgammaDD', 'IDKDD', 'IDbetaU', 'IDValencia3velocityU']} 11 | 12 | create_test(module, module_name, function_and_global_dict) 13 | 14 | 15 | if __name__ == '__main__': 16 | import sys 17 | 18 | if len(sys.argv) <= 3: 19 | failed_functions = [] 20 | for fun in dir(): 21 | if fun[0:5] == 'test_': 22 | print('\nTesting ' + str(fun) + '...\n') 23 | try: 24 | exec(fun + '()') 25 | except SystemExit: 26 | failed_functions.append(fun) 27 | 28 | if failed_functions != []: 29 | import sys, os 30 | with open(os.path.join('UnitTesting', 'failed_tests.txt'), 'a') as file: 31 | for function in failed_functions: 32 | file.write(sys.argv[0] + ': ' + str(function) + '\n') 33 | sys.exit(1) 34 | 35 | else: 36 | globals()[sys.argv[4]]() 37 | -------------------------------------------------------------------------------- /in_progress-Maxwell/Maxwell/CommonParams.py: 
-------------------------------------------------------------------------------- 1 | # Common parameters for VacuumMaxwell evolutions 2 | 3 | # Author: Terrence Pierre Jacques 4 | # terrencepierrej **at** gmail **dot** com 5 | 6 | # License: BSD 2-Clause 7 | 8 | # COMPLETE DOCUMENTATION (JUPYTER NOTEBOOKS): 9 | # START PAGE (start here!): ../NRPy+_Tutorial.ipynb 10 | # THIS MODULE: ../Tutorial-VacuumMaxwell_Flat_Cartesian_ID.ipynb 11 | 12 | # Step P1: Import needed NRPy+ core modules: 13 | import NRPy_param_funcs as par # NRPy+: Parameter interface 14 | 15 | # The name of this module ("CommonParams") is given by __name__: 16 | thismodule = __name__ 17 | 18 | # Parameters common to/needed by all VacuumMaxwell Python modules 19 | 20 | # Step P2: Define the C parameters amp, lam, time, and wavespeed. 21 | # These variables are proper SymPy variables, so they can be 22 | # used in SymPy expressions. In the C code, they act 23 | # just like usual parameters, whose values are 24 | # specified in the parameter file. 25 | 26 | # amplitude 27 | amp = par.Cparameters("REAL",thismodule,"amp", default_vals=1.0) 28 | 29 | # lambda 30 | lam = par.Cparameters("REAL",thismodule,"lam", default_vals=1.0) 31 | 32 | time = par.Cparameters("REAL",thismodule,"time", default_vals=0.0) 33 | 34 | wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", default_vals=1.0) 35 |
-------------------------------------------------------------------------------- /BSSN/BSSN_T4UUmunu_vars.py: -------------------------------------------------------------------------------- 1 | # This module registers rescaled BSSN T^{mu nu} source term variables 2 | # as AUX (i.e., not evolved) gridfunctions 3 | 4 | # Author: Zachariah B. Etienne 5 | # zachetie **at** gmail **dot** com 6 | 7 | # Step P1: import all needed modules from NRPy+: 8 | import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support 9 | import grid as gri # NRPy+: Functions having to do with numerical grids 10 | 11 | def define_BSSN_T4UUmunu_rescaled_source_terms(): 12 | # Step 0: First check to see if this function has already been called. 13 | # If so, do not register the gridfunctions again! 14 | for i in range(len(gri.glb_gridfcs_list)): 15 | if "sDD00" in gri.glb_gridfcs_list[i].name: 16 | return 17 | 18 | # Step 1: Declare as globals all quantities declared in this function. 19 | global rho,S,sD,sDD 20 | 21 | # Step 2: Register all needed AUX (non-evolved) gridfunctions. 22 | # Step 2a: Register indexed quantities, using ixp.register_... functions 23 | sDD = ixp.register_gridfunctions_for_single_rank2("AUX", "sDD", "sym01") 24 | sD = ixp.register_gridfunctions_for_single_rank1("AUX", "sD") 25 | # Step 2b: Register scalar quantities, using gri.register_gridfunctions() 26 | rho, S = gri.register_gridfunctions("AUX",["rho","S"]) 27 |
-------------------------------------------------------------------------------- /IllinoisGRMHD/Convert_to_HydroBase/README: -------------------------------------------------------------------------------- 1 | Cactus Code Thorn Convert_to_HydroBase 2 | Author(s) : Zachariah B. Etienne 3 | Note : Convert IllinoisGRMHD-compatible variables 4 | to HydroBase-compatible variables. Used 5 | for compatibility with HydroBase/ADMBase 6 | analysis thorns in the Einstein Toolkit. 7 | Maintainer(s): Zachariah B. Etienne 8 | License : FreeBSD License, AKA "2-Clause BSD License" 9 | ---------------------------------------------------------- 10 | 11 | 1.
Purpose 12 | 13 | IllinoisGRMHD and HydroBase variables are incompatible; 14 | the former uses 3-velocity defined as v^i = u^i/u^0, and 15 | the latter uses the Valencia formalism definition of v^i. 16 | 17 | Define the Valencia formalism's definition of v^i to be 18 | "W^i", and IllinoisGRMHD's definition "v^i". 19 | Then 20 | 21 | W^i = (v^i + \beta^i) / (\alpha). (Eq 11 in 22 | http://arxiv.org/pdf/1501.07276.pdf). 23 | 24 | Equivalently, 25 | 26 | v^i = (\alpha) W^i - \beta^i 27 | 28 | In addition, IllinoisGRMHD needs the A-fields to be 29 | defined on *staggered* grids, and HydroBase does not yet 30 | support this option. The staggerings are defined in 31 | Table 1 of the IllinoisGRMHD code announcement paper: 32 | http://arxiv.org/pdf/1501.07276.pdf (page 15). 33 | 34 | The long-term goal should be to deprecate this thorn. 35 |
-------------------------------------------------------------------------------- /IllinoisGRMHD/ID_converter_ILGRMHD/README: -------------------------------------------------------------------------------- 1 | Cactus Code Thorn ID_converter_ILGRMHD 2 | Author(s) : Zachariah B. Etienne 3 | Note : Convert HydroBase variables to 4 | IllinoisGRMHD-compatible variables. Used 5 | for compatibility with HydroBase/ADMBase 6 | initial data thorns in the Einstein 7 | Toolkit. 8 | Maintainer(s): Zachariah B. Etienne 9 | License : FreeBSD License, AKA "2-Clause BSD License" 10 | ---------------------------------------------------------- 11 | 12 | 1. Purpose 13 | 14 | IllinoisGRMHD and HydroBase variables are incompatible; 15 | the former uses 3-velocity defined as v^i = u^i/u^0, and 16 | the latter uses the Valencia formalism definition of v^i. 17 | 18 | Define the Valencia formalism's definition of v^i to be 19 | "W^i", and IllinoisGRMHD's definition "v^i". 20 | Then 21 | 22 | W^i = (v^i + \beta^i) / (\alpha). (Eq 11 in 23 | http://arxiv.org/pdf/1501.07276.pdf). 24 | 25 | Equivalently, 26 | 27 | v^i = (\alpha) W^i - \beta^i 28 | 29 | In addition, IllinoisGRMHD needs the A-fields to be 30 | defined on *staggered* grids, and HydroBase does not yet 31 | support this option. The staggerings are defined in 32 | Table 1 of the IllinoisGRMHD code announcement paper: 33 | http://arxiv.org/pdf/1501.07276.pdf (page 15). 34 | 35 | The long-term goal should be to deprecate this thorn.
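A minimal Python sketch (not part of either thorn) of the velocity conversion described in this README and in Convert_to_HydroBase/README above; the function names are hypothetical, and vU, betaU, and alpha are assumed to be per-point values already read off the grid:

def illinoisgrmhd_to_valencia(vU, betaU, alpha):
    # Valencia 3-velocity: W^i = (v^i + beta^i) / alpha  (Eq. 11 of arXiv:1501.07276)
    return [(vU[i] + betaU[i]) / alpha for i in range(3)]

def valencia_to_illinoisgrmhd(WU, betaU, alpha):
    # Inverse relation: v^i = alpha * W^i - beta^i
    return [alpha * WU[i] - betaU[i] for i in range(3)]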
36 | -------------------------------------------------------------------------------- /NRPyEOS/Makefile: -------------------------------------------------------------------------------- 1 | # HDF5 directory 2 | # HDF_DIR = /usr/local/opt/hdf5 3 | HDF_DIR = /usr/lib/x86_64-linux-gnu/hdf5/serial 4 | 5 | CC = gcc 6 | CFLAGS = -Wall -I../ -I$(HDF_DIR)/include 7 | LDFLAGS = -L$(HDF_DIR)/lib -lhdf5 -lm 8 | 9 | all: NRPyEOS_validation 10 | 11 | NRPyEOS_readtable_set_EOS_params.o: NRPyEOS_readtable_set_EOS_params.c 12 | $(CC) $(CFLAGS) -c $< -o $@ 13 | 14 | NRPyEOS_free_memory.o: NRPyEOS_free_memory.c 15 | $(CC) $(CFLAGS) -c $< -o $@ 16 | 17 | NRPyEOS_Tabulated_general_interpolators.o: NRPyEOS_Tabulated_general_interpolators.c 18 | $(CC) $(CFLAGS) -c $< -o $@ 19 | 20 | NRPyEOS_Tabulated_known_T.o: NRPyEOS_Tabulated_known_T.c 21 | $(CC) $(CFLAGS) -c $< -o $@ 22 | 23 | NRPyEOS_Tabulated_unknown_T.o: NRPyEOS_Tabulated_unknown_T.c 24 | $(CC) $(CFLAGS) -c $< -o $@ 25 | 26 | NRPyEOS_validation.o: NRPyEOS_validation.c 27 | $(CC) $(CFLAGS) -c $< -o $@ 28 | 29 | NRPyEOS_validation: NRPyEOS_readtable_set_EOS_params.o NRPyEOS_free_memory.o NRPyEOS_Tabulated_general_interpolators.o NRPyEOS_Tabulated_known_T.o NRPyEOS_Tabulated_unknown_T.o NRPyEOS_validation.o 30 | $(CC) $(CFLAGS) NRPyEOS_readtable_set_EOS_params.o NRPyEOS_free_memory.o NRPyEOS_Tabulated_general_interpolators.o NRPyEOS_Tabulated_known_T.o NRPyEOS_Tabulated_unknown_T.o NRPyEOS_validation.o -o NRPyEOS_validation $(LDFLAGS) 31 | 32 | clean: 33 | rm -f *.o NRPyEOS_validation 34 | 35 | veryclean: clean 36 | rm -f *.txt 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 2-Clause License 2 | 3 | Copyright (c) 2021, Zachariah Etienne 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /FishboneMoncriefID/LICENSE: -------------------------------------------------------------------------------- 1 | BSD 2-Clause License 2 | 3 | Copyright (c) 2019, Zachariah Etienne 4 | All rights reserved. 
5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 |
-------------------------------------------------------------------------------- /FishboneMoncriefID/README: -------------------------------------------------------------------------------- 1 | Cactus Code Thorn FishboneMoncriefID 2 | Author: Zachariah B. Etienne 3 | zachetie **at** gmail **dot** com 4 | Maintainer(s): Einstein Toolkit 5 | Licence : BSD 2-Clause 6 | -------------------------------------------------------------------------- 7 | 8 | 1. Purpose 9 | 10 | This thorn constructs Fishbone-Moncrief accretion 11 | disk initial data, within a Kerr-Schild spinning 12 | black hole spacetime. 13 | 14 | 2. Documentation 15 | 16 | Full documentation may be found in the NRPy+ 17 | Jupyter notebook tutorial modules: 18 | * Tutorial-FishboneMoncriefID.ipynb and 19 | * Tutorial-ETK_thorn-FishboneMoncriefID.ipynb. 20 | 21 | Both of the above notebooks are located in the 22 | doc/ directory. 23 | 24 | The notebooks also generate clickable LaTeX'ed versions 25 | of themselves, and compile the LaTeX into PDFs. 26 | 27 | 3. Example parameter file 28 | 29 | An example parameter file may be found in parfile_examples/ 30 | 31 | 4. Note on automatic code generation 32 | 33 | This thorn is automatically generated by running the 34 | Tutorial-ETK_thorn-FishboneMoncriefID.ipynb within 35 | NRPy+, which is also BSD 2-Clause licensed: 36 | https://github.com/zachetienne/nrpytutorial 37 | 38 | 5. Special acknowledgement 39 | 40 | Thanks to Brendan Drachler for spotting and fixing 41 | a bug in an earlier release, and to Steve Brandt & 42 | Roland Haas for code review and additional suggestions.
43 | -------------------------------------------------------------------------------- /GRHD/tests/test_GRHD.py: -------------------------------------------------------------------------------- 1 | from UnitTesting.create_test import create_test 2 | 3 | 4 | def test_equations(): 5 | 6 | module = 'GRHD.equations' 7 | 8 | module_name = 'GRHD' 9 | 10 | function_and_global_dict = {'generate_everything_for_UnitTesting()': ['h','T4UU','T4UD','sqrtgammaDET','rho_star','tau_tilde','S_tildeD','vU','rho_star_fluxU','tau_tilde_fluxU','S_tilde_fluxUD', 11 | 's_source_term','g4DD_zerotimederiv_dD','S_tilde_source_termD','rescaledValenciavU','u4U_ito_ValenciavU','rescaledvU','u4U_ito_vU']} 12 | 13 | create_test(module, module_name, function_and_global_dict) 14 | 15 | 16 | if __name__ == '__main__': 17 | import sys 18 | 19 | if len(sys.argv) <= 3: 20 | failed_functions = [] 21 | for fun in dir(): 22 | if fun[0:5] == 'test_': 23 | print('\nTesting ' + str(fun) + '...\n') 24 | try: 25 | exec(fun + '()') 26 | except SystemExit: 27 | failed_functions.append(fun) 28 | 29 | if failed_functions != []: 30 | import sys, os 31 | with open(os.path.join('UnitTesting', 'failed_tests.txt'), 'a') as file: 32 | for function in failed_functions: 33 | file.write(sys.argv[0] + ': ' + str(function) + '\n') 34 | sys.exit(1) 35 | 36 | else: 37 | globals()[sys.argv[4]]() 38 | -------------------------------------------------------------------------------- /GRMHD/tests/test_GRMHD.py: -------------------------------------------------------------------------------- 1 | from UnitTesting.create_test import create_test 2 | 3 | 4 | def test_equations(): 5 | 6 | module = 'GRMHD.equations' 7 | 8 | module_name = 'GRMHD' 9 | 10 | function_and_global_dict = {'generate_everything_for_UnitTesting()': 11 | ['GRHDT4UU','GRFFET4UU','T4UU','T4UD', 12 | 'rho_star','tau_tilde','S_tildeD', 13 | 'rho_star_fluxU','tau_tilde_fluxU','S_tilde_fluxUD', 14 | 's_source_term','S_tilde_source_termD']} 15 | 16 | create_test(module, module_name, function_and_global_dict) 17 | 18 | 19 | if __name__ == '__main__': 20 | import sys 21 | 22 | if len(sys.argv) <= 3: 23 | failed_functions = [] 24 | for fun in dir(): 25 | if fun[0:5] == 'test_': 26 | print('\nTesting ' + str(fun) + '...\n') 27 | try: 28 | exec(fun + '()') 29 | except SystemExit: 30 | failed_functions.append(fun) 31 | 32 | if failed_functions != []: 33 | import sys, os 34 | with open(os.path.join('UnitTesting', 'failed_tests.txt'), 'a') as file: 35 | for function in failed_functions: 36 | file.write(sys.argv[0] + ': ' + str(function) + '\n') 37 | sys.exit(1) 38 | 39 | else: 40 | globals()[sys.argv[4]]() 41 | -------------------------------------------------------------------------------- /UnitTesting/run_all_notebooks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # exit upon failure of any command. 4 | set -e 5 | 6 | for i in *.ipynb NRPyPN/*.ipynb ScalarField/*.ipynb; do 7 | 8 | echo $i; 9 | 10 | # First clean up any mess made by notebook. 11 | git clean -fdq 12 | 13 | # NRPy+ Jupyter notebooks are completely Python 2/3 cross-compatible. 14 | # However `jupyter nbconvert` will refuse to run if the notebook 15 | # was generated using a different kernel. Here we fool Jupyter 16 | # to think the notebook was written using the native python kernel. 
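# The next lines detect the running Python's major version, then the sed commands rewrite the notebook's kernel name ("python2" vs. "python3") to match.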
17 | PYTHONMAJORVERSION=`python -c "import sys;print(sys.version_info[0])"` 18 | if (( $PYTHONMAJORVERSION == 3 )); then 19 | cat $i | sed "s/ \"name\": \"python2\"/ \"name\": \"python3\"/g" > $i-tmp ; mv $i-tmp $i 20 | else 21 | cat $i | sed "s/ \"name\": \"python3\"/ \"name\": \"python2\"/g" > $i-tmp ; mv $i-tmp $i 22 | fi 23 | 24 | jupyter nbconvert --to notebook --inplace --execute --ExecutePreprocessor.timeout=-1 $i 25 | cat $i |sed "s/\\\r\\\n/\\\n/g" > $i-backslashrs_removed 26 | mv $i-backslashrs_removed $i 27 | echo "^^^" $i "^^^" 28 | echo 29 | 30 | done 31 | 32 | # Report significant differences only. 33 | git diff|grep -v "image/png"|grep -v "pdfTeX"|grep -v "write18 enabled"|grep -v seconds|grep -v "Generating C code" |grep -v PASSED|grep -v taskset|cdiff 34 | 35 | # Clean up any mess made by last notebook run 36 | git clean -fdq 37 | -------------------------------------------------------------------------------- /jupyter_notebook_config.py: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------ 2 | # Custom whitespace stripping -- Append to bottom of ~/.jupyter/jupyter_notebook_config.py 3 | # Found here: https://github.com/jupyter/notebook/issues/1455#issuecomment-519891159 4 | #------------------------------------------------------------------------------ 5 | def strip_white_space(text): 6 | return '\n'.join([line.rstrip() for line in text.split('\n')]) 7 | 8 | def scrub_output_pre_save(model=None, **kwargs): 9 | """Auto strip trailing white space before saving.""" 10 | # If we are dealing with a notebook # 11 | if model['type'] == 'notebook': 12 | # Don't run on anything other than nbformat version 4 # 13 | if model['content']['nbformat'] != 4: 14 | print("Skipping white space stripping since `nbformat` != 4.") 15 | return 16 | # Apply function to every cell # 17 | print("Stripping white space on a notebook.") 18 | for cell in model['content']['cells']: 19 | if cell['cell_type'] != 'code': continue 20 | cell['source'] = strip_white_space(cell['source']) 21 | # If we are dealing with a file # 22 | if model['type'] == 'file': 23 | if model['format'] == 'text': 24 | print("Stripping white space on a file.") 25 | model['content'] = strip_white_space(model['content']) 26 | 27 | c.ContentsManager.pre_save_hook = scrub_output_pre_save 28 | -------------------------------------------------------------------------------- /UnitTesting/Test_UnitTesting/trusted_values_dict.py: -------------------------------------------------------------------------------- 1 | from mpmath import mpf, mp 2 | from UnitTesting.standard_constants import precision 3 | 4 | mp.dps = precision 5 | trusted_values_dict = {} 6 | 7 | # Generated on: 2019-08-19 8 | trusted_values_dict['test_module__function__create_gamma_True__globals'] = {'alpha': mpf('1.54386601990377991810987623466644'), 'betaU[0]': mpf('0.0'), 'betaU[1]': mpf('1.98874089862252425611087670798774'), 'betaU[2]': mpf('1.18528390621386752257266960717399'), 'gamma': mpf('1.10517914597414785529786062528438')} 9 | 10 | # Generated on: 2019-08-19 11 | trusted_values_dict['test_module__function__create_gamma_False__globals'] = {'alpha': mpf('1.54386601990377991810987623466644'), 'betaU[0]': mpf('0.0'), 'betaU[1]': mpf('1.98874089862252425611087670798774'), 'betaU[2]': mpf('1.18528390621386752257266960717399')} 12 | 13 | # Generated on: 2019-08-19 14 | trusted_values_dict['test_module__function2__create_gamma_True__globals'] = {'alpha2': 
mpf('1.21221658210736660254980279205483'), 'betaU2[0]': mpf('0.0'), 'betaU2[1]': mpf('0.923593586131018217151706833148966'), 'betaU2[2]': mpf('0.876891628743949392648859253713177'), 'gamma2': mpf('1.27519340026786004782753578280176')} 15 | 16 | # Generated on: 2019-08-19 17 | trusted_values_dict['test_module__function2__create_gamma_False__globals'] = {'alpha2': mpf('1.21221658210736660254980279205483'), 'betaU2[0]': mpf('0.0'), 'betaU2[1]': mpf('0.923593586131018217151706833148966'), 'betaU2[2]': mpf('0.876891628743949392648859253713177')} 18 | -------------------------------------------------------------------------------- /u0_smallb_Poynting__Cartesian/tests/test_u0_smallb_Poynting__Cartesian.py: -------------------------------------------------------------------------------- 1 | from UnitTesting.create_test import create_test 2 | 3 | 4 | def test_u0_smallb_Poynting__Cartesian(): 5 | 6 | module = 'u0_smallb_Poynting__Cartesian.u0_smallb_Poynting__Cartesian' 7 | 8 | module_name = 'u0sbPoyn' 9 | 10 | function_and_global_dict = {'compute_u0_smallb_Poynting__Cartesian()': ['u0', 'uD', 'uBcontraction', 'uU', 11 | 'smallb4U', 'smallb4D', 12 | 'smallb2etk', 'PoynSU']} 13 | 14 | create_test(module, module_name, function_and_global_dict) 15 | 16 | 17 | if __name__ == '__main__': 18 | import sys 19 | 20 | if len(sys.argv) <= 3: 21 | failed_functions = [] 22 | for fun in dir(): 23 | if fun[0:5] == 'test_': 24 | print('\nTesting ' + str(fun) + '...\n') 25 | try: 26 | exec(fun + '()') 27 | except SystemExit: 28 | failed_functions.append(fun) 29 | 30 | if failed_functions != []: 31 | import sys, os 32 | with open(os.path.join('UnitTesting', 'failed_tests.txt'), 'a') as file: 33 | for function in failed_functions: 34 | file.write(sys.argv[0] + ': ' + str(function) + '\n') 35 | sys.exit(1) 36 | 37 | else: 38 | globals()[sys.argv[4]]() 39 | 40 | -------------------------------------------------------------------------------- /IllinoisGRMHD/doc/generate_IllinoisGRMHD_from_ipynb_files.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | NUM_JOBS=0 4 | 5 | count=0 6 | mkdir ../src 7 | mkdir ../Convert_to_HydroBase 8 | mkdir ../Convert_to_HydroBase/src 9 | mkdir ../ID_converter_ILGRMHD 10 | mkdir ../ID_converter_ILGRMHD/src 11 | for i in *.ipynb; do 12 | echo Executing $i ... 13 | # NRPy+ Jupyter notebooks are completely Python 2/3 cross-compatible. 14 | # However `jupyter nbconvert` will refuse to run if the notebook 15 | # was generated using a different kernel. Here we fool Jupyter 16 | # to think the notebook was written using the native python kernel. 17 | PYTHONMAJORVERSION=`python -c "import sys;print(sys.version_info[0])"` 18 | if (( $PYTHONMAJORVERSION == 3 )); then 19 | cat $i | sed "s/ \"name\": \"python2\"/ \"name\": \"python3\"/g" > $i-tmp ; mv $i-tmp $i 20 | else 21 | cat $i | sed "s/ \"name\": \"python3\"/ \"name\": \"python2\"/g" > $i-tmp ; mv $i-tmp $i 22 | fi 23 | 24 | jupyter nbconvert --to notebook --inplace --execute --ExecutePreprocessor.timeout=-1 $i & 25 | if ((count==$NUM_JOBS)); then 26 | wait 27 | count=0 28 | else 29 | let count+=1 30 | fi 31 | done 32 | 33 | wait 34 | echo Finished! 
35 | 36 | # Alternative approach if gnu parallel is installed: 37 | # rm -f /tmp/joblist.txt 38 | # for i in *.ipynb; do 39 | # echo jupyter nbconvert --to notebook --inplace --execute --ExecutePreprocessor.timeout=-1 $i >> /tmp/joblist.txt 40 | # done 41 | # parallel --jobs 8 < /tmp/joblist.txt 42 | -------------------------------------------------------------------------------- /u0_smallb_Poynting__Cartesian/tests/trusted_values_dict.py: -------------------------------------------------------------------------------- 1 | from mpmath import mpf, mp, mpc 2 | from UnitTesting.standard_constants import precision 3 | 4 | mp.dps = precision 5 | trusted_values_dict = {} 6 | 7 | # Generated on: 2019-10-08 8 | # Reason for changing values: Issue with lowering operator on u^{\mu} to compute u_j. Also remove g4DD and g4UU; those are computed within BSSN/ADMBSSN_tofrom_4metric 9 | trusted_values_dict['u0sbPoyn__compute_u0_smallb_Poynting__Cartesian__globals'] = {'u0': mpf('0.751914772923022001194226504594553'), 'uD[0]': mpf('0.183721751888366903617562385511448'), 'uD[1]': mpf('0.119812812315040561829864674578358'), 'uD[2]': mpf('0.312788770366989702982585175553276'), 'uBcontraction': mpf('0.175347309305583743759195653696604'), 'uU[0]': mpf('-0.364066660324468905733189036041903'), 'uU[1]': mpf('-0.0378849772494775056256865716174872'), 'uU[2]': mpf('-0.476480636313712572229280970631576'), 'smallb4U[0]': mpf('0.086103714189473992020992352682665'), 'smallb4U[1]': mpf('0.307306636736306798163353899991219'), 'smallb4U[2]': mpf('0.339319044770849009959319783387464'), 'smallb4U[3]': mpf('-0.0250871356138368976300514945528851'), 'smallb4D[0]': mpf('0.563652010197405599081596784509181'), 'smallb4D[1]': mpf('0.281871300583031671116212159720726'), 'smallb4D[2]': mpf('0.104105229879200435577841952656928'), 'smallb4D[3]': mpf('0.447887797020086877757907992275478'), 'smallb2etk': mpf('0.159242118217654315732110368166076'), 'PoynSU[0]': mpf('0.10514608339161159128197446122545'), 'PoynSU[1]': mpf('0.110459918842554559580077471109353'), 'PoynSU[2]': mpf('-0.00074355206485074832000831084852268')} 10 | -------------------------------------------------------------------------------- /UnitTesting/first_time_print.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from datetime import date 4 | from UnitTesting.create_dict_string import create_dict_string 5 | 6 | # [first_time_print] takes in a module [mod], a value dictionary [value_dict], a path [path], and a boolean [write]. 7 | # It prints to the console the properly formatted trusted_values_dict entry based on [mod] and [value_dict]. 
8 | # Additionally, if [write] is [True], it appends this output to the file [path]/trusted_values_dict.py 9 | 10 | # Called by run_test 11 | 12 | # Uses self.module_name, self.trusted_values_dict_name, self.calculated_dict, self.path 13 | 14 | 15 | def first_time_print(self, write=True): 16 | dict_name = self.trusted_values_dict_name 17 | 18 | output_string = r""" 19 | # Generated on: """ + str(date.today()) + r""" 20 | trusted_values_dict['""" + dict_name + r"""'] = """ + \ 21 | str(create_dict_string(self.calculated_dict)) 22 | 23 | error = r""" 24 | Module: """ + self.module_name + r""" 25 | Please copy the following code between the ##### and paste it into your trusted_values_dict.py file for this module: 26 | 27 | ##### 28 | 29 | """ + output_string + """ 30 | 31 | ##### 32 | """ 33 | logging.error(error) 34 | 35 | # If [write] is [True], write to [trusted_values_dict] 36 | if write: 37 | logging.debug(' Writing trusted_values_dict entry to trusted_values_dict.py...') 38 | with open(os.path.join(self.path, 'trusted_values_dict.py'), 'a') as file: 39 | file.write(output_string) 40 | logging.debug(' ...Success: entry written to trusted_values_dict.py\n') 41 | -------------------------------------------------------------------------------- /fstr.py: -------------------------------------------------------------------------------- 1 | from inspect import currentframe 2 | import re 3 | import sys 4 | from here import here 5 | 6 | def f(s): 7 | """ 8 | Mimic the functionality of formatted strings in Python3. Convert curly brackets in s 9 | to expressions. 10 | >>> f('3+2={3+2}.') 11 | '3+2=5.' 12 | >>> f('3+2={3+2}') 13 | '3+2=5' 14 | >>> f('{"="*3} test {"="*3}') 15 | '=== test ===' 16 | >>> f('{{hello}}') 17 | '{hello}' 18 | 19 | >>> metric="gxx" 20 | >>> f('\\texttt{{{metric}}}') 21 | '\\texttt{gxx}' 22 | """ 23 | globs = currentframe().f_back.f_globals 24 | locs= currentframe().f_back.f_locals 25 | count = 0 26 | ns = '' 27 | w = '' 28 | i = 0 29 | while i < len(s): 30 | c = s[i] 31 | if i + 1 < len(s): 32 | nc = s[i+1] 33 | else: 34 | nc = "" 35 | i += 1 36 | 37 | if c == '{' and nc == '{': 38 | ns += '{' 39 | i += 1 40 | elif c == '}' and nc == '}': 41 | ns += '}' 42 | i += 1 43 | elif c == '{': 44 | count = 1 45 | j = i 46 | while i < len(s): 47 | if s[i] == '{': 48 | count += 1 49 | elif s[i] == '}': 50 | count -= 1 51 | if count == 0: 52 | break 53 | i += 1 54 | ns += str(eval(s[j:i],globs,locs)) 55 | i += 1 56 | else: 57 | ns += c 58 | return ns 59 | 60 | if __name__ == "__main__": 61 | import doctest 62 | doctest.testmod() 63 | -------------------------------------------------------------------------------- /IllinoisGRMHD/Convert_to_HydroBase/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Zachariah B. Etienne 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 
12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 17 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 18 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 19 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 20 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 22 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those 25 | of the authors and should not be interpreted as representing official policies, 26 | either expressed or implied, of the FreeBSD Project. 27 | -------------------------------------------------------------------------------- /IllinoisGRMHD/ID_converter_ILGRMHD/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Zachariah B. Etienne 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 17 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 18 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 19 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 20 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 22 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those 25 | of the authors and should not be interpreted as representing official policies, 26 | either expressed or implied, of the FreeBSD Project. 27 | -------------------------------------------------------------------------------- /NRPyPN/indexedexpNRPyPN.py: -------------------------------------------------------------------------------- 1 | # indexedexpNRPyPN.py: functions related to indexed expressions, 2 | # including e.g., tensors and pseudotensors. 3 | # *** This is a stripped-down version of the indexedexp.py module 4 | # in the NRPy+ root directory. 
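# Example usage (illustrative only, not part of the original module): declarerank1("v") returns the three SymPy symbols [v0, v1, v2].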
5 | 6 | # Step 1: Load needed modules 7 | import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends 8 | import sys # Standard Python module for multiplatform OS-level functions 9 | 10 | thismodule = __name__ 11 | 12 | def zerorank1(DIM=-1): 13 | if DIM == -1: 14 | DIM = 3 # default to 3D 15 | return [sp.sympify(0) for i in range(DIM)] 16 | 17 | def zerorank3(DIM=-1): 18 | if DIM == -1: 19 | DIM = 3 # default to 3D 20 | return [[[sp.sympify(0) for i in range(DIM)] for j in range(DIM)] for k in range(DIM)] 21 | 22 | def declarerank1(objname, DIM=-1): 23 | if DIM==-1: 24 | DIM = 3 # default to 3D 25 | return [sp.sympify(objname + str(i)) for i in range(DIM)] 26 | 27 | class NonInvertibleMatrixError(ZeroDivisionError): 28 | """ Matrix Not Invertible; Division By Zero """ 29 | 30 | # Define the rank-3 version of the Levi-Civita symbol. 31 | def LeviCivitaSymbol_dim3_rank3(): 32 | LeviCivitaSymbol = zerorank3(DIM=3) 33 | 34 | for i in range(3): 35 | for j in range(3): 36 | for k in range(3): 37 | # From https://codegolf.stackexchange.com/questions/160359/levi-civita-symbol : 38 | LeviCivitaSymbol[i][j][k] = (i - j) * (j - k) * (k - i) * sp.Rational(1,2) 39 | return LeviCivitaSymbol 40 | 41 | if __name__ == "__main__": 42 | import doctest 43 | sys.exit(doctest.testmod()[0]) 44 | -------------------------------------------------------------------------------- /Min_Max_and_Piecewise_Expressions.py: -------------------------------------------------------------------------------- 1 | from outputC import nrpyAbs # NRPy+: Core C code output module 2 | import NRPy_param_funcs as par # NRPy+: parameter interface 3 | import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends 4 | 5 | thismodule = __name__ 6 | 7 | TINYDOUBLE = par.Cparameters("REAL", thismodule, "TINYDOUBLE", 1e-100) 8 | 9 | def min_noif(a,b): 10 | # Returns the minimum of a and b 11 | if a==sp.sympify(0): 12 | return sp.Rational(1,2) * (b-nrpyAbs(b)) 13 | if b==sp.sympify(0): 14 | return sp.Rational(1,2) * (a-nrpyAbs(a)) 15 | return sp.Rational(1,2) * (a+b-nrpyAbs(a-b)) 16 | 17 | def max_noif(a,b): 18 | # Returns the maximum of a and b 19 | if a==sp.sympify(0): 20 | return sp.Rational(1,2) * (b+nrpyAbs(b)) 21 | if b==sp.sympify(0): 22 | return sp.Rational(1,2) * (a+nrpyAbs(a)) 23 | return sp.Rational(1,2) * (a+b+nrpyAbs(a-b)) 24 | 25 | def coord_leq_bound(x,xstar): 26 | # Returns 1.0 if x <= xstar, 0.0 otherwise. 27 | # Requires appropriately defined TINYDOUBLE 28 | return min_noif(x-xstar-TINYDOUBLE,0.0)/(x-xstar-TINYDOUBLE) 29 | 30 | def coord_geq_bound(x,xstar): 31 | # Returns 1.0 if x >= xstar, 0.0 otherwise. 32 | # Requires appropriately defined TINYDOUBLE 33 | return max_noif(x-xstar+TINYDOUBLE,0.0)/(x-xstar+TINYDOUBLE) 34 | 35 | def coord_less_bound(x,xstar): 36 | # Returns 1.0 if x < xstar, 0.0 otherwise. 37 | # Requires appropriately defined TINYDOUBLE 38 | return min_noif(x-xstar,0.0)/(x-xstar-TINYDOUBLE) 39 | 40 | def coord_greater_bound(x,xstar): 41 | # Returns 1.0 if x > xstar, 0.0 otherwise. 42 | # Requires appropriately defined TINYDOUBLE 43 | return max_noif(x-xstar,0.0)/(x-xstar+TINYDOUBLE) 44 | -------------------------------------------------------------------------------- /nbconvert_latex_settings/display_priority.j2: -------------------------------------------------------------------------------- 1 | ((= Auto-generated template file, DO NOT edit directly! 
2 | To edit this file, please refer to ../../skeleton/README.md =)) 3 | 4 | 5 | ((*- extends 'null.j2' -*)) 6 | 7 | ((=display data priority=)) 8 | 9 | 10 | ((*- block data_priority scoped -*)) 11 | ((*- for type in output.data | filter_data_type -*)) 12 | ((*- if type == 'application/pdf' -*)) 13 | ((*- block data_pdf -*)) 14 | ((*- endblock -*)) 15 | ((*- elif type == 'image/svg+xml' -*)) 16 | ((*- block data_svg -*)) 17 | ((*- endblock -*)) 18 | ((*- elif type == 'image/png' -*)) 19 | ((*- block data_png -*)) 20 | ((*- endblock -*)) 21 | ((*- elif type == 'text/html' -*)) 22 | ((*- block data_html -*)) 23 | ((*- endblock -*)) 24 | ((*- elif type == 'text/markdown' -*)) 25 | ((*- block data_markdown -*)) 26 | ((*- endblock -*)) 27 | ((*- elif type == 'image/jpeg' -*)) 28 | ((*- block data_jpg -*)) 29 | ((*- endblock -*)) 30 | ((*- elif type == 'text/plain' -*)) 31 | ((*- block data_text -*)) 32 | ((*- endblock -*)) 33 | ((*- elif type == 'text/latex' -*)) 34 | ((*- block data_latex -*)) 35 | ((*- endblock -*)) 36 | ((*- elif type == 'application/javascript' -*)) 37 | ((*- block data_javascript -*)) 38 | ((*- endblock -*)) 39 | ((*- elif type == 'application/vnd.jupyter.widget-view+json' -*)) 40 | ((*- block data_widget_view -*)) 41 | ((*- endblock -*)) 42 | ((*- else -*)) 43 | ((*- block data_other -*)) 44 | ((*- endblock -*)) 45 | ((*- endif -*)) 46 | ((*- endfor -*)) 47 | ((*- endblock data_priority -*)) 48 | -------------------------------------------------------------------------------- /in_progress-NRPyCritCol/ScalarField/ScalarField_output_central_values.h: -------------------------------------------------------------------------------- 1 | // This simple function computes the central values of the scalar field, 2 | // the lapse function, and energy-density. If the lapse falls below 3 | // a certain threshold, the function returns 1, indicating that the 4 | // lapse has collapsed, otherwise it returns 0. 5 | // 6 | // Author: Leonardo R. 
Werneck 7 | // wernecklr **at** gmail **dot** com 8 | 9 | int output_central_values( const REAL t, const paramstruct *restrict params, REAL *restrict in_gfs ) { 10 | #include "set_Cparameters.h" 11 | 12 | /* Set indices */ 13 | const int i0 = NGHOSTS; 14 | const int i1 = NGHOSTS; 15 | const int i2 = NGHOSTS; 16 | /* Set needed values of the scalar field */ 17 | const REAL sf_i0p1 = in_gfs[IDX4S(SFGF, i0,i1,i2)]; 18 | const REAL sf_i0p2 = in_gfs[IDX4S(SFGF, i0+1,i1,i2)]; 19 | const REAL sf_i0p3 = in_gfs[IDX4S(SFGF, i0+2,i1,i2)]; 20 | /* Set needed values of alpha */ 21 | const REAL alpha_i0p1 = in_gfs[IDX4S(ALPHAGF, i0,i1,i2)]; 22 | const REAL alpha_i0p2 = in_gfs[IDX4S(ALPHAGF, i0+1,i1,i2)]; 23 | const REAL alpha_i0p3 = in_gfs[IDX4S(ALPHAGF, i0+2,i1,i2)]; 24 | /* Compute the central values of the scalar field, alpha, and rho */ 25 | const REAL sf_c = 3.0*sf_i0p1 - 3.0*sf_i0p2 + sf_i0p3; 26 | const REAL alpha_c = 3.0*alpha_i0p1 - 3.0*alpha_i0p2 + alpha_i0p3; 27 | 28 | /* Set the output file */ 29 | FILE *outfile; 30 | if( t > 0.0 ) { 31 | outfile = fopen("out_central_values.dat","a"); 32 | } 33 | else { 34 | outfile = fopen("out_central_values.dat","w"); 35 | } 36 | 37 | /* Output the central values of the scalar field and alpha */ 38 | fprintf(outfile,"%.15e %.15e %.15e\n",t,alpha_c,sf_c); 39 | 40 | /* Close the file */ 41 | fclose(outfile); 42 | 43 | if( alpha_c < alpha_threshold ) 44 | return 1; 45 | else 46 | return 0; 47 | 48 | } 49 | -------------------------------------------------------------------------------- /safewrite.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from io import StringIO 3 | import os 4 | from difflib import context_diff 5 | import sys 6 | from subprocess import Popen, PIPE 7 | from clang_format import _get_executable as get_executable 8 | from colored import colored 9 | 10 | clang_formatter = get_executable("clang-format") 11 | 12 | verbose = False 13 | nochange = False 14 | 15 | class SafeWrite: 16 | def __init__(self, fname, do_format=False): 17 | self.fname = os.path.abspath(fname) 18 | self.fd = None 19 | self.do_format = do_format 20 | def __enter__(self): 21 | self.fd = StringIO() 22 | return self.fd 23 | def __exit__(self, ty, val, tb): 24 | print("Checking",self.fname,end="...") 25 | newcontent = self.fd.getvalue() 26 | if self.do_format: 27 | pipe = Popen([clang_formatter],stdout=PIPE,stdin=PIPE,universal_newlines=True) 28 | out, err = pipe.communicate(newcontent) 29 | newcontent = out 30 | if os.path.exists(self.fname): 31 | with open(self.fname) as fd: 32 | oldcontent = fd.read() 33 | do_write = newcontent.strip() != oldcontent.strip() 34 | if do_write and verbose: 35 | print("Diff for:",self.fname) 36 | oldlines=[line+"\n" for line in oldcontent.strip().split("\n")] 37 | newlines=[line+"\n" for line in newcontent.strip().split("\n")] 38 | sys.stdout.writelines(context_diff(oldlines,newlines,fromfile='before',tofile='after')) 39 | else: 40 | do_write = True 41 | if do_write: 42 | assert nochange == False 43 | with open(self.fname, "w") as fd: 44 | fd.write(newcontent) 45 | print(colored("[written]","red")) 46 | else: 47 | print(colored("[no changes]","green")) 48 | -------------------------------------------------------------------------------- /in_progress-GiRaFFE_NRPy/GiRaFFE_NRPy/GiRaFFE_Ccode_library/compute_conservatives_FFE.C: -------------------------------------------------------------------------------- 1 | void GiRaFFE_NRPy_compute_conservatives(const REAL 
gxxL,const REAL gxyL,const REAL gxzL,const REAL gyyL,const REAL gyzL,const REAL gzzL, 2 | const REAL BxL, const REAL ByL, const REAL BzL, const REAL vxL, const REAL vyL, const REAL vzL, 3 | //const REAL betaxL, const REAL betayL, const REAL betazL, const REAL alpL, 4 | const REAL sqrtg,REAL *StildeD0L, REAL *StildeD1L, REAL *StildeD2L) { 5 | 6 | //const REAL fourpialpha_inv = 1.0/( 4.0*M_PI*(METRIC[LAPM1] + 1.0) ); 7 | const REAL fourpi_inv = 1.0/( 4.0*M_PI ); 8 | 9 | const REAL B2 = gxxL*BxL*BxL + gyyL*ByL*ByL + gzzL*BzL*BzL 10 | + 2.0*(gxyL*BxL*ByL + gxzL*BxL*BzL + gyzL*ByL*BzL); 11 | 12 | 13 | // NOTE: SIGNIFICANTLY MODIFIED FROM ILLINOISGRMHD VERSION: 14 | // velocities in GiRaFFE are defined to be "drift" velocity. 15 | // cf. Eqs 47 and 85 in http://arxiv.org/pdf/1310.3274.pdf 16 | // Modified again from the original GiRaFFE to use Valencia velocity 17 | 18 | const REAL v_xL = gxxL*vxL + gxyL*vyL + gxzL*vzL; 19 | const REAL v_yL = gxyL*vxL + gyyL*vyL + gyzL*vzL; 20 | const REAL v_zL = gxzL*vxL + gyzL*vyL + gzzL*vzL; 21 | 22 | /* 23 | * Comments: 24 | * Eq. 85 in https://arxiv.org/pdf/1310.3274.pdf: 25 | * v^i = 4 pi alpha * (gamma^{ij} tilde{S}_j) / (sqrtgamma * B^2) - beta^i 26 | * which implies that 27 | * (v^i + beta^i)*(sqrtgamma * B^2)/(4 pi alpha) = gamma^{ij} tilde{S}_j 28 | * Multiply both sides by gamma_{ik}: 29 | * gamma_{ik} (v^i + beta^i)*(sqrtgamma * B^2)/(4 pi alpha) = gamma_{ik} gamma^{ij} tilde{S}_j 30 | * 31 | * -> tilde{S}_k = gamma_{ik} (v^i + beta^i)*(sqrtgamma * B^2)/(4 pi alpha) 32 | */ 33 | 34 | *StildeD0L = v_xL * sqrtg * B2 * fourpi_inv; 35 | *StildeD1L = v_yL * sqrtg * B2 * fourpi_inv; 36 | *StildeD2L = v_zL * sqrtg * B2 * fourpi_inv; 37 | } 38 | -------------------------------------------------------------------------------- /in_progress-Maxwell/Maxwell/MaxwellCartesian_ID.py: -------------------------------------------------------------------------------- 1 | import NRPy_param_funcs as par # NRPy+: Parameter interface 2 | import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support 3 | import grid as gri # NRPy+: Functions having to do with numerical grids 4 | import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends 5 | 6 | def MaxwellCartesian_ID(): 7 | DIM = par.parval_from_str("grid::DIM") 8 | 9 | x, y, z = gri.register_gridfunctions("AUX", ["x", "y", "z"]) 10 | gammaDD = ixp.register_gridfunctions_for_single_rank2("AUX","gammaDD", "sym01") # The AUX or EVOL designation is *not* 11 | # used in diagnostic modules. 12 | 13 | # Step 1: Declare free parameters intrinsic to these initial data 14 | amp,lam = par.Cparameters("REAL",__name__,["amp","lam"], [1.0,1.0]) # __name__ = "MaxwellCartesian_ID", this module's name 15 | 16 | # Step 2: Set the initial data 17 | system = par.parval_from_str("System_to_use") 18 | if system == "System_I" or system == "System_II": 19 | global AidD,EidD,psi_ID 20 | AidD = ixp.zerorank1() 21 | 22 | EidD = ixp.zerorank1() 23 | EidU = ixp.zerorank1() 24 | # Set the coordinate transformations: 25 | radial = sp.sqrt(x*x + y*y + z*z) 26 | polar = sp.atan2(sp.sqrt(x*x + y*y),z) 27 | EU_phi = 8*amp*radial*sp.sin(polar)*lam*lam*sp.exp(-lam*radial*radial) 28 | EidU[0] = -(y * EU_phi)/sp.sqrt(x*x + y*y) 29 | EidU[1] = (x * EU_phi)/sp.sqrt(x*x + y*y) 30 | # The z component (2)is zero. 
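# Lower the index with the spatial metric in the loop below: E_i = gamma_{ij} E^j.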
31 | for i in range(DIM): 32 | for j in range(DIM): 33 | EidD[i] += gammaDD[i][j] * EidU[j] 34 | 35 | psi_ID = sp.sympify(0) 36 | if system == "System_II": 37 | global Gamma_ID 38 | Gamma_ID = sp.sympify(0) 39 | else: 40 | print("Invalid choice of system: System_to_use must be either System_I or System_II") 41 | -------------------------------------------------------------------------------- /tensor_rotation.py: -------------------------------------------------------------------------------- 1 | """ Symbolic Tensor (Quaternion) Rotation 2 | 3 | The following script will perform symbolic tensor rotation using quaternions. 4 | """ 5 | # Author: Ken Sible 6 | # Email: ksible *at* outlook *dot* com 7 | 8 | from sympy import Quaternion as quat 9 | from sympy import Matrix 10 | 11 | def rotate(tensor, axis, angle): 12 | """ Rotate symbolic vector or tensor about an arbitrary axis 13 | 14 | :arg: 3-vector or (3x3)-matrix 15 | :arg: rotation axis (normal 3-vector) 16 | :arg: rotation angle (in radians) 17 | :return: rotated tensor (of original type) 18 | """ 19 | # Quaternion-Matrix Multiplication 20 | def mul(*args): 21 | if isinstance(args[0], list): 22 | q, M = args[1], args[0] 23 | for i, col in enumerate(M): 24 | M[i] = col * q 25 | else: 26 | q, M = args[0], args[1] 27 | for i, col in enumerate(M): 28 | M[i] = q * col 29 | return M 30 | # Rotation Quaternion (Axis, Angle) 31 | q = quat.from_axis_angle(axis, angle) 32 | if isinstance(tensor[0], list): 33 | tensor = Matrix(tensor) 34 | if tensor.shape != (3, 3): 35 | raise Exception('Invalid Matrix Size') 36 | # Rotation Formula: M' = (q.(q.M.q*)^T.q*)^T 37 | M = [quat(0, *tensor[:, i]) for i in range(tensor.shape[1])] 38 | M = mul(q, mul(M, q.conjugate())) 39 | for i in range(tensor.shape[1]): 40 | tensor[:, i] = [M[i].b, M[i].c, M[i].d] 41 | M = [quat(0, *tensor[i, :]) for i in range(tensor.shape[0])] 42 | M = mul(q, mul(M, q.conjugate())) 43 | for i in range(tensor.shape[0]): 44 | tensor[i, :] = [[M[i].b, M[i].c, M[i].d]] 45 | return tensor.tolist() 46 | if isinstance(tensor, list): 47 | if len(tensor) != 3: 48 | raise Exception('Invalid Vector Length') 49 | # Rotation Formula: v' = q.v.q* 50 | v = q * quat(0, *tensor) * q.conjugate() 51 | return [v.b, v.c, v.d] 52 | raise Exception('Unsupported Tensor Type') 53 | -------------------------------------------------------------------------------- /in_progress-GiRaFFE_NRPy/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Magnetospheric_Wald.py: -------------------------------------------------------------------------------- 1 | # Step 0: Import the NRPy+ core modules and set the reference metric to Cartesian 2 | # Step 0: Add NRPy's directory to the path 3 | # https://stackoverflow.com/questions/16780014/import-file-from-parent-directory 4 | import os,sys 5 | nrpy_dir_path = os.path.join("..") 6 | if nrpy_dir_path not in sys.path: 7 | sys.path.append(nrpy_dir_path) 8 | nrpy_dir_path = os.path.join("../..") 9 | if nrpy_dir_path not in sys.path: 10 | sys.path.append(nrpy_dir_path) 11 | giraffefood_dir_path = os.path.join("GiRaFFEfood_NRPy") 12 | if giraffefood_dir_path not in sys.path: 13 | sys.path.append(giraffefood_dir_path) 14 | 15 | # Step 0.a: Import the NRPy+ core modules and set the reference metric to Cartesian 16 | import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends 17 | import NRPy_param_funcs as par # NRPy+: Parameter interface 18 | import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) 
support 19 | import GiRaFFEfood_NRPy_Common_Functions as gfcf # Some useful functions for GiRaFFE initial data. 20 | 21 | import reference_metric as rfm 22 | par.set_parval_from_str("reference_metric::CoordSystem","Cartesian") 23 | rfm.reference_metric() 24 | 25 | # Step 1a: Set commonly used parameters. 26 | thismodule = __name__ 27 | 28 | C0 = par.Cparameters("REAL",thismodule,"C0",1.0) 29 | 30 | # Step 2: Set the vectors A and E in Spherical coordinates 31 | def Ar_MW(r,theta,phi, **params): 32 | g4DD = params["g4DD"] 33 | a = params["a"] 34 | return sp.Rational(1,2) * C0 * (g4DD[1][3] + 2 * a * g4DD[0][1]) 35 | 36 | def Ath_MW(r,theta,phi, **params): 37 | g4DD = params["g4DD"] 38 | a = params["a"] 39 | return sp.Rational(1,2) * C0 * (g4DD[2][3] + 2 * a * g4DD[0][2]) 40 | 41 | def Aph_MW(r,theta,phi, **params): 42 | g4DD = params["g4DD"] 43 | a = params["a"] 44 | return sp.Rational(1,2) * C0 * (g4DD[3][3] + 2 * a * g4DD[0][3]) 45 | 46 | #Step 3: Compute v^i from A_i and E_i 47 | def ValenciavU_func_MW(**params): 48 | return ixp.zerorank1() -------------------------------------------------------------------------------- /CurviBoundaryConditions/SENR.patch-83cf8f5f4e0ec046253cc0017fd4b217d126e68b: -------------------------------------------------------------------------------- 1 | 7c7 2 | < #include "Logo.c" 3 | --- 4 | > //#include "Logo.c" 5 | 16c16 6 | < printf("\x1B[32mID: %s, Evol: %s, Coords: %s, FD order: %d\x1B[0m\n", params.ID_scheme,params.Evol_scheme,params.CoordSystem, params.FDCENTERDERIVS_FDORDER); 7 | --- 8 | > // printf("\x1B[32mID: %s, Evol: %s, Coords: %s, FD order: %d\x1B[0m\n", params.ID_scheme,params.Evol_scheme,params.CoordSystem, params.FDCENTERDERIVS_FDORDER); 9 | 189a190,205 10 | > LOOP_GZFILL(ii,jj,kk) { 11 | > int which_gf = 0; 12 | > gfs_n[IDX4(VET1,ii,jj,kk)] = (REAL)IDX4(which_gf,ii,jj,kk); which_gf++; 13 | > gfs_n[IDX4(VET2,ii,jj,kk)] = (REAL)IDX4(which_gf,ii,jj,kk); which_gf++; 14 | > gfs_n[IDX4(VET3,ii,jj,kk)] = (REAL)IDX4(which_gf,ii,jj,kk); which_gf++; 15 | > 16 | > gfs_n[IDX4(A11,ii,jj,kk)] = (REAL)IDX4(which_gf,ii,jj,kk); which_gf++; 17 | > gfs_n[IDX4(A12,ii,jj,kk)] = (REAL)IDX4(which_gf,ii,jj,kk); which_gf++; 18 | > gfs_n[IDX4(A13,ii,jj,kk)] = (REAL)IDX4(which_gf,ii,jj,kk); which_gf++; 19 | > gfs_n[IDX4(A22,ii,jj,kk)] = (REAL)IDX4(which_gf,ii,jj,kk); which_gf++; 20 | > gfs_n[IDX4(A23,ii,jj,kk)] = (REAL)IDX4(which_gf,ii,jj,kk); which_gf++; 21 | > gfs_n[IDX4(A33,ii,jj,kk)] = (REAL)IDX4(which_gf,ii,jj,kk); which_gf++; 22 | > 23 | > gfs_n[IDX4(CF,ii,jj,kk)] = (REAL)IDX4(which_gf,ii,jj,kk); which_gf++; 24 | > } 25 | > 26 | 195a212,230 27 | > LOOP_GZFILL(ii,jj,kk) { 28 | > int which_gf = 0; 29 | > printf("%d %d %d | ",ii,jj,kk); 30 | > printf("%d ",(int)gfs_n[IDX4(VET1,ii,jj,kk)]); 31 | > printf("%d ",(int)gfs_n[IDX4(VET2,ii,jj,kk)]); 32 | > printf("%d ",(int)gfs_n[IDX4(VET3,ii,jj,kk)]); 33 | > 34 | > printf("%d ",(int)gfs_n[IDX4(A11,ii,jj,kk)]); 35 | > printf("%d ",(int)gfs_n[IDX4(A12,ii,jj,kk)]); 36 | > printf("%d ",(int)gfs_n[IDX4(A13,ii,jj,kk)]); 37 | > printf("%d ",(int)gfs_n[IDX4(A22,ii,jj,kk)]); 38 | > printf("%d ",(int)gfs_n[IDX4(A23,ii,jj,kk)]); 39 | > printf("%d ",(int)gfs_n[IDX4(A33,ii,jj,kk)]); 40 | > 41 | > printf("%d ",(int)gfs_n[IDX4(CF,ii,jj,kk)]); 42 | > printf("\n"); 43 | > } 44 | > exit(0); 45 | > 46 | -------------------------------------------------------------------------------- /run_Jupyter_notebook.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z "$1" ]; then 4 | 
echo "Correct usage of this script:" 5 | echo "./run_Jupyter_notebook.sh [Jupyter notebook (a file with .ipynb extension)]" 6 | exit 7 | fi 8 | 9 | if [ ! -f $1 ]; then 10 | echo "You input: ./run_Jupyter_notebook.sh" $1 11 | echo "Jupyter notebook" \"$1\" "not found!" 12 | exit 13 | fi 14 | 15 | 16 | # NRPy+ Jupyter notebooks are completely Python 2/3 cross-compatible. 17 | # However `jupyter nbconvert` will refuse to run if the notebook 18 | # was generated using a different kernel. Here we fool Jupyter 19 | # to think the notebook was written using the native python kernel. 20 | PYTHONMAJORVERSION=`python -c "import sys;print(sys.version_info[0])"` 21 | if (( $PYTHONMAJORVERSION == 3 )); then 22 | cat $1 | sed "s/ \"name\": \"python2\"/ \"name\": \"python3\"/g" > $1-tmp ; mv $1-tmp $1 23 | else 24 | cat $1 | sed "s/ \"name\": \"python3\"/ \"name\": \"python2\"/g" > $1-tmp ; mv $1-tmp $1 25 | fi 26 | 27 | if [ "$2" == "notimer" ]; then 28 | if jupyter nbconvert --to notebook --inplace --execute --ExecutePreprocessor.timeout=-1 $1; 29 | then 30 | echo 31 | # do nothing 32 | else 33 | echo BACKTRACE: 34 | echo git diff $1 35 | cat in_progress/Validation/out_GiRaFFEfood_NRPy_test.txt 36 | git diff $1 2>&1 | cat 37 | exit 1 38 | fi 39 | else 40 | if time jupyter nbconvert --to notebook --inplace --execute --ExecutePreprocessor.timeout=-1 $1; 41 | then 42 | echo 43 | # do nothing 44 | else 45 | echo BACKTRACE: 46 | if jupyter nbconvert --to python $1; 47 | then 48 | FILENAME=`echo $1 | sed 's/ipynb/py/g'` 49 | echo $FILENAME 50 | if (( $PYTHONMAJORVERSION == 3 )); then 51 | ipython3 --log-level=DEBUG $FILENAME 52 | else 53 | ipython --log-level=DEBUG $FILENAME 54 | fi 55 | exit 1 56 | else 57 | echo ERROR: could not convert $1 to a Python script! 58 | exit 1 59 | fi 60 | fi 61 | fi 62 | -------------------------------------------------------------------------------- /ScalarWave/tests/test_ScalarWave.py: -------------------------------------------------------------------------------- 1 | from UnitTesting.create_test import create_test 2 | 3 | 4 | def test_InitialData_PlaneWave(): 5 | 6 | module = 'ScalarWave.InitialData' 7 | 8 | module_name = 'InitialData' 9 | 10 | function_and_global_dict = {'InitialData(WaveType="PlaneWave")': ['uu_ID', 'vv_ID']} 11 | 12 | create_test(module, module_name, function_and_global_dict) 13 | 14 | def test_InitialData_SphericalGaussian(): 15 | 16 | module = 'ScalarWave.InitialData' 17 | 18 | module_name = 'InitialData' 19 | 20 | function_and_global_dict = {'InitialData(WaveType="SphericalGaussian")': ['uu_ID', 'vv_ID']} 21 | 22 | create_test(module, module_name, function_and_global_dict) 23 | 24 | 25 | def test_ScalarWave_RHSs(): 26 | 27 | module = 'ScalarWave.ScalarWave_RHSs' 28 | 29 | module_name = 'ScalarWave_RHSs' 30 | 31 | function_and_global_dict = {'ScalarWave_RHSs()': ['wavespeed', 'uu_rhs', 'vv_rhs']} 32 | 33 | create_test(module, module_name, function_and_global_dict) 34 | 35 | 36 | def test_ScalarWaveCurvilinear_RHSs(): 37 | 38 | module = 'ScalarWave.ScalarWaveCurvilinear_RHSs' 39 | 40 | module_name = 'ScalarWaveCurvilinear_RHSs' 41 | 42 | function_and_global_dict = {'ScalarWaveCurvilinear_RHSs()': ['uu_rhs', 'vv_rhs']} 43 | 44 | create_test(module, module_name, function_and_global_dict) 45 | 46 | 47 | if __name__ == '__main__': 48 | import sys 49 | 50 | if len(sys.argv) <= 3: 51 | failed_functions = [] 52 | for fun in dir(): 53 | if fun[0:5] == 'test_': 54 | print('\nTesting ' + str(fun) + '...\n') 55 | try: 56 | exec(fun + '()') 57 | except SystemExit: 
58 | failed_functions.append(fun) 59 | 60 | if failed_functions != []: 61 | import sys, os 62 | with open(os.path.join('UnitTesting', 'failed_tests.txt'), 'a') as file: 63 | for function in failed_functions: 64 | file.write(sys.argv[0] + ': ' + str(function) + '\n') 65 | sys.exit(1) 66 | 67 | else: 68 | globals()[sys.argv[4]]() 69 | -------------------------------------------------------------------------------- /in_progress/2021_ETK_School/run_Jupyter_notebook.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z "$1" ]; then 4 | echo "Correct usage of this script:" 5 | echo "./run_Jupyter_notebook.sh [Jupyter notebook (a file with .ipynb extension)]" 6 | exit 7 | fi 8 | 9 | if [ ! -f $1 ]; then 10 | echo "You input: ./run_Jupyter_notebook.sh" $1 11 | echo "Jupyter notebook" \"$1\" "not found!" 12 | exit 13 | fi 14 | 15 | 16 | # NRPy+ Jupyter notebooks are completely Python 2/3 cross-compatible. 17 | # However `jupyter nbconvert` will refuse to run if the notebook 18 | # was generated using a different kernel. Here we fool Jupyter 19 | # to think the notebook was written using the native python kernel. 20 | PYTHONMAJORVERSION=`python -c "import sys;print(sys.version_info[0])"` 21 | if (( $PYTHONMAJORVERSION == 3 )); then 22 | cat $1 | sed "s/ \"name\": \"python2\"/ \"name\": \"python3\"/g" > $1-tmp ; mv $1-tmp $1 23 | else 24 | cat $1 | sed "s/ \"name\": \"python3\"/ \"name\": \"python2\"/g" > $1-tmp ; mv $1-tmp $1 25 | fi 26 | 27 | if [ "$2" == "notimer" ]; then 28 | if jupyter nbconvert --to notebook --inplace --execute --ExecutePreprocessor.timeout=-1 $1; 29 | then 30 | echo 31 | # do nothing 32 | else 33 | echo BACKTRACE: 34 | echo git diff $1 35 | cat in_progress/Validation/out_GiRaFFEfood_NRPy_test.txt 36 | git diff $1 2>&1 | cat 37 | exit 1 38 | fi 39 | else 40 | if time jupyter nbconvert --to notebook --inplace --execute --ExecutePreprocessor.timeout=-1 $1; 41 | then 42 | echo 43 | # do nothing 44 | else 45 | echo BACKTRACE: 46 | if jupyter nbconvert --to python $1; 47 | then 48 | FILENAME=`echo $1 | sed 's/ipynb/py/g'` 49 | echo $FILENAME 50 | if (( $PYTHONMAJORVERSION == 3 )); then 51 | ipython3 --log-level=DEBUG $FILENAME 52 | else 53 | ipython --log-level=DEBUG $FILENAME 54 | fi 55 | exit 1 56 | else 57 | echo ERROR: could not convert $1 to a Python script! 
58 | exit 1 59 | fi 60 | fi 61 | fi 62 | -------------------------------------------------------------------------------- /.github/workflows/github-actions-MacOS12.yml: -------------------------------------------------------------------------------- 1 | name: MacOS 12 2 | 3 | on: 4 | push: 5 | branches: master 6 | pull_request: 7 | branches: master 8 | 9 | jobs: 10 | UnitTestlatestSymPy: 11 | 12 | runs-on: macos-12 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | python-version: ["3.7.15", "3.8.14", "3.10.8", "3.11.0"] 17 | 18 | steps: 19 | - uses: actions/checkout@v3 20 | - name: Set up Python ${{ matrix.python-version }} 21 | uses: actions/setup-python@v3 22 | with: 23 | python-version: ${{ matrix.python-version }} 24 | - name: Install dependencies 25 | run: | 26 | python -m pip install --upgrade pip setuptools 27 | python -m pip install --upgrade nbconvert 28 | python -m pip install testfixtures sympy mpmath jupyter matplotlib scipy nrpylatex clang_format 29 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 30 | - name: UnitTests 31 | run: | 32 | ./UnitTesting/run_NRPy_UnitTests.sh python3 33 | # TwoBHsCollideJupyterNotebook: 34 | 35 | # runs-on: macos-12 36 | # strategy: 37 | # fail-fast: false 38 | # matrix: 39 | # python-version: ["3.7.15", "3.8.14", "3.10.8", "3.11.0"] 40 | 41 | # steps: 42 | # - uses: actions/checkout@v3 43 | # - name: Set up Python ${{ matrix.python-version }} 44 | # uses: actions/setup-python@v3 45 | # with: 46 | # python-version: ${{ matrix.python-version }} 47 | # - name: Brew install needed TeX packages (for pdflatex) 48 | # run: | 49 | # brew install texlive 50 | # brew install ffmpeg colordiff pandoc 51 | # - name: Install dependencies 52 | # run: | 53 | # python -m pip install --upgrade pip setuptools 54 | # python -m pip install --upgrade nbconvert 55 | # python -m pip install testfixtures sympy mpmath jupyter matplotlib scipy nrpylatex 56 | # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 57 | # - name: BSSNTwoBHsCollide 58 | # run: | 59 | # ./run_Jupyter_notebook.sh Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide_new_way.ipynb 60 | -------------------------------------------------------------------------------- /Deprecated/BSSN/BSSN_ID_function_string.py: -------------------------------------------------------------------------------- 1 | # This module sets up a standard initial data function used for 2 | # setting up SENR initial data at all gridpoints. 3 | 4 | # Author: Zachariah B. 
Etienne 5 | # zachetie **at** gmail **dot* com 6 | 7 | from outputC import outputC, add_to_Cfunction_dict # NRPy+: Core C code output module 8 | 9 | def BSSN_ID_function_string(cf,hDD,lambdaU,aDD,trK,alpha,vetU,betU, include_NRPy_basic_defines=False): 10 | includes = [] 11 | if include_NRPy_basic_defines: 12 | includes = ["NRPy_basic_defines.h"] 13 | rhss = [trK,alpha,cf] 14 | lhss = ["in_gfs[IDX4S(TRKGF,i0,i1,i2)]","in_gfs[IDX4S(ALPHAGF,i0,i1,i2)]","in_gfs[IDX4S(CFGF,i0,i1,i2)]"] 15 | for i in range(3): 16 | rhss.append(lambdaU[i]) 17 | lhss.append("in_gfs[IDX4S(LAMBDAU"+str(i)+"GF,i0,i1,i2)]") 18 | rhss.append(vetU[i]) 19 | lhss.append("in_gfs[IDX4S(VETU"+str(i)+"GF,i0,i1,i2)]") 20 | rhss.append(betU[i]) 21 | lhss.append("in_gfs[IDX4S(BETU"+str(i)+"GF,i0,i1,i2)]") 22 | for j in range(i,3): 23 | rhss.append(hDD[i][j]) 24 | lhss.append("in_gfs[IDX4S(HDD" + str(i) + str(j) + "GF,i0,i1,i2)]") 25 | rhss.append(aDD[i][j]) 26 | lhss.append("in_gfs[IDX4S(ADD" + str(i) + str(j) + "GF,i0,i1,i2)]") 27 | 28 | # Sort the lhss list alphabetically, and rhss to match: 29 | lhss,rhss = [list(x) for x in zip(*sorted(zip(lhss, rhss), key=lambda pair: pair[0]))] 30 | 31 | body = outputC(rhss, lhss, filename="returnstring", 32 | params="preindent=1,CSE_enable=True,outCverbose=False", # outCverbose=False to prevent 33 | # enormous output files. 34 | prestring="", poststring="") 35 | 36 | desc = "Set up the initial data at all points on the numerical grid." 37 | add_to_Cfunction_dict( 38 | includes=includes, 39 | desc =desc, 40 | name ="initial_data", 41 | params ="const paramstruct *restrict params,REAL *restrict xx[3], REAL *restrict in_gfs", 42 | body =body, 43 | loopopts="AllPoints,Read_xxs") 44 | -------------------------------------------------------------------------------- /UnitTesting/Test_UnitTesting/test_module.py: -------------------------------------------------------------------------------- 1 | from UnitTesting.create_test import create_test 2 | 3 | 4 | def test_module_for_testing_no_gamma(): 5 | 6 | module = 'module_for_testing' 7 | 8 | module_name = 'test_module' 9 | 10 | function_and_global_dict = {'function(create_gamma=False)': ['alpha', 'betaU'], 11 | 'function2(create_gamma=False)': ['alpha2', 'betaU2']} 12 | 13 | initialization_string = 'import module_for_testing as mft\nmft.init_function2()' 14 | 15 | initialization_string_dict = {'function2(create_gamma=False)': initialization_string} 16 | 17 | create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict) 18 | 19 | 20 | def test_module_for_testing_gamma(): 21 | 22 | module = 'module_for_testing' 23 | 24 | module_name = 'test_module' 25 | 26 | function_and_global_dict = {'function(create_gamma=True)': ['alpha', 'betaU', 'gamma'], 27 | 'function2(create_gamma=True)': ['alpha2', 'betaU2', 'gamma2']} 28 | 29 | initialization_string = 'import module_for_testing as mft\nmft.init_function2()' 30 | 31 | initialization_string_dict = {'function2(create_gamma=True)': initialization_string} 32 | 33 | create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict) 34 | 35 | 36 | if __name__ == '__main__': 37 | import sys 38 | 39 | if len(sys.argv) <= 3: 40 | failed_functions = [] 41 | for fun in dir(): 42 | if fun[0:5] == 'test_': 43 | print('\nTesting ' + str(fun) + '...\n') 44 | try: 45 | exec(fun + '()') 46 | except SystemExit: 47 | failed_functions.append(fun) 48 | 49 | if failed_functions != []: 50 | import sys, os 51 | with 
open(os.path.join('UnitTesting', 'failed_tests.txt'), 'a') as file: 52 | for function in failed_functions: 53 | file.write(sys.argv[0] + ': ' + str(function) + '\n') 54 | sys.exit(1) 55 | 56 | else: 57 | globals()[sys.argv[4]]() 58 | -------------------------------------------------------------------------------- /Deprecated/CurviBoundaryConditions/boundary_conditions/driver_bcstruct.h: -------------------------------------------------------------------------------- 1 | 2 | // Step 1: Allocate memory storage for bc_gz_map, which 3 | // in the case a boundary point is a *parity* 4 | // boundary, is set to the interior, non- 5 | // boundary point corresponding to the same 6 | // Cartesian gridpoint. Otherwise bc_gz_map 7 | // is set to (i0,i1,i2) = (-1,-1,-1). 8 | gz_map *bc_gz_map = (gz_map *)malloc(sizeof(gz_map)*Nxx_plus_2NGHOSTS_tot); 9 | 10 | // Step 2: Allocate memory storage for bc_parity_conditions, 11 | // which store parity conditions for all 10 12 | // gridfunction types at all grid points. 13 | parity_condition *bc_parity_conditions = (parity_condition *)malloc(sizeof(parity_condition)*Nxx_plus_2NGHOSTS_tot); 14 | 15 | // Step 3: Set bc_gz_map and bc_parity_conditions at *all* 16 | // points; on the boundary and otherwise. 17 | set_up__bc_gz_map_and_parity_condns(&params, xx, bc_gz_map, 18 | bc_parity_conditions 19 | ); 20 | 21 | // Step 4: Declare and allocate memory for bcstruct, 22 | // which will store all information needed for 23 | // applying the boundary conditions. 24 | bcstruct.outer = (outer_bc **)malloc(sizeof(outer_bc *)*NGHOSTS); 25 | bcstruct.inner = (inner_bc **)malloc(sizeof(inner_bc *)*NGHOSTS); 26 | bcstruct.num_ob_gz_pts = ( int *)malloc(sizeof(int)*NGHOSTS); 27 | bcstruct.num_ib_gz_pts = ( int *)malloc(sizeof(int)*NGHOSTS); 28 | 29 | // Step 5: Store all information needed to quickly and 30 | // efficiently apply boundary conditions. This 31 | // function transfers all information from 32 | // bc_gz_map (defined at *all gridpoints*) into 33 | // bcstruct (defined only at boundary points). 34 | // Thus when this function has finished, 35 | // bc_gz_map is no longer needed. 36 | set_bcstruct(&params,bc_gz_map, 37 | bc_parity_conditions, 38 | &bcstruct); 39 | 40 | // Step 6: As described in Step 5, bc_gz_map is no 41 | // longer needed at this point, so we free its 42 | // memory. Farewell, friend!
43 | free(bc_gz_map); 44 | free(bc_parity_conditions); 45 | -------------------------------------------------------------------------------- /nbconvert_latex_settings/style_bw_ipython.tex.j2: -------------------------------------------------------------------------------- 1 | ((= Black&white ipython input/output style =)) 2 | 3 | ((*- extends 'base.tex.j2' -*)) 4 | 5 | %=============================================================================== 6 | % Input 7 | %=============================================================================== 8 | 9 | ((* block input scoped *)) 10 | ((*- if resources.global_content_filter.include_input_prompt *)) 11 | ((( add_prompt(cell.source, cell, 'In ') ))) 12 | ((* else *)) 13 | (((cell.source))) 14 | ((* endif -*)) 15 | ((* endblock input *)) 16 | 17 | 18 | %=============================================================================== 19 | % Output 20 | %=============================================================================== 21 | 22 | ((* block execute_result scoped *)) 23 | ((*- for type in output.data | filter_data_type -*)) 24 | ((*- if resources.global_content_filter.include_output_prompt -*)) 25 | ((*- if type in ['text/plain'] *)) 26 | ((( add_prompt(output.data['text/plain'], cell, 'Out') ))) 27 | ((*- else -*)) 28 | \verb+Out[((( cell.execution_count )))]:+((( super() ))) 29 | ((*- endif -*)) 30 | ((*- else -*)) 31 | ((*- if type in ['text/plain'] *)) 32 | ((( output.data['text/plain'] ))) 33 | ((*- else -*)) 34 | \verb+((( super() ))) 35 | ((*- endif -*)) 36 | ((*- endif -*)) 37 | ((*- endfor -*)) 38 | ((* endblock execute_result *)) 39 | 40 | 41 | %============================================================================== 42 | % Support Macros 43 | %============================================================================== 44 | 45 | % Name: draw_prompt 46 | % Purpose: Renders an output/input prompt 47 | ((* macro add_prompt(text, cell, prompt) -*)) 48 | ((*- if cell.execution_count is defined -*)) 49 | ((*- set execution_count = "" ~ (cell.execution_count | replace(None, " ")) -*)) 50 | ((*- else -*)) 51 | ((*- set execution_count = " " -*)) 52 | ((*- endif -*)) 53 | ((*- set indentation = " " * (execution_count | length + 7) -*)) 54 | \begin{verbatim} 55 | (((- text | add_prompts(first=prompt ~ '[' ~ execution_count ~ ']: ', cont=indentation) -))) 56 | \end{verbatim} 57 | ((*- endmacro *)) 58 | -------------------------------------------------------------------------------- /in_progress-GiRaFFE_NRPy/GiRaFFE_NRPy/GiRaFFE_NRPy_Characteristic_Speeds.py: -------------------------------------------------------------------------------- 1 | import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends 2 | import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support 3 | 4 | thismodule = __name__ 5 | 6 | # We'll write this as a function so that we can calculate the expressions on-demand for any choice of i 7 | def find_cp_cm(lapse,shifti,gammaUUii): 8 | # Inputs: u0,vi,lapse,shift,gammadet,gupii 9 | # Outputs: cplus,cminus 10 | 11 | # a = 1/(alpha^2) 12 | a = sp.sympify(1)/(lapse*lapse) 13 | # b = 2 beta^i / alpha^2 14 | b = sp.sympify(2) * shifti /(lapse*lapse) 15 | # c = -g^{ii} + (beta^i)^2 / alpha^2 16 | c = - gammaUUii + shifti*shifti/(lapse*lapse) 17 | 18 | # Now, we are free to solve the quadratic equation as usual. We take care to avoid passing a 19 | # negative value to the sqrt function. 
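    # Explicitly, cplus and cminus below are the two roots of a*x^2 + b*x + c = 0,
    #   x_{+/-} = ( -b +/- sqrt(b^2 - 4*a*c) ) / (2*a),
    # with the discriminant b^2 - 4*a*c clamped to zero from below before the square root is taken.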
20 | detm = b*b - sp.sympify(4)*a*c 21 | 22 | import Min_Max_and_Piecewise_Expressions as noif 23 | detm = sp.sqrt(noif.max_noif(sp.sympify(0),detm)) 24 | global cplus,cminus 25 | cplus = sp.Rational(1,2)*(-b/a + detm/a) 26 | cminus = sp.Rational(1,2)*(-b/a - detm/a) 27 | 28 | # We'll write this as a function, and call it within HLLE_solver, below. 29 | def find_cmax_cmin(flux_dirn,gamma_faceDD,beta_faceU,alpha_face): 30 | # Inputs: flux direction flux_dirn, Inverse metric gamma_faceUU, shift beta_faceU, 31 | # lapse alpha_face, metric determinant gammadet_face 32 | # Outputs: maximum and minimum characteristic speeds cmax and cmin 33 | # First, we need to find the characteristic speeds on each face 34 | gamma_faceUU,unusedgammaDET = ixp.generic_matrix_inverter3x3(gamma_faceDD) 35 | find_cp_cm(alpha_face,beta_faceU[flux_dirn],gamma_faceUU[flux_dirn][flux_dirn]) 36 | cp = cplus 37 | cm = cminus 38 | 39 | # The following algorithms have been verified with random floats: 40 | 41 | global cmax,cmin 42 | # Now, we need to set cmax to the larger of cpr,cpl, and 0 43 | 44 | import Min_Max_and_Piecewise_Expressions as noif 45 | cmax = noif.max_noif(cp,sp.sympify(0)) 46 | 47 | # And then, set cmin to the smaller of cmr,cml, and 0 48 | cmin = -noif.min_noif(cm,sp.sympify(0)) 49 | -------------------------------------------------------------------------------- /FishboneMoncriefID/tests/trusted_values_dict.py: -------------------------------------------------------------------------------- 1 | from mpmath import mpf, mp, mpc 2 | from UnitTesting.standard_constants import precision 3 | 4 | mp.dps = precision 5 | trusted_values_dict = {} 6 | 7 | # Generated on: 2019-10-17 8 | # 2019-10-17: added uBL4D[],uBL4U[],uKS4U[] 9 | trusted_values_dict['FishBoneMoncriefID__FishboneMoncriefID__globals'] = {'hm1': mpf('-0.171485955353078502409782000685'), 'IDalpha': mpf('0.755931060760646734042892329128'), 'IDbetaU[0]': mpf('0.214647582556345478472109360788'), 'IDbetaU[1]': mpf('0.241217364020809745026116799651'), 'IDbetaU[2]': mpf('0.281800155328952481540896834564'), 'IDgammaDD[0][0]': mpf('2.03851677635221747252808792529'), 'IDgammaDD[0][1]': mpf('0.0765438379382772951846664774809'), 'IDgammaDD[0][2]': mpf('0.570539327433565151458524763213'), 'IDgammaDD[1][0]': mpf('0.0765438379382772951846664774809'), 'IDgammaDD[1][1]': mpf('0.913606065931442108137857582766'), 'IDgammaDD[1][2]': mpf('-0.103931622633820480927404911623'), 'IDgammaDD[2][0]': mpf('0.570539327433565151458524763213'), 'IDgammaDD[2][1]': mpf('-0.103931622633820480927404911623'), 'IDgammaDD[2][2]': mpf('1.40437373720499632990640350854'), 'IDKDD[0][0]': mpf('0.241372059893883246591607766283'), 'IDKDD[0][1]': mpf('-0.343388055113192085165370755198'), 'IDKDD[0][2]': mpf('-0.590459359927831888413702466884'), 'IDKDD[1][0]': mpf('-0.343388055113192085165370755198'), 'IDKDD[1][1]': mpf('0.401858289197972155074762087828'), 'IDKDD[1][2]': mpf('-0.422578171813805727676385503333'), 'IDKDD[2][0]': mpf('-0.590459359927831888413702466884'), 'IDKDD[2][1]': mpf('-0.422578171813805727676385503333'), 'IDKDD[2][2]': mpf('-0.013929878808310394879734504443'), 'IDValencia3velocityU[0]': mpf('-0.0100201358532234154260289073442'), 'IDValencia3velocityU[1]': mpf('0.580690518031488621148890984891'), 'IDValencia3velocityU[2]': mpf('0.37278552232712119473473993627'), 'rho_initial': mpf('0.284819845942236515284909115899'), 'uBL4D[0]': mpf('-0.614072962517390468138181646896'), 'uBL4D[1]': mpf('0.0'), 'uBL4D[2]': mpf('0.0'), 'uBL4D[3]': mpf('0.105130532924779727240517791034'), 
'uBL4U[0]': mpf('2.13294264904845268804664985272'), 'uBL4U[1]': mpf('0.0'), 'uBL4U[2]': mpf('0.0'), 'uBL4U[3]': mpf('0.858967220098163204426095668694'), 'uKS4U[0]': mpf('2.13294264904845268804664985272'), 'uKS4U[1]': mpf('0.0'), 'uKS4U[2]': mpf('0.0'), 'uKS4U[3]': mpf('0.858967220098163204426095668694')} 10 | -------------------------------------------------------------------------------- /Deprecated/CurviBoundaryConditions/boundary_conditions/BCs_data_structs.h: -------------------------------------------------------------------------------- 1 | 2 | typedef struct __ghostzone_map__ { 3 | short i0,i1,i2; // i0,i1,i2 stores values from -1 (used to indicate outer boundary) 4 | // to Nxx_plus_2NGHOSTS*. We assume that grid extents beyond the 5 | // limits of short (i.e., beyond about 32,000) are unlikely. This 6 | // can be easily extended if needed, though. 7 | } gz_map; 8 | 9 | const int8_t MAXFACE = -1; 10 | const int8_t NUL = +0; 11 | const int8_t MINFACE = +1; 12 | 13 | typedef struct __parity__ { 14 | int8_t parity[10]; // We store the 10 parity conditions in 10 int8_t integers, 15 | // one for each condition. Note that these conditions can 16 | // only take one of two values: +1 or -1, hence the use of 17 | // int8_t, the smallest C data type. 18 | } parity_condition; 19 | 20 | typedef struct __inner_bc__ { 21 | gz_map inner_bc_dest_pt; 22 | gz_map inner_bc_src_pt; 23 | int8_t parity[10]; // We store the 10 parity conditions in 10 int8_t integers, 24 | // one for each condition. Note that these conditions can 25 | // only take one of two values: +1 or -1, hence the use of 26 | // int8_t, the smallest C data type. 27 | } inner_bc; 28 | 29 | typedef struct __outer_bc__ { 30 | gz_map outer_bc_dest_pt; 31 | int8_t FACEi0,FACEi1,FACEi2; // FACEi* takes values of -1, 0, and +1 only, 32 | // corresponding to MAXFACE, NUL, and MINFACE 33 | // respectively. 34 | // Thus int8_t (one byte each, the smallest C 35 | // type) is sufficient. 36 | } outer_bc; 37 | 38 | typedef struct __bcstruct__ { 39 | outer_bc **outer; // Array of 1D arrays, of length 40 | // [NGHOSTS][num_ob_gz_pts[which_outer_ghostzone_point]] 41 | 42 | inner_bc **inner; // Array of 1D arrays, of length 43 | // [NGHOSTS][num_ib_gz_pts[which_inner_ghostzone_point]] 44 | 45 | // Arrays storing number of outer/inner boundary ghostzone points at each ghostzone, 46 | // of length NGHOSTS: 47 | int *num_ob_gz_pts; 48 | int *num_ib_gz_pts; 49 | } bc_struct; 50 | -------------------------------------------------------------------------------- /NRPyPN/PN_Hamiltonian_SSS.py: -------------------------------------------------------------------------------- 1 | # As documented in the NRPyPN notebook 2 | # PN-Hamiltonian-SSS.ipynb, this Python script 3 | # generates spin-spin-spin coupling pieces of the 4 | # post-Newtonian (PN) Hamiltonian, up to and 5 | # including 3PN order. 6 | 7 | # Core functions: 8 | # f_H_SSS_3PN(m1,m2, n12U,n21U, S1U,S2U, p1U,p2U, q) 9 | # Compute the complete H_SSS_3PN term and store to 10 | # global variable of the same name. 11 | 12 | # Author: Zach Etienne 13 | # zachetie **at** gmail **dot* com 14 | 15 | # Step 0: Add NRPy's directory to the path 16 | # https://stackoverflow.com/questions/16780014/import-file-from-parent-directory 17 | import indexedexpNRPyPN as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) 
support 18 | from NRPyPN_shortcuts import div,dot,cross # NRPyPN: shortcuts for e.g., vector operations 19 | 20 | ################################# 21 | ################################# 22 | # Step 1: 3PN spin-spin-spin term, from Eq. 3.12 of 23 | # Levi and Steinhoff (2015): 24 | # https://arxiv.org/abs/1410.2601 25 | def f_H_SSS_3PN(m1,m2, n12U,n21U, S1U,S2U, p1U,p2U, r12): 26 | def f_H_SSS_3PN_pt(m1,m2, nU, S1U,S2U, p1U,p2U, r): 27 | p2_minus_m2_over_4m1_p1 = ixp.zerorank1() 28 | for i in range(3): 29 | p2_minus_m2_over_4m1_p1[i] = p2U[i] - m2/(4*m1)*p1U[i] 30 | H_SSS_3PN_pt = (+div(3,2)*(+dot(S1U,S1U)*dot(S2U,cross(nU,p1U)) 31 | +dot(S1U,nU)*dot(S2U,cross(S1U,p1U)) 32 | -5*dot(S1U,nU)**2*dot(S2U,cross(nU,p1U)) 33 | +dot(nU,cross(S1U,S2U))*(+dot(S1U,p1U) 34 | -5*dot(S1U,nU)*dot(p1U,nU))) 35 | -3*m1/(2*m2)*( +dot(S1U,S1U) *dot(S2U,cross(nU,p2U)) 36 | +2*dot(S1U,nU) *dot(S2U,cross(S1U,p2U)) 37 | -5*dot(S1U,nU)**2*dot(S2U,cross(nU,p2U))) 38 | -dot(cross(S1U,nU),p2_minus_m2_over_4m1_p1)*(dot(S1U,S1U) - 5*dot(S1U,nU)**2))/(m1**2*r**4) 39 | return H_SSS_3PN_pt 40 | global H_SSS_3PN 41 | H_SSS_3PN = (+f_H_SSS_3PN_pt(m1,m2, n12U, S1U,S2U, p1U,p2U, r12) 42 | +f_H_SSS_3PN_pt(m2,m1, n21U, S2U,S1U, p2U,p1U, r12)) 43 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NRPy+, SENRv2, and the NRPy+ Jupyter Tutorial 2 | [![CI: Ubuntu 20.04](https://github.com/zachetienne/nrpytutorial/actions/workflows/github-actions-ubuntu20.yml/badge.svg)](https://github.com/zachetienne/nrpytutorial/actions/workflows/github-actions-ubuntu20.yml) 3 | [![CI: Ubuntu 22.04](https://github.com/zachetienne/nrpytutorial/actions/workflows/github-actions-ubuntu22.yml/badge.svg)](https://github.com/zachetienne/nrpytutorial/actions/workflows/github-actions-ubuntu22.yml) 4 | [![CI: MacOS 12](https://github.com/zachetienne/nrpytutorial/actions/workflows/github-actions-MacOS12.yml/badge.svg)](https://github.com/zachetienne/nrpytutorial/actions/workflows/github-actions-MacOS12.yml) 5 | [![CI: Windows 2022](https://github.com/zachetienne/nrpytutorial/actions/workflows/github-actions-windows2022.yml/badge.svg)](https://github.com/zachetienne/nrpytutorial/actions/workflows/github-actions-windows2022.yml) 6 | [![Binder](https://mybinder.org/badge.svg)](https://mybinder.org/v2/gh/zachetienne/nrpytutorial/master?filepath=NRPyPlus_Tutorial.ipynb) 7 | 8 | This repository houses 9 | * [The newest version of NRPy+: Python-Based Code Generation for Numerical Relativity... and Beyond](https://arxiv.org/abs/1712.07658), 10 | * The second version of SENR, the Simple, Efficient Numerical Relativity code (see the "Colliding Black Holes" Start-to-Finish tutorial notebook), and 11 | * The NRPy+ Jupyter Tutorial: An Introduction to Python-Based Code Generation for Numerical Relativity... and Beyond! 12 | 13 | To explore the NRPy+ tutorial without downloading, check out the [NRPy+ tutorial mybinder](https://mybinder.org/v2/gh/zachetienne/nrpytutorial/master?filepath=NRPyPlus_Tutorial.ipynb). 14 | 15 | If you would like to explore the NRPy+ tutorial on your local computer, you'll need to install Python, Jupyter, Sympy, and Matplotlib. Once they are installed, [you may find this useful](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/execute.html) 16 | 17 | In certain circumstances, developers may wish to execute one of these Jupyter notebooks from the command line. 
For example, the notebook may construct an [Einstein Toolkit](https://einsteintoolkit.org) thorn. In such a case, the following command should be useful: 18 | 19 | `jupyter nbconvert --to notebook --inplace --execute --ExecutePreprocessor.timeout=-1 [Jupyter notebook file]` 20 | 21 | Alternatively, one can simply use the script: 22 | 23 | `./run_Jupyter_notebook.sh [Jupyter notebook file]` -------------------------------------------------------------------------------- /UnitTesting/evaluate_globals.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from importlib import import_module 3 | 4 | # evaluate_globals sequentially imports self.module, runs self.initialization_string, calls self.function on 5 | # self.module, and gets an expression for each global in self.global_list. It returns a dictionary whose keys represent 6 | # the names of the globals and whose values represent the expressions calculated for each respective global. 7 | 8 | # Called by run_test 9 | 10 | # Uses self.module, self.module_name, self.initialization_string, self.function, self.global_list 11 | 12 | 13 | def evaluate_globals(self): 14 | 15 | logging.debug(' Importing ' + self.module + '...') 16 | 17 | # Try to import self.module 18 | try: 19 | imported_module = import_module(self.module) 20 | # If user supplied an incorrect module, error 21 | except ImportError: 22 | logging.error(" Attribute 'module' for " + self.module_name + " does not exist as a module. This attribute " 23 | "should be what you would type if you were importing 'module' in your own file.\n") 24 | self.assertTrue(False) 25 | except ValueError: 26 | logging.error(" Attribute 'module' for " + self.module_name + " is empty -- it must have a value.") 27 | self.assertTrue(False) 28 | 29 | logging.debug(' ...Success: Imported module.') 30 | 31 | logging.debug(' Executing initialization_string...') 32 | 33 | logging.debug('initialization_string: ' + self.initialization_string) 34 | 35 | # Execute self.initialization_string 36 | exec(self.initialization_string) 37 | 38 | logging.debug(' ...Successfully executed.') 39 | 40 | # If the user supplied a function, add it to string_exec 41 | if self.function != '': 42 | string_exec = self.module_name + '.' + self.function + '\n' 43 | else: 44 | string_exec = '' 45 | 46 | # Add each global to string_exec 47 | for glob in self.global_list: 48 | string_exec += glob + '=' + self.module_name + '.'
+ glob + '\n' 49 | 50 | # Initializing location that result will be stored in 51 | var_dict = {} 52 | 53 | logging.debug(' Executing function call and global assignment...') 54 | 55 | # Execute string_exec with imported_module as environment and store result in var_dict 56 | exec(string_exec, {self.module_name: imported_module}, var_dict) 57 | 58 | logging.debug(' ...Successfully executed.') 59 | 60 | return var_dict 61 | -------------------------------------------------------------------------------- /benchmark-z4cnrpy.par: -------------------------------------------------------------------------------- 1 | ActiveThorns = " 2 | ADMBase 3 | CarpetX 4 | ErrorEstimator 5 | Formaline 6 | IOUtil 7 | ODESolvers 8 | SystemTopology 9 | TimerReport 10 | TmunuBase 11 | Z4cNRPy 12 | " 13 | 14 | $nlevels = 8 15 | $ncells_x = 128 16 | $ncells_y = 128 17 | $ncells_z = 128 18 | $blocking_factor = 8 19 | $max_grid_size = 32 20 | $max_tile_size_x = 1073741824 21 | $max_tile_size_y = 16 22 | $max_tile_size_z = 16 23 | 24 | CarpetX::verbose = yes #TODO no 25 | Cactus::cctk_show_schedule = yes #TODO no 26 | CarpetX::poison_undefined_values = yes #TODO no 27 | 28 | Cactus::presync_mode = "mixed-error" 29 | 30 | Cactus::terminate = "iteration" 31 | Cactus::cctk_itlast = 10 32 | 33 | CarpetX::xmin = 0.0 34 | CarpetX::ymin = 0.0 35 | CarpetX::zmin = 0.0 36 | 37 | CarpetX::xmax = 1.0 38 | CarpetX::ymax = 1.0 39 | CarpetX::zmax = 1.0 40 | 41 | CarpetX::ncells_x = $ncells_x 42 | CarpetX::ncells_y = $ncells_y 43 | CarpetX::ncells_z = $ncells_z 44 | 45 | CarpetX::blocking_factor_x = $blocking_factor 46 | CarpetX::blocking_factor_y = $blocking_factor 47 | CarpetX::blocking_factor_z = $blocking_factor 48 | 49 | CarpetX::max_grid_size_x = $max_grid_size 50 | CarpetX::max_grid_size_y = $max_grid_size 51 | CarpetX::max_grid_size_z = $max_grid_size 52 | 53 | CarpetX::max_tile_size_x = $max_tile_size_x 54 | CarpetX::max_tile_size_y = $max_tile_size_y 55 | CarpetX::max_tile_size_z = $max_tile_size_z 56 | 57 | CarpetX::reflection_x = yes 58 | CarpetX::reflection_y = yes 59 | CarpetX::reflection_z = yes 60 | CarpetX::dirichlet_upper_x = yes 61 | CarpetX::dirichlet_upper_y = yes 62 | CarpetX::dirichlet_upper_z = yes 63 | 64 | CarpetX::max_num_levels = $nlevels 65 | CarpetX::regrid_every = 0 66 | CarpetX::regrid_error_threshold = 2.0 67 | 68 | ErrorEstimator::region_shape = "cube" 69 | ErrorEstimator::scale_by_resolution = yes 70 | 71 | CarpetX::prolongation_type = "ddf" 72 | CarpetX::prolongation_order = 5 73 | 74 | CarpetX::ghost_size = 3 75 | 76 | ODESolvers::method = "RK4" 77 | CarpetX::dtfac = 0.5 78 | 79 | ADMBase::initial_data = "Cartesian Minkowski" 80 | ADMBase::initial_lapse = "one" 81 | ADMBase::initial_shift = "zero" 82 | 83 | IO::out_dir = $parfile 84 | IO::out_every = 1 # TODO 0 85 | 86 | TimerReport::out_every = 10 87 | TimerReport::out_filename = "TimerReport" 88 | TimerReport::output_all_timers = no 89 | TimerReport::output_all_timers_readable = no 90 | TimerReport::output_all_timers_together = no 91 | TimerReport::output_schedule_timers = no 92 | TimerReport::n_top_timers = 50 93 | -------------------------------------------------------------------------------- /in_progress-SEOBNR/SEOBNR/constant_coeffs.py: -------------------------------------------------------------------------------- 1 | # Constants of fit to numerical relativity for the spinning effective one-body formulation 2 | 3 | # Import necessary NumPy, SymPy, and SEOBNR modules 4 | import numpy as np 5 | 6 | # Compute fits to numerical relativity 7 | 
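# Illustrative call (assumed values, not taken from this module): eta is the symmetric mass
# ratio, gamma the Euler-Mascheroni constant (~0.5772, which enters Delta5), and a a
# dimensionless spin parameter, e.g.
#   K, D0, D1, D2, D3, D4, D5, D5l, dSO, dSS = compute_const_coeffs(0.25, 0.5772156649, 0.7)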
def compute_const_coeffs(eta, gamma, a): 8 | 9 | # Define frequently-used constants 10 | asq = a*a 11 | pisq = np.pi*np.pi 12 | 13 | # Define constants that determine the fitting parameter K 14 | # See the discussion in https://arxiv.org/pdf/1311.2544.pdf between Equations (3) and (4) 15 | K0 = 1.712 16 | K1 = -1.803949138004582 17 | K2 = -39.77229225266885 18 | K3 = 103.16588921239249 19 | 20 | # Compute the fitting parameter K 21 | # See https://arxiv.org/abs/0912.3517 Equation (5.67) and the discussion following Equation 6.9 22 | # as well as https://arxiv.org/pdf/1311.2544.pdf 23 | K = K0 + K1*eta + K2*eta*eta + K3*eta*eta*eta 24 | 25 | # Define more frequently-used constants 26 | EtaKm1 = eta*K - 1. 27 | EtaKm1sq = EtaKm1*EtaKm1 28 | 29 | # Compute the Post-Newtonian coefficients 30 | # See https://arxiv.org/abs/0912.3517 Equations (5.77) to (5.81) and 31 | # https://arxiv.org/pdf/1311.2544.pdf Equation (2) 32 | Delta0 = K*(EtaKm1 - 1.) 33 | Delta1 = -2.*(Delta0 + K)*EtaKm1 34 | 35 | Delta1sq = Delta1*Delta1 36 | Delta1cu = Delta1*Delta1sq 37 | Delta1ft = Delta1cu*Delta1 38 | 39 | Delta2 = 0.5*Delta1*(Delta1 - 4.*EtaKm1) - asq*EtaKm1sq*Delta0 40 | 41 | Delta2sq = Delta2*Delta2 42 | 43 | Delta3 = -Delta1cu/3. + Delta1*Delta2 + Delta1sq*EtaKm1 - 2.*(Delta2 - EtaKm1)*EtaKm1 - asq*Delta1*EtaKm1sq 44 | Delta4 = 1./12.*(6*asq*(Delta1sq - 2*Delta2)*EtaKm1sq + 3*Delta1ft - 8*EtaKm1*Delta1cu - 12*Delta2*Delta1sq 45 | + 12*(2*EtaKm1*Delta2 + Delta3)*Delta1 + 12*(94./3. - 41./32.*pisq)*EtaKm1sq 46 | + 6*(Delta2*Delta2 - 4*Delta3*EtaKm1)) 47 | Delta5 = EtaKm1sq*(-4237./60. + 128./5.*gamma + 2275./512.*pisq - asq*(Delta1cu - 3.*Delta1*Delta2 + 3.*Delta3)/3. 48 | - (Delta1ft*Delta1 - 5.*Delta1cu*Delta2 + 5.*Delta1*Delta2sq + 5.*Delta1sq*Delta3 49 | - 5.*Delta2*Delta3 - 5.*Delta1*Delta4)/(5.*EtaKm1sq) + (Delta1ft - 4.*Delta1sq*Delta2 50 | + 2.*Delta2sq + 4.*Delta1*Delta3 - 4.*Delta4)/(2*EtaKm1) + (256./5.)*np.log(2)) 51 | Delta5l = (64./5.)*EtaKm1sq 52 | 53 | #Add comment here 54 | dSO = -74.71 - 156.*eta + 627.5*eta*eta 55 | dSS = 8.127 - 154.2*eta + 830.8*eta*eta 56 | 57 | return K, Delta0, Delta1, Delta2, Delta3, Delta4, Delta5, Delta5l, dSO, dSS 58 | -------------------------------------------------------------------------------- /NRPyEOS/NRPyEOS_validation.c: -------------------------------------------------------------------------------- 1 | #include "NRPyEOS.h" 2 | 3 | void NRPyEOS_validation(const NRPyEOS_params_tabulated *restrict eos_params, const int N, const double Ye, const char *restrict filename) { 4 | // Step 1: Set test parameters 5 | const int keys[NRPyEOS_ntablekeys] = { 6 | NRPyEOS_press_key,NRPyEOS_eps_key,NRPyEOS_entropy_key, 7 | NRPyEOS_munu_key,NRPyEOS_cs2_key,NRPyEOS_depsdT_key, 8 | NRPyEOS_dPdrho_key,NRPyEOS_dPdeps_key,NRPyEOS_muhat_key, 9 | NRPyEOS_mu_e_key,NRPyEOS_mu_p_key,NRPyEOS_mu_n_key, 10 | NRPyEOS_Xa_key,NRPyEOS_Xh_key,NRPyEOS_Xn_key,NRPyEOS_Xp_key, 11 | NRPyEOS_Abar_key,NRPyEOS_Zbar_key,NRPyEOS_Gamma_key 12 | }; 13 | NRPyEOS_error_report report; 14 | double outvars[NRPyEOS_ntablekeys]; 15 | 16 | // Step 2.a: Print information about the test 17 | printf("(NRPyEOS) Performing test for Ye = %.2lf\n",Ye); 18 | printf("(NRPyEOS) The following quantities will be interpolated:\n"); 19 | for(int i=0;ieos_tempmin + j*eos_params->dtemp; 27 | for(int i=0;ieos_rhomin + i*eos_params->drho; 30 | 31 | // Step 3.c: Interpolate all EOS quantities 32 | NRPyEOS_from_rho_Ye_T_interpolate_n_quantities( eos_params, NRPyEOS_ntablekeys,rho,Ye,T, keys,outvars, &report ); 33 | 34 | // Step 
3.d: Output results to file 35 | fprintf(fp,"%d %d %.15e %.15e",i,j,rho,T); 36 | for(int nn=0;nn $i-new && mv $i-new $i 9 | git diff $i |grep -v "image/png"|grep -E "^\-|^\+"|grep -v '^\-\-\-'| \ 10 | grep -v "metadata\":"|grep -v "\"execution\":"|grep -v "\"iopub."| \ 11 | grep -v "\"shell.execute"|grep -v "\"version\":"|grep -v " }"$|grep -v " },"$| cdiff |cat 12 | done 13 | 14 | exit 15 | 16 | # Skip Baikal and all Start-to-Finish notebooks, except ScalarWave for now 17 | # Tutorial-Start_to_Finish-ScalarWave*.ipynb 18 | # Let's try all but Psi4 Start-to-Finish and Baikal notebooks. Also cmdlinehelper yields whitespace differences in Python 2.7 19 | for i in Tutorial-[A]*.ipynb Tutorial-[C-RT-Z]*.ipynb Tutorial-B[B-Z]*.ipynb Tutorial-S[A-SU-Z]*.ipynb Tutorial-Start_to_Finish-*[^4].ipynb NRPyPN/PN*.ipynb; do 20 | # For some reason (as of ~July 20, 2020) the hydro-without-hydro notebook takes too long in Travis, causing a timeout: 21 | # Also as of Aug 3, 2020 the new WaveToyNRPy notebook is broken, as it seems Travis doesn't support parallel codegens 22 | if [ $i != "Tutorial-Start_to_Finish-BSSNCurvilinear-Neutron_Star-Hydro_without_Hydro.ipynb" ] && [ $i != "Tutorial-ETK_thorn-WaveToyNRPy.ipynb" ] && [ $i != "Tutorial-Start_to_Finish-FishboneMoncriefID_standalone.ipynb" ]; then 23 | ./run_Jupyter_notebook.sh $i # notimer 24 | cat $i | sed "s/\\\r\\\n/\\\n/g" > $i-new && mv $i-new $i 25 | git diff $i |grep -v "image/png"|grep -E "^\-|^\+"|grep -v '^\-\-\-'| \ 26 | grep -v "metadata\":"|grep -v "\"execution\":"|grep -v "\"iopub."| \ 27 | grep -v "\"shell.execute"|grep -v "\"version\":"|grep -v " }"$|grep -v " },"$| cdiff |cat 28 | # git diff $i | grep -v "image/png" | cdiff | cat 29 | # echo Number of lines different in the git diff: `git diff|grep -v image/png|wc -l` 30 | fi 31 | done 32 | 33 | # ./run_Jupyter_notebook.sh Tutorial-Finite_Difference_Derivatives.ipynb && git diff Tutorial-Finite_Difference_Derivatives.ipynb && \ 34 | # ./run_Jupyter_notebook.sh Tutorial-Numerical_Grids.ipynb && git diff Tutorial-Numerical_Grids.ipynb && \ 35 | # ./run_Jupyter_notebook.sh Tutorial-Coutput__Parameter_Interface.ipynb && git diff Tutorial-Coutput__Parameter_Interface.ipynb && \ 36 | # ./run_Jupyter_notebook.sh Tutorial-cmdline_helper.ipynb && git diff Tutorial-cmdline_helper.ipynb && \ 37 | # ./run_Jupyter_notebook.sh Tutorial-Symbolic_Tensor_Rotation.ipynb && git diff Tutorial-Symbolic_Tensor_Rotation.ipynb 38 | -------------------------------------------------------------------------------- /in_progress-GiRaFFE_NRPy/GiRaFFEfood_NRPy/BasisTransform.py: -------------------------------------------------------------------------------- 1 | import os, sys # Standard Python modules for multiplatform OS-level functions 2 | # First, we'll add the parent directory to the list of directories Python will check for modules. 3 | nrpy_dir_path = os.path.join("..") 4 | if nrpy_dir_path not in sys.path: 5 | sys.path.append(nrpy_dir_path) 6 | 7 | # Import needed Python modules 8 | import NRPy_param_funcs as par # NRPy+: Parameter interface 9 | import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support 10 | import reference_metric as rfm # NRPy+: Reference metric support 11 | 12 | #Step 0: Set the spatial dimension parameter to 3. 
13 | par.set_parval_from_str("grid::DIM", 3) 14 | DIM = par.parval_from_str("grid::DIM") 15 | 16 | def basis_transform(CoordSystem, AD, ValenciavU, BU=None): 17 | global AD_dst, ValenciavU_dst, BU_dst 18 | 19 | # Set coordinate system to dst_basis 20 | par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem) 21 | rfm.reference_metric() 22 | 23 | # We define Jacobians relative to the center of the destination grid, at a point $x^j_{\rm dst}=$(`xx0,xx1,xx2`)${}_{\rm dst}$ on the destination grid: 24 | # $$ 25 | # {\rm Jac\_dUCart\_dDdstUD[i][j]} = \frac{\partial x^i_{\rm Cart}}{\partial x^j_{\rm dst}}, 26 | # $$ 27 | 28 | # via exact differentiation (courtesy SymPy), and the inverse Jacobian 29 | # $$ 30 | # {\rm Jac\_dUdst\_dDCartUD[i][j]} = \frac{\partial x^i_{\rm dst}}{\partial x^j_{\rm Cart}}, 31 | # $$ 32 | 33 | # using NRPy+'s `generic_matrix_inverter3x3()` function. In terms of these, the transformation of BSSN tensors from Cartesian to the destination grid's `"reference_metric::CoordSystem"` coordinates may be written: 34 | 35 | # $$ 36 | # B^i_{\rm dst} = \frac{\partial x^i_{\rm dst}}{\partial x^\ell_{\rm Cart}} B^\ell_{\rm Cart}, 37 | # $$ 38 | 39 | # while for lowered indices we have 40 | 41 | # $$ 42 | # A^{\rm dst}_{i} = 43 | # \frac{\partial x^\ell_{\rm Cart}}{\partial x^i_{\rm dst}} A^{\rm Cart}_{\ell}\\ 44 | # $$ 45 | 46 | # Step 3.a: Next construct Jacobian and inverse Jacobian matrices: 47 | Jac_dUCart_dDrfmUD,Jac_dUrfm_dDCartUD = rfm.compute_Jacobian_and_inverseJacobian_tofrom_Cartesian() 48 | 49 | # Step 3.b: Convert basis of all BSSN *vectors* from Cartesian to destination basis 50 | ValenciavU_dst = rfm.basis_transform_vectorU_from_Cartesian_to_rfmbasis(Jac_dUrfm_dDCartUD, ValenciavU) 51 | 52 | # Note that the function below should really be named "...basis_transform_vectorUDfrom_Cartesian_to_rfmbasis.."
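    # Per the lowered-index rule above, A_i transforms with d x^ell_Cart / d x^i_dst, so the
    # forward Jacobian Jac_dUCart_dDrfmUD is passed below (rather than the inverse Jacobian
    # used for ValenciavU), even though the helper's name says "vectorU".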
53 | AD_dst = rfm.basis_transform_vectorU_from_Cartesian_to_rfmbasis(Jac_dUCart_dDrfmUD, AD) 54 | 55 | BU_dst = ixp.zerorank1() 56 | if BU != None: 57 | BU_dst = rfm.basis_transform_vectorU_from_Cartesian_to_rfmbasis(Jac_dUrfm_dDCartUD, BU) 58 | -------------------------------------------------------------------------------- /nbconvert_latex_settings/style_ipython.tex.j2: -------------------------------------------------------------------------------- 1 | ((= IPython input/output style =)) 2 | 3 | ((*- extends 'base.tex.j2' -*)) 4 | 5 | % Custom definitions 6 | ((* block definitions *)) 7 | ((( super() ))) 8 | 9 | % Pygments definitions 10 | ((( resources.latex.pygments_definitions ))) 11 | 12 | % Exact colors from NB 13 | \definecolor{incolor}{rgb}{0.0, 0.0, 0.5} 14 | \definecolor{outcolor}{rgb}{0.545, 0.0, 0.0} 15 | 16 | ((* endblock definitions *)) 17 | 18 | %=============================================================================== 19 | % Input 20 | %=============================================================================== 21 | 22 | ((* block input scoped *)) 23 | ((*- if resources.global_content_filter.include_input_prompt *)) 24 | ((( add_prompt(cell.source | highlight_code(strip_verbatim=True, metadata=cell.metadata), cell, 'In ', 'incolor') ))) 25 | ((*- else *)) 26 | ((( cell.source | highlight_code(strip_verbatim=True, metadata=cell.metadata) ))) 27 | ((* endif *)) 28 | ((* endblock input *)) 29 | 30 | 31 | %=============================================================================== 32 | % Output 33 | %=============================================================================== 34 | 35 | ((* block execute_result scoped *)) 36 | ((*- for type in output.data | filter_data_type -*)) 37 | ((*- if resources.global_content_filter.include_output_prompt -*)) 38 | ((*- if type in ['text/plain'] *)) 39 | ((( add_prompt(output.data['text/plain'] | escape_latex, cell, 'Out', 'outcolor') ))) 40 | ((* else -*)) 41 | \texttt{\color{outcolor}Out[{\color{outcolor}((( cell.execution_count )))}]:}((( super() ))) 42 | ((*- endif -*)) 43 | ((*- else -*)) 44 | ((*- if type in ['text/plain'] *)) 45 | ((( output.data['text/plain'] | escape_latex ))) 46 | ((* else -*)) 47 | ((( super() ))) 48 | ((*- endif -*)) 49 | ((*- endif -*)) 50 | ((*- endfor -*)) 51 | ((* endblock execute_result *)) 52 | 53 | 54 | %============================================================================== 55 | % Support Macros 56 | %============================================================================== 57 | 58 | % Name: draw_prompt 59 | % Purpose: Renders an output/input prompt 60 | ((* macro add_prompt(text, cell, prompt, prompt_color) -*)) 61 | ((*- if cell.execution_count is defined -*)) 62 | ((*- set execution_count = "" ~ (cell.execution_count | replace(None, " ")) -*)) 63 | ((*- else -*)) 64 | ((*- set execution_count = " " -*)) 65 | ((*- endif -*)) 66 | ((*- set indention = " " * (execution_count | length + 7) -*)) 67 | \begin{Verbatim}[commandchars=\\\{\}] 68 | ((( text | add_prompts(first='{\\color{' ~ prompt_color ~ '}' ~ prompt ~ '[{\\color{' ~ prompt_color ~ '}' ~ execution_count ~ '}]:} ', cont=indention) ))) 69 | \end{Verbatim} 70 | ((*- endmacro *)) 71 | -------------------------------------------------------------------------------- /in_progress-GiRaFFE_NRPy/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py: -------------------------------------------------------------------------------- 1 | # Step 0: Add NRPy's directory to the path 2 | # 
https://stackoverflow.com/questions/16780014/import-file-from-parent-directory 3 | import os,sys 4 | nrpy_dir_path = os.path.join("..") 5 | if nrpy_dir_path not in sys.path: 6 | sys.path.append(nrpy_dir_path) 7 | nrpy_dir_path = os.path.join("../..") 8 | if nrpy_dir_path not in sys.path: 9 | sys.path.append(nrpy_dir_path) 10 | 11 | # Step 0.a: Import the NRPy+ core modules and set the reference metric to Cartesian 12 | import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends 13 | import NRPy_param_funcs as par # NRPy+: Parameter interface 14 | import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support 15 | import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Common_Functions as gfcf # Some useful functions for GiRaFFE initial data. 16 | 17 | import reference_metric as rfm # NRPy+: Reference metric support 18 | par.set_parval_from_str("reference_metric::CoordSystem","Cartesian") 19 | rfm.reference_metric() 20 | 21 | # Step 1a: Set commonly used parameters. 22 | thismodule = __name__ 23 | 24 | # The angular velocity of the "neutron star" 25 | Omega_aligned_rotator = par.Cparameters("REAL",thismodule,"Omega_aligned_rotator",1e3) 26 | B_p_aligned_rotator,R_NS_aligned_rotator = par.Cparameters("REAL",thismodule, 27 | # B_p_aligned_rotator = the intensity of the magnetic field and 28 | # R_NS_aligned_rotator= "Neutron star" radius 29 | ["B_p_aligned_rotator","R_NS_aligned_rotator"], 30 | [1e-5, 1.0]) 31 | 32 | # Step 2: Set the vectors A and E in Spherical coordinates 33 | def Ar_AR(r,theta,phi, **params): 34 | return sp.sympify(0) 35 | 36 | def Ath_AR(r,theta,phi, **params): 37 | return sp.sympify(0) 38 | 39 | def Aph_AR(r,theta,phi, **params): 40 | # \mu \varpi / r^3 41 | # \varpi = sqrt(x^2+y^2) = r \sin(\theta) 42 | varpi = r * sp.sin(theta) 43 | mu = B_p_aligned_rotator * R_NS_aligned_rotator**3 / 2 44 | return mu * varpi**2 / (r**3) 45 | 46 | import Min_Max_and_Piecewise_Expressions as noif 47 | #Step 3: Compute v^i 48 | def ValenciavU_func_AR(**params): 49 | LeviCivitaSymbolDDD = ixp.LeviCivitaSymbol_dim3_rank3() 50 | 51 | unit_zU = ixp.zerorank1() 52 | unit_zU[2] = sp.sympify(1) 53 | 54 | r = rfm.xxSph[0] 55 | 56 | ValenciavU = ixp.zerorank1() 57 | for i in range(3): 58 | for j in range(3): 59 | for k in range(3): 60 | ValenciavU[i] += noif.coord_leq_bound(r,R_NS_aligned_rotator)*LeviCivitaSymbolDDD[i][j][k] * Omega_aligned_rotator * unit_zU[j] * rfm.xx[k] 61 | 62 | return ValenciavU -------------------------------------------------------------------------------- /Tutorial-Loop_Generation_Cache_Blocking_soln.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "\n", 8 | "\n", 15 | "\n", 16 | "# Exercise (Loop Generation) Solution\n", 17 | "\n", 18 | "## *Courtesy Ken Sible*" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 1, 24 | "metadata": { 25 | "execution": { 26 | "iopub.execute_input": "2021-03-07T17:14:40.245536Z", 27 | "iopub.status.busy": "2021-03-07T17:14:40.244431Z", 28 | "iopub.status.idle": "2021-03-07T17:14:40.249741Z", 29 | "shell.execute_reply": "2021-03-07T17:14:40.249146Z" 30 | } 31 | }, 32 | "outputs": [ 33 | { 34 | "name": "stdout", 35 | "output_type": "stream", 36 | "text": [ 37 | "for (int n = 0; n < (Nt - 1); n++) {\n", 38 | " u[n][0] = u[n][Nx] = 0;\n", 39 | " for (int k = 1; k < (Nx - 1); k++) {\n", 40 | " u[n + 1][k] = u[n][k] + r*(u[n][k + 1] - 
2*u[n][k] + u[n][k - 1]);\n", 41 | " } // END LOOP: for (int k = 1; k < (Nx - 1); k++)\n", 42 | " for (int k = 0; k < Nx; k++) {\n", 43 | " u[n][k] = u[n + 1][k];\n", 44 | " } // END LOOP: for (int k = 0; k < Nx; k++)\n", 45 | "} // END LOOP: for (int n = 0; n < (Nt - 1); n++)\n", 46 | "\n" 47 | ] 48 | } 49 | ], 50 | "source": [ 51 | "from loop import loop # Import NRPy+ module for loop generation\n", 52 | "\n", 53 | "boundary = 'u[n][0] = u[n][Nx] = 0;\\n'\n", 54 | "inner_1 = loop('k', '1', '(Nx - 1)', '1', '', interior='u[n + 1][k] = u[n][k] + r*(u[n][k + 1] - 2*u[n][k] + u[n][k - 1]);')\n", 55 | "inner_2 = loop('k', '0', 'Nx', '1', '', interior='u[n][k] = u[n + 1][k];')\n", 56 | "print(loop('n', '0', '(Nt - 1)', '1', '', interior=(boundary + inner_1 + inner_2[:-1])))" 57 | ] 58 | } 59 | ], 60 | "metadata": { 61 | "file_extension": ".py", 62 | "kernelspec": { 63 | "display_name": "Python 3", 64 | "language": "python", 65 | "name": "python3" 66 | }, 67 | "language_info": { 68 | "codemirror_mode": { 69 | "name": "ipython", 70 | "version": 3 71 | }, 72 | "file_extension": ".py", 73 | "mimetype": "text/x-python", 74 | "name": "python", 75 | "nbconvert_exporter": "python", 76 | "pygments_lexer": "ipython3", 77 | "version": "3.8.2" 78 | }, 79 | "mimetype": "text/x-python", 80 | "name": "python", 81 | "npconvert_exporter": "python", 82 | "pygments_lexer": "ipython3", 83 | "version": 3 84 | }, 85 | "nbformat": 4, 86 | "nbformat_minor": 2 87 | } 88 | -------------------------------------------------------------------------------- /UnitTesting/core_Jupyter_notebook_testsuite.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # Error out if any commands complete with an error. 4 | 5 | # Run GiRaFFE unit tests first: 6 | # for i in in_progress/Tutorial-Start_to_Finish-GiRaFFE_NRPy-1D_tests-staggered.ipynb in_progress/Tutorial-Start_to_Finish_UnitTest*; do 7 | # ./run_Jupyter_notebook.sh $i # notimer 8 | # cat $i | sed "s/\\\r\\\n/\\\n/g" > $i-new && mv $i-new $i 9 | # git diff $i |grep -v "image/png"|grep -E "^\-|^\+"|grep -v '^\-\-\-'| \ 10 | # grep -v "metadata\":"|grep -v "\"execution\":"|grep -v "\"iopub."| \ 11 | # grep -v "\"shell.execute"|grep -v "\"version\":"|grep -v " }"$|grep -v " },"$| cdiff |cat 12 | # done 13 | 14 | # Skip Baikal and all Start-to-Finish notebooks, except ScalarWave for now 15 | # Tutorial-Start_to_Finish-ScalarWave*.ipynb 16 | # Let's try all but Psi4 Start-to-Finish and Baikal notebooks. 
Also cmdlinehelper yields whitespace differences in Python 2.7 17 | for i in Tutorial-[A]*.ipynb Tutorial-[C-RT-Z]*.ipynb Tutorial-B[B-Z]*.ipynb Tutorial-S[A-SU-Z]*.ipynb Tutorial-Start_to_Finish-*[^4].ipynb NRPyPN/PN*.ipynb; do 18 | # For some reason (as of ~July 20, 2020) the hydro-without-hydro notebook takes too long in Travis, causing a timeout: 19 | # Also as of Aug 3, 2020 the new WaveToyNRPy notebook is broken, as it seems Travis doesn't support parallel codegens 20 | if [ $i != "Tutorial-Start_to_Finish-BSSNCurvilinear-Neutron_Star-Hydro_without_Hydro.ipynb" ] && [ $i != "Tutorial-ETK_thorn-WaveToyNRPy.ipynb" ] && [ $i != "Tutorial-Start_to_Finish-FishboneMoncriefID_standalone.ipynb" ] && [ $i != "Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide-Psi4_new_way.ipynb" ]; then 21 | ./run_Jupyter_notebook.sh $i # notimer 22 | cat $i | sed "s/\\\r\\\n/\\\n/g" > $i-new && mv $i-new $i 23 | git diff $i |grep -v "image/png"|grep -E "^\-|^\+"|grep -v '^\-\-\-'| \ 24 | grep -v "metadata\":"|grep -v "\"execution\":"|grep -v "\"iopub."| \ 25 | grep -v "\"shell.execute"|grep -v "\"version\":"|grep -v " }"$|grep -v " },"$| \ 26 | grep -v "(BENCH)"|grep -v "(EXEC)"|grep -v "execution_count"|grep -v "\"Overwriting"|grep -v "\"Writing"|cdiff |cat 27 | # git diff $i | grep -v "image/png" | cdiff | cat 28 | # echo Number of lines different in the git diff: `git diff|grep -v image/png|wc -l` 29 | fi 30 | done 31 | 32 | # ./run_Jupyter_notebook.sh Tutorial-Finite_Difference_Derivatives.ipynb && git diff Tutorial-Finite_Difference_Derivatives.ipynb && \ 33 | # ./run_Jupyter_notebook.sh Tutorial-Numerical_Grids.ipynb && git diff Tutorial-Numerical_Grids.ipynb && \ 34 | # ./run_Jupyter_notebook.sh Tutorial-Coutput__Parameter_Interface.ipynb && git diff Tutorial-Coutput__Parameter_Interface.ipynb && \ 35 | # ./run_Jupyter_notebook.sh Tutorial-cmdline_helper.ipynb && git diff Tutorial-cmdline_helper.ipynb && \ 36 | # ./run_Jupyter_notebook.sh Tutorial-Symbolic_Tensor_Rotation.ipynb && git diff Tutorial-Symbolic_Tensor_Rotation.ipynb 37 | -------------------------------------------------------------------------------- /in_progress-GiRaFFE_NRPy/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Fast_Wave.py: -------------------------------------------------------------------------------- 1 | # Step 0: Add NRPy's directory to the path 2 | # https://stackoverflow.com/questions/16780014/import-file-from-parent-directory 3 | import os,sys 4 | nrpy_dir_path = os.path.join("..") 5 | if nrpy_dir_path not in sys.path: 6 | sys.path.append(nrpy_dir_path) 7 | nrpy_dir_path = os.path.join("../..") 8 | if nrpy_dir_path not in sys.path: 9 | sys.path.append(nrpy_dir_path) 10 | 11 | # Step 0.a: Import the NRPy+ core modules and set the reference metric to Cartesian 12 | import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends 13 | import NRPy_param_funcs as par # NRPy+: Parameter interface 14 | import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support 15 | import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Common_Functions as gfcf # Some useful functions for GiRaFFE initial data. 16 | 17 | import reference_metric as rfm # NRPy+: Reference metric support 18 | par.set_parval_from_str("reference_metric::CoordSystem","Cartesian") 19 | rfm.reference_metric() 20 | 21 | # Step 1a: Set commonly used parameters. 
22 | thismodule = __name__ 23 | 24 | import Min_Max_and_Piecewise_Expressions as noif 25 | bound = sp.Rational(1,10) 26 | 27 | def Ax_FW(x,y,z, **params): 28 | return sp.sympify(0) 29 | 30 | def Ay_FW(x,y,z, **params): 31 | return sp.sympify(0) 32 | 33 | def Az_FW(x,y,z, **params): 34 | # A_z = y+ (-x-0.0075) if x <= -0.1 35 | # (0.75x^2 - 0.85x) if -0.1 < x <= 0.1 36 | # (-0.7x-0.0075) if x > 0.1 37 | Azleft = y - x - sp.Rational(75,10000) 38 | Azcenter = y + sp.Rational(75,100)*x*x - sp.Rational(85,100)*x 39 | Azright = y - sp.Rational(7,10)*x - sp.Rational(75,10000) 40 | 41 | out = noif.coord_leq_bound(x,-bound)*Azleft\ 42 | +noif.coord_greater_bound(x,-bound)*noif.coord_leq_bound(x,bound)*Azcenter\ 43 | +noif.coord_greater_bound(x,bound)*Azright 44 | return out 45 | 46 | def ValenciavU_func_FW(**params): 47 | # B^x(0,x) = 1.0 48 | # B^y(0,x) = 1.0 if x <= -0.1 49 | # 1.0-1.5(x+0.1) if -0.1 < x <= 0.1 50 | # 0.7 if x > 0.1 51 | # B^z(0,x) = 0 52 | x = rfm.xx_to_Cart[0] 53 | y = rfm.xx_to_Cart[1] 54 | 55 | Byleft = sp.sympify(1) 56 | Bycenter = sp.sympify(1) - sp.Rational(15,10)*(x+sp.Rational(1,10)) 57 | Byright = sp.Rational(7,10) 58 | 59 | BU = ixp.zerorank1() 60 | BU[0] = sp.sympify(1) 61 | BU[1] = noif.coord_leq_bound(x,-bound)*Byleft\ 62 | +noif.coord_greater_bound(x,-bound)*noif.coord_leq_bound(x,bound)*Bycenter\ 63 | +noif.coord_greater_bound(x,bound)*Byright 64 | BU[2] = 0 65 | 66 | # E^x(0,x) = 0.0 , E^y(x) = 0.0 , E^z(x) = -B^y(0,x) 67 | EU = ixp.zerorank1() 68 | EU[0] = sp.sympify(0) 69 | EU[1] = sp.sympify(0) 70 | EU[2] = -BU[1] 71 | 72 | # In flat space, ED and EU are identical, so we can still use this function. 73 | return gfcf.compute_ValenciavU_from_ED_and_BU(EU, BU) -------------------------------------------------------------------------------- /nbconvert_latex_settings/document_contents.tex.j2: -------------------------------------------------------------------------------- 1 | ((*- extends 'display_priority.j2' -*)) 2 | 3 | %=============================================================================== 4 | % Support blocks 5 | %=============================================================================== 6 | 7 | % Displaying simple data text 8 | ((* block data_text *)) 9 | \begin{Verbatim}[commandchars=\\\{\}] 10 | ((( output.data['text/plain'] | escape_latex | ansi2latex ))) 11 | \end{Verbatim} 12 | ((* endblock data_text *)) 13 | 14 | % Display python error text with colored frame (saves printer ink vs bkgnd) 15 | ((* block error *)) 16 | \begin{Verbatim}[commandchars=\\\{\}, frame=single, framerule=2mm, rulecolor=\color{outerrorbackground}] 17 | (((- super() ))) 18 | \end{Verbatim} 19 | ((* endblock error *)) 20 | % Display error lines with coloring 21 | ((*- block traceback_line *)) 22 | ((( line | escape_latex | ansi2latex ))) 23 | ((*- endblock traceback_line *)) 24 | 25 | % Display stream ouput with coloring 26 | ((* block stream *)) 27 | \begin{Verbatim}[commandchars=\\\{\}] 28 | ((( output.text | escape_latex | ansi2latex ))) 29 | \end{Verbatim} 30 | ((* endblock stream *)) 31 | 32 | % Display latex 33 | ((* block data_latex -*)) 34 | ((( output.data['text/latex'] | strip_files_prefix ))) 35 | ((* endblock data_latex *)) 36 | 37 | % Display markdown 38 | ((* block data_markdown -*)) 39 | ((( output.data['text/markdown'] | citation2latex | strip_files_prefix | convert_pandoc('markdown+tex_math_double_backslash', 'latex')))) 40 | ((* endblock data_markdown *)) 41 | 42 | % Default mechanism for rendering figures 43 | ((*- block data_png -*))((( 
draw_figure(output.metadata.filenames['image/png']) )))((*- endblock -*)) 44 | ((*- block data_jpg -*))((( draw_figure(output.metadata.filenames['image/jpeg']) )))((*- endblock -*)) 45 | ((*- block data_svg -*))((( draw_figure(output.metadata.filenames['image/svg+xml']) )))((*- endblock -*)) 46 | ((*- block data_pdf -*))((( draw_figure(output.metadata.filenames['application/pdf']) )))((*- endblock -*)) 47 | 48 | % Draw a figure using the graphicx package. 49 | ((* macro draw_figure(filename) -*)) 50 | ((* set filename = filename | posix_path *)) 51 | ((*- block figure scoped -*)) 52 | \begin{center} 53 | \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{((( filename )))} 54 | \end{center} 55 | { \hspace*{\fill} \\} 56 | ((*- endblock figure -*)) 57 | ((*- endmacro *)) 58 | 59 | % Redirect execute_result to display data priority. 60 | ((* block execute_result scoped *)) 61 | ((* block data_priority scoped *)) 62 | ((( super() ))) 63 | ((* endblock *)) 64 | ((* endblock execute_result *)) 65 | 66 | % Render markdown 67 | ((* block markdowncell scoped *)) 68 | ((( cell.source | citation2latex | strip_files_prefix | convert_pandoc('markdown+tex_math_double_backslash', 'json',extra_args=[]) | resolve_references | convert_pandoc('json','latex')))) 69 | ((* endblock markdowncell *)) 70 | 71 | % Don't display unknown types 72 | ((* block unknowncell scoped *)) 73 | ((* endblock unknowncell *)) 74 | -------------------------------------------------------------------------------- /functional.py: -------------------------------------------------------------------------------- 1 | """ Functional Programming Toolkit """ 2 | # Author: Ken Sible 3 | # Email: ksible *at* outlook *dot* com 4 | 5 | import sys 6 | 7 | def pipe(x, *f): 8 | """ Pipe Operator 9 | 10 | >>> pipe(range(5, 0, -1), reversed, list) 11 | [1, 2, 3, 4, 5] 12 | 13 | >>> pipe([3, 2, 2, 4, 5, 1], sorted, set, list) 14 | [1, 2, 3, 4, 5] 15 | """ 16 | if not f: return x 17 | return pipe(f[0](x), *f[1:]) 18 | 19 | def repeat(f, x, n): 20 | """ Repeat Function 21 | 22 | >>> list(repeat(flatten, [1, 2, [3, [4]], 5], 2)) 23 | [1, 2, 3, 4, 5] 24 | """ 25 | if n == 0: return x 26 | return repeat(f, f(x), n - 1) 27 | 28 | def chain(*iterable): 29 | """ Chain Iterable(s) 30 | 31 | >>> list(chain([1], [2, 3], [4, 5])) 32 | [1, 2, 3, 4, 5] 33 | """ 34 | for iter_ in iterable: 35 | try: iter(iter_) 36 | except TypeError: 37 | iter_ = [iter_] 38 | for element in iter_: 39 | yield element 40 | 41 | def flatten(iterable): 42 | """ Flatten Iterable 43 | 44 | >>> list(flatten([1, [2, 3], [4, 5]])) 45 | [1, 2, 3, 4, 5] 46 | """ 47 | return chain(*iterable) 48 | 49 | def reduce(f, iterable, initializer=None): 50 | """ Reduction Operation 51 | 52 | >>> reduce(lambda x, y: x + y, [1, 2, 3, 4, 5]) 53 | 15 54 | 55 | >>> reduce(lambda x, y: x + y, ['w', 'o', 'r', 'd']) 56 | 'word' 57 | 58 | >>> x = [1, 2, [3, 4], 'aabb'] 59 | >>> reduce(lambda i, _: i + 1, [1] + x[1:]) 60 | 4 61 | """ 62 | iterable = iter(iterable) 63 | result = next(iterable) if initializer is None \ 64 | else initializer 65 | for element in iterable: 66 | result = f(result, element) 67 | return result 68 | 69 | def uniquify(iterable): 70 | """ Uniquify Iterable 71 | 72 | >>> uniquify(([1, 1, 2, 3, 3, 3, 4, 5, 5])) 73 | [1, 2, 3, 4, 5] 74 | """ 75 | return reduce(lambda l, x: l if x in l else l + [x], iterable, []) 76 | 77 | def product(*iterable, **kwargs): 78 | """ Cartesian Product 79 | 80 | >>> list(product(['a', 'b'], [1, 2, 3])) 81 | [('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2), 
('b', 3)] 82 | 83 | >>> list(product([1, 2, 3], repeat=2)) 84 | [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (3, 3)] 85 | 86 | >>> for i, j in product(['a', 'b'], range(1, 3)): 87 | ... print('%s: %d' % (i, j)) 88 | a: 1 89 | a: 2 90 | b: 1 91 | b: 2 92 | """ 93 | if 'repeat' in kwargs: 94 | if kwargs['repeat'] > 1 and len(iterable) == 1: 95 | iterable = kwargs['repeat'] * iterable 96 | f = lambda A, B: [list(flatten([x] + [y])) for x in A for y in B] 97 | for prod in reduce(f, iterable): 98 | yield tuple(prod) 99 | 100 | if __name__ == "__main__": 101 | import doctest 102 | sys.exit(doctest.testmod()[0]) 103 | -------------------------------------------------------------------------------- /in_progress-GiRaFFE_NRPy/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Exact_Wald.py: -------------------------------------------------------------------------------- 1 | # Step 0: Import the NRPy+ core modules and set the reference metric to Cartesian 2 | # Step 0: Add NRPy's directory to the path 3 | # https://stackoverflow.com/questions/16780014/import-file-from-parent-directory 4 | import os,sys 5 | nrpy_dir_path = os.path.join("..") 6 | if nrpy_dir_path not in sys.path: 7 | sys.path.append(nrpy_dir_path) 8 | nrpy_dir_path = os.path.join("../..") 9 | if nrpy_dir_path not in sys.path: 10 | sys.path.append(nrpy_dir_path) 11 | giraffefood_dir_path = os.path.join("GiRaFFEfood_NRPy") 12 | if giraffefood_dir_path not in sys.path: 13 | sys.path.append(giraffefood_dir_path) 14 | 15 | # Step 0.a: Import the NRPy+ core modules and set the reference metric to Cartesian 16 | import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends 17 | import NRPy_param_funcs as par # NRPy+: Parameter interface 18 | import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support 19 | import GiRaFFEfood_NRPy_Common_Functions as gfcf # Some useful functions for GiRaFFE initial data. 20 | 21 | import reference_metric as rfm 22 | par.set_parval_from_str("reference_metric::CoordSystem","Cartesian") 23 | rfm.reference_metric() 24 | 25 | # Step 1a: Set commonly used parameters. 26 | thismodule = __name__ 27 | 28 | # Step 2: Set the vectors A and E in Spherical coordinates 29 | def Ar_EW(r,theta,phi, **params): 30 | return sp.sympify(0) 31 | 32 | def Ath_EW(r,theta,phi, **params): 33 | return sp.sympify(0) 34 | 35 | def Aph_EW(r,theta,phi, **params): 36 | # 1/2 r^2 \sin^2 \theta 37 | return sp.Rational(1,2) * (r * r * sp.sin(theta)**2) 38 | 39 | #Step 3: Compute v^i from A_i and E_i 40 | def ValenciavU_func_EW(**params): 41 | M = params["M"] 42 | gammaDD = params["gammaDD"] # Note that this must use a Cartesian basis! 
43 | sqrtgammaDET = params["sqrtgammaDET"] 44 | KerrSchild_radial_shift = params["KerrSchild_radial_shift"] 45 | r = rfm.xxSph[0] + KerrSchild_radial_shift # We are setting the data up in Shifted Kerr-Schild coordinates 46 | theta = rfm.xxSph[1] 47 | 48 | LeviCivitaTensorUUU = ixp.LeviCivitaTensorUUU_dim3_rank3(sqrtgammaDET) 49 | 50 | AD = gfcf.Axyz_func_spherical(Ar_EW,Ath_EW,Aph_EW,False,**params) 51 | # For the initial data, we can analytically take the derivatives of A_i 52 | ADdD = ixp.zerorank2() 53 | for i in range(3): 54 | for j in range(3): 55 | ADdD[i][j] = sp.simplify(sp.diff(AD[i],rfm.xx_to_Cart[j])) 56 | 57 | BU = ixp.zerorank1() 58 | for i in range(3): 59 | for j in range(3): 60 | for k in range(3): 61 | BU[i] += LeviCivitaTensorUUU[i][j][k] * ADdD[k][j] 62 | 63 | EsphD = ixp.zerorank1() 64 | # 2 M ( 1+ 2M/r )^{-1/2} \sin^2 \theta 65 | EsphD[2] = 2 * M * sp.sin(theta)**2 / sp.sqrt(1+2*M/r) 66 | 67 | ED = rfm.basis_transform_vectorD_from_rfmbasis_to_Cartesian(gfcf.Jac_dUrfm_dDCartUD, EsphD) 68 | 69 | return gfcf.compute_ValenciavU_from_ED_and_BU(ED, BU, gammaDD) 70 | -------------------------------------------------------------------------------- /nbconvert_latex_settings/LICENSE: -------------------------------------------------------------------------------- 1 | # Licensing terms 2 | 3 | This project is licensed under the terms of the Modified BSD License 4 | (also known as New or Revised or 3-Clause BSD), as follows: 5 | 6 | - Copyright (c) 2001-2015, IPython Development Team 7 | - Copyright (c) 2015-, Jupyter Development Team 8 | 9 | All rights reserved. 10 | 11 | Redistribution and use in source and binary forms, with or without 12 | modification, are permitted provided that the following conditions are met: 13 | 14 | Redistributions of source code must retain the above copyright notice, this 15 | list of conditions and the following disclaimer. 16 | 17 | Redistributions in binary form must reproduce the above copyright notice, this 18 | list of conditions and the following disclaimer in the documentation and/or 19 | other materials provided with the distribution. 20 | 21 | Neither the name of the Jupyter Development Team nor the names of its 22 | contributors may be used to endorse or promote products derived from this 23 | software without specific prior written permission. 24 | 25 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 26 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 27 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 28 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 29 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 31 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 32 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 33 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 34 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 | 36 | ## About the Jupyter Development Team 37 | 38 | The Jupyter Development Team is the set of all contributors to the Jupyter project. 39 | This includes all of the Jupyter subprojects. 40 | 41 | The core team that coordinates development on GitHub can be found here: 42 | https://github.com/jupyter/. 43 | 44 | ## Our Copyright Policy 45 | 46 | Jupyter uses a shared copyright model. 
Each contributor maintains copyright 47 | over their contributions to Jupyter. But, it is important to note that these 48 | contributions are typically only changes to the repositories. Thus, the Jupyter 49 | source code, in its entirety is not the copyright of any single person or 50 | institution. Instead, it is the collective copyright of the entire Jupyter 51 | Development Team. If individual contributors want to maintain a record of what 52 | changes/contributions they have specific copyright on, they should indicate 53 | their copyright in the commit message of the change, when they commit the 54 | change to one of the Jupyter repositories. 55 | 56 | With this in mind, the following banner should be used in any source code file 57 | to indicate the copyright and license terms: 58 | 59 | # Copyright (c) Jupyter Development Team. 60 | # Distributed under the terms of the Modified BSD License. -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/4thOrder_ConvergenceTests/planewave_along_3D_diagonal-dx_0.2__FD4-RK4.par: -------------------------------------------------------------------------------- 1 | 2 | ActiveThorns = " 3 | Boundary 4 | CartGrid3d 5 | CoordBase 6 | GenericFD 7 | IOASCII 8 | IOUtil 9 | IOBasic 10 | MoL 11 | NanChecker 12 | Periodic 13 | PUGH 14 | PUGHReduce 15 | LocalReduce 16 | PUGHSlab 17 | IDScalarWaveNRPy 18 | WaveToyNRPy 19 | Slab 20 | SymBase 21 | Time 22 | NewRad 23 | " 24 | 25 | ############################################################# 26 | # Grid 27 | ############################################################# 28 | 29 | WaveToyNRPy::FD_order = 4 30 | 31 | CoordBase::boundary_size_x_lower = 2 32 | CoordBase::boundary_size_y_lower = 2 33 | CoordBase::boundary_size_z_lower = 2 34 | 35 | CoordBase::boundary_size_x_upper = 2 36 | CoordBase::boundary_size_y_upper = 2 37 | CoordBase::boundary_size_z_upper = 2 38 | 39 | # Size of the grid (including boundary points) 40 | PUGH::global_nx = 129 41 | PUGH::global_ny = 129 42 | PUGH::global_nz = 129 43 | 44 | PUGH::ghost_size = 2 45 | 46 | CartGrid3D::type = "byrange" 47 | CartGrid3D::avoid_origin = "no" 48 | 49 | CartGrid3D::xmin = -12.8 50 | CartGrid3D::ymin = -12.8 51 | CartGrid3D::zmin = -12.8 52 | CartGrid3D::xmax = 12.8 53 | CartGrid3D::ymax = 12.8 54 | CartGrid3D::zmax = 12.8 55 | 56 | ############################################################# 57 | # Time integration 58 | ############################################################# 59 | 60 | Cactus::terminate = "time" 61 | Cactus::cctk_final_time = 3 62 | 63 | Time::dtfac = 0.5 64 | MethodOfLines::ode_method = "RK4" 65 | MethodOfLines::MoL_Intermediate_Steps = 4 66 | MethodOfLines::MoL_Num_Scratch_Levels = 1 67 | MethodOfLines::MoL_NaN_Check = "yes" 68 | 69 | ############################################################# 70 | # Boundary conditions 71 | ############################################################# 72 | 73 | #WaveToyNRPy::evolved_group_bound = "none" 74 | 75 | ############################################################# 76 | # Output 77 | ############################################################# 78 | 79 | IO::out_dir = $parfile 80 | IO::out_fileinfo = "none" 81 | IO::new_filename_scheme = "no" 82 | 83 | IOBasic::outInfo_every = 1 84 | IOBasic::outInfo_vars = "WaveToyNRPy::uuGF" 85 | 86 | 87 | IOASCII::out1D_style = "gnuplot f(t,x)" 88 | IOASCII::out1D_every = 2 89 | IOASCII::out1D_x = "yes" 90 | IOASCII::out1D_y = "no" 91 | IOASCII::out1D_z = "no" 92 | IOASCII::out1D_d = "no" 93 
| IOASCII::out1D_vars = "WaveToyNRPy::uuGF WaveToyNRPy::vvGF" 94 | 95 | ############################################################# 96 | #Initial Data: "PlaneWave" or "SphericalGaussian" 97 | ############################################################# 98 | IDScalarWaveNRPy::initial_data = "PlaneWave" 99 | IDScalarWaveNRPy::kk0 = 0.4 100 | IDScalarWaveNRPy::kk1 = 0.4 101 | IDScalarWaveNRPy::kk2 = 0.4 -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTests/planewave_along_3D_diagonal-dx_0.4__FD8-RK8.par: -------------------------------------------------------------------------------- 1 | 2 | ActiveThorns = " 3 | Boundary 4 | CartGrid3d 5 | CoordBase 6 | GenericFD 7 | IOASCII 8 | IOUtil 9 | IOBasic 10 | MoL 11 | NanChecker 12 | Periodic 13 | PUGH 14 | PUGHReduce 15 | LocalReduce 16 | PUGHSlab 17 | IDScalarWaveNRPy 18 | WaveToyNRPy 19 | Slab 20 | SymBase 21 | Time 22 | NewRad 23 | " 24 | 25 | ############################################################# 26 | # Grid 27 | ############################################################# 28 | 29 | WaveToyNRPy::FD_order = 8 30 | 31 | CoordBase::boundary_size_x_lower = 4 32 | CoordBase::boundary_size_y_lower = 4 33 | CoordBase::boundary_size_z_lower = 4 34 | 35 | CoordBase::boundary_size_x_upper = 4 36 | CoordBase::boundary_size_y_upper = 4 37 | CoordBase::boundary_size_z_upper = 4 38 | 39 | # Size of the grid (including boundary points) 40 | PUGH::global_nx = 69 41 | PUGH::global_ny = 69 42 | PUGH::global_nz = 69 43 | 44 | PUGH::ghost_size = 4 45 | 46 | CartGrid3D::type = "byrange" 47 | CartGrid3D::avoid_origin = "no" 48 | 49 | CartGrid3D::xmin = -13.6 50 | CartGrid3D::ymin = -13.6 51 | CartGrid3D::zmin = -13.6 52 | CartGrid3D::xmax = 13.6 53 | CartGrid3D::ymax = 13.6 54 | CartGrid3D::zmax = 13.6 55 | 56 | ############################################################# 57 | # Time integration 58 | ############################################################# 59 | 60 | Cactus::terminate = "time" 61 | Cactus::cctk_final_time = 3 62 | 63 | Time::dtfac = 0.5 64 | MethodOfLines::ode_method = "RK87" 65 | MethodOfLines::MoL_Intermediate_Steps = 13 66 | MethodOfLines::MoL_Num_Scratch_Levels = 13 67 | MethodOfLines::MoL_NaN_Check = "yes" 68 | 69 | ############################################################# 70 | # Boundary conditions 71 | ############################################################# 72 | 73 | #WaveToyNRPy::evolved_group_bound = "none" 74 | 75 | ############################################################# 76 | # Output 77 | ############################################################# 78 | 79 | IO::out_dir = $parfile 80 | IO::out_fileinfo = "none" 81 | IO::new_filename_scheme = "no" 82 | 83 | IOBasic::outInfo_every = 1 84 | IOBasic::outInfo_vars = "WaveToyNRPy::uuGF" 85 | 86 | 87 | IOASCII::out1D_style = "gnuplot f(t,x)" 88 | IOASCII::out1D_every = 1 89 | IOASCII::out1D_x = "yes" 90 | IOASCII::out1D_y = "no" 91 | IOASCII::out1D_z = "no" 92 | IOASCII::out1D_d = "no" 93 | IOASCII::out1D_vars = "WaveToyNRPy::uuGF WaveToyNRPy::vvGF" 94 | 95 | ############################################################# 96 | #Initial Data: "PlaneWave" or "SphericalGaussian" 97 | ############################################################# 98 | IDScalarWaveNRPy::initial_data = "PlaneWave" 99 | IDScalarWaveNRPy::kk0 = 0.4 100 | IDScalarWaveNRPy::kk1 = 0.4 101 | IDScalarWaveNRPy::kk2 = 0.4 -------------------------------------------------------------------------------- 
/WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTests/planewave_along_3D_diagonal-dx_0.2__FD8-RK8.par: -------------------------------------------------------------------------------- 1 | 2 | ActiveThorns = " 3 | Boundary 4 | CartGrid3d 5 | CoordBase 6 | GenericFD 7 | IOASCII 8 | IOUtil 9 | IOBasic 10 | MoL 11 | NanChecker 12 | Periodic 13 | PUGH 14 | PUGHReduce 15 | LocalReduce 16 | PUGHSlab 17 | IDScalarWaveNRPy 18 | WaveToyNRPy 19 | Slab 20 | SymBase 21 | Time 22 | NewRad 23 | " 24 | 25 | ############################################################# 26 | # Grid 27 | ############################################################# 28 | 29 | WaveToyNRPy::FD_order = 8 30 | 31 | CoordBase::boundary_size_x_lower = 4 32 | CoordBase::boundary_size_y_lower = 4 33 | CoordBase::boundary_size_z_lower = 4 34 | 35 | CoordBase::boundary_size_x_upper = 4 36 | CoordBase::boundary_size_y_upper = 4 37 | CoordBase::boundary_size_z_upper = 4 38 | 39 | # Size of the grid (including boundary points) 40 | PUGH::global_nx = 137 41 | PUGH::global_ny = 137 42 | PUGH::global_nz = 137 43 | 44 | PUGH::ghost_size = 4 45 | 46 | CartGrid3D::type = "byrange" 47 | CartGrid3D::avoid_origin = "no" 48 | 49 | CartGrid3D::xmin = -13.6 50 | CartGrid3D::ymin = -13.6 51 | CartGrid3D::zmin = -13.6 52 | CartGrid3D::xmax = 13.6 53 | CartGrid3D::ymax = 13.6 54 | CartGrid3D::zmax = 13.6 55 | 56 | ############################################################# 57 | # Time integration 58 | ############################################################# 59 | 60 | Cactus::terminate = "time" 61 | Cactus::cctk_final_time = 3 62 | 63 | Time::dtfac = 0.5 64 | MethodOfLines::ode_method = "RK87" 65 | MethodOfLines::MoL_Intermediate_Steps = 13 66 | MethodOfLines::MoL_Num_Scratch_Levels = 13 67 | MethodOfLines::MoL_NaN_Check = "yes" 68 | 69 | ############################################################# 70 | # Boundary conditions 71 | ############################################################# 72 | 73 | #WaveToyNRPy::evolved_group_bound = "none" 74 | 75 | ############################################################# 76 | # Output 77 | ############################################################# 78 | 79 | IO::out_dir = $parfile 80 | IO::out_fileinfo = "none" 81 | IO::new_filename_scheme = "no" 82 | 83 | IOBasic::outInfo_every = 1 84 | IOBasic::outInfo_vars = "WaveToyNRPy::uuGF" 85 | 86 | 87 | IOASCII::out1D_style = "gnuplot f(t,x)" 88 | IOASCII::out1D_every = 2 89 | IOASCII::out1D_x = "yes" 90 | IOASCII::out1D_y = "no" 91 | IOASCII::out1D_z = "no" 92 | IOASCII::out1D_d = "no" 93 | IOASCII::out1D_vars = "WaveToyNRPy::uuGF WaveToyNRPy::vvGF" 94 | 95 | ############################################################# 96 | #Initial Data: "PlaneWave" or "SphericalGaussian" 97 | ############################################################# 98 | IDScalarWaveNRPy::initial_data = "PlaneWave" 99 | IDScalarWaveNRPy::kk0 = 0.4 100 | IDScalarWaveNRPy::kk1 = 0.4 101 | IDScalarWaveNRPy::kk2 = 0.4 -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/4thOrder_ConvergenceTests/sphericalgaussian-dx_0.2__FD4-RK4.par: -------------------------------------------------------------------------------- 1 | 2 | ActiveThorns = " 3 | Boundary 4 | CartGrid3d 5 | CoordBase 6 | GenericFD 7 | IOASCII 8 | IOUtil 9 | IOBasic 10 | MoL 11 | NanChecker 12 | Periodic 13 | PUGH 14 | PUGHReduce 15 | LocalReduce 16 | PUGHSlab 17 | IDScalarWaveNRPy 18 | WaveToyNRPy 19 | Slab 20 | SymBase 21 | Time 22 | NewRad 23 | " 24 | 25 | 
############################################################# 26 | # Grid 27 | ############################################################# 28 | 29 | WaveToyNRPy::FD_order = 4 30 | 31 | CoordBase::boundary_size_x_lower = 2 32 | CoordBase::boundary_size_y_lower = 2 33 | CoordBase::boundary_size_z_lower = 2 34 | 35 | CoordBase::boundary_size_x_upper = 2 36 | CoordBase::boundary_size_y_upper = 2 37 | CoordBase::boundary_size_z_upper = 2 38 | 39 | # Size of the grid (including boundary points) 40 | PUGH::global_nx = 131 41 | PUGH::global_ny = 131 42 | PUGH::global_nz = 131 43 | 44 | PUGH::ghost_size = 2 45 | 46 | CartGrid3D::type = "byrange" 47 | CartGrid3D::avoid_origin = "no" 48 | 49 | CartGrid3D::xmin = -13. 50 | CartGrid3D::ymin = -13. 51 | CartGrid3D::zmin = -13. 52 | CartGrid3D::xmax = 13. 53 | CartGrid3D::ymax = 13. 54 | CartGrid3D::zmax = 13. 55 | 56 | ############################################################# 57 | # Time integration 58 | ############################################################# 59 | 60 | Cactus::terminate = "time" 61 | Cactus::cctk_final_time = 3 62 | 63 | Time::dtfac = 0.5 64 | MethodOfLines::ode_method = "RK4" 65 | MethodOfLines::MoL_Intermediate_Steps = 4 66 | MethodOfLines::MoL_Num_Scratch_Levels = 1 67 | MethodOfLines::MoL_NaN_Check = "yes" 68 | 69 | ############################################################# 70 | # Boundary conditions 71 | ############################################################# 72 | 73 | #WaveToyNRPy::evolved_group_bound = "none" 74 | 75 | ############################################################# 76 | # Output 77 | ############################################################# 78 | 79 | IO::out_dir = $parfile 80 | IO::out_fileinfo = "none" 81 | IO::new_filename_scheme = "no" 82 | 83 | IOBasic::outInfo_every = 1 84 | IOBasic::outInfo_vars = "WaveToyNRPy::uuGF" 85 | 86 | 87 | IOASCII::out1D_style = "gnuplot f(t,x)" 88 | IOASCII::out1D_every = 2 89 | IOASCII::out1D_x = "yes" 90 | IOASCII::out1D_y = "no" 91 | IOASCII::out1D_z = "no" 92 | IOASCII::out1D_d = "no" 93 | IOASCII::out1d_xline_y = 0.2 94 | IOASCII::out1d_xline_z = 0.2 95 | IOASCII::out1D_vars = "WaveToyNRPy::uuGF WaveToyNRPy::vvGF" 96 | 97 | ############################################################# 98 | #Initial Data: "PlaneWave" or "SphericalGaussian" 99 | ############################################################# 100 | IDScalarWaveNRPy::initial_data = "SphericalGaussian" 101 | IDScalarWaveNRPy::sigma = 3.0 -------------------------------------------------------------------------------- /in_progress-NRPyCritCol/ScalarField/ScalarField_Tmunu.py: -------------------------------------------------------------------------------- 1 | # This module provides functions for setting up the energy-momentum 2 | # tensor of a massless Scalar Field as documented in 3 | # Tutorial-ScalarField_Tmunu.ipynb 4 | 5 | # Authors: Leonardo R. Werneck 6 | # wernecklr **at** gmail **dot* com 7 | # Zachariah B. Etienne 8 | 9 | # First we import needed core NRPy+ modules 10 | import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends 11 | import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) 
support 12 | import reference_metric as rfm # NRPy+: Reference metric support 13 | import BSSN.BSSN_quantities as Bq # NRPy+: BSSN quantities 14 | import BSSN.ADM_in_terms_of_BSSN as BtoA # NRPy+: ADM quantities in terms of BSSN quantities 15 | import BSSN.ADMBSSN_tofrom_4metric as ADMg # NRPy+: ADM 4-metric to/from ADM or BSSN quantities 16 | 17 | def ScalarField_Tmunu(): 18 | 19 | global T4UU 20 | 21 | # Step 1.c: Set spatial dimension (must be 3 for BSSN, as BSSN is 22 | # a 3+1-dimensional decomposition of the general 23 | # relativistic field equations) 24 | DIM = 3 25 | 26 | # Step 1.d: Given the chosen coordinate system, set up 27 | # corresponding reference metric and needed 28 | # reference metric quantities 29 | # The following function call sets up the reference metric 30 | # and related quantities, including rescaling matrices ReDD, 31 | # ReU, and hatted quantities. 32 | rfm.reference_metric() 33 | 34 | # Step 1.e: Import all basic (unrescaled) BSSN scalars & tensors 35 | Bq.BSSN_basic_tensors() 36 | alpha = Bq.alpha 37 | betaU = Bq.betaU 38 | 39 | # Step 1.g: Define ADM quantities in terms of BSSN quantities 40 | BtoA.ADM_in_terms_of_BSSN() 41 | gammaDD = BtoA.gammaDD 42 | gammaUU = BtoA.gammaUU 43 | 44 | # Step 1.h: Define scalar field quantitites 45 | sf_dD = ixp.declarerank1("sf_dD") 46 | Pi = sp.Symbol("sfM",real=True) 47 | 48 | # Step 2a: Set up \partial^{t}\varphi = Pi/alpha 49 | sf4dU = ixp.zerorank1(DIM=4) 50 | sf4dU[0] = Pi/alpha 51 | 52 | # Step 2b: Set up \partial^{i}\varphi = -Pi*beta^{i}/alpha + gamma^{ij}\partial_{j}\varphi 53 | for i in range(DIM): 54 | sf4dU[i+1] = -Pi*betaU[i]/alpha 55 | for j in range(DIM): 56 | sf4dU[i+1] += gammaUU[i][j]*sf_dD[j] 57 | 58 | # Step 2c: Set up \partial^{i}\varphi\partial_{i}\varphi = -Pi**2 + gamma^{ij}\partial_{i}\varphi\partial_{j}\varphi 59 | sf4d2 = -Pi**2 60 | for i in range(DIM): 61 | for j in range(DIM): 62 | sf4d2 += gammaUU[i][j]*sf_dD[i]*sf_dD[j] 63 | 64 | # Step 3a: Setting up g^{\mu\nu} 65 | ADMg.g4UU_ito_BSSN_or_ADM("ADM",gammaDD=gammaDD,betaU=betaU,alpha=alpha, gammaUU=gammaUU) 66 | g4UU = ADMg.g4UU 67 | 68 | # Step 3b: Setting up T^{\mu\nu} for a massless scalar field 69 | T4UU = ixp.zerorank2(DIM=4) 70 | for mu in range(4): 71 | for nu in range(4): 72 | T4UU[mu][nu] = sf4dU[mu]*sf4dU[nu] - g4UU[mu][nu]*sf4d2/2 73 | -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTests/sphericalgaussian-dx_0.2__FD8-RK8.par: -------------------------------------------------------------------------------- 1 | 2 | ActiveThorns = " 3 | Boundary 4 | CartGrid3d 5 | CoordBase 6 | GenericFD 7 | IOASCII 8 | IOUtil 9 | IOBasic 10 | MoL 11 | NanChecker 12 | Periodic 13 | PUGH 14 | PUGHReduce 15 | LocalReduce 16 | PUGHSlab 17 | IDScalarWaveNRPy 18 | WaveToyNRPy 19 | Slab 20 | SymBase 21 | Time 22 | NewRad 23 | " 24 | 25 | ############################################################# 26 | # Grid 27 | ############################################################# 28 | 29 | WaveToyNRPy::FD_order = 8 30 | 31 | CoordBase::boundary_size_x_lower = 4 32 | CoordBase::boundary_size_y_lower = 4 33 | CoordBase::boundary_size_z_lower = 4 34 | 35 | CoordBase::boundary_size_x_upper = 4 36 | CoordBase::boundary_size_y_upper = 4 37 | CoordBase::boundary_size_z_upper = 4 38 | 39 | # Size of the grid (including boundary points) 40 | PUGH::global_nx = 139 41 | PUGH::global_ny = 139 42 | PUGH::global_nz = 139 43 | 44 | PUGH::ghost_size = 4 45 | 46 | CartGrid3D::type = "byrange" 
47 | CartGrid3D::avoid_origin = "no" 48 | 49 | CartGrid3D::xmin = -13.8 50 | CartGrid3D::ymin = -13.8 51 | CartGrid3D::zmin = -13.8 52 | CartGrid3D::xmax = 13.8 53 | CartGrid3D::ymax = 13.8 54 | CartGrid3D::zmax = 13.8 55 | 56 | ############################################################# 57 | # Time integration 58 | ############################################################# 59 | 60 | Cactus::terminate = "time" 61 | Cactus::cctk_final_time = 3 62 | 63 | Time::dtfac = 0.5 64 | MethodOfLines::ode_method = "RK87" 65 | MethodOfLines::MoL_Intermediate_Steps = 13 66 | MethodOfLines::MoL_Num_Scratch_Levels = 13 67 | MethodOfLines::MoL_NaN_Check = "yes" 68 | 69 | ############################################################# 70 | # Boundary conditions 71 | ############################################################# 72 | 73 | #WaveToyNRPy::evolved_group_bound = "none" 74 | 75 | ############################################################# 76 | # Output 77 | ############################################################# 78 | 79 | IO::out_dir = $parfile 80 | IO::out_fileinfo = "none" 81 | IO::new_filename_scheme = "no" 82 | 83 | IOBasic::outInfo_every = 1 84 | IOBasic::outInfo_vars = "WaveToyNRPy::uuGF" 85 | 86 | 87 | IOASCII::out1D_style = "gnuplot f(t,x)" 88 | IOASCII::out1D_every = 2 89 | IOASCII::out1D_x = "yes" 90 | IOASCII::out1D_y = "no" 91 | IOASCII::out1D_z = "no" 92 | IOASCII::out1D_d = "no" 93 | IOASCII::out1d_xline_y = 0.2 94 | IOASCII::out1d_xline_z = 0.2 95 | IOASCII::out1D_vars = "WaveToyNRPy::uuGF WaveToyNRPy::vvGF" 96 | 97 | ############################################################# 98 | #Initial Data: "PlaneWave" or "SphericalGaussian" 99 | ############################################################# 100 | IDScalarWaveNRPy::initial_data = "SphericalGaussian" 101 | IDScalarWaveNRPy::sigma = 3.0 -------------------------------------------------------------------------------- /WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTests/sphericalgaussian-dx_0.4__FD8-RK8.par: -------------------------------------------------------------------------------- 1 | 2 | ActiveThorns = " 3 | Boundary 4 | CartGrid3d 5 | CoordBase 6 | GenericFD 7 | IOASCII 8 | IOUtil 9 | IOBasic 10 | MoL 11 | NanChecker 12 | Periodic 13 | PUGH 14 | PUGHReduce 15 | LocalReduce 16 | PUGHSlab 17 | IDScalarWaveNRPy 18 | WaveToyNRPy 19 | Slab 20 | SymBase 21 | Time 22 | NewRad 23 | " 24 | 25 | ############################################################# 26 | # Grid 27 | ############################################################# 28 | 29 | WaveToyNRPy::FD_order = 8 30 | 31 | CoordBase::boundary_size_x_lower = 4 32 | CoordBase::boundary_size_y_lower = 4 33 | CoordBase::boundary_size_z_lower = 4 34 | 35 | CoordBase::boundary_size_x_upper = 4 36 | CoordBase::boundary_size_y_upper = 4 37 | CoordBase::boundary_size_z_upper = 4 38 | 39 | # Size of the grid (including boundary points) 40 | PUGH::global_nx = 70 41 | PUGH::global_ny = 70 42 | PUGH::global_nz = 70 43 | 44 | PUGH::ghost_size = 4 45 | 46 | CartGrid3D::type = "byrange" 47 | CartGrid3D::avoid_origin = "no" 48 | 49 | CartGrid3D::xmin = -13.8 50 | CartGrid3D::ymin = -13.8 51 | CartGrid3D::zmin = -13.8 52 | CartGrid3D::xmax = 13.8 53 | CartGrid3D::ymax = 13.8 54 | CartGrid3D::zmax = 13.8 55 | 56 | ############################################################# 57 | # Time integration 58 | ############################################################# 59 | 60 | Cactus::terminate = "time" 61 | Cactus::cctk_final_time = 3 62 | 63 | Time::dtfac = 0.5 64 | 
MethodOfLines::ode_method = "RK87" 65 | MethodOfLines::MoL_Intermediate_Steps = 13 66 | MethodOfLines::MoL_Num_Scratch_Levels = 13 67 | MethodOfLines::MoL_NaN_Check = "yes" 68 | 69 | ############################################################# 70 | # Boundary conditions 71 | ############################################################# 72 | 73 | #WaveToyNRPy::evolved_group_bound = "none" 74 | 75 | ############################################################# 76 | # Output 77 | ############################################################# 78 | 79 | IO::out_dir = $parfile 80 | IO::out_fileinfo = "none" 81 | IO::new_filename_scheme = "no" 82 | 83 | IOBasic::outInfo_every = 1 84 | IOBasic::outInfo_vars = "WaveToyNRPy::uuGF" 85 | 86 | 87 | IOASCII::out1D_style = "gnuplot f(t,x)" 88 | IOASCII::out1D_every = 1 89 | IOASCII::out1D_x = "yes" 90 | IOASCII::out1D_y = "no" 91 | IOASCII::out1D_z = "no" 92 | IOASCII::out1D_d = "no" 93 | IOASCII::out1d_xline_y = 0.2 94 | IOASCII::out1d_xline_z = 0.2 95 | IOASCII::out1D_vars = "WaveToyNRPy::uuGF WaveToyNRPy::vvGF" 96 | 97 | ############################################################# 98 | #Initial Data: "PlaneWave" or "SphericalGaussian" 99 | ############################################################# 100 | IDScalarWaveNRPy::initial_data = "SphericalGaussian" 101 | IDScalarWaveNRPy::sigma = 3.0 -------------------------------------------------------------------------------- /in_progress-Maxwell/MaxwellVacuum/example_parfiles/maxwell_toroidaldipole-0.25_OB8.par: -------------------------------------------------------------------------------- 1 | 2 | ActiveThorns = " 3 | Boundary 4 | CartGrid3d 5 | CoordBase 6 | GenericFD 7 | IOASCII 8 | IOUtil 9 | IOBasic 10 | MoL 11 | NanChecker 12 | Periodic 13 | PUGH 14 | PUGHReduce 15 | LocalReduce 16 | PUGHSlab 17 | MaxwellVacuumID 18 | MaxwellVacuum 19 | Slab 20 | SymBase 21 | Time 22 | NewRad 23 | " 24 | 25 | ############################################################# 26 | # Grid 27 | ############################################################# 28 | 29 | MaxwellVacuum::FD_order = 4 30 | # Use to set ghost zones and such 31 | 32 | CoordBase::boundary_size_x_lower = 2 33 | CoordBase::boundary_size_y_lower = 2 34 | CoordBase::boundary_size_z_lower = 2 35 | 36 | CoordBase::boundary_size_x_upper = 2 37 | CoordBase::boundary_size_y_upper = 2 38 | CoordBase::boundary_size_z_upper = 2 39 | 40 | # Case: Outer Boundary at 8, 32^3 points (per quadrant) 41 | # Size of the grid (including boundary points) 42 | PUGH::global_nx = 67 43 | PUGH::global_ny = 67 44 | PUGH::global_nz = 67 45 | 46 | PUGH::ghost_size = 2 47 | 48 | CartGrid3D::type = "byrange" 49 | CartGrid3D::avoid_origin = "yes" 50 | CartGrid3D::domain = "full" 51 | 52 | CartGrid3D::xmin = -8.125 53 | CartGrid3D::ymin = -8.125 54 | CartGrid3D::zmin = -8.125 55 | CartGrid3D::xmax = 8.375 56 | CartGrid3D::ymax = 8.375 57 | CartGrid3D::zmax = 8.375 58 | 59 | ############################################################# 60 | # Time integration 61 | ############################################################# 62 | 63 | Cactus::terminate = "time" 64 | Cactus::cctk_final_time = 4.0 65 | 66 | Time::dtfac = 0.5 67 | MethodOfLines::ode_method = "ICN" 68 | MethodOfLines::MoL_Intermediate_Steps = 4 69 | MethodOfLines::MoL_Num_Scratch_Levels = 1 70 | MethodOfLines::MoL_NaN_Check = "yes" 71 | 72 | ############################################################# 73 | # Boundary conditions 74 | ############################################################# 75 | 76 | 
#MaxwellEvol::bound = "radiation" 77 | 78 | ############################################################# 79 | # Output 80 | ############################################################# 81 | 82 | IO::out_dir = $parfile 83 | IO::out_fileinfo = "none" 84 | IO::new_filename_scheme = "no" 85 | 86 | IOBasic::outInfo_every = 1 87 | IOBasic::outInfo_vars = "MaxwellVacuum::DivEIGF MaxwellVacuum::DivEIIGF" 88 | IOBasic::outInfo_reductions = "norm2" 89 | 90 | IOBasic::outScalar_every = 1 91 | IOBasic::outScalar_vars = "MaxwellVacuum::DivEIGF MaxwellVacuum::DivEIIGF" 92 | IOBasic::outScalar_reductions = "norm2" 93 | IOBasic::outScalar_style = "gnuplot" 94 | 95 | ############################################################# 96 | # Initial Data 97 | ############################################################# 98 | MaxwellVacuumID::wavespeed = 1.0 99 | MaxwellVacuumID::amp = 1.0 100 | MaxwellVacuumID::lam = 1.0 101 | -------------------------------------------------------------------------------- /in_progress-Maxwell/MaxwellVacuum/example_parfiles/maxwell_toroidaldipole-0.125_OB4.par: -------------------------------------------------------------------------------- 1 | 2 | ActiveThorns = " 3 | Boundary 4 | CartGrid3d 5 | CoordBase 6 | GenericFD 7 | IOASCII 8 | IOUtil 9 | IOBasic 10 | MoL 11 | NanChecker 12 | Periodic 13 | PUGH 14 | PUGHReduce 15 | LocalReduce 16 | PUGHSlab 17 | MaxwellVacuumID 18 | MaxwellVacuum 19 | Slab 20 | SymBase 21 | Time 22 | NewRad 23 | " 24 | 25 | ############################################################# 26 | # Grid 27 | ############################################################# 28 | 29 | MaxwellVacuum::FD_order = 4 30 | # Use to set ghost zones and such 31 | 32 | CoordBase::boundary_size_x_lower = 2 33 | CoordBase::boundary_size_y_lower = 2 34 | CoordBase::boundary_size_z_lower = 2 35 | 36 | CoordBase::boundary_size_x_upper = 2 37 | CoordBase::boundary_size_y_upper = 2 38 | CoordBase::boundary_size_z_upper = 2 39 | 40 | # Case: Outer Boundary at 4, 32^3 points (per quadrant) 41 | # Size of the grid (including boundary points) 42 | PUGH::global_nx = 67 43 | PUGH::global_ny = 67 44 | PUGH::global_nz = 67 45 | 46 | PUGH::ghost_size = 2 47 | 48 | CartGrid3D::type = "byrange" 49 | CartGrid3D::avoid_origin = "yes" 50 | CartGrid3D::domain = "full" 51 | 52 | CartGrid3D::xmin = -4.0625 53 | CartGrid3D::ymin = -4.0625 54 | CartGrid3D::zmin = -4.0625 55 | CartGrid3D::xmax = 4.1875 56 | CartGrid3D::ymax = 4.1875 57 | CartGrid3D::zmax = 4.1875 58 | 59 | ############################################################# 60 | # Time integration 61 | ############################################################# 62 | 63 | Cactus::terminate = "time" 64 | Cactus::cctk_final_time = 4.0 65 | 66 | Time::dtfac = 0.5 67 | MethodOfLines::ode_method = "ICN" 68 | MethodOfLines::MoL_Intermediate_Steps = 4 69 | MethodOfLines::MoL_Num_Scratch_Levels = 1 70 | MethodOfLines::MoL_NaN_Check = "yes" 71 | 72 | ############################################################# 73 | # Boundary conditions 74 | ############################################################# 75 | 76 | #MaxwellEvol::bound = "radiation" 77 | 78 | ############################################################# 79 | # Output 80 | ############################################################# 81 | 82 | IO::out_dir = $parfile 83 | IO::out_fileinfo = "none" 84 | IO::new_filename_scheme = "no" 85 | 86 | IOBasic::outInfo_every = 1 87 | IOBasic::outInfo_vars = "MaxwellVacuum::DivEIGF MaxwellVacuum::DivEIIGF" 88 | IOBasic::outInfo_reductions = "norm2" 
89 | 90 | IOBasic::outScalar_every = 1 91 | IOBasic::outScalar_vars = "MaxwellVacuum::DivEIGF MaxwellVacuum::DivEIIGF" 92 | IOBasic::outScalar_reductions = "norm2" 93 | IOBasic::outScalar_style = "gnuplot" 94 | 95 | ############################################################# 96 | # Initial Data 97 | ############################################################# 98 | MaxwellVacuumID::wavespeed = 1.0 99 | MaxwellVacuumID::amp = 1.0 100 | MaxwellVacuumID::lam = 1.0 101 | --------------------------------------------------------------------------------
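
A minimal usage sketch for the functional.py toolkit listed above, assuming that module is importable from the working directory (i.e., the repository root is on sys.path); it is not part of the repository and only combines behavior documented by the module's own doctests.

# Minimal sketch (assumption: functional.py from this repository is importable).
from functional import pipe, flatten, repeat, uniquify, product

# pipe() threads a value through a sequence of callables, left to right.
print(pipe([3, 1, 2, 2], sorted, uniquify))        # [1, 2, 3]

# flatten() strips one level of nesting; repeat() applies a function n times.
print(list(flatten([[1, 2], [3], [4, 5]])))        # [1, 2, 3, 4, 5]
print(list(repeat(flatten, [1, [2, [3]]], 2)))     # [1, 2, 3]

# product() mirrors itertools.product, including the repeat keyword.
print(list(product([1, 2], repeat=2)))             # [(1, 1), (1, 2), (2, 1), (2, 2)]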