├── .gitignore ├── README.md ├── alert_schema ├── elasticc.v0_9_1.alert.avsc ├── elasticc.v0_9_1.brokerClassification.avsc ├── elasticc.v0_9_1.diaForcedSource.avsc ├── elasticc.v0_9_1.diaNondetectionLimit.avsc ├── elasticc.v0_9_1.diaObject.avsc ├── elasticc.v0_9_1.diaSource.avsc ├── elasticc.v0_9_1.lvkAlertContent.avsc ├── elasticc.v0_9_1.ssObject.avsc ├── elasticc_origmap.txt └── parse_schema.py ├── jupyter └── sprint_week_2024oct │ ├── elasticc2_demo1.ipynb │ ├── elasticc2_demo2.ipynb │ ├── elasticc2_demo3.ipynb │ ├── elasticc2_schema.txt │ ├── elasticc_sprintweek_2024-10.odp │ └── elasticc_sprintweek_2024-10.pdf ├── kn_skymaps ├── B19-SIM-TEMPLATE-INDEX-KN-INDEX.JSON ├── ELASTICC_TRAIN_KN_B19_MASS_EJECTA_COINC.sqlite ├── ELASTICC_TRAIN_KN_B19_MASS_EJECTA_INJ.sqlite ├── ELASTICC_TRAIN_KN_B19_MASS_EJECTA_MAP.csv ├── ELASTICC_TRAIN_KN_K17_MASS_EJECTA_COINC.sqlite ├── ELASTICC_TRAIN_KN_K17_MASS_EJECTA_INJ.sqlite ├── ELASTICC_TRAIN_KN_K17_MASS_EJECTA_MAP.csv ├── K17-SIM-TEMPLATE-INDEX-KN-INDEX.JSON ├── README.md ├── psd-o4.xml └── scripts │ ├── bayestar-realize-coinc.slurm │ ├── create-inj.sh │ ├── kn-inspinj.py │ ├── kn-inspinj.sh │ ├── kn_rapid_utils.py │ ├── mej_to_masses.py │ ├── mej_to_masses.slurm │ ├── run-bayestar-localize.slurm │ └── skymap-post-proc.py ├── lib_elasticc2 ├── read_snana.py ├── tests │ └── test_read_snana.py └── write_snana_parquet.py ├── model_config ├── SIMGEN_INCLUDE_BULLA-BNS-M2-2COMP.INPUT ├── SIMGEN_INCLUDE_CART-MOSFIT.INPUT ├── SIMGEN_INCLUDE_ILOT-MOSFIT.INPUT ├── SIMGEN_INCLUDE_KN-K17.INPUT ├── SIMGEN_INCLUDE_LCLIB_AGN-LSST.INPUT ├── SIMGEN_INCLUDE_LCLIB_Cepheid-LSST.INPUT ├── SIMGEN_INCLUDE_LCLIB_EB-PHOEBE.INPUT ├── SIMGEN_INCLUDE_LCLIB_MIRA-ISW2011.INPUT ├── SIMGEN_INCLUDE_LCLIB_Mdwarf-LSST.INPUT ├── SIMGEN_INCLUDE_LCLIB_Mdwarf-flare-LSST.INPUT ├── SIMGEN_INCLUDE_LCLIB_RRL-LSST.INPUT ├── SIMGEN_INCLUDE_LCLIB_d-Sct-LSST.INPUT ├── SIMGEN_INCLUDE_LCLIB_dwarf-nova-LSST.INPUT ├── SIMGEN_INCLUDE_LCLIB_uLens-Binary.INPUT ├── 
SIMGEN_INCLUDE_LCLIB_uLens-Single-GenLens.INPUT ├── SIMGEN_INCLUDE_LCLIB_uLens-Single-PyLIMA.INPUT ├── SIMGEN_INCLUDE_PISN-MOSFIT.INPUT ├── SIMGEN_INCLUDE_PISN-STELLA-HECORE.INPUT ├── SIMGEN_INCLUDE_PISN-STELLA-HYDROGENIC.INPUT ├── SIMGEN_INCLUDE_SLSN-I-MOSFIT.INPUT ├── SIMGEN_INCLUDE_SNII-NMF.INPUT ├── SIMGEN_INCLUDE_SNII-Templates.INPUT ├── SIMGEN_INCLUDE_SNIIn-MOSFIT.INPUT ├── SIMGEN_INCLUDE_SNIa-91bg.INPUT ├── SIMGEN_INCLUDE_SNIa-SALT2.INPUT ├── SIMGEN_INCLUDE_SNIax.INPUT ├── SIMGEN_INCLUDE_SNIb-Templates.INPUT ├── SIMGEN_INCLUDE_SNIc-Templates.INPUT ├── SIMGEN_INCLUDE_TDE-MOSFIT.INPUT ├── SIMGEN_INCLUDE_V19_SNII+HostXT.INPUT ├── SIMGEN_INCLUDE_V19_SNIIb+HostXT.INPUT ├── SIMGEN_INCLUDE_V19_SNIIn+HostXT.INPUT ├── SIMGEN_INCLUDE_V19_SNIb+HostXT.INPUT ├── SIMGEN_INCLUDE_V19_SNIc+HostXT.INPUT └── SIMGEN_INCLUDE_V19_SNIcBL+HostXT.INPUT ├── stream-to-zads ├── Dockerfile ├── README.md ├── consume_zads.py ├── fakebroker.py ├── msgconsumer.py ├── read_elasticc_alert_stream.py ├── run-on-brahms.sh └── stream-to-zads.py ├── survey_config ├── SIMGEN_MASTER_LSST_WFD.INPUT ├── SIMGEN_TEMPLATE_LSST.INPUT └── elasticc_origmap.txt ├── taxonomy └── taxonomy.ipynb └── tom_management ├── Dockerfile.pgdump ├── add_elasticc_alerts.py ├── load_elasticc_objecttruth.py ├── load_elasticc_truth.py ├── run_pgdump.sh ├── tom-desc-app.yaml ├── tom-desc-postgres.yaml ├── tomconnection.py └── truthloader.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | **~ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ELAsTiCC 2 | Repo for settings, config, metrics for DESC's Extended LSST Astronomical Time Series Classification Challenge. 
More information can be found on the ELAsTiCC web page at: https://portal.nersc.gov/cfs/lsst/DESC_TD_PUBLIC/ELASTICC/ 3 | 4 | Much of the code supporting the ELAsTiCC and (especially) ELAsTiCC2 campaigns can be found in the repo for the [DESC TOM](https://github.com/LSSTDESC/tom_desc/). Some of the code here is out of date (was used for ELAsTiCC, not ELAsTiCC2). 5 | 6 | ## Taxonomy 7 | Code to generate an illustration of the extendable taxonomy lives in the taxonomy subdirectory, in a [jupyter notebook](https://github.com/LSSTDESC/elasticc/blob/main/taxonomy/taxonomy.ipynb) 8 | 9 | ## Metrics 10 | ELAsTiCC metrics have their own github repository: https://github.com/LSSTDESC/elasticc_metrics/ 11 | 12 | -------------------------------------------------------------------------------- /alert_schema/elasticc.v0_9_1.alert.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "elasticc.v0_9_1", 3 | "type": "record", 4 | "name": "alert", 5 | "doc": "sample avro alert schema v4.1", 6 | "fields": [ 7 | {"name": "alertId", "type": "long", "doc": "unique alert identifer"}, 8 | {"name": "diaSource", "type": "elasticc.v0_9_1.diaSource"}, 9 | {"name": "prvDiaSources", "type": ["null", { 10 | "type": "array", 11 | "items": "elasticc.v0_9_1.diaSource"}], "default": null}, 12 | {"name": "prvDiaForcedSources", "type": ["null", { 13 | "type": "array", 14 | "items": "elasticc.v0_9_1.diaForcedSource"}], "default": null}, 15 | {"name": "diaObject", "type": ["null", "elasticc.v0_9_1.diaObject"], "default": null} 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /alert_schema/elasticc.v0_9_1.brokerClassification.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "elasticc.v0_9_1", 3 | "type": "record", 4 | "name": "brokerClassfication", 5 | "fields": [ 6 | {"name": "alertId", "type": "long", "doc": "unique alert identifer"}, 7 
| {"name": "diaSourceId", "type": "long", "doc": "id of source that triggered this classification"}, 8 | {"name": "elasticcPublishTimestamp", 9 | "type": {"type": "long", "logicalType": "timestamp-millis"}, 10 | "doc": "timestamp from originating ELAsTiCC alert" 11 | }, 12 | {"name": "brokerIngestTimestamp", 13 | "type": ["null", {"type": "long", "logicalType": "timestamp-millis"}], 14 | "doc": "timestamp of broker ingestion of ELAsTiCC alert" 15 | }, 16 | {"name": "brokerName", "type": "string", "doc": "Name of broker (never changes)" }, 17 | {"name": "brokerVersion", "type": "string", "doc": "Version/Release of broker's software" }, 18 | {"name": "classifierName", "type": "string", 19 | "doc": "Name of classifier broker is using, including software version" }, 20 | {"name": "classifierParams", "type": "string", 21 | "doc": "Any classifier parameter information worth noting for this classification" }, 22 | {"name": "classifications", "type": { 23 | "type": "array", 24 | "items": { 25 | "type": "record", 26 | "name": "classificationDict", 27 | "fields": [ 28 | {"name": "classId", "type": "int", "doc": "See https://github.com/LSSTDESC/elasticc/tree/main/taxonomy/taxonomy.ipynb for specification" }, 29 | {"name": "probability", "type": "float", "doc": "0-1" } 30 | ] 31 | } 32 | } 33 | } 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /alert_schema/elasticc.v0_9_1.diaForcedSource.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "elasticc.v0_9_1", 3 | "name": "diaForcedSource", 4 | "type": "record", 5 | "fields": [ 6 | {"name": "diaForcedSourceId", "type": "long"}, 7 | {"name": "diaObjectId", "type": "long"}, 8 | {"name": "midPointTai", "type": "double"}, 9 | {"name": "filterName", "type": "string"}, 10 | {"name": "psFlux", "type": "float"}, 11 | {"name": "psFluxErr", "type": "float"} 12 | ] 13 | } 14 | 
-------------------------------------------------------------------------------- /alert_schema/elasticc.v0_9_1.diaNondetectionLimit.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "elasticc.v0_9_1", 3 | "name": "diaNondetectionLimit", 4 | "type": "record", 5 | "fields": [ 6 | {"name": "ccdVisitId", "type": "long"}, 7 | {"name": "midPointTai", "type": "double"}, 8 | {"name": "filterName", "type": "string"}, 9 | {"name": "diaNoise", "type": "float"} 10 | ] 11 | } 12 | -------------------------------------------------------------------------------- /alert_schema/elasticc.v0_9_1.diaObject.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "elasticc.v0_9_1", 3 | "name": "diaObject", 4 | "type": "record", 5 | "fields": [ 6 | {"name": "diaObjectId", "type": "long"}, 7 | {"name": "simVersion", "type": ["null", "string"], "doc": "diaObject provenance"}, 8 | {"name": "ra", "type": "double"}, 9 | {"name": "decl", "type": "double"}, 10 | {"name": "mwebv", "type": ["null", "float"], "default": null}, 11 | {"name": "mwebv_err", "type": ["null", "float"], "default": null}, 12 | {"name": "z_final", "type": ["null", "float"], "default": null}, 13 | {"name": "z_final_err", "type": ["null", "float"], "default": null}, 14 | {"name": "hostgal_ellipticity", "type": ["null", "float"], "default": null}, 15 | {"name": "hostgal_sqradius", "type": ["null", "float"], "default": null}, 16 | {"name": "hostgal_zspec", "type": ["null", "float"], "default": null}, 17 | {"name": "hostgal_zspec_err", "type": ["null", "float"], "default": null}, 18 | {"name": "hostgal_zphot", "type": ["null", "float"], "default": null}, 19 | {"name": "hostgal_zphot_err", "type": ["null", "float"], "default": null}, 20 | {"name": "hostgal_zphot_q000", "type": ["null", "float"], "default": null}, 21 | {"name": "hostgal_zphot_q010", "type": ["null", "float"], "default": null}, 22 | {"name": 
"hostgal_zphot_q020", "type": ["null", "float"], "default": null}, 23 | {"name": "hostgal_zphot_q030", "type": ["null", "float"], "default": null}, 24 | {"name": "hostgal_zphot_q040", "type": ["null", "float"], "default": null}, 25 | {"name": "hostgal_zphot_q050", "type": ["null", "float"], "default": null}, 26 | {"name": "hostgal_zphot_q060", "type": ["null", "float"], "default": null}, 27 | {"name": "hostgal_zphot_q070", "type": ["null", "float"], "default": null}, 28 | {"name": "hostgal_zphot_q080", "type": ["null", "float"], "default": null}, 29 | {"name": "hostgal_zphot_q090", "type": ["null", "float"], "default": null}, 30 | {"name": "hostgal_zphot_q100", "type": ["null", "float"], "default": null}, 31 | {"name": "hostgal_mag_u", "type": ["null", "float"], "default": null}, 32 | {"name": "hostgal_mag_g", "type": ["null", "float"], "default": null}, 33 | {"name": "hostgal_mag_r", "type": ["null", "float"], "default": null}, 34 | {"name": "hostgal_mag_i", "type": ["null", "float"], "default": null}, 35 | {"name": "hostgal_mag_z", "type": ["null", "float"], "default": null}, 36 | {"name": "hostgal_mag_Y", "type": ["null", "float"], "default": null}, 37 | {"name": "hostgal_ra", "type": ["null", "float"], "default": null}, 38 | {"name": "hostgal_dec", "type": ["null", "float"], "default": null}, 39 | {"name": "hostgal_snsep", "type": ["null", "float"], "default": null}, 40 | {"name": "hostgal_magerr_u", "type": ["null", "float"], "default": null}, 41 | {"name": "hostgal_magerr_g", "type": ["null", "float"], "default": null}, 42 | {"name": "hostgal_magerr_r", "type": ["null", "float"], "default": null}, 43 | {"name": "hostgal_magerr_i", "type": ["null", "float"], "default": null}, 44 | {"name": "hostgal_magerr_z", "type": ["null", "float"], "default": null}, 45 | {"name": "hostgal_magerr_Y", "type": ["null", "float"], "default": null}, 46 | {"name": "hostgal2_ellipticity", "type": ["null", "float"], "default": null}, 47 | {"name": "hostgal2_sqradius", "type": 
["null", "float"], "default": null}, 48 | {"name": "hostgal2_zspec", "type": ["null", "float"], "default": null}, 49 | {"name": "hostgal2_zspec_err", "type": ["null", "float"], "default": null}, 50 | {"name": "hostgal2_zphot", "type": ["null", "float"], "default": null}, 51 | {"name": "hostgal2_zphot_err", "type": ["null", "float"], "default": null}, 52 | {"name": "hostgal2_zphot_q000", "type": ["null", "float"], "default": null}, 53 | {"name": "hostgal2_zphot_q010", "type": ["null", "float"], "default": null}, 54 | {"name": "hostgal2_zphot_q020", "type": ["null", "float"], "default": null}, 55 | {"name": "hostgal2_zphot_q030", "type": ["null", "float"], "default": null}, 56 | {"name": "hostgal2_zphot_q040", "type": ["null", "float"], "default": null}, 57 | {"name": "hostgal2_zphot_q050", "type": ["null", "float"], "default": null}, 58 | {"name": "hostgal2_zphot_q060", "type": ["null", "float"], "default": null}, 59 | {"name": "hostgal2_zphot_q070", "type": ["null", "float"], "default": null}, 60 | {"name": "hostgal2_zphot_q080", "type": ["null", "float"], "default": null}, 61 | {"name": "hostgal2_zphot_q090", "type": ["null", "float"], "default": null}, 62 | {"name": "hostgal2_zphot_q100", "type": ["null", "float"], "default": null}, 63 | {"name": "hostgal2_mag_u", "type": ["null", "float"], "default": null}, 64 | {"name": "hostgal2_mag_g", "type": ["null", "float"], "default": null}, 65 | {"name": "hostgal2_mag_r", "type": ["null", "float"], "default": null}, 66 | {"name": "hostgal2_mag_i", "type": ["null", "float"], "default": null}, 67 | {"name": "hostgal2_mag_z", "type": ["null", "float"], "default": null}, 68 | {"name": "hostgal2_mag_Y", "type": ["null", "float"], "default": null}, 69 | {"name": "hostgal2_ra", "type": ["null", "float"], "default": null}, 70 | {"name": "hostgal2_dec", "type": ["null", "float"], "default": null}, 71 | {"name": "hostgal2_snsep", "type": ["null", "float"], "default": null}, 72 | {"name": "hostgal2_magerr_u", "type": ["null", 
"float"], "default": null}, 73 | {"name": "hostgal2_magerr_g", "type": ["null", "float"], "default": null}, 74 | {"name": "hostgal2_magerr_r", "type": ["null", "float"], "default": null}, 75 | {"name": "hostgal2_magerr_i", "type": ["null", "float"], "default": null}, 76 | {"name": "hostgal2_magerr_z", "type": ["null", "float"], "default": null}, 77 | {"name": "hostgal2_magerr_Y", "type": ["null", "float"], "default": null} 78 | ] 79 | } 80 | -------------------------------------------------------------------------------- /alert_schema/elasticc.v0_9_1.diaSource.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "elasticc.v0_9_1", 3 | "name": "diaSource", 4 | "type": "record", 5 | "fields": [ 6 | {"name": "diaSourceId", "type": "long"}, 7 | {"name": "diaObjectId", "type": ["null", "long"], "default": null}, 8 | {"name": "midPointTai", "type": "double"}, 9 | {"name": "filterName", "type": "string"}, 10 | {"name": "ra", "type": "double"}, 11 | {"name": "decl", "type": "double"}, 12 | {"name": "psFlux", "type": "float"}, 13 | {"name": "psFluxErr", "type": "float"}, 14 | {"name": "snr", "type": "float"} 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /alert_schema/elasticc.v0_9_1.lvkAlertContent.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "elasticc.v0_9_1", 3 | "name": "LVKAlert", 4 | "type": "record", 5 | "fields": [ 6 | {"name": "supereventId", "type": "string", "default": null}, 7 | {"name": "gpstime", "type": "float", "default": null}, 8 | {"name": "skymapFilename", "type": "string", "default": null}, 9 | {"name": "skymapHealpix", "type": { 10 | "type": "bytes", "doc": "multiorder.fits"}} 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /alert_schema/elasticc_origmap.txt: 
-------------------------------------------------------------------------------- 1 | # NOTE -- these are the types used to identify the models that were 2 | # created for the simulation. They are *not* the class IDs to report. 3 | # Those can be found in ../taxonomy 4 | 5 | # TRUE_GENTYPE MODEL_NAME 6 | 10 SNIa-SALT2 7 | 11 SNIa-91bg 8 | 12 SNIax 9 | 10 | 20 SNIb-Templates 11 | 21 SNIb+HostXT_V19 12 | 25 SNIc-Templates 13 | 26 SNIc+HostXT_V19 14 | 27 SNIcBL+HostXT_V19 15 | 16 | 30 SNII-NMF 17 | 31 SNII-Templates 18 | 32 SNII+HostXT_V19 19 | 35 SNIIn-MOSFIT 20 | 36 SNIIn+HostXT_V19 21 | 37 SNIIb+HostXT_V19 22 | 23 | 40 SLSN-I+host 24 | 40 SLSN-I_no_host 25 | 42 TDE 26 | 45 ILOT 27 | 46 CART 28 | 29 | 50 KN_K17 30 | 51 KN_B19 31 | 57 PISN-STELLA-HECORE 32 | 58 PISN-STELLA-HYDROGENIC 33 | 59 PISN-MOSFIT 34 | 35 | 60 AGN 36 | 37 | 71 SL-SNIa 38 | 72 SL-SNII+HostXT_V19 39 | 73 SL-SNIb+HostXT_V19 40 | 74 SL-SNIc+HostXT_V19 41 | 42 | 80 RRL 43 | 82 Mdwarf-flare 44 | 83 EB 45 | 84 dwarf-nova 46 | 87 uLens-Single_PyLIMA 47 | 88 uLens-Single-GenLens 48 | 89 uLens-Binary 49 | 90 Cepheid 50 | 91 d-Sct 51 | 98 Techno-sig 52 | -------------------------------------------------------------------------------- /alert_schema/parse_schema.py: -------------------------------------------------------------------------------- 1 | # A quick-n-dirty script to read a schema to make sure its syntax is OK 2 | 3 | import sys 4 | import fastavro 5 | import fastavro.schema 6 | 7 | if len(sys.argv) != 2: 8 | print( "Usage: python parse_schema.py " ) 9 | sys.exit(1) 10 | 11 | schema = fastavro.schema.load_schema( sys.argv[1] ) 12 | 13 | print( fastavro.schema.to_parsing_canonical_form( schema ) ) 14 | -------------------------------------------------------------------------------- /jupyter/sprint_week_2024oct/elasticc2_schema.txt: -------------------------------------------------------------------------------- 1 | The schema below don't include which things are foreign keys to which 2 | other 
things. Usually the mapping is pretty obvious, as the column name 3 | is the same in both tables. Use the PostgreSQL information_schema 4 | interface if you really need that information (or ask Rob). 5 | 6 | Table "public.elasticc2_diaobject" 7 | Column | Type | Collation | Nullable | Default 8 | ----------------------+------------------+-----------+----------+--------- 9 | diaobject_id | bigint | | not null | 10 | simversion | text | | | 11 | ra | double precision | | not null | 12 | decl | double precision | | not null | 13 | mwebv | real | | | 14 | mwebv_err | real | | | 15 | z_final | real | | | 16 | z_final_err | real | | | 17 | hostgal_ellipticity | real | | | 18 | hostgal_sqradius | real | | | 19 | hostgal_zspec | real | | | 20 | hostgal_zspec_err | real | | | 21 | hostgal_zphot | real | | | 22 | hostgal_zphot_err | real | | | 23 | hostgal_zphot_q000 | real | | | 24 | hostgal_zphot_q010 | real | | | 25 | hostgal_zphot_q020 | real | | | 26 | hostgal_zphot_q030 | real | | | 27 | hostgal_zphot_q040 | real | | | 28 | hostgal_zphot_q050 | real | | | 29 | hostgal_zphot_q060 | real | | | 30 | hostgal_zphot_q070 | real | | | 31 | hostgal_zphot_q080 | real | | | 32 | hostgal_zphot_q090 | real | | | 33 | hostgal_zphot_q100 | real | | | 34 | hostgal_mag_u | real | | | 35 | hostgal_mag_g | real | | | 36 | hostgal_mag_r | real | | | 37 | hostgal_mag_i | real | | | 38 | hostgal_mag_z | real | | | 39 | hostgal_mag_y | real | | | 40 | hostgal_ra | real | | | 41 | hostgal_dec | real | | | 42 | hostgal_snsep | real | | | 43 | hostgal_magerr_u | real | | | 44 | hostgal_magerr_g | real | | | 45 | hostgal_magerr_r | real | | | 46 | hostgal_magerr_i | real | | | 47 | hostgal_magerr_z | real | | | 48 | hostgal_magerr_y | real | | | 49 | hostgal2_ellipticity | real | | | 50 | hostgal2_sqradius | real | | | 51 | hostgal2_zspec | real | | | 52 | hostgal2_zspec_err | real | | | 53 | hostgal2_zphot | real | | | 54 | hostgal2_zphot_err | real | | | 55 | hostgal2_zphot_q000 | real | | | 
56 | hostgal2_zphot_q010 | real | | | 57 | hostgal2_zphot_q020 | real | | | 58 | hostgal2_zphot_q030 | real | | | 59 | hostgal2_zphot_q040 | real | | | 60 | hostgal2_zphot_q050 | real | | | 61 | hostgal2_zphot_q060 | real | | | 62 | hostgal2_zphot_q070 | real | | | 63 | hostgal2_zphot_q080 | real | | | 64 | hostgal2_zphot_q090 | real | | | 65 | hostgal2_zphot_q100 | real | | | 66 | hostgal2_mag_u | real | | | 67 | hostgal2_mag_g | real | | | 68 | hostgal2_mag_r | real | | | 69 | hostgal2_mag_i | real | | | 70 | hostgal2_mag_z | real | | | 71 | hostgal2_mag_y | real | | | 72 | hostgal2_ra | real | | | 73 | hostgal2_dec | real | | | 74 | hostgal2_snsep | real | | | 75 | hostgal2_magerr_u | real | | | 76 | hostgal2_magerr_g | real | | | 77 | hostgal2_magerr_r | real | | | 78 | hostgal2_magerr_i | real | | | 79 | hostgal2_magerr_z | real | | | 80 | hostgal2_magerr_y | real | | | 81 | isddf | boolean | | not null | 82 | Indexes: 83 | "elasticc2_diaobject_pkey" PRIMARY KEY, btree (diaobject_id) 84 | "idx_elasticc2_diaobject_q3c" btree (q3c_ang2ipix(ra, decl)) 85 | 86 | 87 | 88 | Table "public.elasticc2_diasource" 89 | Column | Type | Collation | Nullable | Default 90 | --------------+------------------+-----------+----------+--------- 91 | diasource_id | bigint | | not null | 92 | midpointtai | double precision | | not null | 93 | filtername | text | | not null | 94 | ra | double precision | | not null | 95 | decl | double precision | | not null | 96 | psflux | real | | not null | 97 | psfluxerr | real | | not null | 98 | snr | real | | not null | 99 | diaobject_id | bigint | | | 100 | Indexes: 101 | "elasticc2_diasource_pkey" PRIMARY KEY, btree (diasource_id) 102 | "elasticc2_diasource_diaobject_id_597363da" btree (diaobject_id) 103 | "elasticc2_diasource_midPointTai_f4168933" btree (midpointtai) 104 | "idx_elasticc2_diasource_q3c" btree (q3c_ang2ipix(ra, decl)) 105 | 106 | 107 | 108 | Table "public.elasticc2_diaforcedsource" 109 | Column | Type | Collation | Nullable | 
Default 110 | --------------------+------------------+-----------+----------+--------- 111 | diaforcedsource_id | bigint | | not null | 112 | midpointtai | double precision | | not null | 113 | filtername | text | | not null | 114 | psflux | real | | not null | 115 | psfluxerr | real | | not null | 116 | diaobject_id | bigint | | not null | 117 | Indexes: 118 | "elasticc2_diaforcedsource_pkey" PRIMARY KEY, btree (diaforcedsource_id) 119 | "elasticc2_diaforcedsource_diaobject_id_8de7d36e" btree (diaobject_id) 120 | "elasticc2_diaforcedsource_midPointTai_b15bb818" btree (midpointtai) 121 | 122 | 123 | 124 | Table "public.elasticc2_brokerclassifier" 125 | Column | Type | Collation | Nullable | Default 126 | ------------------+--------------------------+-----------+----------+---------------------------------- 127 | classifier_id | bigint | | not null | generated by default as identity 128 | brokername | character varying(100) | | not null | 129 | brokerversion | text | | | 130 | classifiername | character varying(200) | | not null | 131 | classifierparams | text | | | 132 | modified | timestamp with time zone | | not null | 133 | Indexes: 134 | "elasticc2_brokerclassifier_pkey" PRIMARY KEY, btree (classifier_id) 135 | "elasticc2_b_brokern_177700_idx" btree (brokername, classifiername) 136 | "elasticc2_b_brokern_7aa2ad_idx" btree (brokername, brokerversion) 137 | "elasticc2_b_brokern_de7788_idx" btree (brokername) 138 | "elasticc2_b_brokern_fad924_idx" btree (brokername, brokerversion, classifiername, classifierparams) 139 | 140 | 141 | 142 | Table "public.elasticc2_brokermessage" 143 | Column | Type | Collation | Nullable | Default 144 | --------------------------+--------------------------+-----------+----------+---------------------------------- 145 | brokermessage_id | bigint | | not null | generated by default as identity 146 | streammessage_id | bigint | | | 147 | topicname | character varying(200) | | | 148 | alert_id | bigint | | not null | 149 | diasource_id | 
bigint | | not null | 150 | msghdrtimestamp | timestamp with time zone | | | 151 | descingesttimestamp | timestamp with time zone | | not null | 152 | elasticcpublishtimestamp | timestamp with time zone | | | 153 | brokeringesttimestamp | timestamp with time zone | | | 154 | classid | smallint[] | | not null | 155 | classifier_id | bigint | | not null | 156 | probability | real[] | | not null | 157 | Indexes: 158 | "elasticc2_brokermessage_pkey" PRIMARY KEY, btree (brokermessage_id) 159 | "elasticc2_brokermessage_alert_id_e70d1567" btree (alert_id) 160 | "elasticc2_brokermessage_classifier_id_fc087abe" btree (classifier_id) 161 | "elasticc2_brokermessage_descingesttimestamp_5072267b" btree (descingesttimestamp) 162 | "elasticc2_brokermessage_diasource_id_ea301371" btree (diasource_id) 163 | 164 | 165 | 166 | Table "public.elasticc2_diaobjecttruth" 167 | Column | Type | Collation | Nullable | Default 168 | --------------------+------------------+-----------+----------+--------- 169 | libid | integer | | not null | 170 | sim_searcheff_mask | integer | | not null | 171 | gentype | integer | | not null | 172 | sim_template_index | integer | | not null | 173 | zcmb | real | | not null | 174 | zhelio | real | | not null | 175 | zcmb_smear | real | | not null | 176 | ra | double precision | | not null | 177 | dec | double precision | | not null | 178 | mwebv | real | | not null | 179 | galid | bigint | | | 180 | galzphot | real | | | 181 | galzphoterr | real | | | 182 | galsnsep | real | | | 183 | galsnddlr | real | | | 184 | rv | real | | not null | 185 | av | real | | not null | 186 | mu | real | | not null | 187 | lensdmu | real | | not null | 188 | peakmjd | real | | not null | 189 | mjd_detect_first | double precision | | not null | 190 | mjd_detect_last | double precision | | not null | 191 | dtseason_peak | real | | not null | 192 | peakmag_u | real | | not null | 193 | peakmag_g | real | | not null | 194 | peakmag_r | real | | not null | 195 | peakmag_i | real | | 
not null | 196 | peakmag_z | real | | not null | 197 | peakmag_y | real | | not null | 198 | snrmax | real | | not null | 199 | snrmax2 | real | | not null | 200 | snrmax3 | real | | not null | 201 | nobs | integer | | not null | 202 | nobs_saturate | integer | | not null | 203 | diaobject_id | bigint | | not null | 204 | Indexes: 205 | "elasticc2_diaobjecttruth_pkey" PRIMARY KEY, btree (diaobject_id) 206 | "elasticc2_diaobjecttruth_gentype_2556e91f" btree (gentype) 207 | "elasticc2_diaobjecttruth_mjd_detect_first_0d848f4e" btree (mjd_detect_first) 208 | "elasticc2_diaobjecttruth_mjd_detect_last_0cc4d44e" btree (mjd_detect_last) 209 | "elasticc2_diaobjecttruth_peakmjd_6f57b2d9" btree (peakmjd) 210 | "elasticc2_diaobjecttruth_sim_template_index_ac66e808" btree (sim_template_index) 211 | "elasticc2_diaobjecttruth_zcmb_1c9a663e" btree (zcmb) 212 | "elasticc2_diaobjecttruth_zhelio_0fd095ea" btree (zhelio) 213 | -------------------------------------------------------------------------------- /jupyter/sprint_week_2024oct/elasticc_sprintweek_2024-10.odp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LSSTDESC/elasticc/818ae71e78d20b6f41810d7d6d9108ba4c17bcb1/jupyter/sprint_week_2024oct/elasticc_sprintweek_2024-10.odp -------------------------------------------------------------------------------- /jupyter/sprint_week_2024oct/elasticc_sprintweek_2024-10.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LSSTDESC/elasticc/818ae71e78d20b6f41810d7d6d9108ba4c17bcb1/jupyter/sprint_week_2024oct/elasticc_sprintweek_2024-10.pdf -------------------------------------------------------------------------------- /kn_skymaps/ELASTICC_TRAIN_KN_B19_MASS_EJECTA_COINC.sqlite: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LSSTDESC/elasticc/818ae71e78d20b6f41810d7d6d9108ba4c17bcb1/kn_skymaps/ELASTICC_TRAIN_KN_B19_MASS_EJECTA_COINC.sqlite -------------------------------------------------------------------------------- /kn_skymaps/ELASTICC_TRAIN_KN_B19_MASS_EJECTA_INJ.sqlite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LSSTDESC/elasticc/818ae71e78d20b6f41810d7d6d9108ba4c17bcb1/kn_skymaps/ELASTICC_TRAIN_KN_B19_MASS_EJECTA_INJ.sqlite -------------------------------------------------------------------------------- /kn_skymaps/ELASTICC_TRAIN_KN_K17_MASS_EJECTA_COINC.sqlite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LSSTDESC/elasticc/818ae71e78d20b6f41810d7d6d9108ba4c17bcb1/kn_skymaps/ELASTICC_TRAIN_KN_K17_MASS_EJECTA_COINC.sqlite -------------------------------------------------------------------------------- /kn_skymaps/ELASTICC_TRAIN_KN_K17_MASS_EJECTA_INJ.sqlite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LSSTDESC/elasticc/818ae71e78d20b6f41810d7d6d9108ba4c17bcb1/kn_skymaps/ELASTICC_TRAIN_KN_K17_MASS_EJECTA_INJ.sqlite -------------------------------------------------------------------------------- /kn_skymaps/README.md: -------------------------------------------------------------------------------- 1 | # Creating a map between `SIM_MODEL_INDEX` and the `KN_INDEX` for B19 and K17 models 2 | - For B19, for example, 3 | ``` 4 | grep SED: $PLASTICC_ROOT/model_libs_updates/SIMSED.BULLA-BNS-M2-2COMP/SED.INFO | awk '{ print $3"\t"$4"\t"$5"\t"$6 }' | tee B19-SIM-TEMPLATE-INDEX-KN-INDEX.map 5 | ``` 6 | - For K17, for example 7 | ``` 8 | grep SED: $PLASTICC_ROOT/model_libs/SIMSED.KN-K17/SED.INFO | awk '{ print $3"\t"$4"\t"$5"\t"$6 }' | tee K17-SIM-TEMPLATE-INDEX-KN-INDEX.map 9 | ``` 10 | - Then converted to JSON for ease of use (included in the 
repo). 11 | 12 | 13 | # Steps to create skymaps from KN simulations 14 | Following is brief description of the content of the `scripts` directory. These are used to create skymaps from the SNANA KN header files. The procedure is that mentioned in [Chatterjee et. al (2022)](https://doi.org/10.1093/mnras/stab3023). The skymaps are created using BAYESTAR described in [Singer and Price (2016)](https://doi.org/10.1103/PhysRevD.93.024013). 15 | - First step: map the ejecta mass parameters to binary parameters. Done using `mej_to_masses.py`. See `mej_to_masses.slurm` for CLI. 16 | - Put the binary values in a LIGO light weight table. Done using `kn-inspinj.py` (does not require slurm). See `kn-inspinj.sh`. 17 | - The PSD used for the skymaps involved following BNS inspiral ranges: H1, L1 ~ 180Mpc, V ~ 115 Mpc, K ~ 25 Mpc. 18 | - Run `bayestar-realize-coincs` on the binaries. This tells 1) binaries that are detected 2) provides the SNR time-series. This is done in `bayestar-realize-coinc.slurm`. The result is LIGO LW coinc XML file, which is also converted to an sqlite database for easy querying. It is worth pointing out that the number of KNs which are jointly detected in this exercise is a small subset of the total number of KN in the SNANA header files. 19 | The reason is twofold: 1) In converting from ejecta mass to component mass, we only consider $M_{\text{ej}} < 0.05 M_{\odot}$. This is because we restrict to the SLY equation of state which does not allow for larger ejecta masses. We use an empirical relation based on relation by Dietrich and Ujevic (2017) as implemented in the `gwemlightcurves` project. 2) Out of those lightcurves that satisfy the previous condition, not all pass the detection threshold of a minium of 4 in single detector and 8 in the network they are detected in, which at least involves two detectors. 20 | - Run `bayestar-localize-coincs` on the recovered SNR time-series. This is done in `run-bayestar-localize.slurm`. 
21 | - Output files have filenames as: `0.fits`, `1.fits` etc. The number corresponds to the row number in the `sim_inspiral` table of the output XML/sqlite produced. To join the GW and EM parameters i.e. the sqlite and SNANA FITS headers, perform a join on the `source` column of the `sim_inspiral` table and the `SNID` column of the header. 22 | Code snippet 23 | ``` 24 | >>> # load the SNANA header files for, say, B19 models 25 | >>> import glob, sqlite3 26 | >>> import pandas as pd 27 | >>> from astropy.table import Table 28 | >>> fnames = glob.glob('TRAINING_SAMPLES/ELASTICC_TRAIN_KN_B19/*HEAD*FITS.gz') 29 | >>> header_files = [] 30 | >>> for fname in fnames: 31 | ... header_files.append(Table.read(fname, format='fits').to_pandas()) 32 | >>> df_lightcurves = pd.concat(header_files, ignore_index=True) 33 | 34 | >>> # load the GW coincidence properties 35 | >>> with sqlite3.connect('ELASTICC_KN_SKYMAPS/ELASTICC_TRAIN_KN_B19_MASS_EJECTA_COINC.sqlite') as conn: 36 | ... df_gw = pd.read_sql("SELECT * FROM sim_inspiral", conn) 37 | >>> # create a SNID column in the GW dataframe for joining 38 | >>> df_gw['SNID'] = df_gw.source.apply(lambda x: x.strip('GAL')).astype(int) 39 | >>> df_lightcurves['SNID'] = df_lightcurves.SNID.astype(int) 40 | >>> df_coinc = df_gw.merge(df_lightcurves, on='SNID') 41 | >>> df_coinc.loc[:, ['SNID', 'simulation_id', 'SIM_RA', 'SIM_DEC']].head() 42 | SNID simulation_id SIM_RA SIM_DEC 43 | 0 1335095 0 70.512573 -39.288109 44 | 1 64081858 1 308.673981 -42.913502 45 | 2 2653700 2 113.815804 -46.176556 46 | 3 12352119 3 336.922913 -30.235270 47 | 4 85508186 4 171.113525 -23.298241 48 | ``` 49 | In the above snippet, the value of the `simulation_id` corresponding to the skymap file name corresponding to the `SNID`. For example, `ELASTICC_KN_SKYMAPS/0.fits` corresponds to the SNANA object with `SNID=1335095`. Finally, the filenames are renamed tagging them using the SNID and the MJD from the SNANA headers. This is done using `skymap-postproc.py`. 
50 | -------------------------------------------------------------------------------- /kn_skymaps/scripts/bayestar-realize-coinc.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --qos=shared 3 | #SBATCH --mail-type=FAIL 4 | #SBATCH --mail-user=deep1018@illinois.edu 5 | #SBATCH --time=02:00:00 6 | #SBATCH --nodes=1 7 | #SBATCH --cpus-per-task=30 8 | #SBATCH --mem=32G 9 | #SBATCH --output=./ELASTICC_KN_SKYMAPS/bayestar-realize-coincs.out 10 | #SBATCH --error=./ELASTICC_KN_SKYMAPS/bayestar-realize-coincs.err 11 | 12 | source /global/cfs/cdirs/lsst/groups/TD/setup_td_dev.sh 13 | 14 | # COINCs for the Bulla model 15 | srun bayestar-realize-coincs ELASTICC_KN_SKYMAPS/ELASTICC_TRAIN_KN_B19_MASS_EJECTA_INJ.xml \ 16 | -o ELASTICC_KN_SKYMAPS/ELASTICC_TRAIN_KN_B19_MASS_EJECTA_COINC.xml \ 17 | --reference-psd elasticc-test-data/psd-o4.xml \ 18 | --detector H1 L1 V1 K1 \ 19 | --measurement-error gaussian-noise \ 20 | --snr-threshold 4.0 \ 21 | --net-snr-threshold 8.0 \ 22 | --min-triggers 2 \ 23 | --seed 1234 -j 29 24 | 25 | # COINCs for the Kasen model 26 | srun bayestar-realize-coincs ELASTICC_KN_SKYMAPS/ELASTICC_TRAIN_KN_K17_MASS_EJECTA_INJ.xml \ 27 | -o ELASTICC_KN_SKYMAPS/ELASTICC_TRAIN_KN_K17_MASS_EJECTA_COINC.xml \ 28 | --reference-psd elasticc-test-data/psd-o4.xml \ 29 | --detector H1 L1 V1 K1 \ 30 | --measurement-error gaussian-noise \ 31 | --snr-threshold 4.0 \ 32 | --net-snr-threshold 8.0 \ 33 | --min-triggers 2 \ 34 | --seed 1234 -j 29 35 | -------------------------------------------------------------------------------- /kn_skymaps/scripts/create-inj.sh: -------------------------------------------------------------------------------- 1 | lalapps_inspinj \ 2 | -o inj.xml \ 3 | --m-distr source \ 4 | --mass-file masses.dat \ 5 | --disable-spin \ 6 | --t-distr uniform \ 7 | --gps-start-time 1000000000 \ 8 | --gps-end-time 1000400000 \ 9 | --time-step 200 \ 10 | --l-distr source \ 11 | --source-file 
locations.dat \ 12 | --i-distr uniform \ 13 | --f-lower 30 --disable-spin \ 14 | --waveform TaylorF2threePointFivePN \ 15 | --d-distr source \ 16 | --disable-milkyway \ 17 | --seed 1234 18 | #--sourcecomplete \ 19 | #--seed 1234 20 | #--fixed-mass1 1.4 --fixed-mass2 1.4 \ 21 | #--min-distance 50e3 --max-distance 400e3 \ 22 | #--m-distr componentMass --min-mass1 1.0 --max-mass1 2.5 --min-mass2 1.0 --max-mass2 2.5 \ 23 | # --max-mtotal 4.0 \ 24 | -------------------------------------------------------------------------------- /kn_skymaps/scripts/kn-inspinj.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | 3 | from ligo.lw import ligolw, lsctables 4 | from gwpy import table, time 5 | import numpy as np 6 | 7 | parser = ArgumentParser( 8 | "Construct an inj.xml for bayestar-realize-coinc" 9 | ) 10 | parser.add_argument("-i", "--input", required=True, 11 | help="CSV output table of mej_to_masses.py" 12 | ) 13 | parser.add_argument("-o", "--output", required=True, 14 | help="Output table in LIGOLW XML format" 15 | ) 16 | parser.add_argument("--gpsstart", type=int, default=1000000000, 17 | help="Assign a gpstime start and step time for injections." 18 | " Not required. Only used if input is missing a `first_detection` entry." 19 | ) 20 | parser.add_argument("--gpsstep", type=int, default=200, 21 | help="Time step between injetions. Not required. " 22 | "Only used if input is missing a `first_detection` entry." 23 | ) 24 | parser.add_argument("--sample-process-table", default='process-tables.xml', 25 | help="Sample LIGOLW process and process_params table." 
26 | ) 27 | 28 | args = parser.parse_args() 29 | 30 | # create and write a fiducial process and 31 | # process_params table for bayestar-realize-coincs 32 | #process_table = lsctables.ProcessTable() 33 | #process_params_table = lsctables.ProcessParamsTable() 34 | process_table = table.Table.read(args.sample_process_table, 35 | tablename='process') 36 | process_params_table = table.Table.read( 37 | args.sample_process_table, tablename='process_params' 38 | ) 39 | process_table.write( 40 | args.output, format='ligolw', tablename='process', 41 | append=True, overwrite=True, ilwdchar_compat=False 42 | ) 43 | process_params_table.write( 44 | args.output, format='ligolw', tablename='process_params', 45 | append=True, overwrite=True, ilwdchar_compat=False 46 | ) 47 | # load the input table 48 | data_table = table.Table.read(args.input) 49 | # various quantities that go into the XML files 50 | num_inj = len(data_table) 51 | process_id = [lsctables.ProcessID(0) for ii in range(num_inj)] 52 | waveform = ['TaylorF2threePointFivePN'] * num_inj 53 | try: 54 | geocent_end_time = data_table['first_detection'] 55 | geocent_end_time -= 7200 # subtract an ad-hoc delay 56 | except KeyError: 57 | # fiducial gpstime for the injections 58 | geocent_end_time = [args.gpsstart + args.gpsstep*ii for ii in range(num_inj)] 59 | geocent_end_time_ns = np.zeros(num_inj) 60 | 61 | h_end_time = geocent_end_time.copy() 62 | h_end_time_ns = geocent_end_time_ns.copy() 63 | l_end_time = geocent_end_time.copy() 64 | l_end_time_ns = geocent_end_time_ns.copy() 65 | g_end_time = geocent_end_time.copy() 66 | g_end_time_ns = geocent_end_time_ns.copy() 67 | t_end_time = geocent_end_time.copy() 68 | t_end_time_ns = geocent_end_time_ns.copy() 69 | v_end_time = geocent_end_time.copy() 70 | v_end_time_ns = geocent_end_time_ns.copy() 71 | 72 | end_time_gmst = [ 73 | time.Time(time.from_gps(ii)).jd for ii in geocent_end_time 74 | ] 75 | source = [f'GAL{ii}' for ii in data_table['snid']] 76 | 77 | mass1 = 
data_table['mass1'].data 78 | mass2 = data_table['mass2'].data 79 | 80 | mchirp = data_table['chirp_mass'].data 81 | eta = mass1 * mass2 / (mass1 + mass2)**2 82 | distance = data_table['distance'].data 83 | longitude = data_table['ra'] / 180. * np.pi 84 | latitude = data_table['dec'] / 180. * np.pi 85 | 86 | inclination = np.arccos(data_table['costheta']) 87 | # for kasen models, the costheta column is cannibalized to 88 | # add lanthanide fraction of the ejecta. 89 | if np.all(np.isnan(inclination)): 90 | inclination = np.arccos(np.random.uniform(-1, 1, num_inj)) 91 | 92 | coa_phase = np.random.uniform(0, 2*np.pi, num_inj) 93 | polarization = np.random.uniform(0, 2*np.pi, num_inj) 94 | 95 | psi0 = np.zeros(num_inj) 96 | psi3 = np.zeros(num_inj) 97 | alpha = np.zeros(num_inj) 98 | alpha1 = np.zeros(num_inj) 99 | alpha2 = np.zeros(num_inj) 100 | alpha3 = np.zeros(num_inj) 101 | alpha4 = np.zeros(num_inj) 102 | alpha5 = np.zeros(num_inj) 103 | alpha6 = np.zeros(num_inj) 104 | beta = np.zeros(num_inj) 105 | spin1x = np.zeros(num_inj) 106 | spin1y = np.zeros(num_inj) 107 | spin1z = np.zeros(num_inj) 108 | spin2x = np.zeros(num_inj) 109 | spin2y = np.zeros(num_inj) 110 | spin2z = np.zeros(num_inj) 111 | 112 | theta0 = np.zeros(num_inj) 113 | phi0 = np.zeros(num_inj) 114 | f_lower = 20 * np.ones(num_inj) 115 | f_final = np.zeros(num_inj) 116 | 117 | numrel_mode_min = np.zeros(num_inj) 118 | numrel_mode_max = np.zeros(num_inj) 119 | numrel_data = [" "] * num_inj 120 | amp_order = [-1] * num_inj 121 | taper = ['TAPER_NONE'] * num_inj 122 | 123 | bandpass = np.zeros(num_inj) 124 | simulation_id = [lsctables.SimInspiralID(ii) for ii in range(num_inj)] 125 | 126 | eff_dist_h = distance.copy() 127 | eff_dist_l = distance.copy() 128 | eff_dist_g = distance.copy() 129 | eff_dist_t = distance.copy() 130 | eff_dist_v = distance.copy() 131 | 132 | new_table = table.Table( 133 | [table.Column(waveform, dtype='str', name='waveform'), 134 | table.Column(geocent_end_time, 
name='geocent_end_time', dtype='int'), 135 | table.Column(geocent_end_time_ns, name='geocent_end_time_ns', dtype='int'), 136 | table.Column(h_end_time, name='h_end_time', dtype='int'), 137 | table.Column(g_end_time, name='g_end_time', dtype='int'), 138 | table.Column(l_end_time, name='l_end_time', dtype='int'), 139 | table.Column(v_end_time, name='v_end_time', dtype='int'), 140 | table.Column(t_end_time, name='t_end_time', dtype='int'), 141 | table.Column(h_end_time_ns, name='h_end_time_ns', dtype='int'), 142 | table.Column(g_end_time_ns, name='g_end_time_ns', dtype='int'), 143 | table.Column(l_end_time_ns, name='l_end_time_ns', dtype='int'), 144 | table.Column(v_end_time_ns, name='v_end_time_ns', dtype='int'), 145 | table.Column(t_end_time_ns, name='t_end_time_ns', dtype='int'), 146 | table.Column(end_time_gmst, name='end_time_gmst', dtype='int'), 147 | table.Column(source, name='source', dtype='str'), 148 | table.Column(mass1, name='mass1', dtype='float'), 149 | table.Column(mass2, name='mass2', dtype='float'), 150 | table.Column(mchirp, name='mchirp', dtype='float'), 151 | table.Column(eta, name='eta', dtype='float'), 152 | table.Column(distance, name='distance', dtype='float'), 153 | table.Column(eff_dist_g, name='eff_dist_g', dtype='float'), 154 | table.Column(eff_dist_h, name='eff_dist_h', dtype='float'), 155 | table.Column(eff_dist_l, name='eff_dist_l', dtype='float'), 156 | table.Column(eff_dist_v, name='eff_dist_v', dtype='float'), 157 | table.Column(eff_dist_t, name='eff_dist_t', dtype='float'), 158 | table.Column(coa_phase, name='coa_phase', dtype='float'), 159 | table.Column(longitude, name='longitude', dtype='float'), 160 | table.Column(latitude, name='latitude', dtype='float'), 161 | table.Column(inclination, name='inclination', dtype='float'), 162 | table.Column(polarization, name='polarization', dtype='float'), 163 | table.Column(psi0, name='psi0', dtype='float'), 164 | table.Column(psi3, name='psi3', dtype='float'), 165 | table.Column(spin1x, 
name='spin1x', dtype='float'), 166 | table.Column(spin1y, name='spin1y', dtype='float'), 167 | table.Column(spin1z, name='spin1z', dtype='float'), 168 | table.Column(spin2x, name='spin2x', dtype='float'), 169 | table.Column(spin2y, name='spin2y', dtype='float'), 170 | table.Column(spin2z, name='spin2z', dtype='float'), 171 | table.Column(f_final, name='f_final', dtype='float'), 172 | table.Column(f_lower, name='f_lower', dtype='float'), 173 | table.Column(alpha, name='alpha', dtype='float'), 174 | table.Column(alpha1, name='alpha1', dtype='float'), 175 | table.Column(alpha2, name='alpha2', dtype='float'), 176 | table.Column(alpha3, name='alpha3', dtype='float'), 177 | table.Column(alpha4, name='alpha4', dtype='float'), 178 | table.Column(alpha5, name='alpha5', dtype='float'), 179 | table.Column(alpha6, name='alpha6', dtype='float'), 180 | table.Column(beta, name='beta', dtype='float'), 181 | table.Column(theta0, name='theta0', dtype='float'), 182 | table.Column(phi0, name='phi0', dtype='float'), 183 | table.Column(amp_order, name='amp_order', dtype='int'), 184 | table.Column(numrel_mode_min, name='numrel_mode_min', dtype='int'), 185 | table.Column(numrel_mode_max, name='numrel_mode_max', dtype='int'), 186 | table.Column(numrel_data, name='numrel_data', dtype='str'), 187 | table.Column(bandpass, name='bandpass', dtype='int'), 188 | table.Column(taper, name='taper', dtype='str'), 189 | table.Column(process_id, name='process_id', dtype='object'), 190 | table.Column(simulation_id, name='simulation_id', dtype='object')] 191 | ) 192 | 193 | new_table.write(args.output, format='ligolw', tablename='sim_inspiral', 194 | append=True, overwrite=True) 195 | -------------------------------------------------------------------------------- /kn_skymaps/scripts/kn-inspinj.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python kn-rapid-inspinj.py \ 3 | --input 
ELASTICC_KN_SKYMAPS/ELASTICC_TRAIN_KN_B19_MASS_EJECTA_MAP.csv \ 4 | --output ELASTICC_KN_SKYMAPS/ELASTICC_TRAIN_KN_B19_MASS_EJECTA_INJ.xml \ 5 | --sample-process-table process-tables.xml 6 | 7 | python kn-rapid-inspinj.py \ 8 | --input ELASTICC_KN_SKYMAPS/ELASTICC_TRAIN_KN_K17_MASS_EJECTA_MAP.csv \ 9 | --output ELASTICC_KN_SKYMAPS/ELASTICC_TRAIN_KN_K17_MASS_EJECTA_INJ.xml \ 10 | --sample-process-table process-tables.xml 11 | -------------------------------------------------------------------------------- /kn_skymaps/scripts/kn_rapid_utils.py: -------------------------------------------------------------------------------- 1 | import gzip 2 | import logging 3 | import sqlite3 4 | import pickle 5 | 6 | import numpy as np 7 | import pandas as pd 8 | from scipy import stats 9 | 10 | from ligo.skymap.io import read_sky_map 11 | from ligo.skymap.postprocess import crossmatch 12 | 13 | common_snana_keys = [ 14 | 'SIM_AV', 15 | 'SIM_DEC', 16 | 'SIM_DLMU', 17 | 'SIM_EXPOSURE_g', 18 | 'SIM_EXPOSURE_i', 19 | 'SIM_EXPOSURE_r', 20 | 'SIM_HOSTLIB_GALID', 21 | 'SIM_LENSDMU', 22 | 'SIM_LIBID', 23 | 'SIM_MAGSMEAR_COH', 24 | 'SIM_MODEL_INDEX', 25 | 'SIM_MODEL_NAME', 26 | 'SIM_MWEBV', 27 | 'SIM_NGEN_LIBID', 28 | 'SIM_NOBS_UNDEFINED', 29 | 'SIM_PEAKMAG_g', 30 | 'SIM_PEAKMAG_i', 31 | 'SIM_PEAKMAG_r', 32 | 'SIM_PEAKMJD', 33 | 'SIM_RA', 34 | 'SIM_REDSHIFT_CMB', 35 | 'SIM_REDSHIFT_FLAG', 36 | 'SIM_REDSHIFT_HELIO', 37 | 'SIM_REDSHIFT_HOST', 38 | 'SIM_RV', 39 | 'SIM_SEARCHEFF_MASK', 40 | 'SIM_SUBSAMPLE_INDEX', 41 | 'SIM_TEMPLATE_INDEX', 42 | 'SIM_TYPE_INDEX', 43 | 'SIM_TYPE_NAME', 44 | 'SIM_VPEC', 45 | 'delta_t_g', 46 | 'delta_t_i', 47 | 'delta_t_r', 48 | 'fluxcal_g', 49 | 'fluxcal_i', 50 | 'fluxcal_r', 51 | 'fluxcalerr_g', 52 | 'fluxcalerr_i', 53 | 'fluxcalerr_r', 54 | 'host_photoz', 55 | 'host_specz', 56 | 'libid', 57 | 'mag_g', 58 | 'mag_i', 59 | 'mag_r', 60 | 'magerr_g', 61 | 'magerr_i', 62 | 'magerr_r', 63 | 'magobs_g', 64 | 'magobs_i', 65 | 'magobs_r', 66 | 'median_delta_t_g', 67 | 
def get_sim_coinc_map(db_name):
    """Return the join of sim_inspiral and coinc_inspiral as a dataframe.

    Parameters
    ----------
    db_name : str
        Path to a LIGOLW sqlite database containing `sim_inspiral`
        and `coinc_inspiral` tables.

    Returns
    -------
    pandas.DataFrame
        One row per joined event with columns mass1, mass2, spin1x,
        spin1y, spin1z, distance, source, inclination, simulation_id,
        snr, ifos.
    """
    # NOTE(review): joining sim_inspiral.simulation_id against
    # coinc_inspiral.coinc_event_id assumes the two tables were written
    # with matching integer ids (as bayestar-realize-coincs does) --
    # confirm if the database comes from another producer.
    query = """
    SELECT A.mass1, A.mass2, A.spin1x, A.spin1y, A.spin1z,
    A.distance, A.source, A.inclination, A.simulation_id, B.snr, B.ifos
    FROM sim_inspiral AS A JOIN coinc_inspiral AS B
    WHERE A.simulation_id == B.coinc_event_id;
    """
    # Let pandas name the columns from the SELECT list, and make sure the
    # connection is closed even on error (the original leaked it).
    conn = sqlite3.connect(db_name)
    try:
        return pd.read_sql_query(query, conn)
    finally:
        conn.close()
def add_snid_to_dataframe(sim_coinc_df, colname='snid', source_prefix='GAL'):
    """Add an integer SNID column derived from the `source` column.

    The `source` entries are expected to look like ``'GAL1335095'``:
    a prefix followed by the numeric SNID.

    Parameters
    ----------
    sim_coinc_df : pandas.DataFrame
        Dataframe with a `source` column (e.g. from `get_sim_coinc_map`).
    colname : str
        Name of the column to add. Default ``'snid'``.
    source_prefix : str
        Prefix preceding the numeric SNID in `source`. Default ``'GAL'``.

    Returns
    -------
    pandas.DataFrame
        The input dataframe, modified in place, with the new column.
    """
    # Bug fix: the original ignored both `colname` and `source_prefix`
    # and hard-coded 'snid' / 'GAL'; honor the parameters.
    sim_coinc_df[colname] = [
        int(src.split(source_prefix, 1)[1]) for src in sim_coinc_df.source
    ]
    return sim_coinc_df
in data: 186 | sn = data[idx] 187 | for filt in filters: 188 | sn['mjd_%s' % filt] = np.array(sn[filt]['mjd']) 189 | sn['fluxcal_%s' % filt] = np.array(sn[filt]['fluxcal']) 190 | sn['fluxcalerr_%s' % filt] = np.array(sn[filt]['fluxcalerr']) 191 | 192 | # photflag 193 | sn['photflag_%s' % filt] = np.array(sn[filt]['photflag']) 194 | sn['photprob_%s' % filt] = np.array(sn[filt]['photprob']) 195 | sn['psf_sig1_%s' % filt] = np.array(sn[filt]['psf_sig1']) 196 | sn['sky_sig_%s' % filt] = np.array(sn[filt]['sky_sig']) 197 | sn['zeropt_%s' % filt] = np.array(sn[filt]['zeropt']) 198 | # make mag 199 | sn['mag_%s' % filt] = np.array(-2.5*np.log10(np.abs(sn[filt]['fluxcal'])))+27.5 200 | sn['snr_%s' % filt] = (sn[filt]['fluxcalerr'] / np.abs(sn[filt]['fluxcal'])) 201 | sn['magerr_%s' % filt] = np.array(1.086 * sn['snr_%s' % filt]) 202 | sn['magerr_%s' % filt][sn['magerr_%s' % filt] > 0.5] = 0.5 203 | # find cadence 204 | sn['delta_t_%s' % filt] = [j-i for i, j in zip(sn['mjd_%s' % filt][:-1], sn['mjd_%s' % filt][1:])] 205 | sn['median_delta_t_%s' % filt] = np.array(np.median(sn['delta_t_%s' % filt])) 206 | sn['magobs_%s' % filt] = np.array(np.median(sn['delta_t_%s' % filt])) 207 | # Mask to keep only photflag obs 208 | mask = (sn['magerr_%s' % filt] != 0) & (sn['photflag_%s' % filt] != 0) 209 | sn['snr_%s' % filt] = sn['snr_%s' % filt][mask] 210 | sn['mag_%s' % filt] = sn['mag_%s' % filt][mask] 211 | sn['magerr_%s' % filt] = sn['magerr_%s' % filt][mask] 212 | sn['fluxcal_%s' % filt] = sn['fluxcal_%s' % filt][mask] 213 | sn['fluxcalerr_%s' % filt] = sn['fluxcalerr_%s' % filt][mask] 214 | sn['photflag_%s' % filt] = sn['photflag_%s' % filt][mask] 215 | sn['mjd_%s' % filt] = sn['mjd_%s' % filt][mask] 216 | del sn[filt] 217 | sn.update(sn['header']) 218 | del sn['header'] 219 | 220 | return pd.DataFrame.from_dict(data, orient='index') 221 | 222 | 223 | def get_ligo_skymap_crossmatch(skymap_filename, *args, **kwargs): 224 | skymap = read_sky_map(skymap_filename, moc=True) 225 | 
def get_chirp_mass(mass1, mass2):
    """Return the chirp mass M_c = (m1 * m2)^(3/5) / (m1 + m2)^(1/5)."""
    product = mass1 * mass2
    total = mass1 + mass2
    return product ** 0.6 / total ** 0.2
def get_component_mass(chirp_mass, mass_ratio):
    """Invert the chirp-mass relation for a given mass ratio.

    Here `mass_ratio` is q = m2 / m1 (q <= 1 gives m1 >= m2).
    Returns the tuple (m1, m2).
    """
    total_mass = chirp_mass * (1 + mass_ratio) ** 1.2 / mass_ratio ** 0.6
    primary = total_mass / (1 + mass_ratio)
    secondary = total_mass - primary
    return primary, secondary
/full/path/*HEAD*FITS.gz", 83 | default=None) 84 | parser.add_argument("-o", "--output", required=True, 85 | help="Output csv file having masses and other properties.") 86 | parser.add_argument("--chirp-mass-ul", default=1.6, type=float, 87 | help="Chirp mass upper limit.") 88 | parser.add_argument("--chirp-mass-ll", default=0.87, type=float, 89 | help="Chirp mass lower limit.") 90 | parser.add_argument("--mass-ratio-ul", default=1.0, type=float, 91 | help="Mass ratio upper limit.") 92 | parser.add_argument("--mass-ratio-ll", default=0.5, type=float, 93 | help="Mass ratio lower limit.") 94 | parser.add_argument("--component-mass-ll", default=1.0, type=float, 95 | help="Reject component masses below this limit.") 96 | parser.add_argument("--component-mass-ul", default=2.21, type=float, 97 | help="Reject component masses above this limit.") 98 | parser.add_argument("--eos-name", default="SLY", 99 | help="Equation of state used to compute compactness.") 100 | parser.add_argument( 101 | "--ejecta-mass-threshold", default=0.05, type=float, 102 | help="Leave out entries with ejecta masses above this value.") 103 | parser.add_argument( 104 | "--sed-model", choices=TEMPLATE_INDEX_MODEL_PARAM_MAP.keys(), 105 | help="KN SED model") 106 | parser.add_argument( 107 | "--sim-index-mapping-file", required=True, 108 | help="JSON file that maps SIM_TEMPLATE_INDEX to SED parameters") 109 | parser.add_argument( 110 | "--maxiter", default=2000, type=int, 111 | help="Maximum iterations when searching finding component mass" 112 | ) 113 | parser.add_argument( 114 | "--tolerance", default=0.005, type=float, 115 | help="Tolerance to use to find ejecta mass" 116 | ) 117 | parser.add_argument( 118 | "--ligo-gpstime-format", action='store_true', 119 | help="Add to convert first detection times to LIGOGPS format" 120 | ) 121 | parser.add_argument("--pool", default=1, type=int, 122 | help="Multiprocessing pool count.") 123 | parser.add_argument("--verbose", action='store_true', default=False) 
124 | args = parser.parse_args() 125 | 126 | if args.input: 127 | df = table.Table.read(args.input, format='fits').to_pandas() 128 | elif args.input_files: 129 | fnames = glob(args.input_files) 130 | if args.verbose: 131 | print("Total files selected: ", len(fnames)) 132 | tables = [] 133 | for fname in fnames: 134 | tables.append(table.Table.read(fname, format='fits').to_pandas()) 135 | df = pd.concat(tables, ignore_index=True) 136 | if args.verbose: 137 | print("Total entries before cuts: ", len(df)) 138 | # remove negative redshifts, if any 139 | df = df.loc[df.REDSHIFT_FINAL > 0.] 140 | # load sim index template mapper, get SED parameters, stack to dataframe 141 | sed_model_parameter_dict = TEMPLATE_INDEX_MODEL_PARAM_MAP[args.sed_model] 142 | with open(args.sim_index_mapping_file) as f: 143 | sim_template_index_map = json.load(f) 144 | sed_params = df.SIM_TEMPLATE_INDEX.astype(str).apply(sim_template_index_map.get) 145 | df = pd.concat((df, pd.DataFrame(list(sed_params))), axis=1) 146 | # handle log mass for K17 147 | if args.sed_model == 'K17': 148 | if args.verbose: 149 | print("Performing LOGMASS -> MEJ for K17 model.") 150 | df['MEJ'] = 10**df['LOGMASS'] 151 | # remove entries above the ejecta mass threshold being considered 152 | df = df.loc[df.MEJ <= args.ejecta_mass_threshold] 153 | if args.verbose: 154 | print(f"Total number of entries selected: {len(df)}") 155 | 156 | # extract ejecta mass values and call solver 157 | mej_vals = df['MEJ'].values 158 | func = partial( 159 | find_matching_component_mass, 160 | maxiter=args.maxiter, mchirp_ul=args.chirp_mass_ul, 161 | mchirp_ll=args.chirp_mass_ll, mass_ratio_ul=args.mass_ratio_ul, 162 | mass_ratio_ll=args.mass_ratio_ll, component_mass_ll=args.component_mass_ll, 163 | component_mass_ul=args.component_mass_ul, 164 | tol=args.tolerance, eos=args.eos_name 165 | ) 166 | if args.verbose: 167 | print(f"Running solver with pool size {args.pool}.") 168 | with Pool(args.pool) as pool: 169 | r = list(pool.map(func, 
mej_vals)) # map maintains ordering 170 | if args.verbose: 171 | print("Finished running solver.") 172 | m1_vals, m2_vals = np.array(r).T 173 | df_out = pd.DataFrame( 174 | data=np.array(r), columns=('mass1_source', 'mass2_source') 175 | ) 176 | # add columns from original dataframe 177 | df_out['ra'] = df.SIM_RA.astype(float).values 178 | df_out['dec'] = df.SIM_DEC.astype(float).values 179 | df_out['redshift'] = df.REDSHIFT_FINAL.astype(float).values 180 | df_out['snid'] = df.SNID.astype(int).values 181 | df_out['mej'] = df['MEJ'].values 182 | df_out['costheta'] = df['COSTHETA'].values if args.sed_model=='B19' \ 183 | else np.random.uniform(-1, 1, len(df)) # random inclination for K17; U[cos(iota)] 184 | # add detector-frame masses 185 | df_out['mass1'] = df_out.mass1_source.values * (1 + df.REDSHIFT_FINAL.values) 186 | df_out['mass2'] = df_out.mass2_source.values * (1 + df.REDSHIFT_FINAL.values) 187 | df_out['chirp_mass'] = get_chirp_mass(df_out.mass1.values, df_out.mass2.values) 188 | df_out['first_detection'] = df.PEAKMJD.values 189 | # convert to LIGOGPSTime format if needed 190 | if args.ligo_gpstime_format: 191 | df_out['first_detection'] = [ 192 | time.to_gps(t) for t in time.Time( 193 | df.PEAKMJD, 194 | format='mjd' 195 | ) 196 | ] 197 | # add distance based on Planck18 198 | df_out['distance'] = cosmology.Planck18.luminosity_distance( 199 | df.REDSHIFT_FINAL.values).to('Mpc').value 200 | # throw failed values away; failed instances are hard-coded to zero value 201 | df_out = df_out.loc[df_out.mass1 > 0.] 
202 | df_out.to_csv(args.output, index=False, float_format='%.4f') 203 | 204 | -------------------------------------------------------------------------------- /kn_skymaps/scripts/mej_to_masses.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --qos=debug 3 | #SBATCH --constraint=haswell 4 | #SBATCH --mail-type=FAIL 5 | #SBATCH --mail-user=deep1018@illinois.edu 6 | #SBATCH --time=00:30:00 7 | #SBATCH --nodes=1 8 | #SBATCH --cpus-per-task=40 9 | #SBATCH --mem=16G 10 | #SBATCH --output=./ELASTICC_KN_SKYMAPS/mej_to_masses.out 11 | #SBATCH --error=./ELASTICC_KN_SKYMAPS/mej_to_masses.err 12 | 13 | source /global/cfs/cdirs/lsst/groups/TD/setup_td_dev.sh 14 | 15 | python mej_to_masses.py \ 16 | --input-files "/global/cfs/cdirs/lsst/www/DESC_TD_PUBLIC/ELASTICC/TRAINING_SAMPLES/ELASTICC_TRAIN_KN_K17/*HEAD*FITS.gz" \ 17 | --output "${HOME}/github/uiucsn/retraining-rapid/kn-rapid/utils/ELASTICC_KN_SKYMAPS/ELASTICC_TRAIN_KN_K17_MASS_EJECTA_MAP.csv" \ 18 | --chirp-mass-ul 1.5 \ 19 | --chirp-mass-ll 0.8 \ 20 | --mass-ratio-ul 1.0 \ 21 | --mass-ratio-ll 0.5 \ 22 | --component-mass-ll 1.0 \ 23 | --component-mass-ul 2.1 \ 24 | --sed-model "K17" \ 25 | --sim-index-mapping-file "${HOME}/github/uiucsn/retraining-rapid/kn-rapid/utils/K17-SIM-TEMPLATE-INDEX-KN-INDEX.JSON" \ 26 | --eos-name "SLY" \ 27 | --ejecta-mass-threshold 0.05 \ 28 | --maxiter 2000 \ 29 | --tolerance 1e-3 \ 30 | --ligo-gpstime-format \ 31 | --pool 40 \ 32 | --verbose 33 | 34 | python mej_to_masses.py \ 35 | --input-files "/global/cfs/cdirs/lsst/www/DESC_TD_PUBLIC/ELASTICC/TRAINING_SAMPLES/ELASTICC_TRAIN_KN_B19/*HEAD*FITS.gz" \ 36 | --output "${HOME}/github/uiucsn/retraining-rapid/kn-rapid/utils/ELASTICC_KN_SKYMAPS/ELASTICC_TRAIN_KN_B19_MASS_EJECTA_MAP.csv" \ 37 | --chirp-mass-ul 1.5 \ 38 | --chirp-mass-ll 0.8 \ 39 | --mass-ratio-ul 1.0 \ 40 | --mass-ratio-ll 0.5 \ 41 | --component-mass-ll 1.0 \ 42 | --component-mass-ul 2.1 \ 43 | --sed-model "B19" \ 
from argparse import ArgumentParser
from glob import glob
import os
import sqlite3

from astropy.io import fits
from astropy.table import Table
import pandas as pd


# Rename BAYESTAR skymap fits files (named <simulation_id>.fits) to
# SNID_<snid>_MJD_<mjd>.fits, by joining the GW sim_inspiral table
# with the SNANA header files on SNID.

parser = ArgumentParser(
    "Rename skymaps based on MJD-OBS fits card.")
parser.add_argument("-i", "--snana-header-files",
                    help="SNANA B19/K17 header files. Full path + glob pattern.",
                    required=True)
parser.add_argument("-j", "--skymap-fits-files",
                    help="Skymap fits files. Full path + glob pattern.",
                    required=True)
parser.add_argument("-s", "--sqlite-file",
                    help="SQLite file storing the sim_inspiral/coinc_inspiral tables.")
parser.add_argument("-o", "--output-dir", required=True,
                    help="To store renamed fits files")
parser.add_argument("-v", "--verbose", action='store_true', default=False,
                    help="Add verbosity")
args = parser.parse_args()

# put all SNANA header files in a dataframe
snana_header_files = glob(args.snana_header_files)
if args.verbose:
    print("Loading all SNANA header files")
_header_files = []
for fname in snana_header_files:
    _header_files.append(Table.read(fname, format='fits').to_pandas())
df_lightcurves = pd.concat(_header_files, ignore_index=True)

# load gw coincidence properties
# NOTE: sqlite3's context manager only wraps the transaction; it does not
# close the connection, so close it explicitly when done.
conn = sqlite3.connect(args.sqlite_file)
try:
    df_gw = pd.read_sql("SELECT * FROM sim_inspiral", conn)
finally:
    conn.close()

# create a SNID column in the GW dataframe and join with SNANA headers
if args.verbose:
    print("Joining GW and SNANA info")
# BUGFIX: str.strip('GAL') removes *any* of the characters G/A/L from both
# ends of the string, not the literal 'GAL' prefix.  Remove the prefix
# explicitly instead.
df_gw['SNID'] = df_gw.source.apply(
    lambda x: x[3:] if x.startswith('GAL') else x).astype(int)
df_lightcurves['SNID'] = df_lightcurves.SNID.astype(int)
df_coinc = df_gw.merge(df_lightcurves, on='SNID')

skymap_filenames = glob(args.skymap_fits_files)
assert len(df_coinc) == len(skymap_filenames), ("Number of simulation_ids "
                                                "should match number of skymaps")

if args.verbose:
    print(f"Renaming {len(skymap_filenames)} skymap fits files")
for fname in skymap_filenames:
    if args.verbose:
        print(f"Renaming {fname}")
    # BUGFIX: use splitext rather than strip('.fits'), which strips the
    # character set {'.','f','i','t','s'} from both ends of the name.
    simulation_id = int(os.path.splitext(os.path.basename(fname))[0])
    snid = df_coinc.loc[df_coinc.simulation_id == simulation_id]['SNID']
    assert len(snid) == 1, "Only one SNID should match"
    snid = snid.values[0]

    # Context manager so the HDU list is closed even if writeto raises.
    with fits.open(fname) as hdu:
        # Remove content of ORIGIN card since this is not from LVK
        hdu[1].header['ORIGIN'] = ''
        mjd = hdu[1].header['MJD-OBS']
        new_fname = f"SNID_{snid}_MJD_{mjd}.fits"
        new_path = os.path.join(args.output_dir, new_fname)
        hdu.writeto(new_path)
def test_get_head( esr, wastefulesr ):
    """Spot-check get_head(): error paths, caching behavior, and both return formats."""
    with pytest.raises( ValueError, match='Unknown object class name nonexistent' ):
        esr.get_head( 'nonexistent' )
    with pytest.raises( ValueError, match="Unknown return_format foo" ):
        esr.get_head( 'AGN', return_format='foo' )

    # Nothing should be cached before the first call
    assert esr._head_cache is None
    assert esr._head_cache_class is None

    head = esr.get_head( 'AGN' )
    assert len(head) == 108556
    assert id(head) == id(esr._head_cache)
    assert head['SNID'].dtype == polars.Int64
    assert head['SNID'].min() == 1002462
    assert head['SNID'].max() == 159511074
    assert set( head['file_num'] ) == { f'{i:04}' for i in range(1,41) }

    t0 = time.perf_counter()
    head = wastefulesr.get_head( 'AGN' )
    dt_first = time.perf_counter() - t0
    assert len(head) == 108556
    firstcache = wastefulesr._heads_cache['AGN']
    assert id( firstcache ) == id( head )

    t0 = time.perf_counter()
    head = wastefulesr.get_head( 'AGN' )
    dt_second = time.perf_counter() - t0
    # What's a good timing test?  This isn't going to be easily reproducible, as it depends on filesystem speed.
    assert dt_second < dt_first / 2.
    assert id( wastefulesr._heads_cache['AGN'] ) == id( firstcache )

    head = wastefulesr.get_head( 'CART' )
    assert len(head) == 8926
    assert id( wastefulesr._heads_cache['CART'] ) == id( head )

    head = wastefulesr.get_head( 'AGN' )
    assert id( wastefulesr._heads_cache['AGN'] ) == id( firstcache )

    pahead = esr.get_head( 'AGN', return_format='pandas' )
    assert isinstance( pahead, pandas.DataFrame )
    # BUGFIX: this previously asserted len(head) (the polars frame fetched
    # above), so the pandas return path's length was never actually checked.
    assert len(pahead) == 108556
) 146 | 147 | # Make sure this is consistent with at leaset one case of get_ltcv 148 | snid = ltcvs23[ len(ltcvs23) //2 ]['SNID'] 149 | ltcv = esr.get_ltcv( 'ILOT', snid ) 150 | 151 | assert len( ltcv ) == len( ltcvs23.filter( polars.col('SNID') == snid ) ) 152 | # Sort ltcv the same way ltcvs23 was sorted 153 | ltcv = ltcv.sort( [ 'BAND', 'MJD' ] ) 154 | for col in ltcv.columns: 155 | assert ( ltcv[col] == ltcvs23.filter( polars.col('SNID') == snid )[col] ).all() 156 | 157 | allltcvs = esr.get_all_ltcvs( 'ILOT' ) 158 | assert len(allltcvs) == 295471 159 | for col in ltcv.columns: 160 | assert ( ltcv[col] == allltcvs.filter( polars.col('SNID') == snid )[col] ).all() 161 | 162 | allltcvs = esr.get_all_ltcvs( 'ILOT', agg=True ) 163 | assert len(allltcvs) == 1143 164 | assert set( allltcvs.columns ) == { 'SNID', 'MJD', 'BAND', 'PHOTFLAG', 'PHOTPROB', 'FLUXCAL', 165 | 'FLUXCALERR', 'PSF_SIG1', 'SKY_SIG', 'RDNOISE', 'ZEROPT', 166 | 'ZEROPT_ERR', 'GAIN', 'SIM_MAGOBS' } 167 | assert all( allltcvs[c].dtype==polars.List for c in allltcvs.columns if c != 'SNID' ) 168 | assert allltcvs['SNID'].dtype == polars.Int64 169 | 170 | allltcvs = esr.get_all_ltcvs( 'ILOT', agg=True, include_header=True ) 171 | assert len(allltcvs.columns) == 175 172 | assert allltcvs['SIM_PEAKMAG_g'].dtype == polars.Float32 173 | 174 | allltcvs = esr.get_all_ltcvs( 'ILOT', agg=True, include_truth=True ) 175 | assert len(allltcvs.columns) == 60 176 | assert allltcvs['PEAKMAG_g'].dtype == polars.Float64 177 | 178 | allltcvs = esr.get_all_ltcvs( 'ILOT', agg=True, include_header=True, include_truth=True ) 179 | assert len(allltcvs.columns) == 221 180 | assert allltcvs.select( a=(polars.col('SIM_PEAKMAG_g') - polars.col('PEAKMAG_g')).abs() < 0.0001 )['a'].all() 181 | 182 | 183 | 184 | 185 | -------------------------------------------------------------------------------- /lib_elasticc2/write_snana_parquet.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 
def main():
    """Export aggregated ELAsTiCC2 lightcurves (with header columns joined in)
    to one parquet file per object class under /data/raknop/ELASTICC2_parquet.
    Progress is reported on stderr, one line per class."""
    reader = elasticc2_snana_reader()
    reader.logger.setLevel( logging.DEBUG )
    destination = pathlib.Path( "/data/raknop/ELASTICC2_parquet" )

    object_classes = [ 'AGN', 'CART', 'Cepheid', 'EB', 'ILOT', 'KN_B19', 'KN_K17',
                       'Mdwarf-flare', 'PISN-MOSFIT', 'PISN-STELLA_HECORE',
                       'PISN-STELLA_HYDROGENIC', 'RRL', 'SL-SN1a', 'SL-SNII', 'SL-SNIb',
                       'SL-SNIc', 'SLSN-I+host', 'SLSN-I_no_host', 'SNII+HostXT_V19',
                       'SNII-NMF', 'SNII-Templates', 'SNIIb+HostXT_V19', 'SNIIn+HostXT_V19',
                       'SNIIn-MOSFIT', 'SNIa-91bg', 'SNIa-SALT3', 'SNIax', 'SNIb+HostXT_V19',
                       'SNIb-Templates', 'SNIc+HostXT_V19', 'SNIc-Templates',
                       'SNIcBL+HostXT_V19', 'TDE', 'd-Sct', 'dwarf-nova', 'uLens-Binary',
                       'uLens-Single-GenLens', 'uLens-Single_PyLIMA' ]

    for cls in object_classes:
        frame = reader.get_all_ltcvs( cls, agg=True, include_header=True )
        frame.write_parquet( destination / f'{cls}.parquet' )
        sys.stderr.write( f"Did {cls}\n" )
9 | WV07_REWGT_EXPAV: 0.5 10 | GENRANGE_AV: 0 3 # CCM89 AV-extinctionrange 11 | GENPEAK_RV: 3.1 # peak prob dust parameter 12 | GENRANGE_RV: 2.0 4.0 # min and max limits for RV generation 13 | GENSIGMA_RV: 0.0 0.0 # lo & hi Gaussian sigmas 14 | 15 | # Volumetric rate (per yr per Mpc^3) base on 16 | # Abbott et. al. (2021) 17 | # https://arxiv.org/abs/2010.14533 (Sec Abstract) 18 | # Multiply rate by 6 so that participants cannot 19 | # game the metric. 20 | 21 | DNDZ: POWERLAW 320E-9 0.0 # /yr/MPc^3, no z-dependence 22 | GENRANGE_REDSHIFT: 0.011 0.28 23 | 24 | # ==================================== 25 | # SIMSED GRID PARAMETERS 26 | # Eqal prob per SED 27 | 28 | SIMSED_GRIDONLY: KN_INDEX # a particular index from SED.INFO 29 | # could do gaussian as well 30 | 31 | # Some of the models plateau at end of epoch range, 32 | # so force min mag/day slope for late-time extrapolation. 33 | MINSLOPE_EXTRAPMAG_LATE: 0.1 34 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_CART-MOSFIT.INPUT: -------------------------------------------------------------------------------- 1 | # Ca Rich Transients (from MOSFIT group) 2 | # Nov 1 2021 R.Kessler - Add explicit RV 3 | 4 | GENMODEL: $PLASTICC_MODELS/SIMSED.CART-MOSFIT 5 | GENAV_WV07: 1 # WV07, Eq. 
2 6 | GENRANGE_AV: 0 3 # CCM89 AV-extinctionrange 7 | 8 | GENPEAK_RV: 3.1 # peak prob dust parameter 9 | GENRANGE_RV: 3.0 3.2 # min and max limits for RV generation 10 | GENSIGMA_RV: 0.0 0.0 # lo & hi Gaussian sigmas 11 | 12 | SIMSED_USE_BINARY: 1 13 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 14 | 15 | # Volumetric rate 16 | DNDZ: MD14 2.3E-6 # MD14 SFR(z) with 2.3E-6/yr/Mpc^3 at z=0 17 | GENRANGE_REDSHIFT: 0.012 1.4 18 | 19 | GENRANGE_TREST: -100.0 500.0 20 | #GENRANGE_TREST: -50.0 100.0 21 | 22 | # ============================== 23 | # SIMSED GRID PARAMETERS 24 | # Eqal prob per SED 25 | 26 | SIMSED_GRIDONLY: Cart_INDEX 27 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_ILOT-MOSFIT.INPUT: -------------------------------------------------------------------------------- 1 | # ILOT (Intermediate Luminosity Optical Transients) 2 | # Nov 1 2021: add explicit RV 3 | # Dec 27 2021 R.Kessler - reduce zmax from 1.5 to 0.5 4 | 5 | GENMODEL: $PLASTICC_MODELS/SIMSED.ILOT-MOSFIT 6 | GENAV_WV07: 1 # WV07, Eq. 
2 7 | GENRANGE_AV: 0 3 # CCM89 AV-extinctionrange 8 | 9 | GENPEAK_RV: 3.1 # peak prob dust parameter 10 | GENRANGE_RV: 3.0 3.2 # min and max limits for RV generation 11 | GENSIGMA_RV: 0.0 0.0 # lo & hi Gaussian sigmas 12 | 13 | SIMSED_USE_BINARY: 1 14 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 15 | 16 | # Volumetric rate is same as for IIn 17 | DNDZ: CC_S15 # rate from Strolger 2015 (HST) 18 | DNDZ_ALLSCALE: 0.06 19 | #GENRANGE_REDSHIFT: 0.011 1.50 20 | GENRANGE_REDSHIFT: 0.011 0.50 21 | 22 | GENRANGE_TREST: -100.0 1000.0 23 | 24 | # ============================== 25 | # SIMSED GRID PARAMETERS 26 | # Eqal prob per SED 27 | 28 | SIMSED_GRIDONLY: ILOT_INDEX 29 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_KN-K17.INPUT: -------------------------------------------------------------------------------- 1 | # Alex Gagliano 10/06/21 Added RV Keys 2 | 3 | GENMODEL: $PLASTICC_MODELS/SIMSED.KN-K17 4 | 5 | SIMSED_USE_BINARY: 1 6 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 7 | 8 | # Extinction from WV07 model, with half expon component 9 | # to approximately account for large kicks. 10 | WV07_REWGT_EXPAV: 0.5 11 | GENRANGE_AV: 0 3 # CCM89 AV-extinctionrange 12 | GENPEAK_RV: 3.1 # peak prob dust parameter 13 | GENRANGE_RV: 2.0 4.0 # min and max limits for RV generation 14 | GENSIGMA_RV: 0.0 0.0 # lo & hi Gaussian sigmas 15 | 16 | # Volumetric rate (per yr per Mpc^3) base on 17 | # Abbott et. al. (2021) 18 | # https://arxiv.org/abs/2010.14533 (Sec Abstract) 19 | # Multiply rate by 6 so that participants cannot 20 | # game the metric. 21 | 22 | DNDZ: POWERLAW 320E-9 0.0 # /yr/MPc^3, no z-dependence 23 | GENRANGE_REDSHIFT: 0.011 0.28 24 | 25 | # ==================================== 26 | # SIMSED GRID PARAMETERS 27 | # Eqal prob per SED 28 | 29 | SIMSED_GRIDONLY: KN_INDEX 30 | 31 | # Some of the models plateau at end of epoch range, 32 | # so force min mag/day slope for late-time extrapolation. 
33 | MINSLOPE_EXTRAPMAG_LATE: 0.1 34 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_AGN-LSST.INPUT: -------------------------------------------------------------------------------- 1 | GENMODEL: LCLIB $PLASTICC_MODELS/LCLIB_AGN-LSST.TEXT 1+2+3+4 2 | 3 | # Rel rate = 5th order polynominal in cos(b) 4 | DNDB: COSBPOLY 1 0 0 0 0 0 # AGN are isotropic 5 | NGENTOT_LC: 175000 # total number to generate 6 | 7 | # restrict AGN redshift to ZTRUE range of G18 photo-z library 8 | #LCLIB_CUTWIN: REDSHIFT 0.05 3.45 9 | LCLIB_CUTWIN: REDSHIFT 0.05 2.95 # 21/Dec/2021: To be within ELASTICC hostlib range 10 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_Cepheid-LSST.INPUT: -------------------------------------------------------------------------------- 1 | GENMODEL: LCLIB $PLASTICC_ROOT/model_libs_updates/LCLIB_Cepheid-LSST.TEXT 1+2+3+4 2 | 3 | NGENTOT_LC: 10000 4 | 5 | # (1 - |b/90|)**5 + 0.001 6 | DNDB: BPOLY 1.001,-5.55555556e-02,1.23456790e-03,-1.37174211e-05,7.62078951e-08,-1.69350878e-10 7 | 8 | # Turn extinction off 9 | OPT_MWEBV: 0 10 | OPT_MWCOLORLAW: 0 11 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_EB-PHOEBE.INPUT: -------------------------------------------------------------------------------- 1 | GENMODEL: LCLIB $PLASTICC_MODELS/LCLIB_EB-PHOEBE.TEXT 1+2+3+4 2 | 3 | # rel rate = 5th order poly in cos(b) 4 | DNDB: BPOLY 20383 -1757.7 65.293 -1.1885 0.10405E-01 -0.35119E-04 5 | 6 | NGENTOT_LC: 220000 # total number to generate 7 | 8 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_MIRA-ISW2011.INPUT: -------------------------------------------------------------------------------- 1 | GENMODEL: LCLIB $PLASTICC_MODELS/LCLIB_MIRA_ISW2011.TEXT 1+2+3+4 2 | 3 | # Fit b>5 deg to 5th order polynomial 
and use for both 4 | # b>0 and b<0 --> ignore structure from Sagitarius. 5 | # 6 | 7 | DNDB: BPOLY 20383 -1757.7 65.293 -1.1885 0.10405E-01 -0.35119E-04 8 | 9 | NGENTOT_LC: 1500 10 | 11 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_Mdwarf-LSST.INPUT: -------------------------------------------------------------------------------- 1 | GENMODEL: LCLIB $PLASTICC_MODELS/LCLIB_Mdwarf-LSST.TEXT 1+2+3+4 2 | 3 | # rel rate = b0 + b1*cosb + b2*cosb^2 + b3*cosb^3 4 | DNDB: COSBPOLY 0.1 1.0 1.0 1.0 0 0 # b0 b1 b2 b3 b4 b5 5 | NGENTOT_LC: 800000 # total number to generate 6 | 7 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_Mdwarf-flare-LSST.INPUT: -------------------------------------------------------------------------------- 1 | # Jan 5 2022 PLASTICC 2 | # - set NGENTOT back to 10 million 3 | # Dec 30 2021 RK 4 | # - set NGENTOT to 2 million; with ~1% efficiency for single-detection trigger, 5 | # expect 20k events. 6 | 7 | GENMODEL: LCLIB $PLASTICC_ROOT/model_libs_updates/LCLIB_Mdwarf-flare-LSST.TEXT 1+2+3+4 8 | 9 | NGENTOT_LC: 10000000 10 | 11 | # LCLIB |b| fit 12 | DNDB: BPOLY -3.28230212e-03,3.01446333e-02,-4.65144034e-03,3.16808686e-04,-1.20480381e-05,2.76898385e-07,-3.92467157e-09,3.34750950e-11,-1.57113870e-13,3.10724077e-16 13 | 14 | # Turn extinction off 15 | OPT_MWEBV: 0 16 | OPT_MWCOLORLAW: 0 17 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_RRL-LSST.INPUT: -------------------------------------------------------------------------------- 1 | # R.Kessler Jan 5 2022: 2 | # reduce NGENTOT fom 200k -> 50k to avoid swamping file system 3 | # with too many alerts. 
4 | 5 | GENMODEL: LCLIB $PLASTICC_MODELS/LCLIB_RRL-LSST.TEXT 1+2+3+4 6 | 7 | # Rate from 8 | # http://iopscience.iop.org/article/10.3847/1538-3881/aa661b/pdf 9 | # http://adsabs.harvard.edu/abs/2017yCat..18380107S 10 | # 11 | # Fit b>5 deg to 5th order polynomial and use for both 12 | # b>0 and b<0 --> ignore structure from Sagitarius. 13 | # 14 | 15 | DNDB: BPOLY 20383 -1757.7 65.293 -1.1885 0.10405E-01 -0.35119E-04 16 | 17 | 18 | NGENTOT_LC: 50000 # total number to generate 19 | 20 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_d-Sct-LSST.INPUT: -------------------------------------------------------------------------------- 1 | GENMODEL: LCLIB $PLASTICC_ROOT/model_libs_updates/LCLIB_d-Sct-LSST.TEXT 1+2+3+4 2 | 3 | NGENTOT_LC: 10000 4 | 5 | # (1 - |b/90|)**5 + 0.001 6 | DNDB: BPOLY 1.001,-5.55555556e-02,1.23456790e-03,-1.37174211e-05,7.62078951e-08,-1.69350878e-10 7 | 8 | # Turn extinction off 9 | OPT_MWEBV: 0 10 | OPT_MWCOLORLAW: 0 11 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_dwarf-nova-LSST.INPUT: -------------------------------------------------------------------------------- 1 | GENMODEL: LCLIB $PLASTICC_ROOT/model_libs_updates/LCLIB_dwarf-nova-LSST.TEXT 1+2+3+4 2 | 3 | NGENTOT_LC: 10000 4 | 5 | # (1 - |b/90|)**5 + 0.001 6 | DNDB: BPOLY 1.001,-5.55555556e-02,1.23456790e-03,-1.37174211e-05,7.62078951e-08,-1.69350878e-10 7 | 8 | # Turn extinction off 9 | OPT_MWEBV: 0 10 | OPT_MWCOLORLAW: 0 11 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_uLens-Binary.INPUT: -------------------------------------------------------------------------------- 1 | # binary from CFA group (Rosanne & Arturo) 2 | GENMODEL: LCLIB $PLASTICC_MODELS/LCLIB_uLens-Binary.TEXT 1+2+3+4 3 | 4 | # rel rate = 5th order poly 5 | DNDB: BPOLY 20383 -1757.7 65.293 -1.1885 0.10405E-01 
-0.35119E-04 6 | 7 | NGENTOT_LC: 1000 8 | 9 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_uLens-Single-GenLens.INPUT: -------------------------------------------------------------------------------- 1 | # Point lens from CFA (Rosanne & Arturo) 2 | GENMODEL: LCLIB $PLASTICC_MODELS/LCLIB_uLens-Single-GenLens.TEXT 1+2+3+4 3 | 4 | # rel rate = 5th order poly 5 | DNDB: BPOLY 20383 -1757.7 65.293 -1.1885 0.10405E-01 -0.35119E-04 6 | 7 | NGENTOT_LC: 1400 8 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_LCLIB_uLens-Single-PyLIMA.INPUT: -------------------------------------------------------------------------------- 1 | # based on Gaia stars (from Etienne Bachelet) 2 | GENMODEL: LCLIB $PLASTICC_MODELS/LCLIB_uLens-Single-PyLIMA.TEXT 1+2+3+4 3 | 4 | # rel rate = 5th order poly 5 | DNDB: BPOLY 20383 -1757.7 65.293 -1.1885 0.10405E-01 -0.35119E-04 6 | 7 | NGENTOT_LC: 1400 8 | 9 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_PISN-MOSFIT.INPUT: -------------------------------------------------------------------------------- 1 | # Pair-Instability SN model (from MOSFIT group) 2 | # Nov 1 2021: add explicit RV 3 | 4 | GENMODEL: $PLASTICC_MODELS/SIMSED.PISN-MOSFIT 5 | GENAV_WV07: 1 # WV07, Eq. 
2 6 | GENRANGE_AV: 0 3 # CCM89 AV-extinctionrange 7 | 8 | GENPEAK_RV: 3.1 # peak prob dust parameter 9 | GENRANGE_RV: 3.0 3.2 # min and max limits for RV generation 10 | GENSIGMA_RV: 0.0 0.0 # lo & hi Gaussian sigmas 11 | 12 | SIMSED_USE_BINARY: 1 13 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 14 | 15 | # Volumetric rate: # arxiv.org/pdf/1111.3648.pdf, Fig 2 16 | DNDZ: PISN_PLK12 17 | 18 | GENRANGE_REDSHIFT: 0.02 2.2 19 | 20 | # ============================== 21 | # SIMSED GRID PARAMETERS 22 | # Eqal prob per SED 23 | 24 | SIMSED_GRIDONLY: PISN_INDEX 25 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_PISN-STELLA-HECORE.INPUT: -------------------------------------------------------------------------------- 1 | # Nov 24 2021 2 | # Pair-Instability SN model from S.Blondin, 3 | # based on Heger, A.; Woosley, S. E. 2002ApJ...567..532H 4 | 5 | GENMODEL: $PLASTICC_ROOT/model_libs_updates/SIMSED.PISN-STELLA-HECORE 6 | GENAV_WV07: 1 # WV07, Eq. 2 7 | GENRANGE_AV: 0 3 # CCM89 AV-extinctionrange 8 | 9 | GENPEAK_RV: 3.1 # peak prob dust parameter 10 | GENRANGE_RV: 3.0 3.2 # min and max limits for RV generation 11 | GENSIGMA_RV: 0.0 0.0 # lo & hi Gaussian sigmas 12 | 13 | SIMSED_USE_BINARY: 1 14 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 15 | 16 | # Volumetric rate: # arxiv.org/pdf/1111.3648.pdf, Fig 2 17 | DNDZ: PISN_PLK12 18 | 19 | GENRANGE_REDSHIFT: 0.02 2.2 20 | 21 | # ============================== 22 | # SIMSED GRID PARAMETERS 23 | # Eqal prob per SED 24 | 25 | SIMSED_GRIDONLY: PISN_INDEX 26 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_PISN-STELLA-HYDROGENIC.INPUT: -------------------------------------------------------------------------------- 1 | # Nov 24 2021 2 | # Pair-Instability SN model from S.Blondin, 3 | # based on Gilmer, Matthew S. et al. 
2017ApJ...846..100G 4 | 5 | GENMODEL: $PLASTICC_ROOT/model_libs_updates/SIMSED.PISN-STELLA-HYDROGENIC 6 | GENAV_WV07: 1 # WV07, Eq. 2 7 | GENRANGE_AV: 0 3 # CCM89 AV-extinctionrange 8 | 9 | GENPEAK_RV: 3.1 # peak prob dust parameter 10 | GENRANGE_RV: 3.0 3.2 # min and max limits for RV generation 11 | GENSIGMA_RV: 0.0 0.0 # lo & hi Gaussian sigmas 12 | 13 | SIMSED_USE_BINARY: 1 14 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 15 | 16 | # Volumetric rate: # arxiv.org/pdf/1111.3648.pdf, Fig 2 17 | DNDZ: PISN_PLK12 18 | 19 | GENRANGE_REDSHIFT: 0.02 2.2 20 | 21 | # ============================== 22 | # SIMSED GRID PARAMETERS 23 | # Eqal prob per SED 24 | 25 | SIMSED_GRIDONLY: PISN_INDEX 26 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_SLSN-I-MOSFIT.INPUT: -------------------------------------------------------------------------------- 1 | # SLSN using Magnetar model (from MOSFIT group) 2 | # C Alves, R.Kessler (13 Aug 2021) Added the RV parameters to avoid SIM abort 3 | # 4 | GENMODEL: $PLASTICC_MODELS/SIMSED.SLSN-I-MOSFIT 5 | GENAV_WV07: 1 # WV07, Eq. 
2 6 | GENRANGE_AV: 0 3 # CCM89 AV-extinctionrange 7 | 8 | GENPEAK_RV: 3.1 # peak prob dust parameter 9 | GENRANGE_RV: 3.0 3.2 # min and max limits for RV generation 10 | GENSIGMA_RV: 0.0 0.0 # lo & hi Gaussian sigmas 11 | 12 | SIMSED_USE_BINARY: 1 13 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 14 | 15 | # Volumetric rate 16 | DNDZ: MD14 2.0E-8 # MD14 SFR(z), with 2E-8/yr/Mpc^3 at z=0 17 | #GENRANGE_REDSHIFT: 0.02 3.9 18 | #GENRANGE_REDSHIFT: 0.02 3.45 # stay within photo-z range 19 | GENRANGE_REDSHIFT: 0.02 2.95 # 21/Dec/2021 C.Alves, R.Kessler: stay within hostlib range 20 | 21 | GENRANGE_TREST: -100.0 500.0 22 | 23 | # ============================== 24 | # SIMSED GRID PARAMETERS 25 | # Eqal prob per SED 26 | 27 | SIMSED_GRIDONLY: SLSN_INDEX 28 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_SNII-NMF.INPUT: -------------------------------------------------------------------------------- 1 | # II-NMF model From Santiago and Lluis 2 | # 3 | # 29/Oct/2021 C.Alves: Changed DNDZ_ALLSCALE from 0.35 to 0.175 because for ELAsTiCC 4 | # half of the CC SN are from V19 models 5 | # 11/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.162 = 93.3% * 69.6% * 25% = 6 | # 93.3% of Hydrogen rich CC SN are not SN IIn 7 | # * 69.6% of CC SN are Hydrogen rich 8 | # * 50% simulated with V19 models and the other half divided between 9 | # this model and SNII-Templates 10 | # Values from Table 3 of Shivvers+2017 (10.1088/1538-3873/aa54a6) 11 | # 21/Dec/2021 C.Alves: Changed GENRANGE_REDSHIFT upper bound from 1.5 to 1.6 12 | # 4/Feb/2022 C.Alves: Changed GENRANGE_REDSHIFT upper bound from 1.6 to 1.8 (for DDF) 13 | # (R.Kessler - lowered to 1.78 to stay within SED BINARY limit) 14 | # 15 | 16 | GENMODEL: $PLASTICC_MODELS/SIMSED.SNII-NMF 17 | 18 | SIMSED_USE_BINARY: 1 19 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 20 | 21 | # Volumetric rate 22 | DNDZ: CC_S15 # rate from Strolger 2015 (HST; 1509.06574) 23 | 
DNDZ_ALLSCALE: 0.162 # 11/Nov/2021 C.Alves: 93.3% * 69.6% * 25% ; see comment from same date 24 | 25 | GENRANGE_REDSHIFT: 0.011 1.78 26 | 27 | # ================================================= 28 | # SIMSED parameters are correlated & interpolated 29 | # Last update: Aug 13 2018 30 | 31 | SIMSED_PARAM: pc1 32 | GENPEAK_pc1: 0.0854 33 | #GENSIGMA_pc1: 0.0854 0.0686 # COV doesn't work with asymm Gaussian 34 | GENSIGMA_pc1: 0.075 0.075 35 | GENRANGE_pc1: 0.01 0.5 36 | 37 | SIMSED_PARAM: pc2 38 | GENPEAK_pc2: 0.0199 39 | #GENSIGMA_pc2: 0.0169 0.025 40 | GENSIGMA_pc2: 0.021 0.021 41 | GENRANGE_pc2: 0.001 0.07 42 | 43 | SIMSED_PARAM: pc3 44 | GENPEAK_pc3: 0.0250 45 | #GENSIGMA_pc3: 0.019 0.015 46 | GENSIGMA_pc3: 0.017 0.017 47 | GENRANGE_pc3: 0.001 0.07 48 | 49 | SIMSED_REDCOR(pc1,pc2): 0.241 50 | SIMSED_REDCOR(pc1,pc3): 0.052 51 | SIMSED_REDCOR(pc2,pc3): -0.074 52 | 53 | # I doubt these are COV, but probably REDCOR 54 | #SIMSED_COV(pc1,pc2): 0.241 55 | #SIMSED_COV(pc1,pc3): 0.052 56 | #SIMSED_COV(pc2,pc3): -0.074 57 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_SNII-Templates.INPUT: -------------------------------------------------------------------------------- 1 | # SN II templates 2 | # To match Li 2011 Table 6 after removing some templates. 3 | # MAGOFF = 2.100 -> 1.500 (0.6 mag brighter) 4 | # 28/Oct/2021 C.Alves: Changed DNDZ_ALLSCALE from 0.35 to 0.175 because for ELAsTiCC half of the CC SN are from V19 models 5 | # 11/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.162 = 93.3% * 69.6% * 25% = 6 | # 93.3% of Hydrogen rich CC SN are not SN IIn 7 | # * 69.6% of CC SN are Hydrogen rich 8 | # * 50% simulated with V19 models and the other half divided between this model and SNII-NMF 9 | # Values from Table 3 of Shivvers+2017 (10.1088/1538-3873/aa54a6) 10 | # 21/Dec/2021 C.Alves: Changed GENRANGE_REDSHIFT upper bound from 1.5 to 1.6 to ensure all the possibly detected supernovae are simulated. 
11 | # 4/Feb/2022 C.Alves: Changed GENRANGE_REDSHIFT upper bound from 1.6 to 1.8 to ensure all the possibly detected supernovae are simulated. 12 | 13 | GENMODEL: NON1ASED 14 | PATH_NON1ASED: $PLASTICC_MODELS/NON1ASED.SNII-Templates 15 | 16 | # Volumetric rate 17 | DNDZ: CC_S15 # Strolger 2015 (HST; 1509.06574) 18 | DNDZ_ALLSCALE: 0.162 # 11/Nov/2021 C.Alves: 93.3% * 69.6% * 25% ; see comment from same date 19 | 20 | GENRANGE_REDSHIFT: 0.011 1.80 21 | 22 | GENRANGE_TREST: -50 300 23 | 24 | # Some of the models plateau at end of epoch range, 25 | # so force min mag/day slope for late-time extrapolation. 26 | MINSLOPE_EXTRAPMAG_LATE: 0.01 27 | 28 | # ----------------------------------------- 29 | # 30 | # User-generated comments: 31 | # MAGOFF and MAGSMEAR adjusted to match m_R peak and sigma 32 | # in Table 6 of Li et al, 2010 (arXiv:1006.4612) 33 | # 34 | # 35 | 36 | NON1A_KEYS: 5 37 | INDEX WGT MAGOFF MAGSMEAR SNTYPE 38 | NON1A: 201 0.0246 1.500 1.050 2 # IIP (SDSS-000018) 39 | NON1A: 204 0.0246 1.500 1.050 2 # IIP (SDSS-003818) 40 | NON1A: 208 0.0246 1.500 1.050 2 # IIP (SDSS-013376) 41 | NON1A: 210 0.0246 1.500 1.050 2 # IIP (SDSS-014450) 42 | NON1A: 213 0.0246 1.500 1.050 2 # IIP (SDSS-014599) 43 | NON1A: 214 0.0246 1.500 1.050 2 # IIP (SDSS-015031) 44 | NON1A: 215 0.0246 1.500 1.050 2 # IIP (SDSS-015320) 45 | NON1A: 216 0.0246 1.500 1.050 2 # IIP (SDSS-015339) 46 | NON1A: 220 0.0246 1.500 1.050 2 # IIP (SDSS-017862) 47 | NON1A: 221 0.0246 1.500 1.050 2 # IIP (SDSS-018109) 48 | NON1A: 222 0.0246 1.500 1.050 2 # IIP (SDSS-018297) 49 | NON1A: 223 0.0246 1.500 1.050 2 # IIP (SDSS-018408) 50 | NON1A: 224 0.0246 1.500 1.050 2 # IIP (SDSS-018441) 51 | NON1A: 225 0.0246 1.500 1.050 2 # IIP (SDSS-018457) 52 | NON1A: 226 0.0246 1.500 1.050 2 # IIP (SDSS-018590) 53 | NON1A: 227 0.0246 1.500 1.050 2 # IIP (SDSS-018596) 54 | NON1A: 228 0.0246 1.500 1.050 2 # IIP (SDSS-018700) 55 | NON1A: 229 0.0246 1.500 1.050 2 # IIP (SDSS-018713) 56 | NON1A: 230 0.0246 1.500 1.050 2 # 
IIP (SDSS-018734) 57 | NON1A: 235 0.0246 1.500 1.050 2 # IIP (SDSS-020038) 58 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_SNIIn-MOSFIT.INPUT: -------------------------------------------------------------------------------- 1 | # IIn model (from MOSFIT group) 2 | # 13/Aug/2021 C.Alves, R.Kessler: Added the RV parameters to avoid SIM abort 3 | # 28/Oct/2021 C.Alves: Changed DNDZ_ALLSCALE from 0.06 to 0.03 because for ELAsTiCC half of the CC SN are from V19 models 4 | # 11/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.0235 = 6.7% * 69.6% * 50% = 5 | # 6.7% of Hydrogen rich CC SN are SN IIn 6 | # * 69.6% of CC SN are Hydrogen rich 7 | # * 50% simulated with V19 models and the other half with this model 8 | # Values from Table 3 of Shivvers+2017 (10.1088/1538-3873/aa54a6) 9 | # 21/Dec/2021 C.Alves: Changed GENRANGE_REDSHIFT upper bound from 2.0 to 2.2 to ensure all the possibly detected supernovae are simulated. 10 | 11 | GENMODEL: $PLASTICC_MODELS/SIMSED.SNIIn-MOSFIT 12 | GENAV_WV07: 1 # WV07, Eq. 2 13 | GENRANGE_AV: 0 3 # CCM89 AV-extinctionrange 14 | 15 | GENPEAK_RV: 3.1 # peak prob dust parameter 16 | GENRANGE_RV: 3.0 3.2 # min and max limits for RV generation 17 | GENSIGMA_RV: 0.0 0.0 # lo & hi Gaussian sigmas 18 | 19 | SIMSED_USE_BINARY: 1 20 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 21 | 22 | # S15 rate at z=0 is R0(CC) = 0.7E-4. 
23 | # IIn rate at z=0 from MOSFIT is R0(IIn) = 4E-6 24 | # --> IIn fraction is 6% 25 | 26 | # Volumetric rate 27 | DNDZ: CC_S15 # rate from Strolger 2015 (HST; 1509.06574) 28 | DNDZ_ALLSCALE: 0.0235 # 11/Nov/2021 C.Alves: 6.7% * 69.6% * 50% ; see comment from same date 29 | 30 | #GENRANGE_REDSHIFT: 0.03 2.0 31 | GENRANGE_REDSHIFT: 0.03 2.2 32 | 33 | # ============================== 34 | # SIMSED GRID PARAMETERS 35 | # Eqal prob per SED 36 | 37 | SIMSED_GRIDONLY: IIN_INDEX 38 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_SNIa-91bg.INPUT: -------------------------------------------------------------------------------- 1 | 2 | GENMODEL: $PLASTICC_ROOT/model_libs_updates/SIMSED.SNIa-91bg 3 | 4 | SIMSED_USE_BINARY: 1 5 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 6 | 7 | # Volumetric rate 8 | 9 | DNDZ: POWERLAW 3.0E-6 1.5 # 3E-6/yr/Mpc^3 x (1+z)^1.5 10 | GENRANGE_REDSHIFT: 0.011 1.2 11 | 12 | # ============================== 13 | 14 | # var(stretch) = 0.096^2 15 | # var(color) = 0.175^2 16 | # reduced covar = -0.656 17 | 18 | SIMSED_PARAM: stretch 19 | GENPEAK_stretch: 0.975 20 | GENSIGMA_stretch: 0.096 0.096 21 | GENRANGE_stretch: 0.65 1.25 22 | 23 | SIMSED_PARAM: color 24 | GENPEAK_color: 0.557 25 | GENSIGMA_color: 0.175 0.175 26 | GENRANGE_color: 0.0 1.0 27 | 28 | SIMSED_REDCOR(stretch,color): -0.656 29 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_SNIa-SALT2.INPUT: -------------------------------------------------------------------------------- 1 | # Jun 24 2018: include improved late-time model extrapolation 2 | # 21/Dec/2021 C.Alves: Changed GENRANGE_REDSHIFT upper bound from 1.55 to 1.65 to ensure all the possibly detected supernovae are simulated. 
3 | 4 | GENMODEL: SALT2.WFIRST-H17 5 | GENMODEL_EXTRAP_LATETIME: $PLASTICC_MODELS/SNIa_Extrap_LateTime_2expon.TEXT 6 | 7 | GENMAG_SMEAR_MODELNAME: G10 8 | 9 | # Use rate assumption from Housell 2017 (WFIRST sims) 10 | DNDZ: POWERLAW2 2.5E-5 1.5 0.0 1.0 # from Didlay 2008 11 | DNDZ: POWERLAW2 9.7E-5 -0.5 1.0 3.0 # high-z extenstion from Lou 12 | 13 | GENRANGE_REDSHIFT: 0.011 1.65 14 | GENRANGE_TREST: -100 300 15 | 16 | # -------------------------------------------------- 17 | # define SALT2 params from Scolnic et al 2016, using G10 High-z row 18 | 19 | GENPEAK_SALT2c: -0.054 20 | GENSIGMA_SALT2c: 0.043 0.101 # bifurcated sigmas 21 | GENRANGE_SALT2c: -0.300 0.500 # color range 22 | 23 | GENPEAK_SALT2x1: 0.973 24 | GENSIGMA_SALT2x1: 1.472 0.222 # bifurcated sigmas 25 | GENRANGE_SALT2x1: -3.0 2.0 # x1 (stretch) range 26 | 27 | 28 | GENMEAN_SALT2ALPHA: 0.14 29 | GENMEAN_SALT2BETA: 3.1 30 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_SNIax.INPUT: -------------------------------------------------------------------------------- 1 | # SN~Iax from Saurabh 2 | 3 | 4 | GENMODEL: $PLASTICC_ROOT/model_libs_updates/SIMSED.SNIax 5 | 6 | SIMSED_USE_BINARY: 1 7 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 8 | 9 | 10 | # Volumetric rate 11 | DNDZ: MD14 6.0E-6 # MD14 SFR(z), with 6E-6/yr/Mpc^3 at z=0 12 | GENRANGE_REDSHIFT: 0.011 1.5 13 | 14 | # Extinction 15 | # GN updated 20210321 per RK - these are Maria Vincenzi's Galaxy Extinction params 16 | GENPEAK_RV: 3.1 # peak prob dust parameter 17 | GENRANGE_RV: 3.0 3.2 # min and max limits for RV generation 18 | GENSIGMA_RV: 0.0 0.0 # lo & hi Gaussian sigmas 19 | GENRANGE_AV: 0.001 3.0 # CCM89 V-band extinction 20 | GENTAU_AV: 1.7 # dN/dAV = exp(-AV/xxx) 21 | GENSIG_AV: 0.6 # += Guass(AV,sigma) 22 | GENRATIO_AV0: 4.0 23 | 24 | # random pick of each SED sequence 25 | 26 | SIMSED_GRIDONLY: Iax_INDEX 27 | 
-------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_SNIb-Templates.INPUT: -------------------------------------------------------------------------------- 1 | # SN Ib template models 2 | # 19/July/2018: remove 270 due to strange LC artifacts 3 | # 12/Aug/2018: remove SDSS-013195, SDSS-014475, SNLS-04D1la 4 | # (artifact warning from Justin) 5 | # 9/Nov/2021 C.Alves: Set only the SN Ib models in this file instead of including both SN Ib and SN Ic 6 | # 9/Nov/2021 C.Alves: Change DNDZ_ALLSCALE to 0.054 = 36% * 30% * 50% = Table 3 of the Shivvers+2017 paper * 30% of CC SN are SN Ibc * half will be simulated using V19 models and the other half from this model 7 | # 11/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.054 = 35.6% * 30.4% * 50% = 8 | # 35.6% of Stripped Envelope CC SN are SN Ib 9 | # * 30.4% of CC SN are Stripped Envelope 10 | # * 50% simulated with V19 models and the other half with this model 11 | # Values from Table 3 of Shivvers+2017 (10.1088/1538-3873/aa54a6) 12 | 13 | GENMODEL: NON1ASED 14 | PATH_NON1ASED: $PLASTICC_MODELS/NON1ASED.SNIbc-Templates 15 | 16 | # Volumetric rate 17 | DNDZ: CC_S15 # Strolger 2015 (HST; 1509.06574) 18 | DNDZ_ALLSCALE: 0.054 # 11/Nov/2021 C.Alves: 35.6% * 30.4% * 50% ; see comment from same date 19 | 20 | GENRANGE_REDSHIFT: 0.011 1.50 21 | 22 | GENRANGE_TREST: -50 300 23 | 24 | # Some of the models plateau at end of epoch range, 25 | # so force min mag/day slope for late-time extrapolation. 
26 | MINSLOPE_EXTRAPMAG_LATE: 0.01 27 | 28 | # ----------------------------------------- 29 | # User-generated comments: 30 | # MAGOFF and MAGSMEAR adjusted to match m_R peak and sigma 31 | # in Table 6 of Li et al, 2010 (arXiv:1006.4612) 32 | # 33 | 34 | NON1A_KEYS: 5 35 | INDEX WGT MAGOFF MAGSMEAR SNTYPE 36 | NON1A: 103 0.0191 0.770 0.410 3 # Ib (CSP-2004gv) 37 | NON1A: 104 0.0191 2.670 0.410 3 # Ib (CSP-2006ep) 38 | NON1A: 105 0.0191 -0.628 0.410 3 # Ib (CSP-2007Y) 39 | NON1A: 202 0.0191 0.346 0.410 3 # Ib (SDSS-000020) 40 | NON1A: 203 0.0191 -0.099 0.410 3 # Ib (SDSS-002744) 41 | NON1A: 212 0.0191 0.950 0.410 3 # Ib (SDSS-014492) 42 | NON1A: 234 0.0191 0.281 0.410 3 # Ib (SDSS-019323) 43 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_SNIc-Templates.INPUT: -------------------------------------------------------------------------------- 1 | # SN Ic template models 2 | # 19/July/2018: remove 270 due to strange LC artifacts 3 | # 12/Aug/2018: remove SDSS-013195, SDSS-014475, SNLS-04D1la 4 | # (artifact warning from Justin) 5 | # 9/Nov/2021 C.Alves: Set only the SN Ic models in this file instead of including both SN Ib and SN Ic 6 | # 9/Nov/2021 C.Alves: Change DNDZ_ALLSCALE to 0.033 = 22% * 30% * 50% = Table 3 of the Shivvers+2017 paper * 30% of CC SN are SN Ibc * half will be simulated using V19 models and the other half from this model 7 | # 11/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.0375 = 24.7% * 30.4% * 50% = 8 | # 24.7% of Stripped Envelope CC SN are SN Ic 9 | # * 30.4% of CC SN are Stripped Envelope 10 | # * 50% simulated with V19 models and the other half with this model 11 | # Values from Table 3 of Shivvers+2017 (10.1088/1538-3873/aa54a6) 12 | 13 | GENMODEL: NON1ASED 14 | PATH_NON1ASED: $PLASTICC_MODELS/NON1ASED.SNIbc-Templates 15 | 16 | # Volumetric rate 17 | DNDZ: CC_S15 # Strolger 2015 (HST; 1509.06574) 18 | DNDZ_ALLSCALE: 0.0375 # 11/Nov/2021 C.Alves: 24.7% * 30.4% * 50% ; see
comment from same date 19 | 20 | GENRANGE_REDSHIFT: 0.011 1.50 21 | 22 | GENRANGE_TREST: -50 300 23 | 24 | # Some of the models plateau at end of epoch range, 25 | # so force min mag/day slope for late-time extrapolation. 26 | MINSLOPE_EXTRAPMAG_LATE: 0.01 27 | 28 | # ----------------------------------------- 29 | # User-generated comments: 30 | # MAGOFF and MAGSMEAR adjusted to match m_R peak and sigma 31 | # in Table 6 of Li et al, 2010 (arXiv:1006.4612) 32 | # 33 | 34 | NON1A_KEYS: 5 35 | INDEX WGT MAGOFF MAGSMEAR SNTYPE 36 | NON1A: 022 0.0167 1.480 1.100 3 # Ic (SNLS-04D4jv) 37 | NON1A: 101 0.0167 1.480 1.100 3 # Ic (CSP-2004fe) 38 | NON1A: 102 0.0167 1.480 1.100 3 # Ic (CSP-2004gq) 39 | NON1A: 205 0.0167 1.480 1.100 3 # Ic (SDSS-004012) 40 | NON1A: 217 0.0167 1.480 1.100 3 # Ic (SDSS-015475) 41 | NON1A: 218 0.0167 1.480 1.100 3 # Ic (SDSS-017548) 42 | 43 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_TDE-MOSFIT.INPUT: -------------------------------------------------------------------------------- 1 | # TDE from MOSFIT group 2 | 3 | GENMODEL: $PLASTICC_MODELS/SIMSED.TDE-MOSFIT 4 | #GENAV_WV07: 1 # WV07, Eq. 
2 5 | GENTAU_AV: 0.4 # expon component only, no Gauss core 6 | GENRANGE_AV: 0 3 # CCM89 AV-extinctionrange 7 | 8 | GENPEAK_RV: 3.1 9 | GENRANGE_RV: 1.0 5.0 10 | GENSIGMA_RV: 0.0 0.0 11 | 12 | SIMSED_USE_BINARY: 1 13 | SIMSED_PATH_BINARY: $PLASTICC_MODELS/SIMSED_BINARIES 14 | 15 | # Volumetric rate from 1707.03458, (8 +- 4) × 10−7 /Mpc^3/yr 16 | # z-dependence from 17 | # http://adsabs.harvard.edu/abs/2016MNRAS.tmp..944K 18 | # 19 | #DNDZ: MD14 1.0E-6 # MD14 SFR(z), with 1E-6/yr/Mpc^3 at z=0 20 | #DNDZ_ZPOLY_REWGT: 1.0 -0.8 0.25 -0.03 # temp fudge 21 | 22 | 23 | #DNDZ: EPM 0.8E-7 # from van Velson 2017, Sec 5.2 24 | DNDZ: TDE 1.0E-6 25 | GENRANGE_REDSHIFT: 0.01 2.9 26 | 27 | GENRANGE_TREST: -100.0 500.0 28 | #GENRANGE_TREST: -50.0 100.0 29 | 30 | # ============================== 31 | # SIMSED GRID PARAMETERS 32 | # Eqal prob per SED 33 | 34 | SIMSED_GRIDONLY: TDE_INDEX 35 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_V19_SNII+HostXT.INPUT: -------------------------------------------------------------------------------- 1 | # SN II (IIP, IIL) models from Vincenzi et al. 
2019 2 | # 31/Aug/2021 C.Alves, R.Kessler: SN II subset of V19 models + change DNDZ_ALLSCALE from 1.0 to 0.7 3 | # 28/Oct/2021 C.Alves: Changed DNDZ_ALLSCALE from 0.7 to 0.35 because for ELAsTiCC half of the CC SN are from V19 models and the other half are other models 4 | # 9/Nov/2021 C.Alves: SN II (IIP, IIL) subset of V19 models + change DNDZ_ALLSCALE to 0.35 = 70% * 50% = 70% of CC SN are NOT stripped envelope * 50% simulated using V19 models and 50% from other models 5 | # 11/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.325 = 93.3% * 69.6% * 50% = 6 | # 93.3% of Hydrogen rich CC SN are not SN IIn 7 | # * 69.6% of CC SN are Hydrogen rich 8 | # * 50% simulated with V19 models and the other half divided between other models 9 | # Values from Table 3 of Shivvers+2017 (10.1088/1538-3873/aa54a6) 10 | # 21/Dec/2021 C.Alves: Changed GENRANGE_REDSHIFT upper bound from 1.5 to 1.6 to ensure all the possibly detected supernovae are simulated. 11 | # 4/Feb/2022 C.Alves: Changed GENRANGE_REDSHIFT upper bound from 1.6 to 1.8 to ensure all the possibly detected supernovae are simulated. 12 | 13 | GENMODEL: NON1ASED 14 | PATH_NON1ASED: $PLASTICC_ROOT/model_libs_updates/NON1ASED.V19_CC+HostXT 15 | 16 | # Volumetric rate 17 | DNDZ: CC_S15 # rate from Strolger 2015 (HST; 1509.06574) 18 | DNDZ_ALLSCALE: 0.325 # 11/Nov/2021 C.Alves: 93.3% * 69.6% * 50% ; see comment from same date 19 | 20 | GENRANGE_REDSHIFT: 0.011 1.80 21 | 22 | # GN: we'll just assert that since we're simulating all CC types 23 | # we can just follow the Strolger 2015 rate 24 | # this needs the relative rates in the CC sample 25 | # in Vincenzi 19 to follow the same distribution 26 | # as actual CC relative rate 27 | # note below indicates that's happeing (Shivvers et al. 2017) 28 | # probably good to check this though 29 | GENRANGE_TREST: -50 300 30 | 31 | # Some of the models plateau at end of epoch range, 32 | # so force min mag/day slope for late-time extrapolation. 
33 | MINSLOPE_EXTRAPMAG_LATE: 0.01 34 | 35 | # ----------------------------------------- 36 | # TEMPLATES presented in Vincenzi et al. 2019 (arXiv:1908.05228) 37 | # TEMPLATES used here HAS BEEN corrected for host and MW extinction. 38 | # MAGOFF and MAGSMEAR adjusted to match LF from Richardson. et al 2014. 39 | # For more details see Vincenzi et al. 2019 (Table 1, Section 4.1) 40 | # Richardson et al. LFs use a sample of SNe that HAS BEEN corrected 41 | # for host extinction. 42 | # WGT (relative rates) from Shivvars et al. 2017 43 | # (also reported in Vincenzi et al. 2019, Table 1) 44 | # 45 | # 46 | # II --> 25 (IIP+IIL, no distinction) # current file 47 | # IIn --> 21 48 | # IIb --> 23 49 | # Ib --> 32 50 | # Ic --> 33 51 | # Ic-BL --> 35 52 | 53 | 54 | NON1A_KEYS: 5 55 | INDEX WGT MAGOFF MAGSMEAR SNTYPE 56 | NON1A: 701 0.033563 -0.40 0.40 20 ## IIP ASASSN14jb 57 | NON1A: 735 0.033563 -0.40 0.40 20 ## IIP SN2008in 58 | NON1A: 744 0.033563 -0.40 0.40 20 ## IIP SN2009N 59 | NON1A: 717 0.033563 -0.40 0.40 20 ## IIP SN2005cs 60 | NON1A: 737 0.033563 -0.40 0.40 20 ## IIP SN2009bw 61 | NON1A: 739 0.033563 -0.40 0.40 20 ## IIP SN2009ib 62 | NON1A: 731 0.033563 -0.40 0.40 20 ## IIP SN2008bj 63 | NON1A: 752 0.033563 -0.40 0.40 20 ## IIP SN2012A 64 | NON1A: 711 0.033563 -0.40 0.40 20 ## IIP SN2004et 65 | NON1A: 708 0.033563 -0.40 0.40 20 ## IIP SN1999em 66 | NON1A: 756 0.033563 -0.40 0.40 20 ## IIP SN2013ab 67 | NON1A: 757 0.033563 -0.40 0.40 20 ## IIP SN2013am 68 | NON1A: 761 0.033563 -0.40 0.40 20 ## IIP SN2013fs 69 | NON1A: 766 0.033563 -0.40 0.40 20 ## IIP SN2016X 70 | NON1A: 767 0.033563 -0.40 0.40 20 ## IIP SN1987A 71 | NON1A: 764 0.033563 -0.40 0.40 20 ## IIP SN2016bkv 72 | NON1A: 755 0.033563 -0.40 0.40 20 ## IIP SN2012aw 73 | NON1A: 743 0.011297 -0.35 0.65 22 ## IIL SN2009kr 74 | NON1A: 702 0.011297 -0.35 0.65 22 ## IIL ASASSN15oz 75 | NON1A: 724 0.011297 -0.35 0.65 22 ## IIL SN2007od 76 | NON1A: 763 0.011297 -0.35 0.65 22 ## IIL SN2014G 77 | NON1A: 760 
0.011297 -0.35 0.65 22 ## IIL SN2013ej 78 | NON1A: 738 0.011297 -0.35 0.65 22 ## IIL SN2009dd 79 | NON1A: 758 0.011297 -0.35 0.65 22 ## IIL SN2013by 80 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_V19_SNIIb+HostXT.INPUT: -------------------------------------------------------------------------------- 1 | # SN IIb models from Vincenzi et al. 2019 2 | # 9/Nov/2021 C.Alves: SN IIb subset of V19 models + change DNDZ_ALLSCALE to 0.054 = 36% * 30% * 50% = Table 3 of Shivvers+2017 * 30% of CC SN are stripped envelope * 50% simulated using V19 models and 50% from other models 3 | # 11/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.109 = 36% * 30.4% = 4 | # 36% of Stripped Envelope CC SN are SN IIb 5 | # * 30.4% of CC SN are Stripped Envelope 6 | # Values from Table 3 of Shivvers+2017 (10.1088/1538-3873/aa54a6) 7 | # 4/Feb/2022 C.Alves: Changed GENRANGE_REDSHIFT upper bound from 1.5 to 1.8 to ensure all the possibly detected supernovae are simulated. 8 | 9 | GENMODEL: NON1ASED 10 | PATH_NON1ASED: $PLASTICC_ROOT/model_libs_updates/NON1ASED.V19_CC+HostXT 11 | 12 | # Volumetric rate 13 | DNDZ: CC_S15 # Strolger 2015 (HST; 1509.06574) 14 | DNDZ_ALLSCALE: 0.109 # 11/Nov/2021 C.Alves: 36% * 30.4% ; see comment from same date 15 | 16 | GENRANGE_REDSHIFT: 0.011 1.80 17 | 18 | # GN: we'll just assert that since we're simulating all CC types 19 | # we can just follow the Strolger 2015 rate 20 | # this needs the relative rates in the CC sample 21 | # in Vincenzi 19 to follow the same distribution 22 | # as actual CC relative rate 23 | # note below indicates that's happeing (Shivvers et al. 2017) 24 | # probably good to check this though 25 | GENRANGE_TREST: -50 300 26 | 27 | # Some of the models plateau at end of epoch range, 28 | # so force min mag/day slope for late-time extrapolation. 29 | MINSLOPE_EXTRAPMAG_LATE: 0.01 30 | 31 | # ----------------------------------------- 32 | # TEMPLATES presented in Vincenzi et al. 
2019 (arXiv:1908.05228) 33 | # TEMPLATES used here HAS BEEN corrected for host and MW extinction. 34 | # MAGOFF and MAGSMEAR adjusted to match LF from Richardson. et al 2014. 35 | # For more details see Vincenzi et al. 2019 (Table 1, Section 4.1) 36 | # Richardson et al. LFs use a sample of SNe that HAS BEEN corrected 37 | # for host extinction. 38 | # WGT (relative rates) from Shivvars et al. 2017 39 | # (also reported in Vincenzi et al. 2019, Table 1) 40 | # 41 | # 42 | # II --> 25 (IIP+IIL, no distinction) 43 | # IIn --> 21 44 | # IIb --> 23 # current file 45 | # Ib --> 32 46 | # Ic --> 33 47 | # Ic-BL --> 35 48 | 49 | 50 | NON1A_KEYS: 5 51 | INDEX WGT MAGOFF MAGSMEAR SNTYPE 52 | NON1A: 748 0.009919 -0.39 0.92 23 ## IIb SN2011ei 53 | NON1A: 749 0.009919 1.63 0.92 23 ## IIb SN2011fu 54 | NON1A: 750 0.009919 -1.28 0.92 23 ## IIb SN2011hs 55 | NON1A: 722 0.009919 0.73 0.92 23 ## IIb SN2006T 56 | NON1A: 732 0.009919 -1.53 0.92 23 ## IIb SN2008bo 57 | NON1A: 747 0.009919 -1.00 0.92 23 ## IIb SN2011dh 58 | NON1A: 730 0.009919 -0.34 0.92 23 ## IIb SN2008ax 59 | NON1A: 729 0.009919 -0.39 0.92 23 ## IIb SN2008aq 60 | NON1A: 759 0.009919 -1.09 0.92 23 ## IIb SN2013df 61 | NON1A: 704 0.009919 -3.77 0.92 23 ## IIb SN1993J 62 | NON1A: 765 0.009919 -0.38 0.92 23 ## IIb SN2016gkg 63 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_V19_SNIIn+HostXT.INPUT: -------------------------------------------------------------------------------- 1 | # SN IIn models from Vincenzi et al. 
2019 2 | # 31/Aug/2021 C.Alves, R.Kessler: SN II subset of V19 models + change DNDZ_ALLSCALE from 1.0 to 0.7 3 | # 28/Oct/2021 C.Alves: Changed DNDZ_ALLSCALE from 0.7 to 0.35 because for ELAsTiCC half of the CC SN are from V19 models and the other half are other models 4 | # 9/Nov/2021 C.Alves: SN II (IIP, IIL, IIn) subset of V19 models + change DNDZ_ALLSCALE to 0.35 = 70% * 50% = 70% of CC SN are NOT stripped envelope * 50% simulated using V19 models and 50% from other models 5 | # 10/Nov/2021 C.Alves: SN IIn subset of V19 models 6 | # 11/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.0235 = 6.7% * 69.6% * 50% = 7 | # 6.7% of Hydrogen rich CC SN are SN IIn 8 | # * 69.6% of CC SN are Hydrogen rich 9 | # * 50% simulated with V19 models and the other half with SNIIn-MOSFIT model 10 | # Values from Table 3 of Shivvers+2017 (10.1088/1538-3873/aa54a6) 11 | # 21/Dec/2021 C.Alves: Changed GENRANGE_REDSHIFT upper bound from 1.5 to 1.6 to ensure all the possibly detected supernovae are simulated. 12 | # 4/Feb/2022 C.Alves: Changed GENRANGE_REDSHIFT upper bound from 1.6 to 1.8 to ensure all the possibly detected supernovae are simulated. 13 | 14 | GENMODEL: NON1ASED 15 | PATH_NON1ASED: $PLASTICC_ROOT/model_libs_updates/NON1ASED.V19_CC+HostXT 16 | 17 | # Volumetric rate 18 | DNDZ: CC_S15 # rate from Strolger 2015 (HST; 1509.06574) 19 | DNDZ_ALLSCALE: 0.0235 # 11/Nov/2021 C.Alves: 6.7% * 69.6% * 50% ; see comment from same date 20 | 21 | GENRANGE_REDSHIFT: 0.011 1.80 22 | 23 | # GN: we'll just assert that since we're simulating all CC types 24 | # we can just follow the Strolger 2015 rate 25 | # this needs the relative rates in the CC sample 26 | # in Vincenzi 19 to follow the same distribution 27 | # as actual CC relative rate 28 | # note below indicates that's happeing (Shivvers et al. 
2017) 29 | # probably good to check this though 30 | GENRANGE_TREST: -50 300 31 | 32 | # Some of the models plateau at end of epoch range, 33 | # so force min mag/day slope for late-time extrapolation. 34 | MINSLOPE_EXTRAPMAG_LATE: 0.01 35 | 36 | # ----------------------------------------- 37 | # TEMPLATES presented in Vincenzi et al. 2019 (arXiv:1908.05228) 38 | # TEMPLATES used here HAS BEEN corrected for host and MW extinction. 39 | # MAGOFF and MAGSMEAR adjusted to match LF from Richardson. et al 2014. 40 | # For more details see Vincenzi et al. 2019 (Table 1, Section 4.1) 41 | # Richardson et al. LFs use a sample of SNe that HAS BEEN corrected 42 | # for host extinction. 43 | # WGT (relative rates) from Shivvars et al. 2017 44 | # (also reported in Vincenzi et al. 2019, Table 1) 45 | # 46 | # 47 | # II --> 25 (IIP+IIL, no distinction) 48 | # IIn --> 21 # current file 49 | # IIb --> 23 50 | # Ib --> 32 51 | # Ic --> 33 52 | # Ic-BL --> 35 53 | 54 | 55 | NON1A_KEYS: 5 56 | INDEX WGT MAGOFF MAGSMEAR SNTYPE 57 | NON1A: 751 0.007841 -0.35 1.15 21 ## IIn SN2011ht 58 | NON1A: 745 0.007841 -0.35 1.15 21 ## IIn SN2010al 59 | NON1A: 740 0.007841 -0.35 1.15 21 ## IIn SN2009ip 60 | NON1A: 725 0.007841 -0.35 1.15 21 ## IIn SN2007pk 61 | NON1A: 734 0.007841 -0.35 1.15 21 ## IIn SN2008fq 62 | NON1A: 719 0.007841 -0.35 1.15 21 ## IIn SN2006aa 63 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_V19_SNIb+HostXT.INPUT: -------------------------------------------------------------------------------- 1 | # SN Ib models from Vincenzi et al. 
2019 2 | # 21/Sept/2021 A.Gagliano: SN Ib subset 3 | # 28/Oct/2021 C.Alves: Changed DNDZ back to CC_S15 to obtain the correct proportion of CC SN 4 | # 28/Oct/2021 C.Alves: Changed DNDZ_ALLSCALE from 0.3 to 0.35 because for ELAsTiCC half of the CC SN are from V19 models and the other half are other models 5 | # 8/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.054 = 36% * 30% * 50% = Table 3 of the Shivvers+2017 paper * 30% of CC SN are SN Ibc * half will be simulated using V19 models and the other half from other models 6 | # 11/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.054 = 35.6% * 30.4% * 50% = 7 | # 35.6% of Stripped Envelope CC SN are SN Ib 8 | # * 30.4% of CC SN are Stripped Envelope 9 | # * 50% simulated with V19 models and the other half with SNIb-Templates model 10 | # Values from Table 3 of Shivvers+2017 (10.1088/1538-3873/aa54a6) 11 | 12 | GENMODEL: NON1ASED 13 | PATH_NON1ASED: $PLASTICC_ROOT/model_libs_updates/NON1ASED.V19_CC+HostXT 14 | 15 | # Volumetric rate 16 | DNDZ: CC_S15 # Strolger 2015 (HST; 1509.06574) 17 | DNDZ_ALLSCALE: 0.054 # 11/Nov/2021 C.Alves: 35.6% * 30.4% * 50% ; see comment from same date 18 | 19 | GENRANGE_REDSHIFT: 0.011 1.50 20 | 21 | # GN: we'll just assert that since we're simulating all CC types 22 | # we can just follow the Strolger 2015 rate 23 | # this needs the relative rates in the CC sample 24 | # in Vincenzi 19 to follow the same distribution 25 | # as actual CC relative rate 26 | # note below indicates that's happeing (Shivvers et al. 2017) 27 | # probably good to check this though 28 | GENRANGE_TREST: -50 300 29 | 30 | # Some of the models plateau at end of epoch range, 31 | # so force min mag/day slope for late-time extrapolation. 32 | MINSLOPE_EXTRAPMAG_LATE: 0.01 33 | 34 | # ----------------------------------------- 35 | # TEMPLATES presented in Vincenzi et al. 2019 (arXiv:1908.05228) 36 | # TEMPLATES used here HAS BEEN corrected for host and MW extinction. 
37 | # MAGOFF and MAGSMEAR adjusted to match LF from Richardson. et al 2014. 38 | # For more details see Vincenzi et al. 2019 (Table 1, Section 4.1) 39 | # Richardson et al. LFs use a sample of SNe that HAS BEEN corrected 40 | # for host extinction. 41 | # WGT (relative rates) from Shivvars et al. 2017 42 | # (also reported in Vincenzi et al. 2019, Table 1) 43 | # 44 | # 45 | # II --> 25 (IIP+IIL, no distinction) 46 | # IIn --> 21 47 | # IIb --> 23 48 | # Ib --> 32 # current file 49 | # Ic --> 33 50 | # Ic-BL --> 35 51 | 52 | 53 | NON1A_KEYS: 5 54 | INDEX WGT MAGOFF MAGSMEAR SNTYPE 55 | NON1A: 707 0.008316 -0.83 1.12 32 ## Ib SN1999dn 56 | NON1A: 754 0.008316 -0.06 1.12 32 ## Ib SN2012au 57 | NON1A: 728 0.008316 -1.13 1.12 32 ## Ib SN2007Y 58 | NON1A: 713 0.008316 -0.48 1.12 32 ## Ib SN2004gq 59 | NON1A: 703 0.008316 -0.91 1.12 32 ## Ib iPTF13bvn 60 | NON1A: 742 0.008316 0.23 1.12 32 ## Ib SN2009jf 61 | NON1A: 741 0.008316 0.24 1.12 32 ## Ib SN2009iz 62 | NON1A: 715 0.008316 -0.06 1.12 32 ## Ib SN2004gv 63 | NON1A: 727 0.008316 0.89 1.12 32 ## Ib SN2007uy 64 | NON1A: 716 0.008316 0.99 1.12 32 ## Ib SN2005bf 65 | NON1A: 733 0.008316 -0.98 1.12 32 ## Ib SN2008D 66 | NON1A: 721 0.008316 -0.29 1.12 32 ## Ib SN2006ep 67 | NON1A: 718 0.008316 0.57 1.12 32 ## Ib SN2005hg 68 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_V19_SNIc+HostXT.INPUT: -------------------------------------------------------------------------------- 1 | # SN Ic models from Vincenzi et al. 
2019 2 | # 21/Sept/2021 A.Gagliano: SN Ic subset 3 | # 9/Nov/2021 C.Alves: Change DNDZ_ALLSCALE to 0.033 = 22% * 30% * 50% = Table 3 of the Shivvers+2017 paper * 30% of CC SN are SN Ibc * half will be simulated using V19 models and the other half from other models 4 | # 11/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.0375 = 24.7% * 30.4% * 50% = 5 | # 24.7% of Stripped Envelope CC SN are SN Ic 6 | # * 30.4% of CC SN are Stripped Envelope 7 | # * 50% simulated with V19 models and the other half with SNIc-Templates model 8 | # Values from Table 3 of Shivvers+2017 (10.1088/1538-3873/aa54a6) 9 | 10 | GENMODEL: NON1ASED 11 | PATH_NON1ASED: $PLASTICC_ROOT/model_libs_updates/NON1ASED.V19_CC+HostXT 12 | 13 | # Volumetric rate 14 | DNDZ: CC_S15 # Strolger 2015 (HST; 1509.06574) 15 | DNDZ_ALLSCALE: 0.0375 # 11/Nov/2021 C.Alves: 24.7% * 30.4% * 50% ; see comment from same date 16 | 17 | GENRANGE_REDSHIFT: 0.011 1.50 18 | 19 | # GN: we'll just assert that since we're simulating all CC types 20 | # we can just follow the Strolger 2015 rate 21 | # this needs the relative rates in the CC sample 22 | # in Vincenzi 19 to follow the same distribution 23 | # as actual CC relative rate 24 | # note below indicates that's happeing (Shivvers et al. 2017) 25 | # probably good to check this though 26 | GENRANGE_TREST: -50 300 27 | 28 | # Some of the models plateau at end of epoch range, 29 | # so force min mag/day slope for late-time extrapolation. 30 | MINSLOPE_EXTRAPMAG_LATE: 0.01 31 | 32 | # ----------------------------------------- 33 | # TEMPLATES presented in Vincenzi et al. 2019 (arXiv:1908.05228) 34 | # TEMPLATES used here HAS BEEN corrected for host and MW extinction. 35 | # MAGOFF and MAGSMEAR adjusted to match LF from Richardson. et al 2014. 36 | # For more details see Vincenzi et al. 2019 (Table 1, Section 4.1) 37 | # Richardson et al. LFs use a sample of SNe that HAS BEEN corrected 38 | # for host extinction. 39 | # WGT (relative rates) from Shivvars et al. 
2017 40 | # (also reported in Vincenzi et al. 2019, Table 1) 41 | # 42 | # 43 | # II --> 25 (IIP+IIL, no distinction) 44 | # IIn --> 21 45 | # IIb --> 23 46 | # Ib --> 32 47 | # Ic --> 33 # current file 48 | # Ic-BL --> 35 49 | 50 | 51 | NON1A_KEYS: 5 52 | INDEX WGT MAGOFF MAGSMEAR SNTYPE 53 | NON1A: 723 0.010725 -1.46 1.18 33 ## Ic SN2007gr 54 | NON1A: 762 0.010725 -1.09 1.18 33 ## Ic SN2013ge 55 | NON1A: 705 0.010725 -0.61 1.18 33 ## Ic SN1994I 56 | NON1A: 746 0.010725 -4.56 1.18 33 ## Ic SN2011bm 57 | NON1A: 710 0.010725 0.00 1.18 33 ## Ic SN2004aw 58 | NON1A: 712 0.010725 -0.61 1.18 33 ## Ic SN2004fe 59 | NON1A: 714 0.010725 -1.00 1.18 33 ## Ic SN2004gt 60 | -------------------------------------------------------------------------------- /model_config/SIMGEN_INCLUDE_V19_SNIcBL+HostXT.INPUT: -------------------------------------------------------------------------------- 1 | # SN Ic-BL models from Vincenzi et al. 2019 2 | # 21/Sept/2021 A.Gagliano: SN IcBL subset 3 | # 9/Nov/2021 C.Alves: Change DNDZ_ALLSCALE to 0.006 = 4% * 30% * 50% = Table 3 of the Shivvers+2017 paper * 30% of CC SN are SN Ibc * half will be simulated using V19 models and the other half from other models 4 | # 11/Nov/2021 C.Alves: Changed DNDZ_ALLSCALE to 0.011 = 3.7% * 30.4% = 5 | # 3.7% of Stripped Envelope CC SN are SN Ic-BL 6 | # * 30.4% of CC SN are Stripped Envelope 7 | # Values from Table 3 of Shivvers+2017 (10.1088/1538-3873/aa54a6) 8 | 9 | GENMODEL: NON1ASED 10 | PATH_NON1ASED: $PLASTICC_ROOT/model_libs_updates/NON1ASED.V19_CC+HostXT 11 | 12 | # Volumetric rate 13 | DNDZ: CC_S15 # Strolger 2015 (HST; 1509.06574) 14 | DNDZ_ALLSCALE: 0.011 # 11/Nov/2021 C.Alves: 3.7% * 30.4% ; see comment from same date 15 | 16 | GENRANGE_REDSHIFT: 0.011 1.50 17 | 18 | # GN: we'll just assert that since we're simulating all CC types 19 | # we can just follow the Strolger 2015 rate 20 | # this needs the relative rates in the CC sample 21 | # in Vincenzi 19 to follow the same distribution 22 | # as actual
CC relative rate 23 | # note below indicates that's happeing (Shivvers et al. 2017) 24 | # probably good to check this though 25 | GENRANGE_TREST: -50 300 26 | 27 | # Some of the models plateau at end of epoch range, 28 | # so force min mag/day slope for late-time extrapolation. 29 | MINSLOPE_EXTRAPMAG_LATE: 0.01 30 | 31 | # ----------------------------------------- 32 | # TEMPLATES presented in Vincenzi et al. 2019 (arXiv:1908.05228) 33 | # TEMPLATES used here HAS BEEN corrected for host and MW extinction. 34 | # MAGOFF and MAGSMEAR adjusted to match LF from Richardson. et al 2014. 35 | # For more details see Vincenzi et al. 2019 (Table 1, Section 4.1) 36 | # Richardson et al. LFs use a sample of SNe that HAS BEEN corrected 37 | # for host extinction. 38 | # WGT (relative rates) from Shivvars et al. 2017 39 | # (also reported in Vincenzi et al. 2019, Table 1) 40 | # 41 | # 42 | # II --> 25 (IIP+IIL, no distinction) 43 | # IIn --> 21 44 | # IIb --> 23 45 | # Ib --> 32 46 | # Ic --> 33 47 | # Ic-BL --> 35 # current file 48 | 49 | 50 | NON1A_KEYS: 5 51 | INDEX WGT MAGOFF MAGSMEAR SNTYPE 52 | NON1A: 709 0.001835 -0.59 1.18 35 ## Ic-BL SN2002ap 53 | NON1A: 753 0.001835 0.00 1.18 35 ## Ic-BL SN2012ap 54 | NON1A: 726 0.001835 0.99 1.18 35 ## Ic-BL SN2007ru 55 | NON1A: 720 0.001835 1.67 1.18 35 ## Ic-BL SN2006aj 56 | NON1A: 706 0.001835 1.27 1.18 35 ## Ic-BL SN1998bw 57 | NON1A: 736 0.001835 1.06 1.18 35 ## Ic-BL SN2009bb 58 | -------------------------------------------------------------------------------- /stream-to-zads/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rknop/devuan-chimaera-rknop 2 | MAINTAINER Rob Knop 3 | 4 | SHELL ["/bin/bash", "-c"] 5 | 6 | RUN mkdir /home/stream 7 | ENV HOME /home/stream 8 | 9 | ENV DEBIAN_FRONTEND=noninteractive 10 | RUN apt-get update && apt-get upgrade -y \ 11 | && apt-get install -y less python3 python3-pip \ 12 | && apt-get clean \ 13 | && rm -rf /var/lib/apt/lists/* 14 | 15 
| RUN pip3 install \ 16 | python-dateutil \ 17 | requests \ 18 | fastavro \ 19 | confluent-kafka \ 20 | && rm -rf /home/stream/.cache/pip 21 | 22 | RUN mkdir /nightcache 23 | RUN mkdir /alerts 24 | RUN mkdir /elasticc 25 | WORKDIR /home/stream 26 | ADD stream-to-zads.py /home/stream/stream-to-zads.py 27 | 28 | ENTRYPOINT [ "python3", "/home/stream/stream-to-zads.py" ] 29 | -------------------------------------------------------------------------------- /stream-to-zads/README.md: -------------------------------------------------------------------------------- 1 | Maintainer: Rob Knop (raknop@lbl.gov) 2 | 3 | The Dockerfile builds an image that should stream ELAsTiCC alerts 4 | (original alerts with embedded diaObject and diaSource). 5 | 6 | It's set up on NERSC Spin (production m1727, namespace elasticc-alerts, 7 | workload elasticc-alert-streamer). It's configured with four 8 | environment variables: 9 | 10 | * `ELASTICC_ALERT_SERVER` -- the server to stream to 11 | * `ELASTICC_ALERT_TOPIC` -- the topic to stream to 12 | * `ELASTICC_COMPRESSION_FACTOR` -- number of simulated days to stream each day 13 | * `ELASTICC_START_TIME` -- the date that the campaign starts. (The code 14 | will look at the current time and decide which range of simulated days 15 | to stream based on this.) 16 | 17 | It needs to mount three external volumes: 18 | 19 | * `/alerts` -- the directory to find the alerts. Has subdirectories 20 | (currently, needs to be updated for real elasticc) 21 | `ELASTICC_ALERTS_TEST_EXTRAGALACTIC-SNIa/ALERTS`, 22 | `ELASTICC_ALERTS_TEST_EXTRAGALACTIC-nonIa/ALERTS`, and 23 | `ELASTICC_ALERTS_TEST_GALACTIC`. 24 | * `/elasticc` -- A checkout of the elasticc github archive. Needs to 25 | have subdirectory `alert_schema` with the alert schema. 26 | * `/nightcache` -- Can be a persistent volume instead of a bind mount. 27 | The code dumps a list of simulated nights it has streamed to a file in 28 | this directory, so that it won't redo if the code is restarted.
class ElasticcAlertConsumer:
    """Consume ELAsTiCC alerts from a kafka stream and tabulate their ids.

    Wraps a MsgConsumer.  For every alert seen, remembers the triplet
    (alertId, diaSourceId, diaObjectId); every so often the accumulated
    triplets are folded into the pandas DataFrame self.alerttab.
    """

    def __init__( self, server="brahms.lbl.gov:9092", groupid="rob_elasticc-test-2",
                  topic="elasticc-test-only-1", schema=None, timeout=5, nmsgs=100,
                  polltime=datetime.timedelta(hours=1), reset=False, logger=_logger ):
        self.logger = logger

        # Default schema lives in the user's elasticc checkout.
        if schema is None:
            schema = ( pathlib.Path( os.getenv("HOME") ) /
                       "desc/elasticc/alert_schema/elasticc.v0_9.alert.avsc" )

        self.consumer = MsgConsumer( server, groupid, topic, schema, consume_nmsgs=nmsgs,
                                     consume_timeout=timeout, logger=self.logger )
        self.consumer.print_topics()

        # Optionally rewind every subscribed topic to its beginning.
        if reset:
            for topic in self.consumer.topics:
                self.logger.debug( f'Resetting {topic}' )
                self.consumer.reset_to_start( topic )

        self.polltime = polltime

        # Running totals, per-batch accumulators, and the merged table.
        self.totnmsgs = 0
        self.lastmerge = 0
        self.alerttab = None
        self.alertids, self.alertsources, self.alertobjects = [], [], []

    def handle_messages( self, msgs, merge_every=400 ):
        """Decode a batch of kafka messages and accumulate their id triplets.

        Calls merge() whenever at least merge_every messages have arrived
        since the last merge.
        """
        schema = self.consumer.schema
        for msg in msgs:
            alert = fastavro.schemaless_reader( io.BytesIO( msg.value() ), schema )
            self.totnmsgs += 1
            self.alertids.append( alert['alertId'] )
            self.alertsources.append( alert['diaSource']['diaSourceId'] )
            self.alertobjects.append( alert['diaObject']['diaObjectId'] )

        if self.totnmsgs >= self.lastmerge + merge_every:
            self.logger.info( f'Ingested {self.totnmsgs} so far.' )
            self.merge()
            self.lastmerge = self.totnmsgs

    def merge( self ):
        """Fold the accumulated id lists into self.alerttab and clear them."""
        if not self.alertids:
            return

        newtab = pandas.DataFrame( { 'alertId': self.alertids,
                                     'diaSourceId': self.alertsources,
                                     'diaObjectId': self.alertobjects } )
        self.alerttab = newtab if self.alerttab is None else pandas.concat( [ self.alerttab, newtab ] )
        self.alertids, self.alertsources, self.alertobjects = [], [], []

    def __call__( self ):
        """Poll for self.polltime, then do a final merge and shut down."""
        batchhandler = lambda msgs: self.handle_messages( msgs, merge_every=100 )
        self.consumer.poll_loop( handler=batchhandler, stopafter=self.polltime )
        self.merge()
        self.consumer.close()
class Classifier:
    """Base class for fake-broker classifiers.

    Subclasses implement determine_types_and_probabilities(); classify_alerts()
    then decodes avro alert messages, classifies each one, and produces a
    brokerClassification message to the configured kafka topic.
    """

    def __init__( self, brokername, brokerversion, classifiername, classifierparams,
                  kafkaserver="brahms.lbl.gov:9092", topic="somebody-didnt-replace-a-default",
                  alertschema=None, brokermessageschema=None ):
        """Store broker/classifier identity, kafka destination, and parsed avro schemas.

        alertschema / brokermessageschema -- schemas as returned by
          fastavro.schema.load_schema (see main()).
        """
        self.brokername = brokername
        self.brokerversion = brokerversion
        self.classifiername = classifiername
        self.classifierparams = classifierparams
        self.kafkaserver = kafkaserver
        self.topic = topic
        self.alertschema = alertschema
        self.brokermessageschema = brokermessageschema

        # Bookkeeping for periodic progress logging in classify_alerts
        self.nclassified = 0
        self.logevery = 10
        self.nextlog = self.logevery

    def determine_types_and_probabilities( self, alert ):
        """Given an alert (a dict in the format of the elasticc alert schema), return a list of
        two-element tuples that is (classId, probability)."""
        # NotImplementedError is the idiomatic exception for an abstract
        # method; it subclasses RuntimeError, so any caller catching the old
        # exception type still works.
        raise NotImplementedError( "Need to implement this function in a subclass!" )

    def classify_alerts( self, messages ):
        """Classify a batch of kafka messages; produce one broker message per alert.

        messages -- a list of confluent_kafka messages whose payloads are
          schemaless-avro elasticc alerts.
        """
        producer = confluent_kafka.Producer( { 'bootstrap.servers': self.kafkaserver,
                                               'batch.size': 131072,
                                               'linger.ms': 50 } )
        for msg in messages:
            alert = fastavro.schemaless_reader( io.BytesIO(msg.value()), self.alertschema )
            probs = self.determine_types_and_probabilities( alert )
            brokermsg = { "alertId": alert["alertId"],
                          "diaSourceId": alert["diaSource"]["diaSourceId"],
                          # msg.timestamp() is (timestamp_type, value)
                          "elasticcPublishTimestamp": msg.timestamp()[1],
                          # NOTE(review): naive local time; schema presumably
                          # wants UTC -- confirm before changing.
                          "brokerIngestTimestamp": datetime.datetime.now(),
                          "brokerName": self.brokername,
                          "brokerVersion": self.brokerversion,
                          "classifierName": self.classifiername,
                          "classifierParams": self.classifierparams,
                          "classifications": []
                          }
            for prob in probs:
                brokermsg['classifications'].append( { "classId": prob[0],
                                                       "probability": prob[1] } )
            outdata = io.BytesIO()
            fastavro.write.schemaless_writer( outdata, self.brokermessageschema, brokermsg )
            producer.produce( self.topic, outdata.getvalue() )
        producer.flush()

        # Log progress roughly every self.logevery alerts
        self.nclassified += len(messages)
        if ( self.nclassified > self.nextlog ):
            _logger.info( f"{self.classifiername} has classified {self.nclassified} alerts" )
            self.nextlog = self.logevery * ( math.floor( self.nclassified / self.logevery ) + 1 )

# ======================================================================

class NugentClassifier(Classifier):
    """Fake classifier that declares everything a SNIa (classId 111) with certainty."""

    def __init__( self, *args, **kwargs ):
        super().__init__( "FakeBroker", "v1.0", "NugentClassifier", "100%", **kwargs )

    def determine_types_and_probabilities( self, alert ):
        """Everything is a type Ia supernova, probability 1."""
        return [ ( 111, 1.0 ) ]

# ======================================================================

class RandomSNType(Classifier):
    """Fake classifier that assigns random probabilities to a fixed set of SN types."""

    def __init__( self, *args, **kwargs ):
        super().__init__( "FakeBroker", "v1.0", "RandomSNType", "Perfect", **kwargs )
        random.seed()

    def determine_types_and_probabilities( self, alert ):
        """Return random probabilities over six SN classes plus a remainder class.

        Each class gets a random fraction of the probability not yet assigned,
        so the returned probabilities are non-negative and sum to 1.
        """
        totprob = 0.
        types = [ 111, 112, 113, 114, 115, 135 ]
        retval = []
        for sntype in types:
            thisprob = random.random() * ( 1 - totprob )
            totprob += thisprob
            retval.append( ( sntype, thisprob ) )
        # SLSN seems to be the default type....
        retval.append( ( 131, 1-totprob ) )
        return retval
def main():
    """Run the fake broker: consume elasticc alerts, emit classifications.

    Every consumed batch of alerts is run through every configured fake
    classifier, each of which produces broker messages to the dest topic.
    """
    parser = argparse.ArgumentParser( description="Pretend to be an elasticc broker",
                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter )
    parser.add_argument( "--source", default="brahms.lbl.gov:9092",
                         help="Server to pull ELAsTiCC alerts from" )
    parser.add_argument( "-t", "--source-topic", required=True, help="Topic on source server" )
    parser.add_argument( "-g", "--group-id", default="rknop-test",
                         help="Group ID to use on source server" )
    parser.add_argument( "-r", "--reset", action='store_true', default=False,
                         help="Reset to beginning of source stream?" )
    parser.add_argument( "--dest", default="brahms.lbl.gov:9092",
                         help="Server to push broker message alerts to" )
    parser.add_argument( "-u", "--dest-topic", required=True, help="Topic on dest server" )
    parser.add_argument( "-s", "--alert-schema", default=f"{_rundir.parent}/alert_schema/elasticc.v0_9_1.alert.avsc",
                         help="File with elasticc alert schema" )
    parser.add_argument( "-b", "--brokermessage-schema",
                         default=f"{_rundir.parent}/alert_schema/elasticc.v0_9_1.brokerClassification.avsc",
                         help="File with broker message alert schema" )
    args = parser.parse_args()

    # Parse both avro schemas once; all classifiers share them.
    alertschema = fastavro.schema.load_schema( args.alert_schema )
    brokermsgschema = fastavro.schema.load_schema( args.brokermessage_schema )
    cferkwargs = dict( kafkaserver=args.dest, topic=args.dest_topic,
                       alertschema=alertschema, brokermessageschema=brokermsgschema )
    classifiers = [ NugentClassifier( **cferkwargs ), RandomSNType( **cferkwargs ) ]

    consumer = MsgConsumer( args.source, args.group_id, args.source_topic, args.alert_schema,
                            logger=_logger, consume_nmsgs=100 )
    if args.reset:
        consumer.reset_to_start( args.source_topic )

    def handle_message_batch( msgs ):
        # Every classifier sees every batch.
        for cfer in classifiers:
            cfer.classify_alerts( msgs )

    consumer.poll_loop( handler=handle_message_batch )
class DateTimeEncoder( json.JSONEncoder ):
    """JSON encoder that serializes datetime.datetime objects as ISO8601 strings.

    Anything that is not a datetime falls through to the base class, which
    raises the standard TypeError for unserializable objects.
    """

    def default( self, obj ):
        if isinstance( obj, datetime.datetime ):
            # isoformat() already returns a str; no str() wrapper needed.
            return obj.isoformat()
        # (Answering the original "Should I use super() here?" -- yes:
        # delegate so the base class raises its standard TypeError.)
        return super().default( obj )
    def close( self ):
        """Close the underlying kafka consumer.  Idempotent."""
        if self.consumer is not None:
            self.consumer.close()
            self.consumer = None

    def __del__( self ):
        # Also registered with atexit in __init__; close() is idempotent,
        # so being called twice is harmless.
        self.close()

    def subscribe( self, topics ):
        """Subscribe to a list of topic names.

        _sub_callback fires when partitions are actually assigned; with no
        topics, just logs a warning.
        """
        if topics is not None and len(topics) > 0:
            self.consumer.subscribe( topics, on_assign=self._sub_callback )
        else:
            self.logger.warning( f'No topics given, not subscribing.' )

    def reset_to_start( self, topic ):
        """Seek every partition of topic back to its low watermark and commit.

        Lets this group id re-consume the topic from the beginning.
        """
        self.logger.info( f'Resetting partitions for topic {topic}\n' )
        # Poll once to make sure things are connected
        msg = self.consume_one_message( timeout=4, handler=_do_nothing )
        self.logger.debug( "got throwaway message" if msg is not None else "did't get throwaway message" )
        # Now do the reset
        partitions = self.consumer.list_topics( topic ).topics[topic].partitions
        # partitions is a kmap
        partlist = []
        # for partid, partinfo in partitions.items():
        # self.logger.info( f'...resetting {partid} ( {partinfo} )' )
        # # Is this next one redundant? partinfo should already have the right stuff!
        # curpart = confluent_kafka.TopicPartition( topic, partinfo.id )
        for i in range(len(partitions)):
            self.logger.info( f'...resetting partition {i}' )
            curpart = confluent_kafka.TopicPartition( topic, i )
            lowmark, highmark = self.consumer.get_watermark_offsets( curpart )
            self.logger.debug( f'Partition {curpart.topic} has id {curpart.partition} '
                               f'and current offset {curpart.offset}; lowmark={lowmark} '
                               f'and highmark={highmark}' )
            curpart.offset = lowmark
            # curpart.offset = confluent_kafka.OFFSET_BEGINNING
            # Only seek when the partition is non-empty -- presumably seek on
            # an empty partition fails or is pointless; TODO confirm.
            if lowmark < highmark:
                self.consumer.seek( curpart )
            partlist.append( curpart )
        self.logger.info( f'Committing partition offsets.' )
        self.consumer.commit( offsets=partlist, asynchronous=False )

    def print_topics( self ):
        """Log a sorted list of all topics visible on the server."""
        cluster_meta = self.consumer.list_topics()
        topics = [ n for n in cluster_meta.topics ]
        topics.sort()
        topicstxt = '\n '.join(topics)
        self.logger.info( f"\nTopics:\n {topicstxt}" )

    def _get_positions( self, partitions ):
        # Current consume offsets for the given TopicPartition list.
        return self.consumer.position( partitions )

    def _dump_assignments( self, ofp, partitions ):
        """Write a topic/partition/offset table to the file-like object ofp."""
        ofp.write( f'{"Topic":<32s} {"partition":>9s} {"offset":>12s}\n' )
        for par in partitions:
            ofp.write( f"{par.topic:32s} {par.partition:9d} {par.offset:12d}\n" )
        ofp.write( "\n" )

    def print_assignments( self ):
        """Log the partitions currently assigned to this consumer."""
        asmgt = self._get_positions( self.consumer.assignment() )
        ofp = io.StringIO()
        ofp.write( "Current partition assignments\n" )
        self._dump_assignments( ofp, asmgt )
        self.logger.info( ofp.getvalue() )
        ofp.close()

    def _sub_callback( self, consumer, partitions ):
        # on_assign callback from subscribe(): record that we're live and
        # log what we were handed.
        self.subscribed = True
        ofp = io.StringIO()
        ofp.write( "Consumer subscribed.  Assigned partitions:\n" )
        self._dump_assignments( ofp, self._get_positions( partitions ) )
        self.logger.info( ofp.getvalue() )
        ofp.close()

    def poll_loop( self, handler=None, timeout=None, stopafter=datetime.timedelta(hours=1) ):
        """Calls handler with batches of messages.

        handler -- callable taking a list of messages (default:
          default_handle_message_batch)
        timeout -- per-consume timeout in seconds (default: self.consume_timeout)
        stopafter -- timedelta; loop exits once this much wall-clock time has
          passed (checked after each consume, so it can overrun slightly)
        """
        if timeout is None:
            timeout = self.consume_timeout
        t0 = datetime.datetime.now()
        done = False
        while not done:
            self.logger.info( f"Trying to consume {self.consume_nmsgs} messages "
                              f"with timeout {timeout} sec...\n" )
            msgs = self.consumer.consume( self.consume_nmsgs, timeout=timeout )
            if len(msgs) == 0:
                self.logger.info( f"No messages, sleeping {self.nomsg_sleeptime} sec" )
                time.sleep( self.nomsg_sleeptime )
            else:
                if handler is not None:
                    handler( msgs )
                else:
                    self.default_handle_message_batch( msgs )
            if ( datetime.datetime.now() - t0 ) >= stopafter:
                self.logger.info( f"Ending poll loop." )
                done = True

    def consume_one_message( self, timeout=None, handler=None ):
        """Both calls handler and returns a batch of 1 message.

        Returns None if no message arrived within timeout.
        """
        if timeout is None:
            timeout = self.consume_timeout
        self.logger.info( f"Trying to consume one message with timeout {timeout} sec...\n" )
        # msgs = self.consumer.consume( 1, timeout=self.consume_timeout )
        msg = self.consumer.poll( timeout )
        if msg is not None:
            if handler is not None:
                handler( [ msg ] )
            else:
                self.default_handle_message_batch( [ msg ] )
        return msg

    def default_handle_message_batch( self, msgs ):
        """Debug handler: log metadata and the decoded avro payload of each message."""
        self.logger.info( f'Handling {len(msgs)} messages' )
        timestamp_name = { confluent_kafka.TIMESTAMP_NOT_AVAILABLE: "TIMESTAMP_NOT_AVAILABLE",
                           confluent_kafka.TIMESTAMP_CREATE_TIME: "TIMESTAMP_CREATE_TIME",
                           confluent_kafka.TIMESTAMP_LOG_APPEND_TIME: "TIMESTAMP_LOG_APPEND_TIME" }
        for msg in msgs:
            ofp = io.StringIO()
            ofp.write( f"{msg.topic()} {msg.partition()} {msg.offset()} {msg.key()}\n" )
            if msg.headers() is not None:
                ofp.write( "HEADERS:\n" )
                for key, value in msg.headers():
                    ofp.write( f" {key} : {value}\n" )
            timestamp = msg.timestamp()
            ofp.write( f"Timestamp: {timestamp[1]} (type {timestamp_name[timestamp[0]]})\n" )
            ofp.write( "MESSAGE PAYLOAD:\n" )
            alert = fastavro.schemaless_reader( io.BytesIO(msg.value()), self.schema )
            # # They are datetime -- Convert to numbers
            # alert['elasticcPublishTimestamp'] = alert['elasticcPublishTimestamp'].timestamp()
            # alert['brokerIngestTimestamp'] = alert['brokerIngestTimestamp'].timestamp()
            ofp.write( json.dumps( alert, indent=4, sort_keys=True, cls=DateTimeEncoder ) )
            ofp.write( "\n" )
            self.logger.info( ofp.getvalue() )
            ofp.close()
        self.tot_handled += len(msgs)
        self.logger.info( f'Have handled {self.tot_handled} messages so far' )
        self.print_assignments()
def main():
    """Command-line driver: subscribe to an elasticc alert topic and dump alerts."""
    parser = argparse.ArgumentParser( description="Pull alerts from an elasticc alerts server",
                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter )
    parser.add_argument( "schema", help="File with schema to poll" )
    # (public.alerts.ztf.uw.edu:9092 is the other server this has been used with)
    parser.add_argument( "-s", "--server", default="brahms.lbl.gov:9092",
                         help="Kafka server to read from" )
    parser.add_argument( "--list-topics", action='store_true', default=False, help="Just list topics" )
    parser.add_argument( "-t", "--topic", default=None, help="Topic to poll" )
    parser.add_argument( "-r", "--reset-to-start", default=False, action='store_true',
                         help="Reset topic to start" )
    parser.add_argument( "-b", "--batch-size", type=int, default=100, help="Batch size" )
    parser.add_argument( "-d", "--duration", type=float, default=1., help="Duration in minutes to keep polling " )
    parser.add_argument( "-g", "--groupid", default="rknop-test", help="group.id" )
    args = parser.parse_args()

    topics = None if args.topic is None else [ args.topic ]

    consumer = MsgConsumer( args.server, args.groupid, topics, args.schema,
                            consume_nmsgs=args.batch_size, logger=_logger )

    # Guard clauses: just listing topics, or nothing to poll.
    if args.list_topics:
        consumer.print_topics()
        return
    if topics is None:
        _logger.warning( "No topics, exiting." )
        return

    if args.reset_to_start:
        for onetopic in topics:
            consumer.reset_to_start( onetopic )

    consumer.poll_loop( stopafter=datetime.timedelta( minutes=args.duration ) )
    def __init__( self, alertdirs=None, schemafile=None, kafka_broker='brahms.lbl.gov:9092',
                  kafka_topic='elasticc-test-only-1', compression_factor=10,
                  campaign_start=datetime.datetime(2022,7,6,7,0,0), nights_done_cache="/nightcache/nightsdone.lis",
                  simnight0=60274, simnight1=61378,
                  tom_url='https://desc-tom.lbl.gov', tom_username='rknop', tom_passwdfile='/secrets/tom_passwd',
                  dry_run=False, logger=_logger ):
        """Set up the streamer.

        alertdirs -- directories holding per-night NITE<n>.tar.gz alert tarballs
        schemafile -- avro schema for the alerts
        compression_factor -- number of simulated nights streamed per real day
        campaign_start -- wall-clock start of the campaign (made UTC-aware below)
        nights_done_cache -- file listing simulated nights already streamed
        simnight0 / simnight1 -- first and last simulated night (MJD-like ints)
        tom_url / tom_username / tom_passwdfile -- TOM to notify of sent alerts
        dry_run -- if True, read and log but never produce to kafka
        """
        self.logger = logger

        if alertdirs is None:
            self.alertdirs = [ '/alerts/ELASTICC_ALERTS_SUBSET_EXTRAGALACTIC-SNIa/ALERTS',
                               '/alerts/ELASTICC_ALERTS_SUBSET_EXTRAGALACTIC-nonIa/ALERTS',
                               '/alerts/ELASTICC_ALERTS_SUBSET_GALACTIC/ALERTS' ]
        else:
            self.alertdirs = alertdirs

        if schemafile is None:
            self.schemafile = '/elasticc/alert_schema/elasticc.v0_9.alert.avsc'
        else:
            self.schemafile = schemafile
        self.logger.info( f"Reading schema from {self.schemafile}" )
        self.schema = fastavro.schema.load_schema( self.schemafile )

        self.kafka_broker = kafka_broker
        self.kafka_topic = kafka_topic

        self.logger.info( f"Kafka broker={kafka_broker} ; topic={kafka_topic}" )

        self.compression_factor = compression_factor
        # Kafka producer tuning (bytes per batch, linger in ms)
        self.kafka_batch_size_bytes = 131072
        self.kafka_linger_ms = 50

        self.dry_run = dry_run
        self.t0 = campaign_start
        # Make sure this is a timezone-aware datetime
        if self.t0.tzinfo is None or self.t0.tzinfo.utcoffset(self.t0) is None:
            self.t0 = self.t0.replace( tzinfo=datetime.timezone.utc )

        self.totaln0 = simnight0
        self.totaln1 = simnight1

        self.logger.info( f"AlertStreamer: t0 = {self.t0.isoformat()} " )
        self.logger.info( f"AlertStreamer: simnight0={self.totaln0}, simnight1={self.totaln1}" )
        self.logger.info( f"AlertStreamer: compression factor = {self.compression_factor} ; dry run = {self.dry_run}" )

        # Load the set of nights already streamed so a restart doesn't redo them.
        self.nights_done_cache = pathlib.Path( nights_done_cache )
        if self.nights_done_cache.is_file():
            self.logger.info( f"Reading nights done from {self.nights_done_cache}" )
            with open( self.nights_done_cache ) as ifp:
                self.nights_done = [ int(n) for n in ifp.readlines() ]
        else:
            self.logger.warning( f"No nights done cache {self.nights_done_cache}." )
            self.nights_done = []

        self.tom_url = tom_url
        self.tom_username = tom_username
        self.tom_passwdfile = tom_passwdfile
        # The password is the first line of the secrets file.
        with open( tom_passwdfile ) as ifp:
            self.tom_passwd = ifp.readline().strip()

    def log_into_tom( self ):
        """Log into the TOM with Django session auth; return the requests session.

        Raises RuntimeError if the login appears to have failed.
        """
        self.logger.debug( f"Logging into the TOM at {self.tom_url}" )
        rqs = requests.session()
        # First GET acquires the csrf cookie needed by the login POST.
        rqs.get( f'{self.tom_url}/accounts/login/' )
        res = rqs.post( f'{self.tom_url}/accounts/login/',
                        data={ "username": self.tom_username,
                               "password": self.tom_passwd,
                               "csrfmiddlewaretoken": rqs.cookies['csrftoken'] } )
        if res.status_code != 200:
            raise RuntimeError( f"Failed to log in; http status: {res.status_code}" )
        if 'Please enter a correct' in res.text:
            raise RuntimeError( "Failed to log in. I think. Put in a debug break and look at res.text" )
        rqs.headers.update( { 'X-CSRFToken': rqs.cookies['csrftoken'] } )
        self.logger.debug( f"TOM login successful (as far as I can tell)" )
        return rqs

    def notify_tom( self, rqs, ids ):
        """Tell the TOM which alert ids were streamed; returns a (possibly new) session.

        Retries the POST up to 5 times (inner countdown); if that fails, logs
        back in and retries the whole thing up to 5 more times (outer
        countdown).  Setting a countdown to -1 is the "succeeded, stop
        looping" sentinel.  Raises the last exception if all retries fail.
        """
        if len(ids) == 0:
            return

        self.logger.debug( f"Notifying TOM of {len(ids)} alerts streamed." )
        self.logger.debug( f"type(rqs)={type(rqs)}, rqs={rqs}" )

        outercountdown = 5
        while outercountdown >= 0:
            countdown = 5
            try:
                rqs = self.log_into_tom()
                while countdown >= 0:
                    try:
                        res = rqs.post( f'{self.tom_url}/elasticc/markalertsent', json=ids )
                        if res.status_code != 200:
                            raise RuntimeError( f"Got status_code={res.status_code} from TOM" )
                        data = json.loads( res.text )
                        if ( not 'status' in data ) or ( data['status'] != 'ok' ):
                            strio = io.StringIO( "" )
                            if data['status'] == 'error':
                                strio.write( "Error return from TOM:\n" )
                                strio.write( f" message: {data['message']}\n" )
                                strio.write( f" exception: {data['exception']}\n" )
                                strio.write( f" traceback:\n{data['traceback']}\n" )
                            else:
                                strio.write( f"Unexpected return from TOM: {data}" )
                            raise RuntimeError( strio.getvalue() )
                        # Success: sentinel ends the inner loop.
                        countdown = -1
                    except Exception as e:
                        self.logger.error( f"Error notifying TOM: {str(e)}" )
                        if countdown <= 0:
                            self.logger.error( "Failed too many times, bailing." )
                            # NOTE: this re-raise is caught by the outer
                            # except below, triggering a fresh login + retry.
                            raise e
                        self.logger.error( "Retrying in 1 second..." )
                        time.sleep( 1 )
                        countdown -= 1
                # Inner loop finished without a permanent failure.
                outercountdown = -1
            except Exception as e:
                if outercountdown <= 0:
                    self.logger.error( "Outer tom notification failed too many times, bailing." )
                    raise e
                time.sleep( 1 )
                self.logger.error( "Going to try logging back into the tom." )
                rqs = self.log_into_tom()
                outercountdown -= 1
        return rqs
) 149 | rqs = self.log_into_tom() 150 | outercountdown -= 1 151 | return rqs 152 | 153 | def stream_todays_batch( self, alert_delay=0, diffmjd_delay=0.05, diffnight_delay=5 ): 154 | now = datetime.datetime.now( datetime.timezone.utc ) 155 | curday = ( now - self.t0 ).days 156 | n0 = self.totaln0 + curday * self.compression_factor 157 | n1 = n0 + self.compression_factor - 1 158 | 159 | if ( n0 < self.totaln0 ): n0 = self.totaln0 160 | if ( n1 > self.totaln1 ): n1 = self.totaln1 161 | 162 | if ( n0 > self.totaln1 ) or ( n1 < self.totaln0 ): 163 | self.logger.error( f"Today's range {n0}..{n1} is outside the " 164 | f"overall range {self.totaln0}..{self.totaln1}\n" ) 165 | return False 166 | 167 | if self.dry_run: 168 | self.logger.info( f"DRY RUN: reading and planning to stream alerts from nights {n0} through {n1}." ) 169 | self.logger.info( f"{'Fake-S' if self.dry_run else 'S'}treaming alerts from nights {n0} through {n1}." ) 170 | self.logger.info( f"Will delay {alert_delay}s between alerts, {diffmjd_delay}s between " 171 | f"exposures, and {diffnight_delay}s between nights." ) 172 | 173 | nameparse = re.compile( 'alert_mjd([0-9]+\.[0-9]+)_obj([0-9]+)_src([0-9]+).avro.gz' ) 174 | 175 | if not self.dry_run: 176 | producer = confluent_kafka.Producer( { 'bootstrap.servers': self.kafka_broker, 177 | 'batch.size': self.kafka_batch_size_bytes, 178 | 'linger.ms': self.kafka_linger_ms 179 | } ) 180 | rqs = self.log_into_tom() 181 | 182 | nstreamed = 0 183 | bytesstreamed = 0 184 | idsproduced = [] 185 | 186 | # Do it one "night" at a time 187 | for n in range( n0, n1+1 ): 188 | nightnstreamed = 0 189 | nightbytesstreamed = 0 190 | 191 | if n in self.nights_done: 192 | self.logger.warning( f"Night {n} already done, not doing it again." ) 193 | continue 194 | self.logger.info( f"Doing night {n}" ) 195 | 196 | # Build the full list of alerts to stream 197 | # I'm assuming that no filename will be repeated in different 198 | # tar files. 
Since the source ID is embedded in the filename, 199 | # this should be a good assumption. 200 | alertfilenames = [] 201 | alerts = {} 202 | for adir in self.alertdirs: 203 | self.logger.info( f"Looking in {adir}" ) 204 | tarpath = pathlib.Path( adir ) / f"NITE{n}.tar.gz" 205 | if not tarpath.is_file(): 206 | self.logger.error( f"{str(tarpath)} is not a regular file! Moving on." ) 207 | continue 208 | self.logger.info( f"Reading {tarpath.name}..." ) 209 | with tarfile.open( tarpath, "r" ) as tar: 210 | members = [ m.name for m in tar.getmembers() if nameparse.search(m.name) ] 211 | for alertfile in members: 212 | alertfilenames.append( alertfile ) 213 | if alertfile in alerts: 214 | self.logger.warning( f"alert['{alertfilename}'] exists, and shouldn't!" ) 215 | else: 216 | alerts[ alertfile ] = [] 217 | fstream = gzip.open( tar.extractfile( alertfile ), 'rb' ) 218 | rawalert = fastavro.schemaless_reader( fstream, self.schema ) 219 | alerts[ alertfile ].append( rawalert ) 220 | # reader = fastavro.reader( fstream ) 221 | # for rawalert in reader: 222 | # alerts[ alertfile ].append( rawalert ) 223 | fstream.close() 224 | self.logger.info( f"...done reading {tarpath.name}; up to {len(alertfilenames)} alert files." ) 225 | 226 | # Sort by mjd (which is the same as sorting by filename) 227 | alertfilenames.sort() 228 | 229 | self.logger.info( f"Streaming {len(alertfilenames)} alerts for night {n}" ) 230 | 231 | lastmjd = '' 232 | idsproduced = [] 233 | for alertfile in alertfilenames: 234 | match = nameparse.search( alertfile ) 235 | if not match: 236 | self.logger.error( f"Failed to parse {alertfile}; this should not happen!" 
) 237 | continue 238 | mjd = match.group(1) 239 | if mjd != lastmjd: 240 | if not self.dry_run: 241 | producer.flush() 242 | rqs = self.notify_tom( rqs, idsproduced ) 243 | idsproduced = [] 244 | self.logger.debug( f'Starting exposure mjd {mjd}; ' 245 | f'have {"fake-" if self.dry_run else " "}streamed ' 246 | f'{nightnstreamed} for night {n}; ' 247 | f'sleeping {diffmjd_delay} sec' ) 248 | if diffmjd_delay > 0: 249 | time.sleep( diffmjd_delay ) 250 | lastmjd = mjd 251 | for alert in alerts[ alertfile ]: 252 | if ( nightnstreamed % 500 ) == 0: 253 | self.logger.info( f'Have {"fake-" if self.dry_run else ""}streamed ' 254 | f'{nightnstreamed} for night {n}.' ) 255 | alertbytes = io.BytesIO() 256 | fastavro.write.schemaless_writer( alertbytes, self.schema, alert ) 257 | if not self.dry_run: 258 | producer.produce( self.kafka_topic, alertbytes.getvalue() ) 259 | idsproduced.append( alert['alertId'] ) 260 | nightbytesstreamed += len( alertbytes.getvalue() ) 261 | nightnstreamed += 1 262 | if alert_delay > 0: 263 | time.sleep( alert_delay ) 264 | 265 | if not self.dry_run: 266 | producer.flush() 267 | rqs = self.notify_tom( rqs, idsproduced ) 268 | idsproduced = [] 269 | self.logger.info( f'{"Fake-s" if self.dry_run else "S"}treamed {nightnstreamed} total alerts for night {n} ' 270 | f'({nightbytesstreamed/1024/1024:.3f} MiB).' ) 271 | nstreamed += nightnstreamed 272 | bytesstreamed += nightbytesstreamed 273 | self.nights_done.append( n ) 274 | with open( self.nights_done_cache, "a" ) as ofp: 275 | ofp.write( f"{n}\n" ) 276 | time.sleep( diffnight_delay ) 277 | 278 | # This next flush is gratuitous, I think 279 | if not self.dry_run: 280 | producer.flush() 281 | rqs = self.notify_tom( rqs, idsproduced ) 282 | idsproduced = [] 283 | self.logger.info( f"Done with today's batch. {'Fake-s' if self.dry_run else 'S'}treamed {nstreamed} alerts " 284 | f"({bytesstreamed/1024/1024:.3f} MiB)." 
def main():
    """Build an AlertStreamer from environment variables and stream forever.

    All configuration comes from the environment: ELASTICC_COMPRESSION_FACTOR,
    ELASTICC_START_TIME, ELASTICC_DRY_RUN (presence = true),
    ELASTICC_ALERT_SERVER, ELASTICC_ALERT_TOPIC, TOM_URL,
    ELASTICC_SIMNIGHT_START, ELASTICC_SIMNIGHT_END.
    """
    cf_env = os.getenv( "ELASTICC_COMPRESSION_FACTOR" )
    compression_factor = int( cf_env ) if cf_env is not None else 10

    t0_env = os.getenv( "ELASTICC_START_TIME" )
    if t0_env is not None:
        t0 = dateutil.parser.isoparse( t0_env )
    else:
        t0 = datetime.datetime( 2022, 7, 6, 7, 0 )

    # Any value at all (even empty) in ELASTICC_DRY_RUN means dry run.
    dry_run = os.getenv( "ELASTICC_DRY_RUN", None ) is not None

    kafka_broker = os.getenv( "ELASTICC_ALERT_SERVER", default="brahms.lbl.gov:9092" )
    kafka_topic = os.getenv( "ELASTICC_ALERT_TOPIC", default="elasticc-test-only-1" )
    tom_url = os.getenv( "TOM_URL", default="https://desc-tom.lbl.gov" )
    simnight0 = int( os.getenv( "ELASTICC_SIMNIGHT_START", default=60274 ) )
    # NOTE(review): this default end night (62378) disagrees with the
    # AlertStreamer constructor default (61378) -- confirm which is intended.
    simnight1 = int( os.getenv( "ELASTICC_SIMNIGHT_END", default=62378 ) )

    streamer = AlertStreamer( compression_factor=compression_factor, campaign_start=t0,
                              simnight0=simnight0, simnight1=simnight1,
                              kafka_broker=kafka_broker, kafka_topic=kafka_topic, tom_url=tom_url,
                              dry_run=dry_run )

    # Stream one batch per wall-clock hour, forever.
    while True:
        streamer.stream_todays_batch()
        _logger.info( f'Sleeping 1 hour' )
        time.sleep( 3600 )
$SNANA_LSST_ROOT/simlibs/PLASTICC_ORIGINAL_DDF.SIMLIB 12 | 13 | SIMLIB_MSKOPT: 128 # include the entire season containing detection 14 | 15 | 16 | # realistic photo-z library from Graham 2018; 17 | # hostlib is overwritten in simgen-master file 18 | HOSTLIB_FILE: $SNANA_LSST_ROOT/simlibs/PLASTICC_ORIGINAL_photoz_G18.HOSTLIB 19 | HOSTLIB_DZTOL: 0.02 0.01 0.007 # Jan 2022 recommended by Martine+Alex 20 | HOSTLIB_MSKOPT: 258 # 2=noise + 256=verbose 21 | 22 | # --------------------------------------------------- 23 | 24 | SOLID_ANGLE: 0.0 # 3/Nov/2021: 0 is a flag to use solid angle in SIMLIB 25 | 26 | RANSEED: 123459 27 | FORMAT_MASK: 32 ! 2=TEXT 32=FITS 28 | 29 | GENFILTERS: ugrizY 30 | KCOR_FILE: $SNANA_LSST_ROOT/kcor/2017-04_approx/kcor_LSST.fits 31 | 32 | # Baseline v2.0 below (17/Jan/2022; C.Alves) 33 | GENRANGE_MJD: 60275 61380 # explicit MJD cut after 3 years 34 | GENRANGE_PEAKMJD: 60225 61455 # PEAKMJD cut is wider to catch rise time 35 | GENSIGMA_SEARCH_PEAKMJD: 1.0 36 | 37 | # obsolete minion below (17/Jan/2022; C.Alves) 38 | #GENRANGE_MJD: 59570 60675 # explicit MJD cut after 3 years 39 | #GENRANGE_PEAKMJD: 59520 60750 # PEAKMJD cut is wider to catch rise time 40 | 41 | 42 | GENRANGE_REDSHIFT: 0.02 1.20 # XXX 43 | GENSIGMA_REDSHIFT: 0.001 # 44 | GENRANGE_TREST: -100.0 400.0 # XXX 45 | 46 | GENSIGMA_VPEC: 300.0 # peculiar velocity spread without correction, km/sec 47 | VPEC_ERR: 300.0 # --> no correction 48 | 49 | UVLAM_EXTRAPFLUX: 500 # extrapolate fluxes down to 500 A. 50 | 51 | MXRADIUS_RANDOM_SHIFT: 0.25 # add a small random coordinate shift each time a simlib entry is accessed 52 | 53 | LENSING_PROBMAP_FILE: $SNDATA_ROOT/models/lensing/LENSING_PROBMAP_LogNormal+MICECATv1.DAT 54 | 55 | # smear flags: 0=off, 1=on 56 | SMEARFLAG_FLUX: 1 # photo-stat smearing of signal, sky, etc ... 
57 | SMEARFLAG_ZEROPT: 3 # smear zero-point with zptsig 58 | 59 | # negative MWEBV-option ==> correct FLUXCAL with map and central filter wave 60 | OPT_MWEBV: 3 # SFD98 + Schalfly2011 61 | OPT_MWCOLORLAW: 99 # Fitzpatrick 62 | 63 | # 1=> software trigger 3=software & spec trigger 64 | APPLY_SEARCHEFF_OPT: 1 65 | SEARCHEFF_PIPELINE_EFF_FILE: $SNANA_LSST_ROOT/models/searcheff/PLASTICC_ORIGINAL_SEARCHEFF_PIPELINE_LSST.DAT 66 | 67 | # define pipeline logic for trigger (e.g., 2 detetions) 68 | SEARCHEFF_PIPELINE_LOGIC_FILE: $SNANA_LSST_ROOT/models/searcheff/SEARCHEFF_PIPELINE_LOGIC_LSST.DAT 69 | 70 | # spectroscopic identification efficiency vs. i-band mag 71 | SEARCHEFF_SPEC_FILE: $SNANA_LSST_ROOT/models/searcheff/PLASTICC_ORIGINAL_LSST_SPECEFF.DAT 72 | 73 | # efficiency vs. redshift for getting zSpec from host 74 | SEARCHEFF_zHOST_FILE: $SNANA_LSST_ROOT/models/searcheff/PLASTICC_ORIGINAL_zHOST_4MOST_DDF.DAT 75 | 76 | APPLY_CUTWIN_OPT: 1 77 | CUTWIN_NOBS_NOSATURATE: 10 9999 grizY 78 | #CUTWIN_REDSHIFT_FINAL: 0 3 # cut on zFINAL = zSpec or zPhot 79 | CUTWIN_HOST_ZPHOT: 0 3 # cut on zPHOT(host) 80 | CUTWIN_MWEBV: 0 3 # cut on E(B-V) 81 | CUTWIN_PEAKMAG_ALL: 14 999 # cut bright flux or template, any band 82 | 83 | NEWMJD_DIF: 0.021 # to make trigger, 30 min between detections 84 | 85 | # Create one-row-per-transient summary of the events written out 86 | SIMGEN_DUMP: 35 87 | CID LIBID SIM_SEARCHEFF_MASK GENTYPE NON1A_INDEX 88 | ZCMB ZHELIO ZCMB_SMEAR RA DEC MWEBV 89 | GALID GALZPHOT GALZPHOTERR GALSNSEP GALSNDDLR RV AV 90 | MU LENSDMU PEAKMJD MJD_DETECT_FIRST MJD_DETECT_LAST DTSEASON_PEAK 91 | PEAKMAG_u PEAKMAG_g PEAKMAG_r PEAKMAG_i PEAKMAG_z PEAKMAG_Y 92 | SNRMAX SNRMAX2 SNRMAX3 NOBS NOBS_SATURATE 93 | 94 | -------------------------------------------------------------------------------- /survey_config/elasticc_origmap.txt: -------------------------------------------------------------------------------- 1 | # TRUE_GENTYPE MODEL_NAME 2 | 10 SNIa-SALT2 3 | 11 SNIa-91bg 4 | 
12 SNIax 5 | 6 | 20 SNIb-Templates 7 | 21 SNIb+HostXT_V19 8 | 25 SNIc-Templates 9 | 26 SNIc+HostXT_V19 10 | 27 SNIcBL+HostXT_V19 11 | 12 | 30 SNII-NMF 13 | 31 SNII-Templates 14 | 32 SNII+HostXT_V19 15 | 35 SNIIn-MOSFIT 16 | 36 SNIIn+HostXT_V19 17 | 37 SNIIb+HostXT_V19 18 | 19 | 40 SLSN-I 20 | 42 TDE 21 | 45 ILOT 22 | 46 CART 23 | 24 | 50 KN_K17 25 | 51 KN_B19 26 | 59 PISN 27 | 28 | 60 AGN 29 | 30 | 80 RRL 31 | 81 Mdwarf 32 | 82 Mdwarf-flare 33 | 83 EB 34 | 84 dwarf-nova 35 | 87 uLens-Single_PyLIMA 36 | 88 uLens-Single-GenLens 37 | 89 uLens-Binary 38 | 90 Cepheid 39 | 91 dSct 40 | -------------------------------------------------------------------------------- /taxonomy/taxonomy.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# ELAsTiCC taxonomy\n", 8 | "\n", 9 | "_Alex Malz (GCCL@RUB)_ , _Rob Knop (raknop@lbl.gov)_\n", 10 | "\n", 11 | "The original thought behind this was to define a \"bit\" mask (really, the decimal equivalent), but in pratice that's not what we have. Rather, what we have is a hierarchical classification, where each level of the hierarchy is one power of 10, and categories the next level down are different for each parent category. The lowest power of 10 that is not zero represents how specific the category is; if the ones digit is not zero, then the category is as specific as the taxonomy gets.\n" 12 | ] 13 | }, 14 | { 15 | "cell_type": "markdown", 16 | "metadata": {}, 17 | "source": [ 18 | "## Use of the taxonomy\n", 19 | "\n", 20 | "Brokers will ingest alerts produced by the ELAsTiCC team (eventually, produced by LSST). Each alert will have information about one _DiaSource_ (DIA=\"Differential Imaging Analysis\"); this is a single observation at one time of a _DiaObject_. A _DiaObject_ represents a single astronomical object or event; in practice, it's defined by a position on the sky. 
A new source found at the position of an existing object will be assigned to that existing object. The alert for a _DiaSource_ will also include the data for that source's _DiaObject_, as well as previously-detected sources for that same object, and (if the object was discovered at least a day ago) forced photometry (stored using the _DiaForcedSource_ schema) dating back to at most 30 days before the first detection fo the object.\n", 21 | "\n", 22 | "Brokers will then apply whatever algorithms they have to estimate classifications for the source. Each different algorithm a broker uses is called a _classifier_. When a classifier responds to an alert, it should produce a set of (_classId_, _probability_) pairs. All of the _probability_ values for a single source should sum to 1. The _classId_ values are described by this taxonomy; the actual hierarchical list of values can be found near the bottom of this notebook.\n", 23 | "\n", 24 | "### Avro schema\n", 25 | "\n", 26 | "All schema for ELAsTiCC can be found in https://github.com/LSSTDESC/elasticc/tree/main/alert_schema\n", 27 | "\n", 28 | "Brokers will ingest the \"alert\" schema; as of this writing, the current version is 0.9.1 : https://github.com/LSSTDESC/elasticc/blob/main/alert_schema/elasticc.v0_9_1.alert.avsc That schema includes the object, source, and forcedsource schema (all in the same directory).\n", 29 | "\n", 30 | "Brokers will publish the \"brokerClasification\" schema; again, as of this writing, the current verison is 0.9.1 : https://github.com/LSSTDESC/elasticc/blob/main/alert_schema/elasticc.v0_9_1.brokerClassification.avsc\n", 31 | "\n", 32 | "\n", 33 | "### Documentation of specific categories\n", 34 | "\n", 35 | "* **Meta/Residual** -- All of the probabilities returned for a single alert should sum to 1. This is the categorory to put the probaiblity for \"not any of the other things I've assigned a probability for\". One use of this would be for yes/no binary categorizer. 
Suppose you just want to report the probability that an event is a SNIa. You'd assign that probability to the SNIa categority, and one minus that probability to this category. So, if the algorithm produced a 33% chance it's a SNIa, the SNIa category would get 0.33, and the Meta/Residual category would get 0.67.\n", 36 | "\n", 37 | "* **Meta/NotClassified** -- Use this category to report that your algoirthm chose not to classify a source; assign a probability of 1 to this category in that case. The purpose of this is so that we can diagnose whether and why alerts are getting dropped. If we receive a this classification, we know the alert made it all the way through the system from, but that the algorithm did not think it had enough information to actually supply a classification." 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "metadata": {}, 43 | "source": [ 44 | "### Generating the integer codes\n", 45 | "\n", 46 | "The idea is that every level of the tree corresponds to one digit in the number for the classification.\n", 47 | "\n", 48 | "* 1000s : General category (Meta info, static object, non-recurring object, recurring object)\n", 49 | "* 100s : Variable vs. Static object. (Static object would be something like a persistent subtraction artifact that doesn't get caught by the LSST R/B system.)\n", 50 | "* 10s : Specific category (e.g. is it a SN-like variable, a periodic recurring object, non-perodic recurring object, etc.)\n", 51 | "* 1s : Specific cateogorization (SNIa, SNIb, AGN, etc.)" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 1, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "from treelib import Node, Tree\n", 61 | "import string" 62 | ] 63 | }, 64 | { 65 | "cell_type": "markdown", 66 | "metadata": {}, 67 | "source": [ 68 | "## Building a phylogenetic tree\n", 69 | "\n", 70 | "Given the hierarchical class relationships, make a tree diagram (and record some hopefully useful information)." 
71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "metadata": {}, 76 | "source": [ 77 | "## Housekeeping\n", 78 | "\n", 79 | "We need to think about how to sort through the classification information.\n", 80 | "`directory` and `index` are very simplistic starting points.\n", 81 | "It'll be easier when we have a better idea of what subsampling operations we'll perform." 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": 2, 87 | "metadata": {}, 88 | "outputs": [], 89 | "source": [ 90 | "directory = {}\n", 91 | "index = {}" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 3, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "maxdep = 3\n", 101 | "def branch(tree, parent, children, prepend=[\"Other\"], append=None, directory=directory, index=index):\n", 102 | " if prepend is not None:\n", 103 | " proc_pre = [parent + \"/\" + pre for pre in prepend]\n", 104 | " children = proc_pre + children\n", 105 | " if append is not None:\n", 106 | " proc_app = [parent + \"/\" + appe for app in append]\n", 107 | " children = children + proc_app\n", 108 | " tmp = parent\n", 109 | " level = 0\n", 110 | " while tree.ancestor(tmp) is not None:\n", 111 | " level += 1\n", 112 | " tmp = tree.ancestor(tmp)\n", 113 | " directory[parent] = {}\n", 114 | " for i, child in enumerate( children ):\n", 115 | " directory[parent][child] = i\n", 116 | "# print(index[parent], type(index[parent]))\n", 117 | " if index[parent] != '':\n", 118 | " index[child] = str(int(index[parent]) + (i+1)* 10 ** (maxdep-level))\n", 119 | " else:\n", 120 | " index[child] = str((i+1)* 10 ** (maxdep-level))\n", 121 | " tree.create_node(index[child]+\" \"+child, child, parent=parent)" 122 | ] 123 | }, 124 | { 125 | "cell_type": "markdown", 126 | "metadata": {}, 127 | "source": [ 128 | "It would be better to start with something like `directory` than to build it as we go along, but, hey, this is a hack." 
129 | ] 130 | }, 131 | { 132 | "cell_type": "code", 133 | "execution_count": 4, 134 | "metadata": {}, 135 | "outputs": [ 136 | { 137 | "name": "stdout", 138 | "output_type": "stream", 139 | "text": [ 140 | " Alert\n", 141 | "├── 0 Meta\n", 142 | "│ ├── 100 Meta/Other\n", 143 | "│ ├── 200 Residual\n", 144 | "│ └── 300 NotClassified\n", 145 | "├── 1000 Static\n", 146 | "│ └── 1100 Static/Other\n", 147 | "└── 2000 Variable\n", 148 | " ├── 2100 Variable/Other\n", 149 | " ├── 2200 Non-Recurring\n", 150 | " │ ├── 2210 Non-Recurring/Other\n", 151 | " │ ├── 2220 SN-like\n", 152 | " │ │ ├── 2221 SN-like/Other\n", 153 | " │ │ ├── 2222 Ia\n", 154 | " │ │ ├── 2223 Ib/c\n", 155 | " │ │ ├── 2224 II\n", 156 | " │ │ ├── 2225 Iax\n", 157 | " │ │ └── 2226 91bg\n", 158 | " │ ├── 2230 Fast\n", 159 | " │ │ ├── 2231 Fast/Other\n", 160 | " │ │ ├── 2232 KN\n", 161 | " │ │ ├── 2233 M-dwarf Flare\n", 162 | " │ │ ├── 2234 Dwarf Novae\n", 163 | " │ │ └── 2235 uLens\n", 164 | " │ └── 2240 Long\n", 165 | " │ ├── 2241 Long/Other\n", 166 | " │ ├── 2242 SLSN\n", 167 | " │ ├── 2243 TDE\n", 168 | " │ ├── 2244 ILOT\n", 169 | " │ ├── 2245 CART\n", 170 | " │ └── 2246 PISN\n", 171 | " └── 2300 Recurring\n", 172 | " ├── 2310 Recurring/Other\n", 173 | " ├── 2320 Periodic\n", 174 | " │ ├── 2321 Periodic/Other\n", 175 | " │ ├── 2322 Cepheid\n", 176 | " │ ├── 2323 RR Lyrae\n", 177 | " │ ├── 2324 Delta Scuti\n", 178 | " │ ├── 2325 EB\n", 179 | " │ └── 2326 LPV/Mira\n", 180 | " └── 2330 Non-Periodic\n", 181 | " ├── 2331 Non-Periodic/Other\n", 182 | " └── 2332 AGN\n", 183 | "\n" 184 | ] 185 | } 186 | ], 187 | "source": [ 188 | "tree = Tree()\n", 189 | "\n", 190 | "basename = \"Alert\"\n", 191 | "\n", 192 | "index[basename] = ''\n", 193 | "tree.create_node(index[basename] + \" \" + basename, basename)\n", 194 | "\n", 195 | "# need spot for residual, choose not to classify -- metacategory? 
possibly rename to \"Flagged\"?\n", 196 | "index[\"Meta\"] = \"0\"\n", 197 | "tree.create_node(index[\"Meta\"] + \" Meta\", \"Meta\", parent = basename)\n", 198 | "branch(tree, \"Meta\", [\"Residual\", \"NotClassified\"])\n", 199 | "\n", 200 | "branch(tree, basename, [\"Static\", \"Variable\"], prepend=[] )\n", 201 | "\n", 202 | "branch(tree, \"Static\", [] )\n", 203 | "\n", 204 | "branch(tree, \"Variable\", [\"Non-Recurring\", \"Recurring\"] )\n", 205 | "\n", 206 | "branch(tree, \"Recurring\", [\"Periodic\", \"Non-Periodic\"])\n", 207 | "\n", 208 | "branch(tree, \"Periodic\", [\"Cepheid\", \"RR Lyrae\", \"Delta Scuti\", \"EB\", \"LPV/Mira\"])\n", 209 | "\n", 210 | "branch(tree, \"Non-Periodic\", [\"AGN\"])\n", 211 | "\n", 212 | "branch(tree, \"Non-Recurring\", [\"SN-like\", \"Fast\", \"Long\"])\n", 213 | "\n", 214 | "branch(tree, \"SN-like\", [\"Ia\", \"Ib/c\", \"II\", \"Iax\", \"91bg\"])\n", 215 | "\n", 216 | "branch(tree, \"Fast\", [\"KN\", \"M-dwarf Flare\", \"Dwarf Novae\", \"uLens\"])\n", 217 | "\n", 218 | "branch(tree, \"Long\", [\"SLSN\", \"TDE\", \"ILOT\", \"CART\", \"PISN\"])\n", 219 | "\n", 220 | "tree.show()" 221 | ] 222 | }, 223 | { 224 | "cell_type": "markdown", 225 | "metadata": { 226 | "tags": [] 227 | }, 228 | "source": [ 229 | "## Building a structure for hierarchical classification\n", 230 | "\n", 231 | "The whole point of this, for me, is for the classification to have corresponding posterior probabilities, or at least confidence flags or scores, because I'd want to use them to rapidly select follow-up candidates.\n", 232 | "[This](https://community.lsst.org/t/projects-involving-irregularly-shaped-data/4466) looks potentially relevant.\n", 233 | "I guess it could also be used for packaging up additional features into an alert without bloating it up too much." 
234 | ] 235 | } 236 | ], 237 | "metadata": { 238 | "kernelspec": { 239 | "display_name": "rob_mess_conda", 240 | "language": "python", 241 | "name": "rob_mess_conda" 242 | }, 243 | "language_info": { 244 | "codemirror_mode": { 245 | "name": "ipython", 246 | "version": 3 247 | }, 248 | "file_extension": ".py", 249 | "mimetype": "text/x-python", 250 | "name": "python", 251 | "nbconvert_exporter": "python", 252 | "pygments_lexer": "ipython3", 253 | "version": "3.11.3" 254 | } 255 | }, 256 | "nbformat": 4, 257 | "nbformat_minor": 4 258 | } 259 | -------------------------------------------------------------------------------- /tom_management/Dockerfile.pgdump: -------------------------------------------------------------------------------- 1 | FROM rknop/devuan-chimaera-rknop 2 | MAINTAINER Rob Knop 3 | 4 | ENV DEBIAN_FRONTEND=noninteractive 5 | RUN apt-get update && apt-get upgrade -y \ 6 | && apt-get install -y postgresql-client-13 \ 7 | && apt-get clean \ 8 | && rm -rf /var/lib/apt/lists/* 9 | 10 | RUN mkdir /home/pgdump 11 | ENV HOME /home/pgdump 12 | ADD run_pgdump.sh /home/pgdump/run_pgdump.sh 13 | 14 | ENTRYPOINT [ "/bin/bash", "/home/pgdump/run_pgdump.sh" ] 15 | -------------------------------------------------------------------------------- /tom_management/add_elasticc_alerts.py: -------------------------------------------------------------------------------- 1 | raise RuntimeError( "Deprecated. 
See elasticc2/management/commands/load_snana_fits.py in desc-tom" ) 2 | 3 | import sys 4 | import io 5 | import time 6 | import argparse 7 | import pathlib 8 | import logging 9 | import requests 10 | import json 11 | import tarfile 12 | import gzip 13 | import fastavro 14 | 15 | from tomconnection import TomConnection 16 | 17 | class AlertLoader(TomConnection): 18 | def __init__( self, *args, dryrun=False, **kwargs ): 19 | super().__init__( *args, **kwargs ) 20 | self.dryrun = dryrun 21 | self.alertcache = [] 22 | self.tot_alerts_loaded = 0 23 | self.tot_missing = 0 24 | self.tot_objs_loaded = 0 25 | self.tot_sources_loaded = 0 26 | self.tot_forced_loaded = 0 27 | self.alert_cache_size = 1000 28 | 29 | self.schema = fastavro.schema.load_schema( "../alert_schema/elasticc.v0_9.alert.avsc" ) 30 | 31 | def flush_alert_cache( self ): 32 | if len( self.alertcache ) > 0: 33 | self.logger.debug( f"Posting {sys.getsizeof(json.dumps(self.alertcache))/1024/1024:.2f} MiB " 34 | f"for {len(self.alertcache)} alerts" ) 35 | if self.dryrun: 36 | self.logger.warning( f'Not actually posting, this is a dry run.' ) 37 | else: 38 | # Keep resending until we get a good result. The code on the server 39 | # side should be smart enough to not add alerts more than once. 40 | ok = False 41 | while not ok: 42 | resp = self.rqs.post( f'{self.urlbase}/elasticc/addelasticcalert', json=self.alertcache ) 43 | if resp.status_code != 200: 44 | self.logger.error( f"ERROR : got status code {resp.status_code}; retrying after 1s..." 
) 45 | time.sleep(1) 46 | else: 47 | ok = True 48 | rjson = json.loads( resp.text ) 49 | if rjson['status'] != 'ok': 50 | with io.StringIO() as strstr: 51 | strstr.write( f"ERROR: got status {rjson['status']}\n" ) 52 | for key, val in rjson.items(): 53 | if key != 'status': 54 | strstr.write( f" {key} : {val}\n" ) 55 | self.logger.error( strstr.getvalue() ) 56 | else: 57 | self.tot_objs_loaded += rjson["message"]["objects"] 58 | self.tot_sources_loaded += rjson["message"]["sources"] 59 | self.tot_alerts_loaded += rjson["message"]["alerts"] 60 | self.tot_forced_loaded += rjson["message"]["forcedsources"] 61 | self.logger.info( f'{rjson["message"]["alerts"]} alerts ' 62 | f'({self.tot_alerts_loaded}), ' 63 | f'{rjson["message"]["objects"]} objects ' 64 | f'({self.tot_objs_loaded}), ' 65 | f'{rjson["message"]["sources"]} sources ' 66 | f'({self.tot_sources_loaded}), ' 67 | f'{rjson["message"]["forcedsources"]} forced ' 68 | f'({self.tot_forced_loaded})' ) 69 | self.alertcache = [] 70 | 71 | # def read_alerts( self, fstream ): 72 | # reader = fastavro.schemaless_reader( fstream ) 73 | # alerts = [] 74 | 75 | # for rawalert in reader: 76 | # # I *think* that the schema in the database match the avro schema directly, 77 | # # and as such the webap is expecting all the fields in the alert. 
78 | # alerts.append( rawalert ) 79 | # return alerts 80 | 81 | def load_directory( self, direc, top=True ): 82 | direc = pathlib.Path( direc ) 83 | ndid = 0 84 | for alertfile in direc.iterdir(): 85 | if ( ndid % 10 == 0 ): 86 | self.logger.debug( f"Did {ndid} files in {direc}" ) 87 | ndid += 1 88 | if alertfile.is_dir(): 89 | self.logger.debug( f'Going into subdirectory {alertfile}' ) 90 | alertcache = self.load_directory( alertfile, top=False ) 91 | else: 92 | if ( len(alertfile.name) > 8 ) and ( alertfile.name[-8:] == ".avro.gz" ): 93 | fstream = gzip.open( alertfile, "rb") 94 | elif ( len(alertfile.name) > 5 ) and ( alertfile.name[-5:] == ".avro" ): 95 | fstream = open( alertfile, "rb") 96 | elif ( ( len(alertfile.name) > 7 ) and ( alertfile.name[-7:] == ".tar.gz" ) or 97 | ( len(alertfile.name) > 4 ) and ( alertfile.name[-4:] == ".tar" ) ): 98 | self.logger.debug( f'Loading tar file {alertfile}' ) 99 | self.load_tarfile( alertfile ) 100 | continue 101 | else: 102 | self.logger.warning( f'Skipping unrecognized file {alertfile.name}' ) 103 | continue 104 | 105 | alert = fastavro.schemaless_reader( fstream, self.schema ) 106 | fstream.close() 107 | self.alertcache.append( alert ) 108 | if len(self.alertcache) >= self.alert_cache_size: 109 | self.flush_alert_cache( ) 110 | if top: 111 | self.flush_alert_cache() 112 | 113 | def load_tarfile( self, tarfilename ): 114 | with tarfile.open( tarfilename, 'r' ) as tar: 115 | members = tar.getmembers() 116 | for member in members: 117 | if ( len(member.name) > 8 ) and ( member.name[-8:] == ".avro.gz" ): 118 | fstream = gzip.open( tar.extractfile(member.name), 'rb' ) 119 | elif ( len(member.name) > 5 ) and ( member.name[-5:] == ".avro" ): 120 | fstream = tar.extractfile(member.name) 121 | else: 122 | continue 123 | 124 | alert = fastavro.schemaless_reader( fstream, self.schema ) 125 | fstream.close() 126 | self.alertcache.append( alert ) 127 | if len(self.alertcache) >= self.alert_cache_size: 128 | self.flush_alert_cache( 
) 129 | self.flush_alert_cache() 130 | 131 | def main(): 132 | logger = logging.getLogger("main") 133 | logout = logging.StreamHandler( sys.stderr ) 134 | logger.addHandler( logout ) 135 | logout.setFormatter( logging.Formatter( f'[%(asctime)s - %(levelname)s] - %(message)s' ) ) 136 | logger.setLevel( logging.INFO ) 137 | 138 | parser = argparse.ArgumentParser() 139 | parser.add_argument( "-d", "--directory", default=None, help="Directory of alerts to load" ) 140 | parser.add_argument( "-t", "--tarfile", default=None, help="Tar file of alerts to load" ) 141 | parser.add_argument( "-u", "--urlbase", default="https://desc-tom.lbl.gov", 142 | help="URL of TOM (no trailing / ; default https://desc-tom.lbl.gov)" ) 143 | parser.add_argument( "-U", "--username", default="root", help="TOM username" ) 144 | parser.add_argument( "-p", "--password", default="password", help="TOM password" ) 145 | parser.add_argument( "-v", "--verbose", default=False, action="store_true", help="Show debug info" ) 146 | parser.add_argument( "--dry-run", default=False, action="store_true", 147 | help="Don't actually post alerts to the db, just read and parse them." ) 148 | args = parser.parse_args() 149 | 150 | if args.verbose: 151 | logger.setLevel( logging.DEBUG ) 152 | 153 | if args.urlbase[-1] == '/': 154 | raise ValueError( "Things will fail in a very mysterious fashion if the URL has a trailing slash." 
) 155 | 156 | if ( args.tarfile is None ) == ( args.directory is None ): 157 | sys.stderr.write( f"--tarfile = {args.tarfile}\n" ) 158 | sys.stderr.write( f"--directory = {args.directory}\n" ) 159 | sys.stderr.write( "Must specify one of --directory or --tarfile\n" ) 160 | sys.exit(1) 161 | 162 | loader = AlertLoader( args.urlbase, args.username, args.password, dryrun=args.dry_run, logger=logger ) 163 | 164 | if ( args.tarfile is not None ): 165 | loader.load_tarfile( args.tarfile ) 166 | else: 167 | loader.load_directory( args.directory ) 168 | 169 | # ====================================================================== 170 | 171 | if __name__ == "__main__": 172 | main() 173 | 174 | -------------------------------------------------------------------------------- /tom_management/load_elasticc_objecttruth.py: -------------------------------------------------------------------------------- 1 | raise RuntimeError( "Deprecated. See elasticc2/management/commands/load_snana_fits.py in desc-tom" ) 2 | 3 | import sys 4 | import argparse 5 | import logging 6 | from truthloader import TruthLoader 7 | 8 | class ObjectTruthLoader(TruthLoader): 9 | def __init__( self, *args, **kwargs ): 10 | urlend = 'elasticc/addobjecttruth' 11 | toint = lambda s: int(s) if s!='' else None 12 | tofloat = lambda s: float(s) if s!='' else None 13 | converters = { 'SNID': toint, 14 | 'CID': toint, 15 | 'LIBID': toint, 16 | 'SIM_SEARCHEFF_MASK': toint, 17 | 'GENTYPE': toint, 18 | 'NON1A_INDEX': toint, 19 | 'SIM_TEMPLATE_INDEX': toint, 20 | 'ZCMB': tofloat, 21 | 'ZHELIO': tofloat, 22 | 'ZCMB_SMEAR': tofloat, 23 | 'RA': tofloat, 24 | 'DEC': tofloat, 25 | 'MWEBV': tofloat, 26 | 'GALNMATCH': toint, 27 | 'GALID': toint, 28 | 'GALZPHOT': tofloat, 29 | 'GALZPHOTERR': tofloat, 30 | 'GALSNSEP': tofloat, 31 | 'GALSNDDLR': tofloat, 32 | 'RV': tofloat, 33 | 'AV': tofloat, 34 | 'MU': tofloat, 35 | 'LENSDMU': tofloat, 36 | 'PEAKMJD': tofloat, 37 | 'MJD_DETECT_FIRST': tofloat, 38 | 'MJD_DETECT_LAST': tofloat, 
39 | 'DTSEASON_PEAK': tofloat, 40 | 'PEAKMAG_u': tofloat, 41 | 'PEAKMAG_g': tofloat, 42 | 'PEAKMAG_r': tofloat, 43 | 'PEAKMAG_i': tofloat, 44 | 'PEAKMAG_z': tofloat, 45 | 'PEAKMAG_Y': tofloat, 46 | 'SNRMAX': tofloat, 47 | 'SNRMAX2': tofloat, 48 | 'SNRMAX3': tofloat, 49 | 'NOBS': toint, 50 | 'NOBS_SATURATE': toint } 51 | renames = { 'SNID': "diaObjectId", 52 | 'CID': "diaObjectId", 53 | 'LIBID': 'libid', 54 | 'SIM_SEARCHEFF_MASK': 'sim_searcheff_mask', 55 | 'GENTYPE': 'gentype', 56 | "NON1A_INDEX": 'sim_template_index', 57 | 'SIM_TEMPLATE_INDEX': 'sim_template_index', 58 | 'ZCMB': 'zcmb', 59 | 'ZHELIO': 'zhelio', 60 | 'ZCMB_SMEAR': 'zcmb_smear', 61 | 'RA': 'ra', 62 | 'DEC': 'dec', 63 | 'MWEBV': 'mwebv', 64 | 'GALNMATCH': 'galnmatch', 65 | 'GALID': 'galid', 66 | 'GALZPHOT': 'galzphot', 67 | 'GALZPHOTERR': 'galzphoterr', 68 | 'GALSNSEP': 'galsnsep', 69 | 'GALSNDDLR': 'galsnddlr', 70 | 'RV': 'rv', 71 | 'AV': 'av', 72 | 'MU': 'mu', 73 | 'LENSDMU': 'lensdmu', 74 | 'PEAKMJD': 'peakmjd', 75 | 'MJD_DETECT_FIRST': 'mjd_detect_first', 76 | 'MJD_DETECT_LAST': 'mjd_detect_last', 77 | 'DTSEASON_PEAK': 'dtseason_peak', 78 | 'PEAKMAG_u': 'peakmag_u', 79 | 'PEAKMAG_g': 'peakmag_g', 80 | 'PEAKMAG_r': 'peakmag_r', 81 | 'PEAKMAG_i': 'peakmag_i', 82 | 'PEAKMAG_z': 'peakmag_z', 83 | 'PEAKMAG_Y': 'peakmag_Y', 84 | 'SNRMAX': 'snrmax', 85 | 'SNRMAX2': 'snrmax2', 86 | 'SNRMAX3': 'snrmax3', 87 | 'NOBS': 'nobs', 88 | 'NOBS_SATURATE': 'nobs_saturate' } 89 | super().__init__( *args, urlend=urlend, converters=converters, renames=renames, sep=',', **kwargs ) 90 | 91 | def main(): 92 | logger = logging.getLogger( "main" ) 93 | logout = logging.StreamHandler( sys.stderr ) 94 | logger.addHandler( logout ) 95 | logout.setFormatter( logging.Formatter( f'[%(asctime)s - %(levelname)s] - %(message)s' ) ) 96 | logger.setLevel( logging.DEBUG ) 97 | 98 | parser = argparse.ArgumentParser( "Load object truth for already-loaded Elasticc objects" ) 99 | parser.add_argument( "filenames", nargs='+', 
help="Filenames of object truth" ) 100 | parser.add_argument( "-u", "--urlbase", default="https://desc-tom.lbl.gov", 101 | help="URL of TOM (no trailing / ; default https://desc-tom.lbl.gov)" ) 102 | parser.add_argument( "-U", "--username", default="root", help="TOM username" ) 103 | parser.add_argument( "-p", "--password", default="password", help="TOM password" ) 104 | args = parser.parse_args() 105 | 106 | loader = ObjectTruthLoader( args.urlbase, args.username, args.password, logger=logger ) 107 | for filename in args.filenames: 108 | loader.load_csv( filename ) 109 | logger.info( "All done" ) 110 | 111 | 112 | # ====================================================================== 113 | 114 | if __name__ == "__main__": 115 | main() 116 | -------------------------------------------------------------------------------- /tom_management/load_elasticc_truth.py: -------------------------------------------------------------------------------- 1 | raise RuntimeError( "Deprecated. See elasticc2/management/commands/load_snana_fits.py in desc-tom" ) 2 | 3 | import sys 4 | import argparse 5 | import logging 6 | from truthloader import TruthLoader 7 | 8 | class SourceTruthLoader(TruthLoader): 9 | def __init__( self, *args, **kwargs ): 10 | urlend = 'elasticc/addtruth' 11 | converters = { 'SourceID': int, 12 | 'SNID': int, 13 | 'MJD': float, 14 | 'DETECT': int, 15 | 'TRUE_GENTYPE': int, 16 | 'TRUE_GENMAG': float } 17 | renames = {} 18 | super().__init__( *args, urlend=urlend, converters=converters, renames=renames, **kwargs ) 19 | 20 | def main(): 21 | logger = logging.getLogger( "main" ) 22 | logout = logging.StreamHandler( sys.stderr ) 23 | logger.addHandler( logout ) 24 | logout.setFormatter( logging.Formatter( f'[%(asctime)s - %(levelname)s] - %(message)s' ) ) 25 | logger.setLevel( logging.DEBUG ) 26 | 27 | parser = argparse.ArgumentParser( "Load truth for already-loaded Elasticc sources" ) 28 | parser.add_argument( "filenames", nargs='+', help="Filenames of truth" ) 
29 | parser.add_argument( "-u", "--urlbase", default="https://desc-tom.lbl.gov", 30 | help="URL of TOM (no trailing / ; default https://desc-tom.lbl.gov)" ) 31 | parser.add_argument( "-U", "--username", default="root", help="TOM username" ) 32 | parser.add_argument( "-p", "--password", default="password", help="TOM password" ) 33 | args = parser.parse_args() 34 | 35 | loader = SourceTruthLoader( args.urlbase, args.username, args.password, logger=logger ) 36 | for filename in args.filenames: 37 | loader.load_csv( filename ) 38 | logger.info( "All done" ) 39 | 40 | 41 | # ====================================================================== 42 | 43 | if __name__ == "__main__": 44 | main() 45 | -------------------------------------------------------------------------------- /tom_management/run_pgdump.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Requires environmente variables PGHOST, PGPASSWORD, AND PGDB 4 | # Requires a filesystem mounted at /pgdump 5 | 6 | date=`date -Iminutes` 7 | /usr/bin/pg_dump --format=c -h $PGHOST -U postgres --file=/pgdump/tom_${date}.sqlc $PGDB 8 | -------------------------------------------------------------------------------- /tom_management/tom-desc-app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "12" 6 | field.cattle.io/creatorId: u-6fc5nwq2ou 7 | field.cattle.io/publicEndpoints: 
'[{"addresses":["128.55.206.106"],"port":80,"protocol":"HTTP","serviceName":"desc-tom:ingress-56d5f3c1a8a3f88ebb006b71c4a32cdd","ingressName":"desc-tom:tom-desc-app","hostname":"desc-tom.lbl.gov","allNodes":true},{"addresses":["128.55.206.106"],"port":80,"protocol":"HTTP","serviceName":"desc-tom:ingress-ecfdd332eb86650f33689e51928265af","ingressName":"desc-tom:tom-desc-app","hostname":"tom-desc-app.desc-tom.production.svc.spin.nersc.org","allNodes":true}]' 8 | nersc.gov/collab_uids: "70268" 9 | nersc.gov/gid: "95089" 10 | nersc.gov/gids: 45703,60152,57177,58102,59318,60070,63477,64483,79186,70268,92576,95089,96414 11 | nersc.gov/roles: user 12 | nersc.gov/uid: "95089" 13 | nersc.gov/username: raknop 14 | creationTimestamp: null 15 | generation: 1 16 | labels: 17 | cattle.io/creator: norman 18 | workload.user.cattle.io/workloadselector: deployment-desc-tom-tom-desc-app 19 | name: tom-desc-app 20 | selfLink: /apis/apps/v1/namespaces/desc-tom/deployments/tom-desc-app 21 | spec: 22 | progressDeadlineSeconds: 600 23 | replicas: 1 24 | revisionHistoryLimit: 10 25 | selector: 26 | matchLabels: 27 | workload.user.cattle.io/workloadselector: deployment-desc-tom-tom-desc-app 28 | strategy: 29 | rollingUpdate: 30 | maxSurge: 1 31 | maxUnavailable: 0 32 | type: RollingUpdate 33 | template: 34 | metadata: 35 | annotations: 36 | cattle.io/timestamp: "2022-07-06T20:36:39Z" 37 | field.cattle.io/publicEndpoints: '[{"addresses":["128.55.206.106"],"allNodes":true,"hostname":"desc-tom.lbl.gov","ingressId":"desc-tom:tom-desc-app","port":80,"protocol":"HTTP","serviceId":"desc-tom:ingress-56d5f3c1a8a3f88ebb006b71c4a32cdd"},{"addresses":["128.55.206.106"],"allNodes":true,"hostname":"tom-desc-app.desc-tom.production.svc.spin.nersc.org","ingressId":"desc-tom:tom-desc-app","port":80,"protocol":"HTTP","serviceId":"desc-tom:ingress-ecfdd332eb86650f33689e51928265af"}]' 38 | nersc.gov/collab_uids: "70268" 39 | nersc.gov/gid: "95089" 40 | nersc.gov/gids: 
45703,60152,57177,58102,59318,60070,63477,64483,79186,70268,92576,95089,96414 41 | nersc.gov/roles: user 42 | nersc.gov/uid: "95089" 43 | nersc.gov/username: raknop 44 | creationTimestamp: null 45 | labels: 46 | workload.user.cattle.io/workloadselector: deployment-desc-tom-tom-desc-app 47 | spec: 48 | containers: 49 | - env: 50 | - name: DB_HOST 51 | value: tom-desc-postgres 52 | - name: DB_NAME 53 | value: tom_desc 54 | - name: DB_PASS 55 | value: fragile 56 | - name: DB_USER 57 | value: postgres 58 | - name: FINK_GROUP_ID 59 | value: lsstfr-johann 60 | - name: FINK_SERVER 61 | value: 134.158.74.95:24499, 62 | - name: FINK_TOPIC 63 | value: fink_early_sn_candidates_ztf 64 | - name: FINK_USERNAME 65 | value: johann 66 | - name: GOOGLE_APPLICATION_CREDENTIALS 67 | value: /secrets/GCP_auth_key-pitt_broker_user_project.json 68 | - name: GOOGLE_CLOUD_PROJECT 69 | value: pitt-broker-user-project 70 | image: registry.services.nersc.gov/raknop/tom-desc-production 71 | imagePullPolicy: Always 72 | name: tom-desc-app 73 | resources: {} 74 | securityContext: 75 | allowPrivilegeEscalation: false 76 | capabilities: 77 | add: 78 | - NET_BIND_SERVICE 79 | drop: 80 | - ALL 81 | privileged: false 82 | readOnlyRootFilesystem: false 83 | runAsNonRoot: true 84 | runAsUser: 95089 85 | stdin: true 86 | terminationMessagePath: /dev/termination-log 87 | terminationMessagePolicy: File 88 | tty: true 89 | volumeMounts: 90 | - mountPath: /secrets 91 | name: tom-desc-app-secrets 92 | - mountPath: /tom_desc 93 | name: tom-desc-production-deployment 94 | dnsPolicy: ClusterFirst 95 | restartPolicy: Always 96 | schedulerName: default-scheduler 97 | securityContext: 98 | fsGroup: 60152 99 | terminationGracePeriodSeconds: 30 100 | volumes: 101 | - name: tom-desc-app-secrets 102 | secret: 103 | defaultMode: 256 104 | optional: false 105 | secretName: desc-tom-secrets 106 | - hostPath: 107 | path: /global/cfs/cdirs/m1727/tom/deploy_production/tom_desc 108 | type: Directory 109 | name: 
tom-desc-production-deployment 110 | status: {} 111 | -------------------------------------------------------------------------------- /tom_management/tom-desc-postgres.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "12" 6 | field.cattle.io/creatorId: u-6fc5nwq2ou 7 | nersc.gov/collab_uids: "70268" 8 | nersc.gov/gid: "95089" 9 | nersc.gov/gids: 45703,60152,57177,58102,59318,60070,63477,64483,79186,70268,92576,95089,96414 10 | nersc.gov/roles: user 11 | nersc.gov/uid: "95089" 12 | nersc.gov/username: raknop 13 | creationTimestamp: null 14 | generation: 1 15 | labels: 16 | cattle.io/creator: norman 17 | workload.user.cattle.io/workloadselector: deployment-desc-tom-tom-desc-postgres 18 | name: tom-desc-postgres 19 | selfLink: /apis/apps/v1/namespaces/desc-tom/deployments/tom-desc-postgres 20 | spec: 21 | progressDeadlineSeconds: 600 22 | replicas: 1 23 | revisionHistoryLimit: 10 24 | selector: 25 | matchLabels: 26 | workload.user.cattle.io/workloadselector: deployment-desc-tom-tom-desc-postgres 27 | strategy: 28 | rollingUpdate: 29 | maxSurge: 1 30 | maxUnavailable: 0 31 | type: RollingUpdate 32 | template: 33 | metadata: 34 | annotations: 35 | cattle.io/timestamp: "2022-07-06T19:46:04Z" 36 | field.cattle.io/ports: '[[]]' 37 | nersc.gov/collab_uids: "70268" 38 | nersc.gov/gid: "95089" 39 | nersc.gov/gids: 45703,60152,57177,58102,59318,60070,63477,64483,79186,70268,92576,95089,96414 40 | nersc.gov/roles: user 41 | nersc.gov/uid: "95089" 42 | nersc.gov/username: raknop 43 | creationTimestamp: null 44 | labels: 45 | workload.user.cattle.io/workloadselector: deployment-desc-tom-tom-desc-postgres 46 | spec: 47 | containers: 48 | - env: 49 | - name: POSTGRES_DB 50 | value: tom_desc 51 | - name: POSTGRES_PASSWORD_FILE 52 | value: /secrets/postgres_password 53 | - name: POSTGRES_USER 54 | value: postgres 55 | image: 
import sys
import requests
import logging

# Module-level fallback logger; configured only when no handler has been
# attached to this module's logger by the embedding application.
_logger = logging.getLogger( __name__ )
if not _logger.hasHandlers():
    _handler = logging.StreamHandler( sys.stderr )
    _handler.setFormatter( logging.Formatter( '[%(asctime)s - %(levelname)s] - %(message)s',
                                              datefmt='%Y-%m-%d %H:%M:%S' ) )
    _logger.addHandler( _handler )
    _logger.setLevel( logging.INFO )


class TomConnection:
    """Authenticated HTTP session to a DESC TOM (Django) server.

    Construction performs the login; afterwards ``self.rqs`` is a
    requests session carrying the session cookie plus an ``X-CSRFToken``
    header suitable for subsequent POSTs.
    """

    def __init__( self, urlbase, username, password, logger=_logger ):
        """Log in to the TOM at urlbase; raise RuntimeError on failure."""
        self.logger = logger
        self.urlbase = urlbase
        self.rqs = requests.Session()
        # GET the login page first so the session acquires a csrftoken
        # cookie, then POST the credentials along with that token.
        loginurl = f'{urlbase}/accounts/login/'
        self.rqs.get( loginurl )
        res = self.rqs.post( loginurl,
                             data={ "username": username,
                                    "password": password,
                                    "csrfmiddlewaretoken": self.rqs.cookies['csrftoken'] } )
        if res.status_code != 200:
            self.logger.error( "Failed to log in" )
            raise RuntimeError( "Login failure" )
        if 'Please enter a correct' in res.text:
            # NOTE(review): Django re-renders the login form with status 200 on a
            # bad password, so we sniff the error text.  This exact phrase was
            # observed empirically and is fragile — confirm it if logins ever
            # appear to succeed while later requests fail.
            self.logger.error( "Failed to log in. I think. Put in a debug break and look at res.text." )
            raise RuntimeError( "Login failure" )
        # All subsequent state-changing requests need the CSRF token header.
        self.rqs.headers.update( { 'X-CSRFToken': self.rqs.cookies['csrftoken'] } )
class TruthLoader(TomConnection):
    """Batch-load truth values from CSV files into the TOM over HTTP.

    Inherits the authenticated session from TomConnection.  Rows are read
    with pandas, converted to JSON-safe dicts (NA values -> None), buffered
    in ``self.cache``, and POSTed to ``{urlbase}/{urlend}`` in batches of
    ``cache_size`` rows.
    """

    def __init__( self, *args, converters=None, urlend=None, renames=None, sep=',', **kwargs ):
        """Set up the loader.

        converters -- dict of per-column converters passed to pandas.read_csv (required)
        urlend -- URL path, relative to urlbase, to POST truth values to (required)
        renames -- optional dict mapping CSV column names to server field names
        sep -- CSV field separator
        Remaining positional/keyword args go to TomConnection
        (urlbase, username, password, logger).

        Raises RuntimeError if converters or urlend is omitted.
        """
        super().__init__( *args, **kwargs )
        if converters is None or urlend is None:
            raise RuntimeError( "Must give converters and url" )
        self.converters = converters
        self.urlend = urlend
        self.renames = renames
        self.sep = sep
        self.cache = []           # rows not yet accepted by the server
        self.tot_n_loaded = 0     # cumulative count of rows the server loaded
        self.tot_missing = 0      # cumulative count the server reported missing
        self.cache_size = 1000    # rows per POST batch

    def load_csv( self, filename ):
        """Read one CSV file (gzipped if it ends in .gz) and post its rows in batches."""
        # Bug fix: this f-string previously had no placeholder, so the log
        # never said which file was being read.
        self.logger.info( f"****** Reading {filename} ******" )
        opener = gzip.open if str(filename).endswith( ".gz" ) else open
        # 'with' guarantees the file handle is closed even if read_csv raises.
        with opener( filename ) as ifp:
            df = pandas.read_csv( ifp, skipinitialspace=True, comment='#', skip_blank_lines=True,
                                  sep=self.sep, converters=self.converters )
        if self.renames is not None:
            df.rename( self.renames, axis=1, inplace=True )
        for i, row in df.iterrows():
            # Dealing with Pandas NaN and JSON is painful
            d = dict(row)
            for key, val in d.items():
                # Bug fix: pandas.isna handles strings and None, where
                # numpy.isnan raises TypeError on non-numeric values.
                if pandas.isna( val ):
                    d[key] = None
            self.cache.append( d )
            if len( self.cache ) >= self.cache_size:
                self.flush_cache()
        self.flush_cache()

    def flush_cache( self ):
        """POST everything in self.cache, retrying until the server accepts it; reset the cache."""
        # Bug fix: 'time' was used below but never imported, so the retry
        # path raised NameError.  Imported locally because the module's
        # top-level deprecation 'raise' precedes the import block.
        import time
        if len( self.cache ) > 0:
            self.logger.debug( f"Posting {sys.getsizeof(json.dumps(self.cache))/1024:.2f} kiB "
                               f"for {len(self.cache)} truth values" )
            # Keep resending until we get a good result.  The code on the server
            # should be smart enough to not load duplicates, so we should be
            # safe just resending.
            ok = False
            while not ok:
                resp = self.rqs.post( f'{self.urlbase}/{self.urlend}', json=self.cache )
                if resp.status_code != 200:
                    self.logger.error( f"ERROR : got status code {resp.status_code}; retrying after 1s..." )
                    time.sleep(1)
                else:
                    ok = True
            rjson = json.loads( resp.text )
            if rjson['status'] != 'ok':
                outlines = [ f"ERROR: got status {rjson['status']}" ]
                for key, val in rjson.items():
                    if key != 'status':
                        outlines.append( f"   {key} : {val}\n" )
                self.logger.error( "\n".join( outlines ) )
            else:
                if 'missing' in rjson:
                    if len( rjson['missing'] ) > 0:
                        self.logger.warning( f'Server told us the following was missing: '
                                             f'{" ".join( [ str(i) for i in rjson["missing"] ] )}' )
                    self.tot_missing += len( rjson['missing'] )
                self.tot_n_loaded += len( rjson["message"] )
                self.logger.info( f'Loaded {len(rjson["message"])} truth values, '
                                  f'cumulative {self.tot_n_loaded} (with {self.tot_missing} missing)\n' )
        self.cache = []