├── .DS_Store ├── ExplanationEvaluation ├── .DS_Store ├── __init__.py ├── configs │ ├── .DS_Store │ ├── batchnorm │ │ └── models │ │ │ ├── model_ori_ba2motifs.json │ │ │ ├── model_ori_bacommunity.json │ │ │ ├── model_ori_bashapes.json │ │ │ ├── model_ori_mutag.json │ │ │ ├── model_ori_treecycles.json │ │ │ └── model_ori_treegrids.json │ ├── extension │ │ └── explainers │ │ │ ├── gnnexplainer │ │ │ ├── ba2motifs.json │ │ │ ├── bacommunity.json │ │ │ ├── bashapes.json │ │ │ ├── mutag.json │ │ │ ├── treecycles.json │ │ │ └── treegrids.json │ │ │ └── pgexplainer │ │ │ ├── ba2motifs.json │ │ │ ├── bacommunity.json │ │ │ ├── bashapes.json │ │ │ ├── mutag.json │ │ │ ├── treecycles.json │ │ │ └── treegrids.json │ ├── replication │ │ ├── explainers │ │ │ ├── gnnexplainer │ │ │ │ ├── ba2motifs.json │ │ │ │ ├── bacommunity.json │ │ │ │ ├── bashapes.json │ │ │ │ ├── mutag.json │ │ │ │ ├── treecycles.json │ │ │ │ └── treegrids.json │ │ │ └── pgexplainer │ │ │ │ ├── ba2motifs.json │ │ │ │ ├── bacommunity.json │ │ │ │ ├── bashapes.json │ │ │ │ ├── mutag.json │ │ │ │ ├── treecycles.json │ │ │ │ └── treegrids.json │ │ └── models │ │ │ ├── model_gnn_ba2motifs.json │ │ │ ├── model_gnn_bacommunity.json │ │ │ ├── model_gnn_bashapes.json │ │ │ ├── model_gnn_mutag.json │ │ │ ├── model_gnn_treecycles.json │ │ │ └── model_gnn_treegrids.json │ └── selector.py ├── datasets │ ├── .DS_Store │ ├── Mutagenicity │ │ ├── Mutagenicity_A.txt │ │ ├── Mutagenicity_edge_gt.txt │ │ ├── Mutagenicity_edge_labels.txt │ │ ├── Mutagenicity_graph_indicator.txt │ │ ├── Mutagenicity_graph_labels.txt │ │ ├── Mutagenicity_label_readme.txt │ │ └── Mutagenicity_node_labels.txt │ ├── __init__.py │ ├── dataset_loaders.py │ ├── ground_truth_loaders.py │ ├── pkls │ │ ├── .DS_Store │ │ ├── BA-2motif.pkl │ │ ├── syn1.pkl │ │ ├── syn2.pkl │ │ ├── syn3.pkl │ │ └── syn4.pkl │ └── utils.py ├── evaluation │ ├── AUCEvaluation.py │ ├── BaseEvaluation.py │ ├── EfficiencyEvaluation.py │ ├── __init__.py │ └── utils.py ├── 
explainers │ ├── BaseExplainer.py │ ├── GNNExplainer.py │ ├── PGExplainer.py │ └── __init__.py ├── models │ ├── GNN_paper.py │ ├── PG_paper.py │ ├── __init__.py │ ├── model_selector.py │ └── pretrained │ │ ├── GNN │ │ ├── ba2 │ │ │ └── best_model │ │ ├── mutag │ │ │ └── best_model │ │ ├── syn1 │ │ │ └── best_model │ │ ├── syn2 │ │ │ └── best_model │ │ ├── syn3 │ │ │ └── best_model │ │ └── syn4 │ │ │ └── best_model │ │ └── PG │ │ ├── ba2 │ │ └── best_model │ │ ├── mutag │ │ └── best_model │ │ ├── syn1 │ │ ├── best_model │ │ └── best_model_old │ │ ├── syn2 │ │ └── best_model │ │ ├── syn3 │ │ └── best_model │ │ └── syn4 │ │ └── best_model ├── tasks │ ├── __init__.py │ ├── replication.py │ └── training.py └── utils │ ├── __init__.py │ ├── graph.py │ └── plotting.py ├── README.md ├── example_explain_your_model.ipynb ├── experiment_ablation.ipynb ├── experiment_models_training.ipynb ├── experiment_replication.ipynb └── requirements.txt /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/.DS_Store -------------------------------------------------------------------------------- /ExplanationEvaluation/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/.DS_Store -------------------------------------------------------------------------------- /ExplanationEvaluation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/__init__.py -------------------------------------------------------------------------------- 
/ExplanationEvaluation/configs/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/configs/.DS_Store -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/batchnorm/models/model_ori_ba2motifs.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "ba2", 4 | "paper": "PG", 5 | "lr" : 0.001, 6 | "epochs" : 5000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 500, 10 | "seed" : 42, 11 | "eval_enabled" : false 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/batchnorm/models/model_ori_bacommunity.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "syn2", 4 | "paper": "PG", 5 | "lr" : 0.001, 6 | "epochs" : 1000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 100, 10 | "seed" : 42, 11 | "eval_enabled" : false 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/batchnorm/models/model_ori_bashapes.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "syn1", 4 | "paper": "PG", 5 | "lr" : 0.001, 6 | "epochs" : 1000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 100, 10 | "seed" : 42, 11 | "eval_enabled" : false 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/batchnorm/models/model_ori_mutag.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "mutag", 4 
| "paper": "PG", 5 | "lr" : 0.001, 6 | "epochs" : 5000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 100, 10 | "seed" : 42, 11 | "eval_enabled" : false 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/batchnorm/models/model_ori_treecycles.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "syn3", 4 | "paper": "PG", 5 | "lr" : 0.001, 6 | "epochs" : 1000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 100, 10 | "seed" : 42, 11 | "eval_enabled" : false 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/batchnorm/models/model_ori_treegrids.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "syn4", 4 | "paper": "PG", 5 | "lr" : 0.001, 6 | "epochs" : 1000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 100, 10 | "seed" : 42, 11 | "eval_enabled" : false 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/extension/explainers/gnnexplainer/ba2motifs.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "ba2", 4 | "model": "GNN", 5 | "explainer": "GNN", 6 | "lr" : 0.005, 7 | "epochs" : 20, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.03, 10 | "reg_ent" : 0.01, 11 | "temps" : [5.0, 1.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 5, 15 | "thres_min" : -1 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/extension/explainers/gnnexplainer/bacommunity.json: -------------------------------------------------------------------------------- 1 | { 
2 | "explainer": { 3 | "dataset": "syn2", 4 | "model": "GNN", 5 | "explainer": "GNN", 6 | "lr" : 0.003, 7 | "epochs" : 100, 8 | "sample_bias" : 0.5, 9 | "reg_size" : 0.05, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 12, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/extension/explainers/gnnexplainer/bashapes.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn1", 4 | "model": "GNN", 5 | "explainer": "GNN", 6 | "lr" : 0.005, 7 | "epochs" : 100, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.005, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 12, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/extension/explainers/gnnexplainer/mutag.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "mutag", 4 | "model": "GNN", 5 | "explainer": "GNN", 6 | "lr" : 0.003, 7 | "epochs" : 30, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.01, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 2, 15 | "thres_min" : -1 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/extension/explainers/gnnexplainer/treecycles.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn3", 4 | "model": "GNN", 5 | "explainer" : "GNN", 6 | "lr" : 0.003, 7 | "epochs" : 100, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 
0.05, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 12, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/extension/explainers/gnnexplainer/treegrids.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn4", 4 | "model": "GNN", 5 | "explainer": "GNN", 6 | "lr" : 0.01, 7 | "epochs" : 100, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.01, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 24, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/extension/explainers/pgexplainer/ba2motifs.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "ba2", 4 | "model": "GNN", 5 | "explainer": "PG", 6 | "lr" : 0.005, 7 | "epochs" : 20, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.03, 10 | "reg_ent" : 0.01, 11 | "temps" : [5.0, 1.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 5, 15 | "thres_min" : -1 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/extension/explainers/pgexplainer/bacommunity.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn2", 4 | "model": "GNN", 5 | "explainer": "PG", 6 | "lr" : 0.003, 7 | "epochs" : 20, 8 | "sample_bias" : 0.5, 9 | "reg_size" : 0.05, 10 | "reg_ent" : 1.0, 11 | "temps" : [1.0, 1.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 12, 15 | "thres_min" : 
100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/extension/explainers/pgexplainer/bashapes.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn1", 4 | "model": "GNN", 5 | "explainer": "PG", 6 | "lr" : 0.003, 7 | "epochs" : 10, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.05, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 12, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/extension/explainers/pgexplainer/mutag.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "mutag", 4 | "model": "GNN", 5 | "explainer": "PG", 6 | "lr" : 0.0003, 7 | "epochs" : 30, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.005, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 5.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 2, 15 | "thres_min" : -1 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/extension/explainers/pgexplainer/treecycles.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn3", 4 | "model": "GNN", 5 | "explainer" : "PG", 6 | "lr" : 0.003, 7 | "epochs" : 20, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 1.0, 10 | "COMMENT": "regs need to be checked", 11 | "reg_ent" : 1.0, 12 | "temps" : [1.0, 1.0], 13 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 14 | "eval_enabled" : true, 15 | "thres_snip" : 12, 16 | "thres_min" : 100 17 | } 18 | } 19 | -------------------------------------------------------------------------------- 
/ExplanationEvaluation/configs/extension/explainers/pgexplainer/treegrids.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn4", 4 | "model": "GNN", 5 | "explainer": "PG", 6 | "lr" : 0.003, 7 | "epochs" : 30, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 1.0, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 24, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/gnnexplainer/ba2motifs.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "ba2", 4 | "model": "GNN", 5 | "explainer": "GNN", 6 | "lr" : 0.005, 7 | "epochs" : 100, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.03, 10 | "reg_ent" : 0.01, 11 | "temps" : [5.0, 1.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 5, 15 | "thres_min" : -1 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/gnnexplainer/bacommunity.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn2", 4 | "model": "GNN", 5 | "explainer": "GNN", 6 | "lr" : 0.003, 7 | "epochs" : 100, 8 | "sample_bias" : 0.5, 9 | "reg_size" : 0.05, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 12, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/gnnexplainer/bashapes.json: -------------------------------------------------------------------------------- 1 | 
{ 2 | "explainer": { 3 | "dataset": "syn1", 4 | "model": "GNN", 5 | "explainer": "GNN", 6 | "lr" : 0.005, 7 | "epochs" : 100, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.005, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 12, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/gnnexplainer/mutag.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "mutag", 4 | "model": "GNN", 5 | "explainer": "GNN", 6 | "lr" : 0.003, 7 | "epochs" : 30, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.01, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 2, 15 | "thres_min" : -1 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/gnnexplainer/treecycles.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn3", 4 | "model": "GNN", 5 | "explainer" : "GNN", 6 | "lr" : 0.003, 7 | "epochs" : 100, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.05, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 12, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/gnnexplainer/treegrids.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn4", 4 | "model": "GNN", 5 | "explainer": "GNN", 6 | "lr" : 0.01, 7 | "epochs" : 100, 8 | "sample_bias" : 0.0, 9 | 
"reg_size" : 0.01, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 24, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/pgexplainer/ba2motifs.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "ba2", 4 | "model": "GNN", 5 | "explainer": "PG", 6 | "lr" : 0.005, 7 | "epochs" : 20, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.03, 10 | "reg_ent" : 0.01, 11 | "temps" : [5.0, 1.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 5, 15 | "thres_min" : -1 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/pgexplainer/bacommunity.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn2", 4 | "model": "GNN", 5 | "explainer": "PG", 6 | "lr" : 0.003, 7 | "epochs" : 20, 8 | "sample_bias" : 0.5, 9 | "reg_size" : 0.05, 10 | "reg_ent" : 1.0, 11 | "temps" : [1.0, 1.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 12, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/pgexplainer/bashapes.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn1", 4 | "model": "GNN", 5 | "explainer": "PG", 6 | "lr" : 0.003, 7 | "epochs" : 10, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.05, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 12, 15 
| "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/pgexplainer/mutag.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "mutag", 4 | "model": "GNN", 5 | "explainer": "PG", 6 | "lr" : 0.0003, 7 | "epochs" : 30, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.005, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 5.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 2, 15 | "thres_min" : -1 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/pgexplainer/treecycles.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn3", 4 | "model": "GNN", 5 | "explainer" : "PG", 6 | "lr" : 0.003, 7 | "epochs" : 20, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 0.1, 10 | "COMMENT": "regs need to be checked", 11 | "reg_ent" : 10.0, 12 | "temps" : [1.0, 5.0], 13 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 14 | "eval_enabled" : true, 15 | "thres_snip" : 12, 16 | "thres_min" : 100 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/explainers/pgexplainer/treegrids.json: -------------------------------------------------------------------------------- 1 | { 2 | "explainer": { 3 | "dataset": "syn4", 4 | "model": "GNN", 5 | "explainer": "PG", 6 | "lr" : 0.003, 7 | "epochs" : 30, 8 | "sample_bias" : 0.0, 9 | "reg_size" : 1.0, 10 | "reg_ent" : 1.0, 11 | "temps" : [5.0, 2.0], 12 | "seeds" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 13 | "eval_enabled" : true, 14 | "thres_snip" : 24, 15 | "thres_min" : 100 16 | } 17 | } 18 | -------------------------------------------------------------------------------- 
/ExplanationEvaluation/configs/replication/models/model_gnn_ba2motifs.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "ba2", 4 | "paper": "GNN", 5 | "lr" : 0.001, 6 | "epochs" : 1000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 500, 10 | "seed" : 42, 11 | "eval_enabled" : true 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/models/model_gnn_bacommunity.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "syn2", 4 | "paper": "GNN", 5 | "lr" : 0.001, 6 | "epochs" : 1000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 100, 10 | "seed" : 42, 11 | "eval_enabled" : true 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/models/model_gnn_bashapes.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "syn1", 4 | "paper": "GNN", 5 | "lr" : 0.001, 6 | "epochs" : 1000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 100, 10 | "seed" : 42, 11 | "eval_enabled" : true 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/models/model_gnn_mutag.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "mutag", 4 | "paper": "GNN", 5 | "lr" : 0.001, 6 | "epochs" : 1000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 100, 10 | "seed" : 42, 11 | "eval_enabled" : true 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/models/model_gnn_treecycles.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "syn3", 4 | "paper": "GNN", 5 | "lr" : 0.001, 6 | "epochs" : 1000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 100, 10 | "seed" : 42, 11 | "eval_enabled" : true 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/replication/models/model_gnn_treegrids.json: -------------------------------------------------------------------------------- 1 | { 2 | "model": { 3 | "dataset": "syn4", 4 | "paper": "GNN", 5 | "lr" : 0.001, 6 | "epochs" : 1000, 7 | "clip_max" : 2.0, 8 | "batch_size": 64, 9 | "early_stopping": 100, 10 | "seed" : 42, 11 | "eval_enabled" : true 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /ExplanationEvaluation/configs/selector.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | 4 | class Struct: 5 | """Helper class to parse dict to object""" 6 | def __init__(self, entries): 7 | self.__dict__.update(entries) 8 | 9 | class Selector: 10 | def __init__(self, config_path): 11 | self.args = self.parse_config(config_path) 12 | 13 | def parse_config(self, config_path): 14 | try: 15 | with open(config_path) as config_parser: 16 | config = json.loads(json.dumps(json.load(config_parser)), object_hook=Struct) 17 | return config 18 | except FileNotFoundError: 19 | print("No config found") 20 | return None 21 | -------------------------------------------------------------------------------- /ExplanationEvaluation/datasets/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/datasets/.DS_Store 
-------------------------------------------------------------------------------- /ExplanationEvaluation/datasets/Mutagenicity/Mutagenicity_graph_labels.txt: -------------------------------------------------------------------------------- 1 | 0 2 | 1 3 | 0 4 | 1 5 | 1 6 | 1 7 | 1 8 | 1 9 | 1 10 | 1 11 | 0 12 | 1 13 | 1 14 | 1 15 | 0 16 | 0 17 | 0 18 | 0 19 | 0 20 | 1 21 | 0 22 | 1 23 | 0 24 | 0 25 | 0 26 | 0 27 | 1 28 | 1 29 | 1 30 | 1 31 | 1 32 | 0 33 | 1 34 | 0 35 | 0 36 | 1 37 | 1 38 | 1 39 | 1 40 | 0 41 | 1 42 | 0 43 | 0 44 | 1 45 | 0 46 | 0 47 | 1 48 | 0 49 | 1 50 | 0 51 | 1 52 | 0 53 | 1 54 | 0 55 | 1 56 | 1 57 | 1 58 | 1 59 | 1 60 | 1 61 | 1 62 | 1 63 | 1 64 | 0 65 | 1 66 | 0 67 | 1 68 | 1 69 | 1 70 | 0 71 | 1 72 | 0 73 | 0 74 | 0 75 | 0 76 | 1 77 | 1 78 | 0 79 | 0 80 | 0 81 | 0 82 | 0 83 | 0 84 | 1 85 | 1 86 | 0 87 | 1 88 | 1 89 | 0 90 | 0 91 | 1 92 | 0 93 | 0 94 | 1 95 | 0 96 | 1 97 | 1 98 | 0 99 | 0 100 | 0 101 | 0 102 | 1 103 | 1 104 | 0 105 | 1 106 | 1 107 | 1 108 | 1 109 | 0 110 | 1 111 | 0 112 | 1 113 | 1 114 | 0 115 | 0 116 | 1 117 | 1 118 | 1 119 | 1 120 | 1 121 | 0 122 | 0 123 | 1 124 | 1 125 | 1 126 | 0 127 | 1 128 | 1 129 | 0 130 | 1 131 | 1 132 | 1 133 | 0 134 | 0 135 | 0 136 | 0 137 | 1 138 | 1 139 | 1 140 | 0 141 | 0 142 | 1 143 | 1 144 | 0 145 | 1 146 | 1 147 | 1 148 | 0 149 | 1 150 | 1 151 | 0 152 | 0 153 | 0 154 | 1 155 | 0 156 | 1 157 | 0 158 | 0 159 | 1 160 | 0 161 | 0 162 | 1 163 | 1 164 | 0 165 | 0 166 | 0 167 | 1 168 | 1 169 | 1 170 | 1 171 | 0 172 | 1 173 | 0 174 | 1 175 | 0 176 | 0 177 | 0 178 | 1 179 | 1 180 | 1 181 | 0 182 | 0 183 | 0 184 | 1 185 | 0 186 | 1 187 | 0 188 | 1 189 | 1 190 | 0 191 | 0 192 | 1 193 | 0 194 | 0 195 | 1 196 | 0 197 | 0 198 | 0 199 | 0 200 | 0 201 | 1 202 | 0 203 | 0 204 | 0 205 | 0 206 | 1 207 | 0 208 | 0 209 | 1 210 | 1 211 | 0 212 | 1 213 | 1 214 | 0 215 | 1 216 | 0 217 | 0 218 | 0 219 | 1 220 | 1 221 | 1 222 | 1 223 | 0 224 | 1 225 | 0 226 | 0 227 | 0 228 | 0 229 | 0 230 | 0 231 | 0 232 | 0 233 | 0 234 | 
0 235 | 1 236 | 0 237 | 0 238 | 0 239 | 0 240 | 0 241 | 0 242 | 0 243 | 0 244 | 1 245 | 0 246 | 0 247 | 0 248 | 0 249 | 1 250 | 0 251 | 1 252 | 1 253 | 0 254 | 0 255 | 0 256 | 0 257 | 0 258 | 0 259 | 1 260 | 1 261 | 0 262 | 0 263 | 1 264 | 0 265 | 0 266 | 0 267 | 0 268 | 0 269 | 1 270 | 0 271 | 1 272 | 0 273 | 0 274 | 1 275 | 0 276 | 0 277 | 0 278 | 1 279 | 0 280 | 1 281 | 1 282 | 0 283 | 0 284 | 0 285 | 0 286 | 1 287 | 1 288 | 0 289 | 0 290 | 1 291 | 1 292 | 0 293 | 0 294 | 0 295 | 1 296 | 1 297 | 1 298 | 1 299 | 1 300 | 0 301 | 1 302 | 0 303 | 1 304 | 1 305 | 1 306 | 1 307 | 1 308 | 1 309 | 0 310 | 1 311 | 1 312 | 0 313 | 0 314 | 0 315 | 1 316 | 1 317 | 1 318 | 0 319 | 0 320 | 1 321 | 1 322 | 1 323 | 1 324 | 1 325 | 0 326 | 0 327 | 1 328 | 0 329 | 1 330 | 1 331 | 0 332 | 1 333 | 0 334 | 1 335 | 0 336 | 0 337 | 0 338 | 0 339 | 0 340 | 0 341 | 0 342 | 0 343 | 1 344 | 1 345 | 1 346 | 1 347 | 0 348 | 0 349 | 1 350 | 0 351 | 0 352 | 0 353 | 0 354 | 1 355 | 1 356 | 0 357 | 1 358 | 1 359 | 0 360 | 0 361 | 1 362 | 1 363 | 1 364 | 0 365 | 1 366 | 1 367 | 0 368 | 0 369 | 1 370 | 0 371 | 0 372 | 0 373 | 1 374 | 1 375 | 0 376 | 1 377 | 0 378 | 0 379 | 1 380 | 0 381 | 0 382 | 1 383 | 0 384 | 1 385 | 1 386 | 1 387 | 1 388 | 1 389 | 0 390 | 1 391 | 0 392 | 1 393 | 0 394 | 1 395 | 1 396 | 0 397 | 0 398 | 1 399 | 1 400 | 0 401 | 0 402 | 0 403 | 1 404 | 1 405 | 1 406 | 0 407 | 0 408 | 0 409 | 0 410 | 0 411 | 0 412 | 1 413 | 1 414 | 0 415 | 1 416 | 1 417 | 0 418 | 0 419 | 0 420 | 1 421 | 0 422 | 1 423 | 0 424 | 0 425 | 0 426 | 1 427 | 0 428 | 1 429 | 1 430 | 1 431 | 1 432 | 1 433 | 1 434 | 0 435 | 0 436 | 0 437 | 0 438 | 0 439 | 0 440 | 1 441 | 1 442 | 1 443 | 0 444 | 1 445 | 1 446 | 0 447 | 0 448 | 0 449 | 1 450 | 0 451 | 0 452 | 0 453 | 1 454 | 1 455 | 0 456 | 0 457 | 0 458 | 1 459 | 0 460 | 0 461 | 0 462 | 1 463 | 0 464 | 0 465 | 0 466 | 1 467 | 0 468 | 0 469 | 1 470 | 1 471 | 1 472 | 0 473 | 0 474 | 0 475 | 0 476 | 1 477 | 1 478 | 0 479 | 0 480 | 1 481 | 1 482 | 1 483 | 0 484 | 
1 485 | 0 486 | 1 487 | 0 488 | 1 489 | 0 490 | 1 491 | 1 492 | 0 493 | 0 494 | 0 495 | 1 496 | 0 497 | 1 498 | 0 499 | 1 500 | 0 501 | 0 502 | 0 503 | 0 504 | 0 505 | 0 506 | 1 507 | 1 508 | 1 509 | 0 510 | 0 511 | 0 512 | 0 513 | 0 514 | 1 515 | 0 516 | 1 517 | 1 518 | 0 519 | 1 520 | 0 521 | 1 522 | 0 523 | 0 524 | 0 525 | 0 526 | 0 527 | 1 528 | 1 529 | 1 530 | 0 531 | 0 532 | 1 533 | 1 534 | 0 535 | 0 536 | 1 537 | 1 538 | 0 539 | 1 540 | 0 541 | 1 542 | 1 543 | 0 544 | 1 545 | 0 546 | 1 547 | 1 548 | 1 549 | 0 550 | 1 551 | 0 552 | 1 553 | 0 554 | 0 555 | 0 556 | 0 557 | 1 558 | 1 559 | 0 560 | 0 561 | 0 562 | 1 563 | 0 564 | 0 565 | 1 566 | 0 567 | 1 568 | 1 569 | 0 570 | 0 571 | 1 572 | 0 573 | 1 574 | 0 575 | 1 576 | 1 577 | 1 578 | 1 579 | 0 580 | 0 581 | 0 582 | 0 583 | 0 584 | 0 585 | 0 586 | 0 587 | 1 588 | 1 589 | 1 590 | 1 591 | 0 592 | 0 593 | 0 594 | 0 595 | 1 596 | 1 597 | 1 598 | 0 599 | 1 600 | 0 601 | 0 602 | 0 603 | 1 604 | 0 605 | 1 606 | 0 607 | 1 608 | 1 609 | 1 610 | 0 611 | 1 612 | 1 613 | 0 614 | 1 615 | 0 616 | 1 617 | 0 618 | 1 619 | 0 620 | 0 621 | 0 622 | 0 623 | 1 624 | 1 625 | 0 626 | 1 627 | 1 628 | 0 629 | 1 630 | 1 631 | 1 632 | 1 633 | 1 634 | 0 635 | 0 636 | 0 637 | 0 638 | 0 639 | 0 640 | 1 641 | 0 642 | 0 643 | 0 644 | 0 645 | 0 646 | 0 647 | 0 648 | 1 649 | 0 650 | 0 651 | 0 652 | 0 653 | 0 654 | 1 655 | 1 656 | 1 657 | 1 658 | 1 659 | 0 660 | 1 661 | 1 662 | 0 663 | 0 664 | 0 665 | 0 666 | 1 667 | 0 668 | 1 669 | 0 670 | 0 671 | 1 672 | 1 673 | 0 674 | 0 675 | 0 676 | 1 677 | 0 678 | 0 679 | 0 680 | 0 681 | 1 682 | 1 683 | 0 684 | 0 685 | 0 686 | 0 687 | 0 688 | 1 689 | 0 690 | 0 691 | 0 692 | 0 693 | 0 694 | 1 695 | 1 696 | 0 697 | 0 698 | 0 699 | 0 700 | 1 701 | 1 702 | 1 703 | 0 704 | 0 705 | 1 706 | 0 707 | 1 708 | 1 709 | 0 710 | 1 711 | 1 712 | 1 713 | 1 714 | 0 715 | 1 716 | 0 717 | 0 718 | 0 719 | 0 720 | 0 721 | 1 722 | 0 723 | 0 724 | 1 725 | 1 726 | 0 727 | 0 728 | 0 729 | 1 730 | 1 731 | 0 732 | 1 733 | 0 734 | 
0 735 | 1 736 | 0 737 | 1 738 | 0 739 | 1 740 | 0 741 | 0 742 | 1 743 | 0 744 | 0 745 | 0 746 | 1 747 | 1 748 | 0 749 | 0 750 | 0 751 | 0 752 | 0 753 | 1 754 | 0 755 | 0 756 | 1 757 | 1 758 | 0 759 | 0 760 | 1 761 | 0 762 | 0 763 | 1 764 | 0 765 | 0 766 | 1 767 | 1 768 | 0 769 | 0 770 | 1 771 | 1 772 | 1 773 | 0 774 | 1 775 | 1 776 | 1 777 | 0 778 | 0 779 | 0 780 | 1 781 | 0 782 | 1 783 | 0 784 | 1 785 | 0 786 | 0 787 | 1 788 | 0 789 | 0 790 | 1 791 | 0 792 | 0 793 | 0 794 | 1 795 | 0 796 | 1 797 | 0 798 | 1 799 | 0 800 | 0 801 | 1 802 | 1 803 | 1 804 | 1 805 | 0 806 | 1 807 | 0 808 | 1 809 | 0 810 | 0 811 | 0 812 | 0 813 | 0 814 | 0 815 | 1 816 | 0 817 | 0 818 | 1 819 | 1 820 | 1 821 | 1 822 | 1 823 | 1 824 | 0 825 | 0 826 | 0 827 | 1 828 | 1 829 | 1 830 | 1 831 | 0 832 | 0 833 | 0 834 | 1 835 | 1 836 | 0 837 | 1 838 | 1 839 | 0 840 | 0 841 | 1 842 | 0 843 | 0 844 | 0 845 | 1 846 | 0 847 | 0 848 | 0 849 | 0 850 | 0 851 | 0 852 | 0 853 | 1 854 | 1 855 | 0 856 | 0 857 | 0 858 | 0 859 | 0 860 | 0 861 | 1 862 | 1 863 | 1 864 | 0 865 | 0 866 | 0 867 | 1 868 | 1 869 | 1 870 | 1 871 | 1 872 | 1 873 | 0 874 | 0 875 | 1 876 | 1 877 | 1 878 | 0 879 | 1 880 | 0 881 | 0 882 | 0 883 | 1 884 | 0 885 | 1 886 | 0 887 | 0 888 | 1 889 | 0 890 | 1 891 | 0 892 | 0 893 | 0 894 | 1 895 | 0 896 | 1 897 | 1 898 | 0 899 | 1 900 | 1 901 | 0 902 | 0 903 | 0 904 | 1 905 | 0 906 | 0 907 | 1 908 | 1 909 | 1 910 | 1 911 | 1 912 | 1 913 | 1 914 | 0 915 | 0 916 | 0 917 | 0 918 | 0 919 | 1 920 | 1 921 | 1 922 | 1 923 | 0 924 | 1 925 | 1 926 | 0 927 | 0 928 | 1 929 | 0 930 | 0 931 | 1 932 | 1 933 | 0 934 | 1 935 | 1 936 | 0 937 | 0 938 | 0 939 | 0 940 | 1 941 | 0 942 | 1 943 | 0 944 | 0 945 | 1 946 | 0 947 | 0 948 | 1 949 | 0 950 | 1 951 | 0 952 | 0 953 | 1 954 | 0 955 | 1 956 | 0 957 | 0 958 | 1 959 | 0 960 | 0 961 | 1 962 | 0 963 | 0 964 | 0 965 | 0 966 | 1 967 | 1 968 | 0 969 | 1 970 | 1 971 | 1 972 | 1 973 | 1 974 | 0 975 | 0 976 | 1 977 | 0 978 | 0 979 | 1 980 | 1 981 | 1 982 | 1 983 | 0 984 | 
1 985 | 0 986 | 1 987 | 1 988 | 0 989 | 1 990 | 0 991 | 0 992 | 1 993 | 0 994 | 0 995 | 1 996 | 1 997 | 0 998 | 1 999 | 1 1000 | 0 1001 | 1 1002 | 1 1003 | 0 1004 | 1 1005 | 1 1006 | 0 1007 | 0 1008 | 0 1009 | 0 1010 | 1 1011 | 1 1012 | 1 1013 | 0 1014 | 0 1015 | 0 1016 | 0 1017 | 1 1018 | 0 1019 | 1 1020 | 0 1021 | 0 1022 | 0 1023 | 0 1024 | 1 1025 | 0 1026 | 0 1027 | 1 1028 | 0 1029 | 0 1030 | 1 1031 | 1 1032 | 1 1033 | 0 1034 | 0 1035 | 1 1036 | 0 1037 | 0 1038 | 1 1039 | 1 1040 | 1 1041 | 1 1042 | 0 1043 | 1 1044 | 0 1045 | 1 1046 | 0 1047 | 1 1048 | 1 1049 | 1 1050 | 1 1051 | 1 1052 | 0 1053 | 1 1054 | 0 1055 | 0 1056 | 1 1057 | 1 1058 | 0 1059 | 1 1060 | 0 1061 | 0 1062 | 1 1063 | 0 1064 | 0 1065 | 1 1066 | 1 1067 | 0 1068 | 1 1069 | 0 1070 | 0 1071 | 0 1072 | 0 1073 | 0 1074 | 0 1075 | 0 1076 | 0 1077 | 1 1078 | 1 1079 | 0 1080 | 0 1081 | 1 1082 | 0 1083 | 0 1084 | 1 1085 | 1 1086 | 1 1087 | 1 1088 | 1 1089 | 1 1090 | 1 1091 | 1 1092 | 0 1093 | 0 1094 | 0 1095 | 0 1096 | 0 1097 | 0 1098 | 0 1099 | 1 1100 | 0 1101 | 0 1102 | 0 1103 | 1 1104 | 0 1105 | 0 1106 | 1 1107 | 1 1108 | 1 1109 | 0 1110 | 1 1111 | 1 1112 | 1 1113 | 1 1114 | 1 1115 | 1 1116 | 0 1117 | 1 1118 | 0 1119 | 0 1120 | 1 1121 | 0 1122 | 0 1123 | 0 1124 | 1 1125 | 0 1126 | 0 1127 | 1 1128 | 0 1129 | 1 1130 | 0 1131 | 0 1132 | 0 1133 | 1 1134 | 1 1135 | 1 1136 | 1 1137 | 0 1138 | 1 1139 | 0 1140 | 1 1141 | 0 1142 | 0 1143 | 1 1144 | 0 1145 | 1 1146 | 1 1147 | 1 1148 | 0 1149 | 1 1150 | 0 1151 | 1 1152 | 1 1153 | 0 1154 | 1 1155 | 0 1156 | 0 1157 | 1 1158 | 1 1159 | 0 1160 | 1 1161 | 1 1162 | 0 1163 | 1 1164 | 0 1165 | 0 1166 | 1 1167 | 1 1168 | 1 1169 | 0 1170 | 1 1171 | 0 1172 | 0 1173 | 1 1174 | 0 1175 | 0 1176 | 1 1177 | 1 1178 | 0 1179 | 0 1180 | 0 1181 | 0 1182 | 0 1183 | 1 1184 | 0 1185 | 1 1186 | 1 1187 | 1 1188 | 1 1189 | 0 1190 | 0 1191 | 1 1192 | 1 1193 | 0 1194 | 1 1195 | 0 1196 | 1 1197 | 0 1198 | 0 1199 | 0 1200 | 1 1201 | 0 1202 | 0 1203 | 1 1204 | 0 1205 | 1 1206 | 1 1207 | 0 1208 
| 0 1209 | 1 1210 | 0 1211 | 0 1212 | 1 1213 | 1 1214 | 0 1215 | 0 1216 | 1 1217 | 0 1218 | 0 1219 | 0 1220 | 1 1221 | 1 1222 | 0 1223 | 1 1224 | 1 1225 | 0 1226 | 0 1227 | 1 1228 | 1 1229 | 0 1230 | 1 1231 | 0 1232 | 0 1233 | 0 1234 | 0 1235 | 1 1236 | 0 1237 | 1 1238 | 1 1239 | 1 1240 | 1 1241 | 1 1242 | 0 1243 | 1 1244 | 1 1245 | 1 1246 | 0 1247 | 0 1248 | 0 1249 | 1 1250 | 1 1251 | 1 1252 | 1 1253 | 1 1254 | 0 1255 | 1 1256 | 0 1257 | 1 1258 | 0 1259 | 1 1260 | 0 1261 | 0 1262 | 1 1263 | 0 1264 | 0 1265 | 0 1266 | 1 1267 | 0 1268 | 0 1269 | 0 1270 | 1 1271 | 0 1272 | 1 1273 | 0 1274 | 0 1275 | 1 1276 | 1 1277 | 0 1278 | 1 1279 | 0 1280 | 0 1281 | 0 1282 | 0 1283 | 1 1284 | 1 1285 | 1 1286 | 0 1287 | 0 1288 | 1 1289 | 0 1290 | 0 1291 | 1 1292 | 0 1293 | 0 1294 | 0 1295 | 0 1296 | 0 1297 | 0 1298 | 0 1299 | 1 1300 | 1 1301 | 0 1302 | 0 1303 | 0 1304 | 0 1305 | 0 1306 | 1 1307 | 0 1308 | 0 1309 | 0 1310 | 0 1311 | 1 1312 | 0 1313 | 0 1314 | 1 1315 | 0 1316 | 0 1317 | 1 1318 | 0 1319 | 1 1320 | 0 1321 | 1 1322 | 1 1323 | 0 1324 | 0 1325 | 0 1326 | 1 1327 | 0 1328 | 0 1329 | 0 1330 | 0 1331 | 1 1332 | 0 1333 | 1 1334 | 0 1335 | 0 1336 | 0 1337 | 0 1338 | 0 1339 | 1 1340 | 1 1341 | 0 1342 | 1 1343 | 0 1344 | 0 1345 | 1 1346 | 0 1347 | 0 1348 | 1 1349 | 1 1350 | 1 1351 | 1 1352 | 1 1353 | 0 1354 | 0 1355 | 0 1356 | 1 1357 | 0 1358 | 1 1359 | 0 1360 | 0 1361 | 0 1362 | 1 1363 | 1 1364 | 1 1365 | 0 1366 | 1 1367 | 1 1368 | 0 1369 | 0 1370 | 0 1371 | 0 1372 | 0 1373 | 1 1374 | 0 1375 | 0 1376 | 0 1377 | 1 1378 | 0 1379 | 0 1380 | 0 1381 | 0 1382 | 1 1383 | 0 1384 | 1 1385 | 0 1386 | 0 1387 | 1 1388 | 1 1389 | 0 1390 | 0 1391 | 1 1392 | 0 1393 | 1 1394 | 0 1395 | 0 1396 | 1 1397 | 0 1398 | 0 1399 | 0 1400 | 0 1401 | 1 1402 | 0 1403 | 1 1404 | 0 1405 | 0 1406 | 0 1407 | 0 1408 | 0 1409 | 1 1410 | 0 1411 | 0 1412 | 0 1413 | 0 1414 | 0 1415 | 0 1416 | 0 1417 | 1 1418 | 0 1419 | 1 1420 | 1 1421 | 1 1422 | 1 1423 | 0 1424 | 0 1425 | 1 1426 | 1 1427 | 0 1428 | 0 1429 | 1 1430 | 
0 1431 | 1 1432 | 0 1433 | 0 1434 | 0 1435 | 0 1436 | 0 1437 | 0 1438 | 0 1439 | 0 1440 | 0 1441 | 0 1442 | 0 1443 | 0 1444 | 0 1445 | 1 1446 | 1 1447 | 0 1448 | 0 1449 | 0 1450 | 1 1451 | 1 1452 | 1 1453 | 0 1454 | 1 1455 | 0 1456 | 0 1457 | 1 1458 | 0 1459 | 1 1460 | 0 1461 | 0 1462 | 0 1463 | 0 1464 | 1 1465 | 1 1466 | 1 1467 | 0 1468 | 1 1469 | 1 1470 | 0 1471 | 1 1472 | 0 1473 | 0 1474 | 1 1475 | 0 1476 | 0 1477 | 0 1478 | 1 1479 | 1 1480 | 1 1481 | 1 1482 | 1 1483 | 0 1484 | 1 1485 | 0 1486 | 1 1487 | 1 1488 | 0 1489 | 1 1490 | 0 1491 | 0 1492 | 0 1493 | 1 1494 | 0 1495 | 0 1496 | 1 1497 | 0 1498 | 0 1499 | 0 1500 | 1 1501 | 1 1502 | 0 1503 | 0 1504 | 1 1505 | 0 1506 | 0 1507 | 1 1508 | 1 1509 | 1 1510 | 0 1511 | 0 1512 | 1 1513 | 0 1514 | 0 1515 | 1 1516 | 1 1517 | 1 1518 | 1 1519 | 1 1520 | 1 1521 | 0 1522 | 1 1523 | 1 1524 | 0 1525 | 0 1526 | 1 1527 | 0 1528 | 0 1529 | 1 1530 | 0 1531 | 1 1532 | 0 1533 | 0 1534 | 0 1535 | 0 1536 | 0 1537 | 1 1538 | 1 1539 | 0 1540 | 0 1541 | 0 1542 | 0 1543 | 0 1544 | 1 1545 | 1 1546 | 0 1547 | 0 1548 | 1 1549 | 1 1550 | 1 1551 | 1 1552 | 0 1553 | 1 1554 | 1 1555 | 0 1556 | 1 1557 | 0 1558 | 0 1559 | 1 1560 | 1 1561 | 0 1562 | 0 1563 | 0 1564 | 0 1565 | 0 1566 | 0 1567 | 0 1568 | 0 1569 | 0 1570 | 0 1571 | 0 1572 | 1 1573 | 1 1574 | 0 1575 | 0 1576 | 1 1577 | 1 1578 | 1 1579 | 0 1580 | 0 1581 | 0 1582 | 0 1583 | 0 1584 | 0 1585 | 1 1586 | 1 1587 | 0 1588 | 0 1589 | 1 1590 | 1 1591 | 1 1592 | 0 1593 | 1 1594 | 0 1595 | 0 1596 | 1 1597 | 0 1598 | 0 1599 | 0 1600 | 0 1601 | 0 1602 | 1 1603 | 1 1604 | 0 1605 | 0 1606 | 0 1607 | 0 1608 | 1 1609 | 1 1610 | 0 1611 | 0 1612 | 0 1613 | 0 1614 | 0 1615 | 1 1616 | 0 1617 | 0 1618 | 0 1619 | 0 1620 | 1 1621 | 1 1622 | 1 1623 | 1 1624 | 0 1625 | 0 1626 | 1 1627 | 1 1628 | 1 1629 | 0 1630 | 0 1631 | 1 1632 | 0 1633 | 0 1634 | 0 1635 | 0 1636 | 1 1637 | 0 1638 | 0 1639 | 0 1640 | 1 1641 | 0 1642 | 0 1643 | 0 1644 | 0 1645 | 0 1646 | 1 1647 | 1 1648 | 0 1649 | 0 1650 | 0 1651 | 1 1652 | 0 
1653 | 1 1654 | 1 1655 | 1 1656 | 0 1657 | 1 1658 | 1 1659 | 1 1660 | 1 1661 | 0 1662 | 1 1663 | 0 1664 | 0 1665 | 1 1666 | 0 1667 | 1 1668 | 0 1669 | 0 1670 | 0 1671 | 1 1672 | 0 1673 | 0 1674 | 1 1675 | 1 1676 | 0 1677 | 0 1678 | 1 1679 | 0 1680 | 0 1681 | 1 1682 | 0 1683 | 0 1684 | 0 1685 | 0 1686 | 1 1687 | 1 1688 | 0 1689 | 0 1690 | 0 1691 | 0 1692 | 1 1693 | 0 1694 | 1 1695 | 0 1696 | 0 1697 | 0 1698 | 0 1699 | 0 1700 | 1 1701 | 0 1702 | 1 1703 | 1 1704 | 1 1705 | 0 1706 | 0 1707 | 1 1708 | 0 1709 | 0 1710 | 1 1711 | 1 1712 | 1 1713 | 0 1714 | 1 1715 | 0 1716 | 0 1717 | 0 1718 | 1 1719 | 1 1720 | 0 1721 | 1 1722 | 0 1723 | 0 1724 | 0 1725 | 0 1726 | 0 1727 | 0 1728 | 0 1729 | 0 1730 | 0 1731 | 0 1732 | 1 1733 | 0 1734 | 0 1735 | 1 1736 | 0 1737 | 1 1738 | 0 1739 | 1 1740 | 1 1741 | 0 1742 | 0 1743 | 0 1744 | 0 1745 | 0 1746 | 0 1747 | 1 1748 | 0 1749 | 0 1750 | 1 1751 | 0 1752 | 1 1753 | 0 1754 | 0 1755 | 1 1756 | 0 1757 | 1 1758 | 1 1759 | 1 1760 | 1 1761 | 1 1762 | 0 1763 | 0 1764 | 1 1765 | 1 1766 | 0 1767 | 0 1768 | 1 1769 | 0 1770 | 1 1771 | 0 1772 | 1 1773 | 0 1774 | 1 1775 | 0 1776 | 1 1777 | 0 1778 | 0 1779 | 1 1780 | 1 1781 | 1 1782 | 1 1783 | 1 1784 | 0 1785 | 0 1786 | 1 1787 | 0 1788 | 0 1789 | 1 1790 | 1 1791 | 1 1792 | 0 1793 | 0 1794 | 1 1795 | 1 1796 | 1 1797 | 1 1798 | 1 1799 | 0 1800 | 0 1801 | 1 1802 | 0 1803 | 1 1804 | 0 1805 | 1 1806 | 0 1807 | 0 1808 | 1 1809 | 1 1810 | 0 1811 | 1 1812 | 0 1813 | 1 1814 | 0 1815 | 1 1816 | 1 1817 | 0 1818 | 1 1819 | 0 1820 | 0 1821 | 1 1822 | 0 1823 | 0 1824 | 0 1825 | 1 1826 | 0 1827 | 1 1828 | 0 1829 | 0 1830 | 0 1831 | 0 1832 | 1 1833 | 0 1834 | 0 1835 | 0 1836 | 1 1837 | 1 1838 | 0 1839 | 1 1840 | 0 1841 | 1 1842 | 1 1843 | 0 1844 | 0 1845 | 1 1846 | 1 1847 | 1 1848 | 0 1849 | 0 1850 | 0 1851 | 1 1852 | 1 1853 | 1 1854 | 0 1855 | 0 1856 | 0 1857 | 1 1858 | 0 1859 | 0 1860 | 0 1861 | 1 1862 | 0 1863 | 0 1864 | 1 1865 | 1 1866 | 0 1867 | 0 1868 | 0 1869 | 1 1870 | 0 1871 | 0 1872 | 1 1873 | 0 1874 | 0 
1875 | 0 1876 | 1 1877 | 0 1878 | 0 1879 | 0 1880 | 1 1881 | 1 1882 | 1 1883 | 0 1884 | 0 1885 | 0 1886 | 0 1887 | 0 1888 | 0 1889 | 0 1890 | 1 1891 | 1 1892 | 0 1893 | 0 1894 | 1 1895 | 0 1896 | 0 1897 | 1 1898 | 0 1899 | 1 1900 | 0 1901 | 1 1902 | 1 1903 | 0 1904 | 0 1905 | 0 1906 | 1 1907 | 1 1908 | 0 1909 | 0 1910 | 1 1911 | 0 1912 | 1 1913 | 1 1914 | 0 1915 | 1 1916 | 1 1917 | 0 1918 | 1 1919 | 1 1920 | 0 1921 | 1 1922 | 0 1923 | 1 1924 | 0 1925 | 0 1926 | 1 1927 | 1 1928 | 1 1929 | 0 1930 | 0 1931 | 1 1932 | 1 1933 | 1 1934 | 0 1935 | 0 1936 | 1 1937 | 0 1938 | 1 1939 | 1 1940 | 0 1941 | 0 1942 | 1 1943 | 1 1944 | 1 1945 | 1 1946 | 0 1947 | 1 1948 | 0 1949 | 0 1950 | 1 1951 | 0 1952 | 1 1953 | 0 1954 | 1 1955 | 1 1956 | 1 1957 | 1 1958 | 0 1959 | 1 1960 | 0 1961 | 0 1962 | 0 1963 | 0 1964 | 1 1965 | 0 1966 | 1 1967 | 0 1968 | 0 1969 | 0 1970 | 0 1971 | 1 1972 | 1 1973 | 1 1974 | 1 1975 | 1 1976 | 0 1977 | 1 1978 | 0 1979 | 1 1980 | 0 1981 | 0 1982 | 0 1983 | 1 1984 | 1 1985 | 0 1986 | 0 1987 | 0 1988 | 1 1989 | 1 1990 | 1 1991 | 0 1992 | 0 1993 | 0 1994 | 1 1995 | 0 1996 | 0 1997 | 0 1998 | 0 1999 | 0 2000 | 0 2001 | 0 2002 | 0 2003 | 0 2004 | 0 2005 | 1 2006 | 0 2007 | 1 2008 | 0 2009 | 1 2010 | 1 2011 | 0 2012 | 1 2013 | 0 2014 | 1 2015 | 0 2016 | 1 2017 | 0 2018 | 0 2019 | 0 2020 | 0 2021 | 0 2022 | 0 2023 | 0 2024 | 1 2025 | 1 2026 | 0 2027 | 1 2028 | 1 2029 | 0 2030 | 0 2031 | 0 2032 | 1 2033 | 0 2034 | 0 2035 | 1 2036 | 1 2037 | 1 2038 | 0 2039 | 1 2040 | 1 2041 | 0 2042 | 0 2043 | 1 2044 | 0 2045 | 0 2046 | 1 2047 | 0 2048 | 0 2049 | 0 2050 | 1 2051 | 1 2052 | 0 2053 | 0 2054 | 1 2055 | 0 2056 | 0 2057 | 1 2058 | 1 2059 | 1 2060 | 0 2061 | 0 2062 | 1 2063 | 0 2064 | 0 2065 | 0 2066 | 1 2067 | 0 2068 | 0 2069 | 0 2070 | 1 2071 | 0 2072 | 0 2073 | 1 2074 | 1 2075 | 0 2076 | 0 2077 | 0 2078 | 0 2079 | 1 2080 | 0 2081 | 0 2082 | 0 2083 | 1 2084 | 1 2085 | 0 2086 | 1 2087 | 0 2088 | 1 2089 | 0 2090 | 0 2091 | 0 2092 | 1 2093 | 0 2094 | 1 2095 | 1 2096 | 1 
2097 | 1 2098 | 0 2099 | 1 2100 | 0 2101 | 0 2102 | 0 2103 | 1 2104 | 0 2105 | 0 2106 | 0 2107 | 0 2108 | 0 2109 | 0 2110 | 1 2111 | 1 2112 | 0 2113 | 1 2114 | 0 2115 | 0 2116 | 0 2117 | 0 2118 | 0 2119 | 0 2120 | 1 2121 | 1 2122 | 0 2123 | 1 2124 | 0 2125 | 0 2126 | 1 2127 | 1 2128 | 0 2129 | 0 2130 | 1 2131 | 0 2132 | 0 2133 | 0 2134 | 1 2135 | 0 2136 | 0 2137 | 1 2138 | 0 2139 | 1 2140 | 0 2141 | 0 2142 | 1 2143 | 0 2144 | 1 2145 | 1 2146 | 0 2147 | 1 2148 | 1 2149 | 1 2150 | 0 2151 | 1 2152 | 0 2153 | 0 2154 | 1 2155 | 0 2156 | 0 2157 | 1 2158 | 1 2159 | 1 2160 | 1 2161 | 1 2162 | 0 2163 | 0 2164 | 0 2165 | 1 2166 | 1 2167 | 0 2168 | 1 2169 | 0 2170 | 0 2171 | 1 2172 | 0 2173 | 0 2174 | 1 2175 | 1 2176 | 1 2177 | 1 2178 | 1 2179 | 1 2180 | 0 2181 | 1 2182 | 1 2183 | 0 2184 | 1 2185 | 0 2186 | 1 2187 | 0 2188 | 1 2189 | 1 2190 | 1 2191 | 0 2192 | 0 2193 | 1 2194 | 1 2195 | 1 2196 | 0 2197 | 1 2198 | 0 2199 | 0 2200 | 0 2201 | 0 2202 | 1 2203 | 0 2204 | 0 2205 | 0 2206 | 0 2207 | 0 2208 | 1 2209 | 0 2210 | 0 2211 | 1 2212 | 1 2213 | 1 2214 | 1 2215 | 0 2216 | 0 2217 | 1 2218 | 1 2219 | 1 2220 | 0 2221 | 0 2222 | 0 2223 | 0 2224 | 1 2225 | 0 2226 | 0 2227 | 0 2228 | 1 2229 | 1 2230 | 1 2231 | 1 2232 | 1 2233 | 0 2234 | 1 2235 | 1 2236 | 0 2237 | 1 2238 | 1 2239 | 1 2240 | 1 2241 | 0 2242 | 0 2243 | 0 2244 | 1 2245 | 1 2246 | 1 2247 | 0 2248 | 0 2249 | 0 2250 | 0 2251 | 0 2252 | 1 2253 | 1 2254 | 0 2255 | 0 2256 | 1 2257 | 0 2258 | 0 2259 | 1 2260 | 0 2261 | 0 2262 | 1 2263 | 1 2264 | 1 2265 | 0 2266 | 1 2267 | 0 2268 | 0 2269 | 1 2270 | 0 2271 | 1 2272 | 0 2273 | 0 2274 | 1 2275 | 0 2276 | 1 2277 | 0 2278 | 1 2279 | 0 2280 | 1 2281 | 1 2282 | 0 2283 | 0 2284 | 1 2285 | 1 2286 | 1 2287 | 0 2288 | 1 2289 | 0 2290 | 0 2291 | 0 2292 | 1 2293 | 1 2294 | 1 2295 | 0 2296 | 0 2297 | 0 2298 | 0 2299 | 1 2300 | 1 2301 | 0 2302 | 0 2303 | 1 2304 | 1 2305 | 1 2306 | 0 2307 | 1 2308 | 0 2309 | 0 2310 | 0 2311 | 1 2312 | 1 2313 | 1 2314 | 1 2315 | 0 2316 | 1 2317 | 0 2318 | 0 
2319 | 1 2320 | 0 2321 | 1 2322 | 1 2323 | 1 2324 | 1 2325 | 0 2326 | 0 2327 | 1 2328 | 0 2329 | 0 2330 | 0 2331 | 0 2332 | 1 2333 | 0 2334 | 0 2335 | 1 2336 | 1 2337 | 1 2338 | 0 2339 | 0 2340 | 0 2341 | 1 2342 | 0 2343 | 1 2344 | 0 2345 | 1 2346 | 1 2347 | 0 2348 | 0 2349 | 0 2350 | 0 2351 | 1 2352 | 0 2353 | 1 2354 | 1 2355 | 1 2356 | 0 2357 | 0 2358 | 1 2359 | 0 2360 | 1 2361 | 1 2362 | 0 2363 | 0 2364 | 1 2365 | 0 2366 | 1 2367 | 0 2368 | 0 2369 | 1 2370 | 1 2371 | 1 2372 | 0 2373 | 1 2374 | 1 2375 | 0 2376 | 1 2377 | 0 2378 | 0 2379 | 0 2380 | 1 2381 | 0 2382 | 1 2383 | 1 2384 | 0 2385 | 0 2386 | 0 2387 | 1 2388 | 0 2389 | 0 2390 | 1 2391 | 1 2392 | 0 2393 | 1 2394 | 0 2395 | 1 2396 | 0 2397 | 0 2398 | 0 2399 | 0 2400 | 0 2401 | 1 2402 | 1 2403 | 0 2404 | 0 2405 | 0 2406 | 0 2407 | 1 2408 | 1 2409 | 0 2410 | 0 2411 | 0 2412 | 0 2413 | 1 2414 | 1 2415 | 1 2416 | 0 2417 | 0 2418 | 0 2419 | 1 2420 | 1 2421 | 1 2422 | 0 2423 | 1 2424 | 1 2425 | 1 2426 | 1 2427 | 0 2428 | 0 2429 | 1 2430 | 1 2431 | 1 2432 | 1 2433 | 1 2434 | 0 2435 | 0 2436 | 1 2437 | 0 2438 | 0 2439 | 1 2440 | 0 2441 | 1 2442 | 1 2443 | 1 2444 | 1 2445 | 0 2446 | 0 2447 | 1 2448 | 0 2449 | 0 2450 | 1 2451 | 0 2452 | 1 2453 | 1 2454 | 1 2455 | 0 2456 | 1 2457 | 0 2458 | 0 2459 | 0 2460 | 0 2461 | 1 2462 | 1 2463 | 1 2464 | 1 2465 | 1 2466 | 0 2467 | 0 2468 | 0 2469 | 0 2470 | 0 2471 | 1 2472 | 1 2473 | 0 2474 | 0 2475 | 0 2476 | 1 2477 | 0 2478 | 0 2479 | 0 2480 | 0 2481 | 0 2482 | 0 2483 | 1 2484 | 0 2485 | 0 2486 | 1 2487 | 0 2488 | 1 2489 | 0 2490 | 0 2491 | 1 2492 | 0 2493 | 0 2494 | 0 2495 | 1 2496 | 1 2497 | 0 2498 | 0 2499 | 1 2500 | 1 2501 | 1 2502 | 0 2503 | 0 2504 | 1 2505 | 1 2506 | 0 2507 | 1 2508 | 1 2509 | 1 2510 | 1 2511 | 0 2512 | 0 2513 | 0 2514 | 1 2515 | 1 2516 | 1 2517 | 1 2518 | 0 2519 | 1 2520 | 0 2521 | 1 2522 | 1 2523 | 0 2524 | 0 2525 | 1 2526 | 0 2527 | 0 2528 | 0 2529 | 1 2530 | 0 2531 | 0 2532 | 1 2533 | 1 2534 | 0 2535 | 1 2536 | 1 2537 | 0 2538 | 0 2539 | 1 2540 | 0 
2541 | 1 2542 | 1 2543 | 0 2544 | 0 2545 | 1 2546 | 1 2547 | 0 2548 | 0 2549 | 1 2550 | 0 2551 | 1 2552 | 1 2553 | 0 2554 | 0 2555 | 0 2556 | 0 2557 | 1 2558 | 0 2559 | 1 2560 | 1 2561 | 1 2562 | 1 2563 | 0 2564 | 1 2565 | 0 2566 | 1 2567 | 1 2568 | 1 2569 | 1 2570 | 1 2571 | 1 2572 | 0 2573 | 1 2574 | 0 2575 | 0 2576 | 0 2577 | 0 2578 | 0 2579 | 0 2580 | 0 2581 | 1 2582 | 0 2583 | 0 2584 | 0 2585 | 0 2586 | 0 2587 | 0 2588 | 0 2589 | 0 2590 | 1 2591 | 0 2592 | 1 2593 | 0 2594 | 1 2595 | 1 2596 | 1 2597 | 1 2598 | 0 2599 | 0 2600 | 1 2601 | 1 2602 | 1 2603 | 0 2604 | 0 2605 | 1 2606 | 0 2607 | 0 2608 | 1 2609 | 1 2610 | 0 2611 | 1 2612 | 0 2613 | 0 2614 | 0 2615 | 1 2616 | 1 2617 | 1 2618 | 1 2619 | 0 2620 | 0 2621 | 0 2622 | 1 2623 | 0 2624 | 0 2625 | 1 2626 | 0 2627 | 0 2628 | 1 2629 | 0 2630 | 0 2631 | 0 2632 | 1 2633 | 0 2634 | 1 2635 | 1 2636 | 0 2637 | 0 2638 | 1 2639 | 1 2640 | 0 2641 | 1 2642 | 1 2643 | 0 2644 | 1 2645 | 0 2646 | 0 2647 | 0 2648 | 0 2649 | 0 2650 | 0 2651 | 0 2652 | 1 2653 | 0 2654 | 0 2655 | 0 2656 | 0 2657 | 1 2658 | 1 2659 | 1 2660 | 0 2661 | 0 2662 | 1 2663 | 1 2664 | 1 2665 | 1 2666 | 0 2667 | 1 2668 | 1 2669 | 1 2670 | 1 2671 | 1 2672 | 0 2673 | 0 2674 | 1 2675 | 0 2676 | 0 2677 | 1 2678 | 1 2679 | 1 2680 | 0 2681 | 0 2682 | 1 2683 | 1 2684 | 0 2685 | 1 2686 | 0 2687 | 1 2688 | 0 2689 | 0 2690 | 1 2691 | 1 2692 | 0 2693 | 0 2694 | 0 2695 | 0 2696 | 1 2697 | 0 2698 | 1 2699 | 0 2700 | 0 2701 | 0 2702 | 1 2703 | 1 2704 | 0 2705 | 1 2706 | 0 2707 | 0 2708 | 0 2709 | 1 2710 | 1 2711 | 0 2712 | 0 2713 | 1 2714 | 0 2715 | 0 2716 | 1 2717 | 0 2718 | 1 2719 | 1 2720 | 1 2721 | 1 2722 | 1 2723 | 0 2724 | 1 2725 | 0 2726 | 1 2727 | 1 2728 | 0 2729 | 0 2730 | 1 2731 | 1 2732 | 0 2733 | 1 2734 | 0 2735 | 0 2736 | 0 2737 | 1 2738 | 1 2739 | 1 2740 | 0 2741 | 1 2742 | 0 2743 | 1 2744 | 0 2745 | 1 2746 | 0 2747 | 0 2748 | 0 2749 | 0 2750 | 1 2751 | 1 2752 | 0 2753 | 0 2754 | 1 2755 | 1 2756 | 0 2757 | 1 2758 | 0 2759 | 1 2760 | 0 2761 | 1 2762 | 1 
2763 | 0 2764 | 1 2765 | 0 2766 | 1 2767 | 1 2768 | 0 2769 | 1 2770 | 1 2771 | 1 2772 | 1 2773 | 0 2774 | 0 2775 | 1 2776 | 0 2777 | 0 2778 | 0 2779 | 0 2780 | 0 2781 | 1 2782 | 0 2783 | 1 2784 | 0 2785 | 0 2786 | 1 2787 | 0 2788 | 0 2789 | 1 2790 | 0 2791 | 1 2792 | 0 2793 | 1 2794 | 0 2795 | 1 2796 | 1 2797 | 0 2798 | 1 2799 | 0 2800 | 1 2801 | 0 2802 | 1 2803 | 1 2804 | 0 2805 | 0 2806 | 0 2807 | 0 2808 | 0 2809 | 1 2810 | 1 2811 | 1 2812 | 0 2813 | 1 2814 | 1 2815 | 0 2816 | 0 2817 | 0 2818 | 0 2819 | 0 2820 | 1 2821 | 1 2822 | 1 2823 | 1 2824 | 1 2825 | 1 2826 | 0 2827 | 0 2828 | 1 2829 | 1 2830 | 0 2831 | 1 2832 | 0 2833 | 0 2834 | 0 2835 | 1 2836 | 1 2837 | 0 2838 | 0 2839 | 0 2840 | 1 2841 | 0 2842 | 1 2843 | 1 2844 | 0 2845 | 0 2846 | 0 2847 | 1 2848 | 0 2849 | 1 2850 | 0 2851 | 1 2852 | 1 2853 | 0 2854 | 0 2855 | 1 2856 | 0 2857 | 1 2858 | 0 2859 | 0 2860 | 0 2861 | 0 2862 | 1 2863 | 0 2864 | 0 2865 | 1 2866 | 0 2867 | 0 2868 | 0 2869 | 0 2870 | 1 2871 | 1 2872 | 1 2873 | 0 2874 | 0 2875 | 1 2876 | 1 2877 | 1 2878 | 1 2879 | 1 2880 | 1 2881 | 1 2882 | 0 2883 | 1 2884 | 1 2885 | 1 2886 | 0 2887 | 0 2888 | 0 2889 | 1 2890 | 1 2891 | 0 2892 | 0 2893 | 0 2894 | 1 2895 | 0 2896 | 1 2897 | 0 2898 | 0 2899 | 0 2900 | 1 2901 | 1 2902 | 1 2903 | 0 2904 | 0 2905 | 0 2906 | 0 2907 | 0 2908 | 0 2909 | 1 2910 | 1 2911 | 1 2912 | 0 2913 | 0 2914 | 1 2915 | 1 2916 | 0 2917 | 0 2918 | 0 2919 | 0 2920 | 0 2921 | 0 2922 | 0 2923 | 1 2924 | 0 2925 | 0 2926 | 0 2927 | 1 2928 | 0 2929 | 0 2930 | 1 2931 | 0 2932 | 0 2933 | 0 2934 | 1 2935 | 1 2936 | 0 2937 | 0 2938 | 0 2939 | 0 2940 | 1 2941 | 0 2942 | 0 2943 | 0 2944 | 0 2945 | 0 2946 | 0 2947 | 1 2948 | 0 2949 | 0 2950 | 1 2951 | 0 2952 | 1 2953 | 0 2954 | 0 2955 | 1 2956 | 1 2957 | 0 2958 | 0 2959 | 0 2960 | 1 2961 | 0 2962 | 1 2963 | 1 2964 | 0 2965 | 1 2966 | 0 2967 | 0 2968 | 1 2969 | 1 2970 | 0 2971 | 0 2972 | 1 2973 | 0 2974 | 1 2975 | 0 2976 | 0 2977 | 0 2978 | 1 2979 | 0 2980 | 1 2981 | 0 2982 | 0 2983 | 0 2984 | 0 
2985 | 0 2986 | 0 2987 | 1 2988 | 0 2989 | 0 2990 | 0 2991 | 1 2992 | 0 2993 | 0 2994 | 0 2995 | 1 2996 | 1 2997 | 1 2998 | 0 2999 | 0 3000 | 0 3001 | 1 3002 | 0 3003 | 1 3004 | 0 3005 | 0 3006 | 1 3007 | 1 3008 | 0 3009 | 1 3010 | 0 3011 | 1 3012 | 0 3013 | 1 3014 | 0 3015 | 0 3016 | 0 3017 | 0 3018 | 0 3019 | 1 3020 | 1 3021 | 0 3022 | 1 3023 | 0 3024 | 0 3025 | 0 3026 | 1 3027 | 0 3028 | 0 3029 | 0 3030 | 1 3031 | 0 3032 | 1 3033 | 1 3034 | 0 3035 | 1 3036 | 1 3037 | 0 3038 | 1 3039 | 0 3040 | 1 3041 | 0 3042 | 0 3043 | 1 3044 | 1 3045 | 0 3046 | 1 3047 | 0 3048 | 0 3049 | 1 3050 | 1 3051 | 0 3052 | 0 3053 | 1 3054 | 1 3055 | 0 3056 | 0 3057 | 0 3058 | 0 3059 | 0 3060 | 0 3061 | 0 3062 | 1 3063 | 0 3064 | 1 3065 | 1 3066 | 1 3067 | 0 3068 | 1 3069 | 1 3070 | 1 3071 | 0 3072 | 0 3073 | 0 3074 | 0 3075 | 0 3076 | 0 3077 | 0 3078 | 0 3079 | 1 3080 | 1 3081 | 0 3082 | 1 3083 | 1 3084 | 0 3085 | 1 3086 | 1 3087 | 1 3088 | 1 3089 | 0 3090 | 0 3091 | 1 3092 | 0 3093 | 0 3094 | 0 3095 | 1 3096 | 1 3097 | 1 3098 | 1 3099 | 0 3100 | 0 3101 | 1 3102 | 0 3103 | 0 3104 | 1 3105 | 1 3106 | 1 3107 | 0 3108 | 1 3109 | 0 3110 | 1 3111 | 0 3112 | 0 3113 | 0 3114 | 1 3115 | 1 3116 | 0 3117 | 0 3118 | 1 3119 | 1 3120 | 1 3121 | 0 3122 | 0 3123 | 0 3124 | 0 3125 | 0 3126 | 0 3127 | 0 3128 | 0 3129 | 0 3130 | 0 3131 | 0 3132 | 0 3133 | 0 3134 | 0 3135 | 1 3136 | 0 3137 | 0 3138 | 0 3139 | 0 3140 | 1 3141 | 0 3142 | 0 3143 | 1 3144 | 0 3145 | 1 3146 | 1 3147 | 1 3148 | 0 3149 | 1 3150 | 1 3151 | 0 3152 | 1 3153 | 0 3154 | 1 3155 | 1 3156 | 1 3157 | 1 3158 | 1 3159 | 0 3160 | 0 3161 | 1 3162 | 0 3163 | 0 3164 | 0 3165 | 0 3166 | 0 3167 | 0 3168 | 1 3169 | 1 3170 | 0 3171 | 0 3172 | 1 3173 | 0 3174 | 0 3175 | 1 3176 | 0 3177 | 0 3178 | 0 3179 | 0 3180 | 0 3181 | 1 3182 | 1 3183 | 1 3184 | 0 3185 | 1 3186 | 1 3187 | 1 3188 | 0 3189 | 0 3190 | 0 3191 | 0 3192 | 0 3193 | 0 3194 | 0 3195 | 1 3196 | 0 3197 | 1 3198 | 0 3199 | 0 3200 | 0 3201 | 0 3202 | 0 3203 | 0 3204 | 0 3205 | 1 3206 | 0 
3207 | 0 3208 | 0 3209 | 1 3210 | 1 3211 | 1 3212 | 0 3213 | 1 3214 | 1 3215 | 1 3216 | 1 3217 | 1 3218 | 1 3219 | 1 3220 | 0 3221 | 0 3222 | 1 3223 | 0 3224 | 0 3225 | 0 3226 | 1 3227 | 1 3228 | 1 3229 | 0 3230 | 0 3231 | 0 3232 | 0 3233 | 0 3234 | 1 3235 | 0 3236 | 1 3237 | 0 3238 | 1 3239 | 1 3240 | 1 3241 | 1 3242 | 1 3243 | 1 3244 | 0 3245 | 1 3246 | 1 3247 | 0 3248 | 0 3249 | 0 3250 | 0 3251 | 1 3252 | 1 3253 | 1 3254 | 0 3255 | 0 3256 | 0 3257 | 1 3258 | 1 3259 | 0 3260 | 0 3261 | 0 3262 | 0 3263 | 0 3264 | 0 3265 | 0 3266 | 0 3267 | 1 3268 | 0 3269 | 1 3270 | 0 3271 | 0 3272 | 0 3273 | 0 3274 | 0 3275 | 0 3276 | 0 3277 | 0 3278 | 0 3279 | 0 3280 | 0 3281 | 1 3282 | 0 3283 | 1 3284 | 1 3285 | 0 3286 | 0 3287 | 0 3288 | 0 3289 | 0 3290 | 1 3291 | 1 3292 | 0 3293 | 0 3294 | 1 3295 | 1 3296 | 0 3297 | 1 3298 | 1 3299 | 1 3300 | 0 3301 | 1 3302 | 0 3303 | 0 3304 | 0 3305 | 1 3306 | 0 3307 | 0 3308 | 1 3309 | 1 3310 | 1 3311 | 1 3312 | 0 3313 | 1 3314 | 0 3315 | 1 3316 | 0 3317 | 0 3318 | 0 3319 | 1 3320 | 0 3321 | 1 3322 | 0 3323 | 0 3324 | 0 3325 | 1 3326 | 0 3327 | 1 3328 | 0 3329 | 0 3330 | 1 3331 | 1 3332 | 1 3333 | 1 3334 | 0 3335 | 1 3336 | 0 3337 | 1 3338 | 1 3339 | 0 3340 | 0 3341 | 1 3342 | 1 3343 | 0 3344 | 0 3345 | 1 3346 | 1 3347 | 0 3348 | 0 3349 | 1 3350 | 1 3351 | 1 3352 | 0 3353 | 1 3354 | 1 3355 | 0 3356 | 0 3357 | 0 3358 | 0 3359 | 1 3360 | 1 3361 | 0 3362 | 0 3363 | 0 3364 | 0 3365 | 0 3366 | 0 3367 | 0 3368 | 0 3369 | 0 3370 | 0 3371 | 0 3372 | 1 3373 | 0 3374 | 1 3375 | 1 3376 | 1 3377 | 0 3378 | 0 3379 | 0 3380 | 1 3381 | 1 3382 | 1 3383 | 0 3384 | 0 3385 | 0 3386 | 0 3387 | 0 3388 | 0 3389 | 0 3390 | 0 3391 | 0 3392 | 0 3393 | 1 3394 | 0 3395 | 0 3396 | 1 3397 | 1 3398 | 0 3399 | 0 3400 | 0 3401 | 0 3402 | 0 3403 | 0 3404 | 1 3405 | 1 3406 | 1 3407 | 0 3408 | 1 3409 | 0 3410 | 0 3411 | 0 3412 | 1 3413 | 0 3414 | 1 3415 | 1 3416 | 1 3417 | 0 3418 | 1 3419 | 1 3420 | 1 3421 | 0 3422 | 0 3423 | 1 3424 | 0 3425 | 0 3426 | 0 3427 | 0 3428 | 1 
3429 | 1 3430 | 1 3431 | 1 3432 | 1 3433 | 1 3434 | 0 3435 | 0 3436 | 0 3437 | 0 3438 | 0 3439 | 0 3440 | 0 3441 | 1 3442 | 1 3443 | 1 3444 | 1 3445 | 0 3446 | 1 3447 | 0 3448 | 1 3449 | 1 3450 | 0 3451 | 0 3452 | 1 3453 | 0 3454 | 0 3455 | 1 3456 | 0 3457 | 0 3458 | 1 3459 | 0 3460 | 0 3461 | 0 3462 | 0 3463 | 0 3464 | 1 3465 | 0 3466 | 1 3467 | 1 3468 | 0 3469 | 0 3470 | 0 3471 | 0 3472 | 1 3473 | 0 3474 | 1 3475 | 1 3476 | 0 3477 | 1 3478 | 1 3479 | 0 3480 | 0 3481 | 0 3482 | 1 3483 | 1 3484 | 0 3485 | 0 3486 | 0 3487 | 1 3488 | 1 3489 | 1 3490 | 0 3491 | 1 3492 | 0 3493 | 1 3494 | 0 3495 | 0 3496 | 0 3497 | 1 3498 | 1 3499 | 1 3500 | 0 3501 | 0 3502 | 1 3503 | 1 3504 | 0 3505 | 0 3506 | 0 3507 | 0 3508 | 0 3509 | 0 3510 | 1 3511 | 1 3512 | 0 3513 | 0 3514 | 1 3515 | 0 3516 | 0 3517 | 1 3518 | 0 3519 | 1 3520 | 1 3521 | 0 3522 | 0 3523 | 1 3524 | 0 3525 | 1 3526 | 1 3527 | 1 3528 | 1 3529 | 1 3530 | 0 3531 | 0 3532 | 0 3533 | 0 3534 | 0 3535 | 1 3536 | 1 3537 | 0 3538 | 0 3539 | 1 3540 | 0 3541 | 0 3542 | 0 3543 | 0 3544 | 0 3545 | 0 3546 | 0 3547 | 1 3548 | 0 3549 | 0 3550 | 0 3551 | 1 3552 | 1 3553 | 0 3554 | 1 3555 | 1 3556 | 1 3557 | 0 3558 | 1 3559 | 0 3560 | 0 3561 | 1 3562 | 1 3563 | 0 3564 | 0 3565 | 0 3566 | 1 3567 | 0 3568 | 0 3569 | 1 3570 | 0 3571 | 1 3572 | 0 3573 | 0 3574 | 1 3575 | 1 3576 | 1 3577 | 0 3578 | 0 3579 | 1 3580 | 1 3581 | 0 3582 | 0 3583 | 0 3584 | 1 3585 | 0 3586 | 0 3587 | 0 3588 | 0 3589 | 0 3590 | 1 3591 | 0 3592 | 0 3593 | 1 3594 | 1 3595 | 1 3596 | 0 3597 | 1 3598 | 0 3599 | 0 3600 | 0 3601 | 0 3602 | 0 3603 | 1 3604 | 0 3605 | 1 3606 | 1 3607 | 0 3608 | 0 3609 | 0 3610 | 1 3611 | 1 3612 | 1 3613 | 0 3614 | 0 3615 | 0 3616 | 0 3617 | 0 3618 | 0 3619 | 1 3620 | 0 3621 | 1 3622 | 0 3623 | 1 3624 | 0 3625 | 0 3626 | 1 3627 | 1 3628 | 0 3629 | 1 3630 | 1 3631 | 0 3632 | 0 3633 | 1 3634 | 1 3635 | 0 3636 | 1 3637 | 1 3638 | 0 3639 | 0 3640 | 1 3641 | 0 3642 | 0 3643 | 0 3644 | 1 3645 | 0 3646 | 1 3647 | 0 3648 | 1 3649 | 0 3650 | 0 
3651 | 1 3652 | 0 3653 | 1 3654 | 0 3655 | 0 3656 | 0 3657 | 0 3658 | 0 3659 | 1 3660 | 1 3661 | 0 3662 | 0 3663 | 0 3664 | 0 3665 | 0 3666 | 0 3667 | 0 3668 | 0 3669 | 0 3670 | 1 3671 | 0 3672 | 1 3673 | 0 3674 | 1 3675 | 0 3676 | 1 3677 | 0 3678 | 0 3679 | 0 3680 | 0 3681 | 0 3682 | 0 3683 | 0 3684 | 1 3685 | 1 3686 | 1 3687 | 0 3688 | 1 3689 | 1 3690 | 1 3691 | 1 3692 | 1 3693 | 0 3694 | 1 3695 | 1 3696 | 0 3697 | 0 3698 | 1 3699 | 0 3700 | 0 3701 | 1 3702 | 0 3703 | 1 3704 | 0 3705 | 1 3706 | 1 3707 | 0 3708 | 0 3709 | 1 3710 | 0 3711 | 1 3712 | 0 3713 | 0 3714 | 0 3715 | 1 3716 | 0 3717 | 0 3718 | 1 3719 | 0 3720 | 0 3721 | 1 3722 | 0 3723 | 1 3724 | 0 3725 | 1 3726 | 1 3727 | 1 3728 | 0 3729 | 1 3730 | 0 3731 | 0 3732 | 1 3733 | 0 3734 | 0 3735 | 0 3736 | 0 3737 | 1 3738 | 0 3739 | 0 3740 | 0 3741 | 0 3742 | 0 3743 | 0 3744 | 1 3745 | 1 3746 | 0 3747 | 1 3748 | 0 3749 | 0 3750 | 0 3751 | 0 3752 | 1 3753 | 0 3754 | 0 3755 | 0 3756 | 0 3757 | 0 3758 | 1 3759 | 1 3760 | 1 3761 | 1 3762 | 0 3763 | 1 3764 | 1 3765 | 1 3766 | 0 3767 | 1 3768 | 1 3769 | 0 3770 | 0 3771 | 0 3772 | 0 3773 | 0 3774 | 0 3775 | 0 3776 | 0 3777 | 1 3778 | 0 3779 | 1 3780 | 0 3781 | 0 3782 | 1 3783 | 1 3784 | 0 3785 | 1 3786 | 1 3787 | 0 3788 | 1 3789 | 0 3790 | 1 3791 | 1 3792 | 0 3793 | 1 3794 | 0 3795 | 1 3796 | 0 3797 | 0 3798 | 0 3799 | 1 3800 | 0 3801 | 0 3802 | 0 3803 | 1 3804 | 0 3805 | 0 3806 | 1 3807 | 1 3808 | 0 3809 | 1 3810 | 1 3811 | 0 3812 | 0 3813 | 0 3814 | 0 3815 | 1 3816 | 0 3817 | 0 3818 | 1 3819 | 0 3820 | 1 3821 | 1 3822 | 0 3823 | 1 3824 | 0 3825 | 0 3826 | 0 3827 | 0 3828 | 1 3829 | 0 3830 | 0 3831 | 0 3832 | 0 3833 | 1 3834 | 1 3835 | 1 3836 | 1 3837 | 0 3838 | 1 3839 | 1 3840 | 1 3841 | 0 3842 | 0 3843 | 1 3844 | 1 3845 | 0 3846 | 0 3847 | 0 3848 | 0 3849 | 1 3850 | 1 3851 | 0 3852 | 0 3853 | 0 3854 | 1 3855 | 1 3856 | 1 3857 | 0 3858 | 0 3859 | 1 3860 | 0 3861 | 0 3862 | 0 3863 | 1 3864 | 1 3865 | 1 3866 | 1 3867 | 0 3868 | 0 3869 | 0 3870 | 1 3871 | 0 3872 | 1 
3873 | 0 3874 | 1 3875 | 1 3876 | 1 3877 | 1 3878 | 0 3879 | 0 3880 | 1 3881 | 1 3882 | 0 3883 | 0 3884 | 1 3885 | 0 3886 | 1 3887 | 1 3888 | 0 3889 | 1 3890 | 1 3891 | 0 3892 | 1 3893 | 1 3894 | 0 3895 | 1 3896 | 1 3897 | 1 3898 | 1 3899 | 0 3900 | 1 3901 | 1 3902 | 0 3903 | 0 3904 | 1 3905 | 0 3906 | 1 3907 | 0 3908 | 1 3909 | 1 3910 | 1 3911 | 0 3912 | 1 3913 | 0 3914 | 0 3915 | 0 3916 | 1 3917 | 0 3918 | 0 3919 | 0 3920 | 1 3921 | 1 3922 | 0 3923 | 1 3924 | 1 3925 | 1 3926 | 1 3927 | 0 3928 | 0 3929 | 0 3930 | 0 3931 | 0 3932 | 0 3933 | 0 3934 | 1 3935 | 1 3936 | 0 3937 | 1 3938 | 1 3939 | 0 3940 | 1 3941 | 1 3942 | 0 3943 | 1 3944 | 0 3945 | 0 3946 | 0 3947 | 1 3948 | 1 3949 | 0 3950 | 0 3951 | 0 3952 | 1 3953 | 0 3954 | 0 3955 | 0 3956 | 1 3957 | 1 3958 | 1 3959 | 1 3960 | 0 3961 | 1 3962 | 1 3963 | 1 3964 | 0 3965 | 1 3966 | 1 3967 | 0 3968 | 1 3969 | 1 3970 | 1 3971 | 0 3972 | 1 3973 | 1 3974 | 0 3975 | 1 3976 | 0 3977 | 0 3978 | 0 3979 | 0 3980 | 0 3981 | 0 3982 | 1 3983 | 1 3984 | 0 3985 | 0 3986 | 1 3987 | 0 3988 | 0 3989 | 1 3990 | 0 3991 | 0 3992 | 0 3993 | 0 3994 | 0 3995 | 0 3996 | 1 3997 | 0 3998 | 1 3999 | 0 4000 | 0 4001 | 1 4002 | 1 4003 | 1 4004 | 1 4005 | 1 4006 | 0 4007 | 0 4008 | 0 4009 | 0 4010 | 0 4011 | 1 4012 | 1 4013 | 1 4014 | 0 4015 | 1 4016 | 1 4017 | 0 4018 | 0 4019 | 1 4020 | 0 4021 | 0 4022 | 0 4023 | 1 4024 | 0 4025 | 0 4026 | 0 4027 | 0 4028 | 0 4029 | 1 4030 | 0 4031 | 1 4032 | 1 4033 | 1 4034 | 1 4035 | 0 4036 | 1 4037 | 1 4038 | 0 4039 | 1 4040 | 1 4041 | 0 4042 | 1 4043 | 0 4044 | 1 4045 | 0 4046 | 0 4047 | 0 4048 | 1 4049 | 0 4050 | 0 4051 | 0 4052 | 1 4053 | 1 4054 | 1 4055 | 1 4056 | 1 4057 | 1 4058 | 0 4059 | 0 4060 | 1 4061 | 0 4062 | 0 4063 | 1 4064 | 0 4065 | 1 4066 | 1 4067 | 0 4068 | 0 4069 | 0 4070 | 0 4071 | 1 4072 | 1 4073 | 0 4074 | 1 4075 | 1 4076 | 1 4077 | 0 4078 | 1 4079 | 1 4080 | 1 4081 | 1 4082 | 0 4083 | 1 4084 | 0 4085 | 0 4086 | 1 4087 | 1 4088 | 0 4089 | 0 4090 | 0 4091 | 0 4092 | 1 4093 | 1 4094 | 0 
4095 | 0 4096 | 1 4097 | 0 4098 | 0 4099 | 0 4100 | 1 4101 | 0 4102 | 1 4103 | 1 4104 | 0 4105 | 1 4106 | 0 4107 | 0 4108 | 0 4109 | 1 4110 | 1 4111 | 0 4112 | 1 4113 | 0 4114 | 1 4115 | 0 4116 | 1 4117 | 1 4118 | 0 4119 | 0 4120 | 0 4121 | 0 4122 | 1 4123 | 1 4124 | 0 4125 | 0 4126 | 1 4127 | 1 4128 | 0 4129 | 0 4130 | 1 4131 | 1 4132 | 0 4133 | 0 4134 | 1 4135 | 1 4136 | 0 4137 | 0 4138 | 0 4139 | 0 4140 | 1 4141 | 0 4142 | 0 4143 | 1 4144 | 0 4145 | 1 4146 | 1 4147 | 0 4148 | 1 4149 | 0 4150 | 1 4151 | 1 4152 | 1 4153 | 1 4154 | 0 4155 | 0 4156 | 1 4157 | 0 4158 | 1 4159 | 0 4160 | 0 4161 | 1 4162 | 1 4163 | 1 4164 | 0 4165 | 0 4166 | 1 4167 | 0 4168 | 0 4169 | 1 4170 | 1 4171 | 0 4172 | 0 4173 | 1 4174 | 0 4175 | 1 4176 | 0 4177 | 1 4178 | 0 4179 | 1 4180 | 0 4181 | 1 4182 | 0 4183 | 0 4184 | 0 4185 | 1 4186 | 0 4187 | 0 4188 | 1 4189 | 1 4190 | 1 4191 | 0 4192 | 1 4193 | 0 4194 | 0 4195 | 1 4196 | 0 4197 | 0 4198 | 0 4199 | 1 4200 | 1 4201 | 1 4202 | 0 4203 | 0 4204 | 1 4205 | 1 4206 | 1 4207 | 1 4208 | 1 4209 | 0 4210 | 1 4211 | 0 4212 | 1 4213 | 0 4214 | 1 4215 | 1 4216 | 0 4217 | 0 4218 | 0 4219 | 1 4220 | 1 4221 | 0 4222 | 1 4223 | 1 4224 | 1 4225 | 1 4226 | 0 4227 | 1 4228 | 0 4229 | 1 4230 | 1 4231 | 0 4232 | 1 4233 | 0 4234 | 0 4235 | 1 4236 | 1 4237 | 1 4238 | 0 4239 | 0 4240 | 1 4241 | 1 4242 | 0 4243 | 0 4244 | 0 4245 | 0 4246 | 0 4247 | 1 4248 | 0 4249 | 0 4250 | 1 4251 | 0 4252 | 0 4253 | 0 4254 | 0 4255 | 0 4256 | 0 4257 | 0 4258 | 1 4259 | 1 4260 | 0 4261 | 1 4262 | 0 4263 | 1 4264 | 1 4265 | 0 4266 | 0 4267 | 1 4268 | 1 4269 | 0 4270 | 1 4271 | 0 4272 | 1 4273 | 0 4274 | 1 4275 | 1 4276 | 0 4277 | 1 4278 | 0 4279 | 1 4280 | 0 4281 | 1 4282 | 0 4283 | 0 4284 | 0 4285 | 0 4286 | 0 4287 | 0 4288 | 1 4289 | 1 4290 | 1 4291 | 0 4292 | 1 4293 | 0 4294 | 0 4295 | 0 4296 | 0 4297 | 1 4298 | 1 4299 | 1 4300 | 0 4301 | 0 4302 | 0 4303 | 1 4304 | 0 4305 | 1 4306 | 1 4307 | 1 4308 | 0 4309 | 0 4310 | 1 4311 | 1 4312 | 1 4313 | 0 4314 | 1 4315 | 1 4316 | 1 
4317 | 1 4318 | 1 4319 | 0 4320 | 0 4321 | 1 4322 | 0 4323 | 1 4324 | 0 4325 | 0 4326 | 0 4327 | 1 4328 | 1 4329 | 0 4330 | 1 4331 | 0 4332 | 1 4333 | 1 4334 | 1 4335 | 1 4336 | 0 4337 | 0 4338 | -------------------------------------------------------------------------------- /ExplanationEvaluation/datasets/Mutagenicity/Mutagenicity_label_readme.txt: -------------------------------------------------------------------------------- 1 | Node labels: [chem] 2 | 3 | Edge labels: [valence] 4 | 5 | Node labels were converted to integer values using this map: 6 | 7 | Component 0: 8 | 0 C 9 | 1 O 10 | 2 Cl 11 | 3 H 12 | 4 N 13 | 5 F 14 | 6 Br 15 | 7 S 16 | 8 P 17 | 9 I 18 | 10 Na 19 | 11 K 20 | 12 Li 21 | 13 Ca 22 | 23 | 24 | 25 | Edge labels were converted to integer values using this map: 26 | 27 | Component 0: 28 | 0 1 29 | 1 2 30 | 2 3 31 | 32 | 33 | 34 | Class labels were converted to integer values using this map: 35 | 36 | 0 mutagen 37 | 1 nonmutagen 38 | 39 | 40 | ############################# 41 | Mutagenicity_edge_gt.txt 42 | 0: Edges outside motifs. 
def load_graph_dataset(_dataset, shuffle=True):
    """Load a graph classification dataset and optionally shuffle it.

    :param _dataset: Which dataset to load. Choose from "ba2" or "mutag".
    :param shuffle: Boolean. Whether to shuffle the loaded dataset.
    :raises NotImplementedError: if ``_dataset`` is not a known dataset name.
    :returns: tuple of (edge_index, features, labels, train_mask, val_mask, test_mask)
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))

    # Load the chosen dataset from its pickle file.
    if _dataset == "ba2":
        path = os.path.join(dir_path, 'pkls', 'BA-2motif.pkl')
        with open(path, 'rb') as fin:
            adjs, features, labels = pkl.load(fin)

    elif _dataset == "mutag":
        path = os.path.join(dir_path, 'pkls', 'Mutagenicity.pkl')
        if not os.path.exists(path):  # pkl not yet created
            print("Mutag dataset pickle is not yet created, doing this now. Can take some time")
            adjs, features, labels = load_real_dataset(path, os.path.join(dir_path, 'Mutagenicity', 'Mutagenicity_'))
            print("Done with creating the mutag dataset")
        else:
            with open(path, 'rb') as fin:
                adjs, features, labels = pkl.load(fin)
    else:
        # BUG FIX: the original did `raise NotImplemented`. NotImplemented is a
        # sentinel value (for binary dunder methods), not an exception class, so
        # raising it produces an unrelated TypeError. Raise the proper exception.
        print("Unknown dataset")
        raise NotImplementedError("Unknown dataset: " + str(_dataset))

    n_graphs = adjs.shape[0]
    indices = np.arange(0, n_graphs)
    if shuffle:
        # Fixed seed: the permutation is always the same, even if the global
        # random seed is set differently elsewhere.
        prng = RandomState(42)
        indices = prng.permutation(indices)

    # Create shuffled data
    adjs = adjs[indices]
    features = features[indices].astype('float32')
    labels = labels[indices]

    # Create boolean masks for an 80/10/10 train/val/test split.
    train_indices = np.arange(0, int(n_graphs * 0.8))
    val_indices = np.arange(int(n_graphs * 0.8), int(n_graphs * 0.9))
    test_indices = np.arange(int(n_graphs * 0.9), n_graphs)
    train_mask = np.full((n_graphs), False, dtype=bool)
    train_mask[train_indices] = True
    val_mask = np.full((n_graphs), False, dtype=bool)
    val_mask[val_indices] = True
    test_mask = np.full((n_graphs), False, dtype=bool)
    test_mask[test_indices] = True

    # Transform the dense adjacency matrices to edge-index representation.
    edge_index = adj_to_edge_index(adjs)

    return edge_index, features, labels, train_mask, val_mask, test_mask
Can take some time") 28 | adjs, features, labels = load_real_dataset(path, dir_path + '/Mutagenicity/Mutagenicity_') 29 | print("Done with creating the mutag dataset") 30 | else: 31 | with open(path, 'rb') as fin: 32 | adjs, features, labels = pkl.load(fin) 33 | else: 34 | print("Unknown dataset") 35 | raise NotImplemented 36 | 37 | n_graphs = adjs.shape[0] 38 | indices = np.arange(0, n_graphs) 39 | if shuffle: 40 | prng = RandomState(42) # Make sure that the permutation is always the same, even if we set the seed different 41 | indices = prng.permutation(indices) 42 | 43 | # Create shuffled data 44 | adjs = adjs[indices] 45 | features = features[indices].astype('float32') 46 | labels = labels[indices] 47 | 48 | # Create masks 49 | train_indices = np.arange(0, int(n_graphs*0.8)) 50 | val_indices = np.arange(int(n_graphs*0.8), int(n_graphs*0.9)) 51 | test_indices = np.arange(int(n_graphs*0.9), n_graphs) 52 | train_mask = np.full((n_graphs), False, dtype=bool) 53 | train_mask[train_indices] = True 54 | val_mask = np.full((n_graphs), False, dtype=bool) 55 | val_mask[val_indices] = True 56 | test_mask = np.full((n_graphs), False, dtype=bool) 57 | test_mask[test_indices] = True 58 | 59 | # Transform to edge index 60 | edge_index = adj_to_edge_index(adjs) 61 | 62 | return edge_index, features, labels, train_mask, val_mask, test_mask 63 | 64 | 65 | def _load_node_dataset(_dataset): 66 | """Load a node dataset. 67 | 68 | :param _dataset: Which dataset to load. 
Choose from "syn1", "syn2", "syn3" or "syn4" 69 | :returns: np.array 70 | """ 71 | dir_path = os.path.dirname(os.path.realpath(__file__)) 72 | path = dir_path + '/pkls/' + _dataset + '.pkl' 73 | with open(path, 'rb') as fin: 74 | adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, edge_label_matrix = pkl.load(fin) 75 | labels = y_train 76 | labels[val_mask] = y_val[val_mask] 77 | labels[test_mask] = y_test[test_mask] 78 | 79 | return adj, features, labels, train_mask, val_mask, test_mask 80 | 81 | 82 | def load_dataset(_dataset, skip_preproccessing=False, shuffle=True): 83 | """High level function which loads the dataset 84 | by calling others spesifying in nodes or graphs. 85 | 86 | Keyword arguments: 87 | :param _dataset: Which dataset to load. Choose from "syn1", "syn2", "syn3", "syn4", "ba2" or "mutag" 88 | :param skip_preproccessing: Whether or not to convert the adjacency matrix to an edge matrix. 89 | :param shuffle: Should the returned dataset be shuffled or not. 
90 | :returns: multiple np.arrays 91 | """ 92 | print(f"Loading {_dataset} dataset") 93 | if _dataset[:3] == "syn": # Load node_dataset 94 | adj, features, labels, train_mask, val_mask, test_mask = _load_node_dataset(_dataset) 95 | preprocessed_features = preprocess_features(features).astype('float32') 96 | if skip_preproccessing: 97 | graph = adj 98 | else: 99 | graph = preprocess_adj(adj)[0].astype('int64').T 100 | labels = np.argmax(labels, axis=1) 101 | return graph, preprocessed_features, labels, train_mask, val_mask, test_mask 102 | else: # Load graph dataset 103 | return load_graph_dataset(_dataset, shuffle) 104 | -------------------------------------------------------------------------------- /ExplanationEvaluation/datasets/ground_truth_loaders.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from numpy.random.mtrand import RandomState 4 | 5 | from ExplanationEvaluation.datasets.utils import preprocess_adj, adj_to_edge_index, load_real_dataset, get_graph_data 6 | 7 | import pickle as pkl 8 | import numpy as np 9 | 10 | 11 | def load_ba2_ground_truth(shuffle=True): 12 | """Load a the ground truth from the ba2motif dataset. 13 | 14 | :param shuffle: Wheter the data should be shuffled. 
15 | :returns: np.array, np.array 16 | """ 17 | dir_path = os.path.dirname(os.path.realpath(__file__)) 18 | path = dir_path + '/pkls/' + "BA-2motif" + '.pkl' 19 | with open(path, 'rb') as fin: 20 | adjs, features, labels = pkl.load(fin) 21 | 22 | n_graphs = adjs.shape[0] 23 | indices = np.arange(0, n_graphs) 24 | if shuffle: 25 | prng = RandomState(42) # Make sure that the permutation is always the same, even if we set the seed different 26 | shuffled_indices = prng.permutation(indices) 27 | else: 28 | shuffled_indices = indices 29 | 30 | # Create shuffled data 31 | shuffled_adjs = adjs[shuffled_indices] 32 | shuffled_edge_index = adj_to_edge_index(shuffled_adjs) 33 | 34 | np_edge_labels = [] 35 | 36 | # Obtain the edge labels. 37 | insert = 20 38 | skip = 5 39 | for edge_index in shuffled_edge_index: 40 | labels = [] 41 | for pair in edge_index.T: 42 | r = pair[0] 43 | c = pair[1] 44 | # In line with the original PGExplainer code we determine the ground truth based on the location in the index 45 | if r >= insert and r < insert + skip and c >= insert and c < insert + skip: 46 | labels.append(1) 47 | else: 48 | labels.append(0) 49 | np_edge_labels.append(np.array(labels)) 50 | 51 | return shuffled_edge_index, np_edge_labels 52 | 53 | 54 | def load_mutag_ground_truth(shuffle=True): 55 | """Load a the ground truth from the mutagenicity dataset. 56 | Mutag is a large dataset and can thus take a while to load into memory. 57 | 58 | :param shuffle: Wheter the data should be shuffled. 
59 | :returns: np.array, np.array, np.array, np.array 60 | """ 61 | print("Loading MUTAG dataset, this can take a while") 62 | dir_path = os.path.dirname(os.path.realpath(__file__)) 63 | path = dir_path + '/pkls/' + "Mutagenicity" + '.pkl' 64 | if not os.path.exists(path): # pkl not yet created 65 | original_adjs, original_features, original_labels = load_real_dataset(path, dir_path + '/Mutagenicity/Mutagenicity_') 66 | else: 67 | with open(path, 'rb') as fin: 68 | original_adjs, original_features, original_labels = pkl.load(fin) 69 | 70 | print("Loading MUTAG groundtruth, this can take a while") 71 | path = dir_path + '/Mutagenicity/Mutagenicity_' 72 | edge_lists, _, edge_label_lists, _ = get_graph_data(path) 73 | 74 | n_graphs = original_adjs.shape[0] 75 | indices = np.arange(0, n_graphs) 76 | if shuffle: 77 | prng = RandomState(42) # Make sure that the permutation is always the same, even if we set the seed different 78 | shuffled_indices = prng.permutation(indices) 79 | else: 80 | shuffled_indices = indices 81 | 82 | # Create shuffled data 83 | shuffled_adjs = original_adjs[shuffled_indices] 84 | shuffled_labels = original_labels[shuffled_indices] 85 | shuffled_edge_list = [edge_lists[i] for i in shuffled_indices] 86 | shuffled_edge_label_lists = [edge_label_lists[i] for i in shuffled_indices] 87 | 88 | # Transform to edge index 89 | shuffled_edge_index = adj_to_edge_index(shuffled_adjs) 90 | 91 | return shuffled_edge_index, shuffled_labels, shuffled_edge_list, shuffled_edge_label_lists 92 | 93 | 94 | def _load_node_dataset_ground_truth(_dataset): 95 | """Load a the ground truth from a synthetic node dataset. 96 | Mutag is a large dataset and can thus take a while to load into memory. 97 | 98 | :param shuffle: Whether the data should be shuffled. 
99 | :returns: np.array, np.array 100 | """ 101 | dir_path = os.path.dirname(os.path.realpath(__file__)) 102 | path = dir_path + '/pkls/' + _dataset + '.pkl' 103 | with open(path, 'rb') as fin: 104 | adj, _, _, _, _, _, _, _, edge_label_matrix = pkl.load(fin) 105 | graph = preprocess_adj(adj)[0].astype('int64').T 106 | labels = [] 107 | for pair in graph.T: 108 | labels.append(edge_label_matrix[pair[0], pair[1]]) 109 | labels = np.array(labels) 110 | return graph, labels 111 | 112 | 113 | def load_dataset_ground_truth(_dataset, test_indices=None): 114 | """Load a the ground truth from a dataset. 115 | Optionally we can only request the indices needed for testing. 116 | 117 | :param test_indices: Only return the indices used by the PGExplaier paper. 118 | :returns: (np.array, np.array), np.array 119 | """ 120 | if _dataset == "syn1" or _dataset == "syn2": 121 | graph, labels = _load_node_dataset_ground_truth(_dataset) 122 | if test_indices is None: 123 | return (graph, labels), range(400, 700, 5) 124 | else: 125 | all = range(400, 700, 1) 126 | filtered = [i for i in all if i in test_indices] 127 | return (graph, labels), filtered 128 | if _dataset == "syn3": 129 | graph, labels = _load_node_dataset_ground_truth(_dataset) 130 | if test_indices is None: 131 | return (graph, labels), range(511,871,6) 132 | else: 133 | all = range(511, 871, 1) 134 | filtered = [i for i in all if i in test_indices] 135 | return (graph, labels), filtered 136 | if _dataset == "syn4": 137 | graph, labels = _load_node_dataset_ground_truth(_dataset) 138 | if test_indices is None: 139 | return (graph, labels), range(511,800,1) 140 | else: 141 | all = range(511, 800, 1) 142 | filtered = [i for i in all if i in test_indices] 143 | return (graph, labels), filtered 144 | elif _dataset == "ba2": 145 | edge_index, labels = load_ba2_ground_truth(shuffle=True) 146 | allnodes = [i for i in range(0,100)] 147 | allnodes.extend([i for i in range(500,600)]) 148 | if test_indices is None: 149 | return 
(edge_index, labels), allnodes 150 | else: 151 | all = range(0, 1000, 1) 152 | filtered = [i for i in all if i in test_indices] 153 | return (edge_index, labels), filtered 154 | elif _dataset == "mutag": 155 | edge_index, labels, edge_list, edge_labels = load_mutag_ground_truth() 156 | selected = [] 157 | np_edge_list = [] 158 | for gid in range(0, len(edge_index)): 159 | ed = edge_list[gid] 160 | ed_np = np.array(ed).T 161 | np_edge_list.append(ed_np) 162 | if np.argmax(labels[gid]) == 0 and np.sum(edge_labels[gid]) > 0: 163 | selected.append(gid) 164 | np_edge_labels = [np.array(ed_lab) for ed_lab in edge_labels] 165 | if test_indices is None: 166 | return (np_edge_list, np_edge_labels), selected 167 | else: 168 | all = range(400, 700, 1) 169 | filtered = [i for i in all if i in test_indices] 170 | return (np_edge_list, np_edge_labels), filtered 171 | else: 172 | print("Dataset does not exist") 173 | raise ValueError -------------------------------------------------------------------------------- /ExplanationEvaluation/datasets/pkls/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/datasets/pkls/.DS_Store -------------------------------------------------------------------------------- /ExplanationEvaluation/datasets/pkls/BA-2motif.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/datasets/pkls/BA-2motif.pkl -------------------------------------------------------------------------------- /ExplanationEvaluation/datasets/pkls/syn1.pkl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/datasets/pkls/syn1.pkl -------------------------------------------------------------------------------- /ExplanationEvaluation/datasets/pkls/syn2.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/datasets/pkls/syn2.pkl -------------------------------------------------------------------------------- /ExplanationEvaluation/datasets/pkls/syn3.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/datasets/pkls/syn3.pkl -------------------------------------------------------------------------------- /ExplanationEvaluation/datasets/pkls/syn4.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/datasets/pkls/syn4.pkl -------------------------------------------------------------------------------- /ExplanationEvaluation/datasets/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.sparse as sp 3 | import torch 4 | import scipy 5 | import pickle as pkl 6 | from scipy.sparse import coo_matrix 7 | 8 | """ 9 | Most of the functions in this module are copied from the PGExplainer code base. This ensures that the data is handled in the same way. 
10 | 11 | link: https://github.com/flyingdoog/PGExplainer 12 | """ 13 | 14 | def adj_to_edge_index(adj): 15 | """ 16 | Convert an adjacency matrix to an edge index 17 | :param adj: Original adjacency matrix 18 | :return: Edge index representation of the graphs 19 | """ 20 | converted = [] 21 | for d in adj: 22 | edge_index = np.argwhere(d > 0.).T 23 | converted.append(edge_index) 24 | 25 | return converted 26 | 27 | 28 | def preprocess_features(features): 29 | """ 30 | Preprocess the features and transforms them into the edge index representation 31 | :param features: Orginal feature representation 32 | :return: edge index representation 33 | """ 34 | rowsum = np.array(features.sum(1)) 35 | r_inv = np.power(rowsum, -1).flatten() 36 | r_inv[np.isinf(r_inv)] = 0. 37 | r_mat_inv = sp.diags(r_inv) 38 | features = r_mat_inv.dot(features).astype(np.float32) 39 | try: 40 | return features.todense() # [coordinates, data, shape], [] 41 | except: 42 | return features 43 | 44 | 45 | def preprocess_adj(adj): 46 | """ 47 | Transforms adj matrix into edge index. 48 | Is different to adj_to_edge_index in terms of how the final representation can be used 49 | :param adj: adjacency matrix 50 | :return: edge index 51 | """ 52 | return sparse_to_tuple(sp.coo_matrix(adj)) 53 | 54 | 55 | def sparse_to_tuple(sparse_mx): 56 | """Convert sparse matrix to tuple representation.""" 57 | def to_tuple(mx): 58 | if not sp.isspmatrix_coo(mx): 59 | mx = mx.tocoo() 60 | coords = np.vstack((mx.row, mx.col)).transpose() 61 | values = mx.data 62 | values = values.astype(np.float32) 63 | shape = mx.shape 64 | return coords, values, shape 65 | 66 | if isinstance(sparse_mx, list): 67 | for i in range(len(sparse_mx)): 68 | sparse_mx[i] = to_tuple(sparse_mx[i]) 69 | else: 70 | sparse_mx = to_tuple(sparse_mx) 71 | 72 | return sparse_mx 73 | 74 | def get_graph_data(path): 75 | """Obtain the mutagenicity dataset from text files. 76 | 77 | :param path: Location of the txt files. 
78 | :returns: np.array, np.array, np.array, np.array 79 | """ 80 | pri = path 81 | 82 | file_edges = pri+'A.txt' 83 | file_edge_labels = pri+'edge_labels.txt' 84 | file_edge_labels = pri+'edge_gt.txt' 85 | file_graph_indicator = pri+'graph_indicator.txt' 86 | file_graph_labels = pri+'graph_labels.txt' 87 | file_node_labels = pri+'node_labels.txt' 88 | 89 | edges = np.loadtxt( file_edges,delimiter=',').astype(np.int32) 90 | edge_labels = np.loadtxt(file_edge_labels,delimiter=',').astype(np.int32) 91 | graph_indicator = np.loadtxt(file_graph_indicator,delimiter=',').astype(np.int32) 92 | graph_labels = np.loadtxt(file_graph_labels,delimiter=',').astype(np.int32) 93 | node_labels = np.loadtxt(file_node_labels,delimiter=',').astype(np.int32) 94 | 95 | graph_id = 1 96 | starts = [1] 97 | node2graph = {} 98 | for i in range(len(graph_indicator)): 99 | if graph_indicator[i]!=graph_id: 100 | graph_id = graph_indicator[i] 101 | starts.append(i+1) 102 | node2graph[i+1]=len(starts)-1 103 | 104 | graphid = 0 105 | edge_lists = [] 106 | edge_label_lists = [] 107 | edge_list = [] 108 | edge_label_list = [] 109 | for (s,t),l in list(zip(edges,edge_labels)): 110 | sgid = node2graph[s] 111 | tgid = node2graph[t] 112 | if sgid!=tgid: 113 | print('edges connecting different graphs, error here, please check.') 114 | print(s,t,'graph id',sgid,tgid) 115 | exit(1) 116 | gid = sgid 117 | if gid != graphid: 118 | edge_lists.append(edge_list) 119 | edge_label_lists.append(edge_label_list) 120 | edge_list = [] 121 | edge_label_list = [] 122 | graphid = gid 123 | start = starts[gid] 124 | edge_list.append((s-start,t-start)) 125 | edge_label_list.append(l) 126 | 127 | edge_lists.append(edge_list) 128 | edge_label_lists.append(edge_label_list) 129 | 130 | # node labels 131 | node_label_lists = [] 132 | graphid = 0 133 | node_label_list = [] 134 | for i in range(len(node_labels)): 135 | nid = i+1 136 | gid = node2graph[nid] 137 | # start = starts[gid] 138 | if gid!=graphid: 139 | 
node_label_lists.append(node_label_list) 140 | graphid = gid 141 | node_label_list = [] 142 | node_label_list.append(node_labels[i]) 143 | node_label_lists.append(node_label_list) 144 | 145 | return edge_lists, graph_labels, edge_label_lists, node_label_lists 146 | 147 | 148 | def load_real_dataset(path_pkl, path_graph): 149 | """Obtain the mutagenicity dataset from text files. 150 | 151 | :param path_pkl: Path to save the pickle file containing the mutagenicity dataset. 152 | :param path_graph: Location of the txt files. 153 | :returns: adjecency matrix, node features, labels. 154 | """ 155 | edge_lists, graph_labels, edge_label_lists, node_label_lists = get_graph_data(path_graph) 156 | 157 | graph_labels[graph_labels == -1] = 0 158 | 159 | max_node_nmb = np.max([len(node_label) for node_label in node_label_lists]) + 1 # add nodes for each graph 160 | 161 | edge_label_nmb = np.max([np.max(l) for l in edge_label_lists]) + 1 162 | node_label_nmb = np.max([np.max(l) for l in node_label_lists]) + 1 163 | 164 | for gid in range(len(edge_lists)): 165 | node_nmb = len(node_label_lists[gid]) 166 | for nid in range(node_nmb, max_node_nmb): 167 | edge_lists[gid].append((nid, nid)) # add self edges 168 | node_label_lists[gid].append(node_label_nmb) # the label of added node is node_label_nmb 169 | edge_label_lists[gid].append(edge_label_nmb) 170 | 171 | adjs = [] 172 | for edge_list in edge_lists: 173 | row = np.array(edge_list)[:, 0] 174 | col = np.array(edge_list)[:, 1] 175 | data = np.ones(row.shape) 176 | adj = coo_matrix((data, (row, col))).toarray() 177 | if True: # originally checked the adjacency to be normal 178 | degree = np.sum(adj, axis=0, dtype=float).squeeze() 179 | degree[degree == 0] = 1 180 | sqrt_deg = np.diag(1.0 / np.sqrt(degree)) 181 | adj = np.matmul(np.matmul(sqrt_deg, adj), sqrt_deg) 182 | adjs.append(np.expand_dims(adj, 0)) 183 | 184 | labels = graph_labels 185 | 186 | adjs = np.concatenate(adjs, 0) 187 | labels = np.array(labels).astype(int) 188 | 
feas = [] 189 | 190 | for node_label in node_label_lists: 191 | fea = np.zeros((len(node_label), node_label_nmb + 1)) 192 | rows = np.arange(len(node_label)) 193 | fea[rows, node_label] = 1 194 | fea = fea[:, :-1] # remove the added node feature 195 | 196 | if node_label_nmb < 3: 197 | const_features = np.ones([fea.shape[0], 10]) 198 | fea = np.concatenate([fea, const_features], -1) 199 | feas.append(fea) 200 | 201 | feas = np.array(feas) 202 | 203 | b = np.zeros((labels.size, labels.max() + 1)) 204 | b[np.arange(labels.size), labels] = 1 205 | labels = b 206 | with open(path_pkl,'wb') as fout: 207 | pkl.dump((adjs, feas,labels),fout) 208 | return adjs, feas, labels 209 | -------------------------------------------------------------------------------- /ExplanationEvaluation/evaluation/AUCEvaluation.py: -------------------------------------------------------------------------------- 1 | from ExplanationEvaluation.evaluation.BaseEvaluation import BaseEvaluation 2 | from ExplanationEvaluation.evaluation.utils import evaluation_auc 3 | 4 | 5 | class AUCEvaluation(BaseEvaluation): 6 | """ 7 | A class enabling the evaluation of the AUC metric on both graphs and nodes. 8 | 9 | :param task: str either "node" or "graph". 10 | :param ground_truth: ground truth labels. 11 | :param indices: Which indices to evaluate. 12 | 13 | :funcion get_score: obtain the roc auc score. 
14 | """ 15 | def __init__(self, task, ground_truth, indices): 16 | self.task = task 17 | self.ground_truth = ground_truth 18 | self.indices = indices 19 | 20 | def get_score(self, explanations): 21 | """ 22 | Determines the auc score based on the given list of explanations and the list of ground truths 23 | :param explanations: list of explanations 24 | :return: auc score 25 | """ 26 | return evaluation_auc(self.task, explanations, self.ground_truth, self.indices) 27 | -------------------------------------------------------------------------------- /ExplanationEvaluation/evaluation/BaseEvaluation.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class BaseEvaluation(ABC): 5 | """Base class for evaluations that provide a score metric""" 6 | @abstractmethod 7 | def get_score(self, explanations): 8 | """ 9 | Returns the score of the metric 10 | :param explanations: list of explanations by the explainer 11 | :return: score 12 | """ 13 | pass -------------------------------------------------------------------------------- /ExplanationEvaluation/evaluation/EfficiencyEvaluation.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from ExplanationEvaluation.evaluation.BaseEvaluation import BaseEvaluation 4 | 5 | 6 | class EfficiencyEvluation(BaseEvaluation): 7 | def __init__(self): 8 | self.t_prep = 0 9 | self.t_expl = 0 10 | self.t_done = 0 11 | 12 | def reset(self): 13 | """Resets all times""" 14 | self.t_prep = 0 15 | self.t_expl = 0 16 | self.t_done = 0 17 | 18 | def start_prepate(self): 19 | """Should be called when the evaluation starts preparing the explainer""" 20 | self.t_prep = time.time() 21 | 22 | def start_explaining(self): 23 | """Should be called when the explainers starts explaining the samples""" 24 | self.t_expl = time.time() 25 | 26 | def done_explaining(self): 27 | """Should be called when the explainer is done 
explaining all the samples""" 28 | self.t_done = time.time() 29 | 30 | def get_score(self, explanations): 31 | """Returns the time it took to explain a single instance 32 | :param explanations: List of all explanations performed 33 | :return: time it took to explain a single instance 34 | """ 35 | return (self.t_done - self.t_expl) / len(explanations) * 1000 -------------------------------------------------------------------------------- /ExplanationEvaluation/evaluation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/evaluation/__init__.py -------------------------------------------------------------------------------- /ExplanationEvaluation/evaluation/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.metrics import roc_auc_score 3 | 4 | 5 | def evaluation_auc(task, explanations, explanation_labels, indices): 6 | """Determines based on the task which auc evaluation method should be called to determine the AUC score 7 | 8 | :param task: str either "node" or "graph". 9 | :param explanations: predicted labels. 10 | :param ground_truth: ground truth labels. 11 | :param indices: Which indices to evaluate. We ignore all others. 12 | :returns: area under curve score. 13 | """ 14 | if task == 'graph': 15 | return evaluation_auc_graph(explanations, explanation_labels, indices) 16 | elif task == 'node': 17 | return evaluation_auc_node(explanations, explanation_labels) 18 | 19 | 20 | def evaluation_auc_graph(explanations, explanation_labels, indices): 21 | """Evaluate the auc score given explaination and ground truth labels. 22 | 23 | :param explanations: predicted labels. 24 | :param ground_truth: ground truth labels. 25 | :param indices: Which indices to evaluate. We ignore all others. 
26 | :returns: area under curve score. 27 | """ 28 | ground_truth = [] 29 | predictions = [] 30 | 31 | for idx, n in enumerate(indices): # Use idx for explanation list and indices for ground truth list 32 | 33 | # Select explanation 34 | mask = explanations[idx][1].detach().numpy() 35 | graph = explanations[idx][0].detach().numpy() 36 | 37 | # Select ground truths 38 | edge_list = explanation_labels[0][n] 39 | edge_labels = explanation_labels[1][n] 40 | 41 | for edge_idx in range(0, edge_labels.shape[0]): # Consider every edge in the ground truth 42 | edge_ = edge_list.T[edge_idx] 43 | if edge_[0] == edge_[1]: # We dont consider self loops for our evaluation (Needed for Ba2Motif) 44 | continue 45 | t = np.where((graph.T == edge_.T).all(axis=1)) # Determine index of edge in graph 46 | 47 | # Retrieve predictions and ground truth 48 | predictions.append(mask[t][0]) 49 | ground_truth.append(edge_labels[edge_idx]) 50 | 51 | score = roc_auc_score(ground_truth, predictions) 52 | return score 53 | 54 | 55 | def evaluation_auc_node(explanations, explanation_labels): 56 | """Evaluate the auc score given explaination and ground truth labels. 57 | 58 | :param explanations: predicted labels. 59 | :param ground_truth: ground truth labels. 60 | :param indices: Which indices to evaluate. We ignore all others. 61 | :returns: area under curve score. 
62 | """ 63 | ground_truth = [] 64 | predictions = [] 65 | for expl in explanations: # Loop over the explanations for each node 66 | 67 | ground_truth_node = [] 68 | prediction_node = [] 69 | 70 | for i in range(0, expl[0].size(1)): # Loop over all edges in the explanation sub-graph 71 | prediction_node.append(expl[1][i].item()) 72 | 73 | # Graphs are defined bidirectional, so we need to retrieve both edges 74 | pair = expl[0].T[i].numpy() 75 | idx_edge = np.where((explanation_labels[0].T == pair).all(axis=1))[0] 76 | idx_edge_rev = np.where((explanation_labels[0].T == [pair[1], pair[0]]).all(axis=1))[0] 77 | 78 | # If any of the edges is in the ground truth set, the edge should be in the explanation 79 | gt = explanation_labels[1][idx_edge] + explanation_labels[1][idx_edge_rev] 80 | if gt == 0: 81 | ground_truth_node.append(0) 82 | else: 83 | ground_truth_node.append(1) 84 | 85 | ground_truth.extend(ground_truth_node) 86 | predictions.extend(prediction_node) 87 | 88 | score = roc_auc_score(ground_truth, predictions) 89 | return score -------------------------------------------------------------------------------- /ExplanationEvaluation/explainers/BaseExplainer.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class BaseExplainer(ABC): 5 | def __init__(self, model_to_explain, graphs, features, task): 6 | self.model_to_explain = model_to_explain 7 | self.graphs = graphs 8 | self.features = features 9 | self.type = task 10 | 11 | @abstractmethod 12 | def prepare(self, args): 13 | """Prepars the explanation method for explaining. 
14 | Can for example be used to train the method""" 15 | pass 16 | 17 | @abstractmethod 18 | def explain(self, index): 19 | """ 20 | Main method for explaining samples 21 | :param index: index of node/graph in self.graphs 22 | :return: explanation for sample 23 | """ 24 | pass 25 | 26 | -------------------------------------------------------------------------------- /ExplanationEvaluation/explainers/GNNExplainer.py: -------------------------------------------------------------------------------- 1 | from math import sqrt 2 | 3 | import torch 4 | import torch_geometric as ptgeom 5 | from torch import nn 6 | from torch.optim import Adam 7 | from torch_geometric.data import Data 8 | from torch_geometric.nn import MessagePassing 9 | from tqdm import tqdm 10 | 11 | from ExplanationEvaluation.explainers.BaseExplainer import BaseExplainer 12 | from ExplanationEvaluation.utils.graph import index_edge 13 | 14 | """ 15 | This is an adaption of the GNNExplainer of the PyTorch-Lightning library. 16 | 17 | The main similarity is the use of the methods _set_mask and _clear_mask to handle the mask. 18 | The main difference is the handling of different classification tasks. The original Geometric implementation only works for node 19 | classification. The implementation presented here also works for graph_classification datasets. 20 | 21 | link: https://pytorch-geometric.readthedocs.io/en/latest/_modules/torch_geometric/nn/models/gnn_explainer.html 22 | """ 23 | 24 | 25 | class GNNExplainer(BaseExplainer): 26 | """ 27 | A class encaptulating the GNNexplainer (https://arxiv.org/abs/1903.03894). 28 | 29 | :param model_to_explain: graph classification model who's predictions we wish to explain. 30 | :param graphs: the collections of edge_indices representing the graphs 31 | :param features: the collcection of features for each node in the graphs. 
class GNNExplainer(BaseExplainer):
    """
    Implementation of the GNNExplainer method (https://arxiv.org/abs/1903.03894).

    :param model_to_explain: the GNN whose predictions we want to explain.
    :param graphs: collection of edge_indices representing the graph(s).
    :param features: node feature matrix (or collection thereof).
    :param task: str, "node" or "graph".
    :param epochs: number of optimization epochs per explanation.
    :param lr: learning rate used when optimizing the mask.
    :param reg_coefs: regularization coefficients for the loss; the first
        restricts the size of the explanation, the second the entropy of the mask.
    """
    def __init__(self, model_to_explain, graphs, features, task, epochs=30, lr=0.003, reg_coefs=(0.05, 1.0)):
        super().__init__(model_to_explain, graphs, features, task)
        self.epochs = epochs
        self.lr = lr
        self.reg_coefs = reg_coefs

    def _set_masks(self, x, edge_index):
        """
        Inject a learnable edge mask into every message-passing module.

        :param x: node features.
        :param edge_index: graph representation (2 x E edge index).
        """
        num_nodes = x.size(0)
        num_edges = edge_index.size(1)

        # Initialisation scale taken from the reference implementation.
        std = torch.nn.init.calculate_gain('relu') * sqrt(2.0 / (2 * num_nodes))
        self.edge_mask = torch.nn.Parameter(torch.randn(num_edges) * std)

        for module in self.model_to_explain.modules():
            if isinstance(module, MessagePassing):
                module.__explain__ = True
                module.__edge_mask__ = self.edge_mask

    def _clear_masks(self):
        """
        Remove the injected edge mask from the message-passing modules.
        Must be called before a new sample can be explained.
        """
        for module in self.model_to_explain.modules():
            if isinstance(module, MessagePassing):
                module.__explain__ = False
                module.__edge_mask__ = None
        self.edge_mask = None

    def _loss(self, masked_pred, original_pred, edge_mask, reg_coefs):
        """
        Loss of the current mask: prediction term plus size and entropy regularizers.

        :param masked_pred: prediction based on the current explanation.
        :param original_pred: prediction based on the original graph.
        :param edge_mask: current explanation (pre-sigmoid logits).
        :param reg_coefs: (size, entropy) regularization coefficients.
        :return: scalar loss tensor.
        """
        size_reg, entropy_reg = reg_coefs
        eps = 1e-15  # keeps the logs finite when the mask saturates

        mask = torch.sigmoid(edge_mask)
        size_loss = torch.sum(mask) * size_reg
        entropy = -mask * torch.log(mask + eps) - (1 - mask) * torch.log(1 - mask + eps)
        entropy_loss = entropy_reg * torch.mean(entropy)

        pred_loss = torch.nn.functional.cross_entropy(masked_pred, original_pred)

        return pred_loss + size_loss + entropy_loss

    def prepare(self, args):
        """GNNExplainer needs no global preparation; everything happens per index."""
        return

    def explain(self, index):
        """
        Construct the explanation for a given sample by optimizing an edge mask
        such that the masked graph keeps the original prediction.

        :param index: index of the node/graph that we wish to explain.
        :return: explanation graph and edge weights.
        """
        index = int(index)

        # Fresh run: evaluation mode, no stale mask.
        self.model_to_explain.eval()
        self._clear_masks()

        if self.type == 'node':
            # As in the original paper, only a 3-hop subgraph is considered.
            feats = self.features
            graph = ptgeom.utils.k_hop_subgraph(index, 3, self.graphs)[1]
            with torch.no_grad():
                original_pred = self.model_to_explain(feats, graph)[index]
                pred_label = original_pred.argmax(dim=-1).detach()
        else:
            feats = self.features[index].detach()
            graph = self.graphs[index].detach()
            graph = graph[:, (graph[0] != graph[1])]  # drop self-loops
            with torch.no_grad():
                original_pred = self.model_to_explain(feats, graph)
                pred_label = original_pred.argmax(dim=-1).detach()

        self._set_masks(feats, graph)
        optimizer = Adam([self.edge_mask], lr=self.lr)

        # Optimize the mask so the masked prediction matches the original label.
        for _ in range(self.epochs):
            optimizer.zero_grad()
            if self.type == 'node':
                masked_pred = self.model_to_explain(feats, graph)[index]
                loss = self._loss(masked_pred.unsqueeze(0), pred_label.unsqueeze(0),
                                  self.edge_mask, self.reg_coefs)
            else:
                masked_pred = self.model_to_explain(feats, graph)
                loss = self._loss(masked_pred, pred_label, self.edge_mask, self.reg_coefs)
            loss.backward()
            optimizer.step()

        # Link the trained mask back to the edges of the (sub)graph.
        mask = torch.sigmoid(self.edge_mask)
        expl_graph_weights = torch.zeros(graph.size(1))
        for i, pair in enumerate(graph.T):
            t = index_edge(graph, pair)
            expl_graph_weights[t] = mask[i]

        return graph, expl_graph_weights
import torch
import torch_geometric as ptgeom
from torch import nn
from torch.optim import Adam
from torch_geometric.data import Data
from tqdm import tqdm

from ExplanationEvaluation.explainers.BaseExplainer import BaseExplainer
from ExplanationEvaluation.utils.graph import index_edge


class PGExplainer(BaseExplainer):
    """
    A class encapsulating the PGExplainer (https://arxiv.org/abs/2011.04573).

    :param model_to_explain: model whose predictions we wish to explain.
    :param graphs: the collection of edge_indices representing the graphs.
    :param features: the collection of features for each node in the graphs.
    :param task: str, "node" or "graph".
    :param epochs: number of epochs used to train the explainer.
    :param lr: learning rate used in the training of the explainer.
    :param temp: (start, end) temperatures dictating how random graphs are sampled.
    :param reg_coefs: regularization coefficients for the loss; the first
        restricts the size of the explanation, the second the entropy of the mask.
    :param sample_bias: the bias added when sampling random graphs.
    """
    def __init__(self, model_to_explain, graphs, features, task, epochs=30, lr=0.003,
                 temp=(5.0, 2.0), reg_coefs=(0.05, 1.0), sample_bias=0):
        super().__init__(model_to_explain, graphs, features, task)

        self.epochs = epochs
        self.lr = lr
        self.temp = temp
        self.reg_coefs = reg_coefs
        self.sample_bias = sample_bias

        # Node explanations additionally concatenate the embedding of the node
        # being explained, hence 3 embeddings instead of 2.
        if self.type == "graph":
            self.expl_embedding = self.model_to_explain.embedding_size * 2
        else:
            self.expl_embedding = self.model_to_explain.embedding_size * 3

    def _create_explainer_input(self, pair, embeds, node_id):
        """
        Build the input for the MLP explainer from the embeddings of the model
        to explain: two concatenated embeddings per edge for graph tasks, plus
        the explained node's embedding for node tasks.

        :param pair: edge pair (rows, cols of the edge index).
        :param embeds: embeddings of all nodes in the graph.
        :param node_id: id of the node; not used for graph datasets.
        :return: concatenated embedding per edge.
        """
        rows = pair[0]
        cols = pair[1]
        row_embeds = embeds[rows]
        col_embeds = embeds[cols]
        if self.type == 'node':
            node_embed = embeds[node_id].repeat(rows.size(0), 1)
            input_expl = torch.cat([row_embeds, col_embeds, node_embed], 1)
        else:
            # Node id is not used in this case
            input_expl = torch.cat([row_embeds, col_embeds], 1)
        return input_expl

    def _sample_graph(self, sampling_weights, temperature=1.0, bias=0.0, training=True):
        """
        Reparameterization trick: obtain a sampled graph while keeping the
        possibility to backprop through the sampling.

        :param sampling_weights: weights provided by the MLP.
        :param temperature: annealing temperature; lower is more deterministic.
        :param bias: bias on the weights to make sampling less deterministic.
        :param training: if False, sampling is entirely deterministic (plain sigmoid).
        :return: sampled edge mask.
        """
        if training:
            bias = bias + 0.0001  # If bias is 0, we run into problems
            eps = (bias - (1 - bias)) * torch.rand(sampling_weights.size()) + (1 - bias)
            gate_inputs = torch.log(eps) - torch.log(1 - eps)
            gate_inputs = (gate_inputs + sampling_weights) / temperature
            graph = torch.sigmoid(gate_inputs)
        else:
            graph = torch.sigmoid(sampling_weights)
        return graph

    def _loss(self, masked_pred, original_pred, mask, reg_coefs):
        """
        Loss of the current mask: prediction term plus size and entropy regularizers.

        :param masked_pred: prediction based on the current explanation.
        :param original_pred: prediction based on the original graph.
        :param mask: current explanation (edge mask in [0, 1]).
        :param reg_coefs: (size, entropy) regularization coefficients.
        :return: scalar loss tensor.
        """
        size_reg = reg_coefs[0]
        entropy_reg = reg_coefs[1]
        # Fix: guard the logs like GNNExplainer._loss does; a mask value that
        # saturates to exactly 0 or 1 previously produced log(0) -> NaN loss.
        EPS = 1e-15

        # Regularization losses
        size_loss = torch.sum(mask) * size_reg
        mask_ent_reg = -mask * torch.log(mask + EPS) - (1 - mask) * torch.log(1 - mask + EPS)
        mask_ent_loss = entropy_reg * torch.mean(mask_ent_reg)

        # Explanation loss
        cce_loss = torch.nn.functional.cross_entropy(masked_pred, original_pred)

        return cce_loss + size_loss + mask_ent_loss

    def prepare(self, indices=None):
        """
        Train the explainer MLP; must be called before `explain`.

        :param indices: indices over which we wish to train; defaults to all samples.
        """
        # Creation of the explainer_model is done here to make sure that the seed is set
        self.explainer_model = nn.Sequential(
            nn.Linear(self.expl_embedding, 64),
            nn.ReLU(),
            nn.Linear(64, 1),
        )

        if indices is None:  # Consider all samples
            # Fix: `self.graphs.size(0)` was wrong for both task types — for
            # graph classification `graphs` is a list (no .size), and for node
            # classification size(0) is 2 (the edge-index rows), not the node count.
            if self.type == 'graph':
                indices = range(0, len(self.graphs))
            else:
                indices = range(0, self.features.size(0))

        self.train(indices=indices)

    def train(self, indices=None):
        """
        Main method to train the explainer MLP.

        :param indices: indices that we want to use for training.
        """
        # Make sure the explainer model can be trained
        self.explainer_model.train()

        # Create optimizer and temperature schedule
        optimizer = Adam(self.explainer_model.parameters(), lr=self.lr)
        temp_schedule = lambda e: self.temp[0] * ((self.temp[1] / self.temp[0]) ** (e / self.epochs))

        # For node tasks the full-graph embeddings never change, so compute them once.
        if self.type == 'node':
            embeds = self.model_to_explain.embedding(self.features, self.graphs).detach()

        # Start training loop
        for e in tqdm(range(0, self.epochs)):
            optimizer.zero_grad()
            loss = torch.FloatTensor([0]).detach()
            t = temp_schedule(e)

            for n in indices:
                n = int(n)
                if self.type == 'node':
                    # Similar to the original paper we only consider a subgraph for explaining
                    feats = self.features
                    graph = ptgeom.utils.k_hop_subgraph(n, 3, self.graphs)[1]
                else:
                    feats = self.features[n].detach()
                    graph = self.graphs[n].detach()
                    embeds = self.model_to_explain.embedding(feats, graph).detach()

                # Sample possible explanation
                input_expl = self._create_explainer_input(graph, embeds, n).unsqueeze(0)
                sampling_weights = self.explainer_model(input_expl)
                mask = self._sample_graph(sampling_weights, t, bias=self.sample_bias).squeeze()

                masked_pred = self.model_to_explain(feats, graph, edge_weights=mask)
                original_pred = self.model_to_explain(feats, graph)

                if self.type == 'node':  # we only care for the prediction of the node
                    masked_pred = masked_pred[n].unsqueeze(dim=0)
                    original_pred = original_pred[n]

                id_loss = self._loss(masked_pred, torch.argmax(original_pred).unsqueeze(0),
                                     mask, self.reg_coefs)
                loss += id_loss

            loss.backward()
            optimizer.step()

    def explain(self, index):
        """
        Return the explanation for a node/graph. Only gives sensible results if
        the `prepare` method has already been called.

        :param index: index of the node/graph that we wish to explain.
        :return: explanation graph and edge weights.
        """
        index = int(index)
        if self.type == 'node':
            # Similar to the original paper we only consider a subgraph for explaining
            graph = ptgeom.utils.k_hop_subgraph(index, 3, self.graphs)[1]
            embeds = self.model_to_explain.embedding(self.features, self.graphs).detach()
        else:
            feats = self.features[index].clone().detach()
            graph = self.graphs[index].clone().detach()
            embeds = self.model_to_explain.embedding(feats, graph).detach()

        # Use explainer mlp to get an explanation
        input_expl = self._create_explainer_input(graph, embeds, index).unsqueeze(dim=0)
        sampling_weights = self.explainer_model(input_expl)
        mask = self._sample_graph(sampling_weights, training=False).squeeze()

        # Combine with original graph
        expl_graph_weights = torch.zeros(graph.size(1))
        for i in range(0, mask.size(0)):
            pair = graph.T[i]
            t = index_edge(graph, pair)
            expl_graph_weights[t] = mask[i]

        return graph, expl_graph_weights
import torch
from torch.nn import ReLU, Linear
from torch_geometric.nn import GCNConv, global_max_pool, global_mean_pool


class NodeGCN(torch.nn.Module):
    """
    A node classification model described in https://arxiv.org/abs/1903.03894.
    Three stacked GCN layers followed by a linear layer that operates on the
    concatenation of all three layer outputs.
    """
    def __init__(self, num_features, num_classes):
        super(NodeGCN, self).__init__()
        self.embedding_size = 20 * 3
        self.conv1 = GCNConv(num_features, 20)
        self.relu1 = ReLU()
        self.conv2 = GCNConv(20, 20)
        self.relu2 = ReLU()
        self.conv3 = GCNConv(20, 20)
        self.relu3 = ReLU()
        self.lin = Linear(3 * 20, num_classes)

    def forward(self, x, edge_index, edge_weights=None):
        """Return per-node class logits."""
        input_lin = self.embedding(x, edge_index, edge_weights)
        final = self.lin(input_lin)
        return final

    def embedding(self, x, edge_index, edge_weights=None):
        """Return the concatenation of all three GCN layer outputs per node."""
        if edge_weights is None:
            edge_weights = torch.ones(edge_index.size(1))
        stack = []

        out1 = self.conv1(x, edge_index, edge_weights)
        out1 = torch.nn.functional.normalize(out1, p=2, dim=1)  # this is not used in PGExplainer
        out1 = self.relu1(out1)
        stack.append(out1)

        out2 = self.conv2(out1, edge_index, edge_weights)
        out2 = torch.nn.functional.normalize(out2, p=2, dim=1)  # this is not used in PGExplainer
        out2 = self.relu2(out2)
        stack.append(out2)

        out3 = self.conv3(out2, edge_index, edge_weights)
        out3 = torch.nn.functional.normalize(out3, p=2, dim=1)  # this is not used in PGExplainer
        out3 = self.relu3(out3)
        stack.append(out3)

        input_lin = torch.cat(stack, dim=1)

        return input_lin


class GraphGCN(torch.nn.Module):
    """
    A graph classification model described in https://arxiv.org/abs/1903.03894.
    Three stacked GCN layers followed by a linear layer; between the GCN
    outputs and the linear layer the node embeddings are pooled with both
    max and mean pooling.
    """
    def __init__(self, num_features, num_classes):
        super(GraphGCN, self).__init__()
        self.embedding_size = 20
        self.conv1 = GCNConv(num_features, 20)
        self.relu1 = ReLU()
        self.conv2 = GCNConv(20, 20)
        self.relu2 = ReLU()
        self.conv3 = GCNConv(20, 20)
        self.relu3 = ReLU()
        self.lin = Linear(self.embedding_size * 2, num_classes)

    def forward(self, x, edge_index, batch=None, edge_weights=None):
        """Return per-graph class logits; `batch` defaults to a single graph."""
        if batch is None:  # No batch given
            batch = torch.zeros(x.size(0), dtype=torch.long)
        embed = self.embedding(x, edge_index, edge_weights)

        out1 = global_max_pool(embed, batch)
        out2 = global_mean_pool(embed, batch)
        input_lin = torch.cat([out1, out2], dim=-1)

        out = self.lin(input_lin)
        return out

    def embedding(self, x, edge_index, edge_weights=None):
        """Return the node embeddings produced by the final GCN layer."""
        if edge_weights is None:
            edge_weights = torch.ones(edge_index.size(1))

        out1 = self.conv1(x, edge_index, edge_weights)
        out1 = torch.nn.functional.normalize(out1, p=2, dim=1)
        out1 = self.relu1(out1)

        out2 = self.conv2(out1, edge_index, edge_weights)
        out2 = torch.nn.functional.normalize(out2, p=2, dim=1)
        out2 = self.relu2(out2)

        out3 = self.conv3(out2, edge_index, edge_weights)
        out3 = torch.nn.functional.normalize(out3, p=2, dim=1)
        out3 = self.relu3(out3)

        # Fix: the original also built a `stack` list here (like NodeGCN does)
        # but never used it — only the last layer's output feeds the pooling head.
        return out3
import torch
from torch.nn import ReLU, Linear
from torch_geometric.nn import GCNConv, BatchNorm
from torch_geometric.nn import global_mean_pool, global_max_pool


class NodeGCN(torch.nn.Module):
    """
    Node classification model described in https://arxiv.org/abs/2011.04573:
    three stacked GCN layers with batch norm, followed by a linear layer over
    the concatenation of the per-layer outputs.
    """
    def __init__(self, num_features, num_classes):
        super(NodeGCN, self).__init__()
        self.embedding_size = 20 * 3
        self.conv1 = GCNConv(num_features, 20)
        self.relu1 = ReLU()
        self.bn1 = BatchNorm(20)  # BN is not used in GNNExplainer
        self.conv2 = GCNConv(20, 20)
        self.relu2 = ReLU()
        self.bn2 = BatchNorm(20)
        self.conv3 = GCNConv(20, 20)
        self.relu3 = ReLU()
        self.lin = Linear(self.embedding_size, num_classes)

    def forward(self, x, edge_index, edge_weights=None):
        """Return per-node class logits."""
        return self.lin(self.embedding(x, edge_index, edge_weights))

    def embedding(self, x, edge_index, edge_weights=None):
        """
        Return the concatenation of all three layer outputs per node.
        Note: edge_weights is forwarded as-is (possibly None) to the convolutions.
        """
        h1 = self.bn1(self.relu1(self.conv1(x, edge_index, edge_weights)))
        h2 = self.bn2(self.relu2(self.conv2(h1, edge_index, edge_weights)))
        h3 = self.relu3(self.conv3(h2, edge_index, edge_weights))
        return torch.cat((h1, h2, h3), dim=1)


class GraphGCN(torch.nn.Module):
    """
    Graph classification model described in https://arxiv.org/abs/2011.04573:
    three stacked GCN layers followed by a linear layer; the node embeddings
    are pooled with both max and mean pooling before the linear layer.
    """
    def __init__(self, num_features, num_classes):
        super(GraphGCN, self).__init__()
        self.embedding_size = 20
        self.conv1 = GCNConv(num_features, 20)
        self.relu1 = ReLU()
        self.conv2 = GCNConv(20, 20)
        self.relu2 = ReLU()
        self.conv3 = GCNConv(20, 20)
        self.relu3 = ReLU()
        self.lin = Linear(self.embedding_size * 2, num_classes)

    def forward(self, x, edge_index, batch=None, edge_weights=None):
        """Return per-graph class logits; `batch` defaults to a single graph."""
        if batch is None:  # treat all nodes as one graph
            batch = torch.zeros(x.size(0), dtype=torch.long)
        embed = self.embedding(x, edge_index, edge_weights)
        pooled = torch.cat([global_max_pool(embed, batch),
                            global_mean_pool(embed, batch)], dim=-1)
        return self.lin(pooled)

    def embedding(self, x, edge_index, edge_weights=None):
        """Return the node embeddings produced by the final GCN layer."""
        if edge_weights is None:
            edge_weights = torch.ones(edge_index.size(1))

        h = x
        for conv, relu in ((self.conv1, self.relu1),
                           (self.conv2, self.relu2),
                           (self.conv3, self.relu3)):
            h = conv(h, edge_index, edge_weights)
            h = torch.nn.functional.normalize(h, p=2, dim=1)  # this is not used in PGExplainer
            h = relu(h)
        return h
import torch
import os


def string_to_model(paper, dataset):
    """
    Given a paper and a dataset return the corresponding neural model needed
    for training, with the correct in- and output dimensions.

    :param paper: "GNN" or "PG"; whose classification model we want to use.
    :param dataset: the dataset on which we wish to train.
    :returns: torch.nn.Module model.
    :raises NotImplementedError: for an unknown paper or dataset.
    """
    # The model modules depend on torch_geometric; import them lazily so this
    # selector stays importable without that dependency and the unknown-paper
    # error below is reached before any heavy import.
    if paper == "GNN":
        from ExplanationEvaluation.models.GNN_paper import NodeGCN as node_cls, GraphGCN as graph_cls
    elif paper == "PG":
        from ExplanationEvaluation.models.PG_paper import NodeGCN as node_cls, GraphGCN as graph_cls
    else:
        raise NotImplementedError

    # All node datasets use 10 input features; all graph datasets have 2 classes.
    node_num_classes = {"syn1": 4, "syn2": 8, "syn3": 2, "syn4": 2}
    graph_num_features = {"ba2": 10, "mutag": 14}

    if dataset in node_num_classes:
        return node_cls(10, node_num_classes[dataset])
    if dataset in graph_num_features:
        return graph_cls(graph_num_features[dataset], 2)
    raise NotImplementedError


def get_pretrained_path(paper, dataset):
    """
    Build the path to the pre-trained parameters for a (paper, dataset) pair.

    :param paper: "GNN" or "PG".
    :param dataset: dataset name.
    :returns: str; the path to the pre-trained model parameters.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    path = f"{dir_path}/pretrained/{paper}/{dataset}/best_model"
    return path


def model_selector(paper, dataset, pretrained=True, return_checkpoint=False):
    """
    Given a paper and dataset load the associated model.

    :param paper: "GNN" or "PG"; whose classification model we want to use.
    :param dataset: the dataset on which we wish to train.
    :param pretrained: whether to load pre-trained weights.
    :param return_checkpoint: whether to also return the checkpoint dict.
        NOTE(review): this flag only has an effect when pretrained=True —
        with pretrained=False only the bare model is returned; confirm callers
        never rely on the (False, True) combination.
    :returns: torch.nn.Module model and optionally the checkpoint dict.
    """
    model = string_to_model(paper, dataset)
    if pretrained:
        path = get_pretrained_path(paper, dataset)
        # NOTE(review): torch.load without map_location assumes the checkpoint
        # was saved on a device available here — confirm for GPU-saved models.
        checkpoint = torch.load(path)
        model.load_state_dict(checkpoint['model_state_dict'])
        print(f"This model obtained: Train Acc: {checkpoint['train_acc']:.4f}, Val Acc: {checkpoint['val_acc']:.4f}, Test Acc: {checkpoint['test_acc']:.4f}.")
        if return_checkpoint:
            return model, checkpoint
    return model
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/models/pretrained/GNN/syn2/best_model -------------------------------------------------------------------------------- /ExplanationEvaluation/models/pretrained/GNN/syn3/best_model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/models/pretrained/GNN/syn3/best_model -------------------------------------------------------------------------------- /ExplanationEvaluation/models/pretrained/GNN/syn4/best_model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/models/pretrained/GNN/syn4/best_model -------------------------------------------------------------------------------- /ExplanationEvaluation/models/pretrained/PG/ba2/best_model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/models/pretrained/PG/ba2/best_model -------------------------------------------------------------------------------- /ExplanationEvaluation/models/pretrained/PG/mutag/best_model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/models/pretrained/PG/mutag/best_model 
-------------------------------------------------------------------------------- /ExplanationEvaluation/models/pretrained/PG/syn1/best_model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/models/pretrained/PG/syn1/best_model -------------------------------------------------------------------------------- /ExplanationEvaluation/models/pretrained/PG/syn1/best_model_old: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/models/pretrained/PG/syn1/best_model_old -------------------------------------------------------------------------------- /ExplanationEvaluation/models/pretrained/PG/syn2/best_model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/models/pretrained/PG/syn2/best_model -------------------------------------------------------------------------------- /ExplanationEvaluation/models/pretrained/PG/syn3/best_model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/models/pretrained/PG/syn3/best_model -------------------------------------------------------------------------------- /ExplanationEvaluation/models/pretrained/PG/syn4/best_model: -------------------------------------------------------------------------------- 
def get_classification_task(graphs):
    """
    Given the original data, determine whether the task at hand is node or
    graph classification.

    :param graphs: a list of graphs (graph task) or a single graph (node task).
    :return: str, either 'graph' or 'node'.
    """
    # A list means one edge_index per sample, i.e. graph classification.
    return "graph" if isinstance(graphs, list) else "node"


def to_torch_graph(graphs, task):
    """
    Transform the numpy graph(s) to torch tensors depending on the task of the
    model that we want to explain.

    :param graphs: list of numpy graphs (graph task) or a single numpy graph (node task).
    :param task: either 'node' or 'graph'.
    :return: a list of torch tensors for the graph task, otherwise a single
        torch tensor. (The original doc claimed a single tensor in both cases.)
    """
    if task == 'graph':
        return [torch.tensor(g) for g in graphs]
    return torch.tensor(graphs)


def select_explainer(explainer, model, graphs, features, task, epochs, lr, reg_coefs, temp=None, sample_bias=None):
    """
    Select the explainer we wish to use.

    :param explainer: str, "PG" or "GNN".
    :param model: classification model whose predictions we wish to explain.
    :param graphs: the collection of edge_indices representing the graphs.
    :param features: the collection of features for each node in the graphs.
    :param task: str, "node" or "graph".
    :param epochs: number of epochs to train the explainer.
    :param lr: learning rate used in the training of the explainer.
    :param reg_coefs: (size, entropy) regularization coefficients for the loss.
    :param temp: the temperature parameters dictating how random graphs are sampled (PG only).
    :param sample_bias: the bias added when sampling random graphs (PG only).
    :raises NotImplementedError: for an unknown explainer name.
    """
    if explainer == "PG":
        return PGExplainer(model, graphs, features, task, epochs=epochs, lr=lr,
                           reg_coefs=reg_coefs, temp=temp, sample_bias=sample_bias)
    elif explainer == "GNN":
        return GNNExplainer(model, graphs, features, task, epochs=epochs, lr=lr,
                            reg_coefs=reg_coefs)
    else:
        raise NotImplementedError("Unknown explainer type")


def run_experiment(inference_eval, auc_eval, explainer, indices):
    """
    Run one experiment: generate explanations for the given indices and
    calculate the AUC score.

    :param inference_eval: object measuring the inference speed.
    :param auc_eval: metric object which calculates the AUC score.
    :param explainer: the explainer we wish to obtain predictions from.
    :param indices: indices over which to evaluate the AUC.
    :returns: (AUC score, inference time score).
    """
    inference_eval.start_prepate()  # [sic] — method name defined by EfficiencyEvluation
    explainer.prepare(indices)

    inference_eval.start_explaining()
    explanations = []
    for idx in tqdm(indices):
        graph, expl = explainer.explain(idx)
        explanations.append((graph, expl))
    inference_eval.done_explaining()

    auc_score = auc_eval.get_score(explanations)
    time_score = inference_eval.get_score(explanations)

    return auc_score, time_score
def run_qualitative_experiment(explainer, indices, labels, config, explanation_labels):
    """
    Plot the explanations generated by the explainer.

    :param explainer: the explainer object.
    :param indices: indices on which we validate.
    :param labels: predictions of the explainer.
    :param config: config object holding which subgraphs to plot.
    :param explanation_labels: the ground-truth labels.
    """
    for idx in indices:
        graph, expl = explainer.explain(idx)
        plot(graph, expl, labels, idx, config.thres_min, config.thres_snip,
             config.dataset, config, explanation_labels)


def store_results(auc, auc_std, inf_time, checkpoint, config):
    """
    Save the replication results into a json file under ./results.

    :param auc: the obtained AUC score.
    :param auc_std: the obtained AUC standard deviation.
    :param inf_time: time it takes to make a single prediction.
    :param checkpoint: the checkpoint of the explained model.
    :param config: config object (explainer/model/dataset attributes).
    """
    json_dict = {
        "Explainer parameters": {"Explainer": config.explainer,
                                 "Model": config.model,
                                 "Dataset": config.dataset},
        "Results": {"AUC": auc,
                    "AUC std": auc_std,
                    "Inference time (ms)": inf_time},
        "Trained model stats": {"Training Accuracy": checkpoint["train_acc"],
                                "Validation Accuracy": checkpoint["val_acc"],
                                "Test Accuracy": checkpoint["test_acc"]},
    }

    save_dir = "./results"
    os.makedirs(save_dir, exist_ok=True)
    # Fix: build the file path from save_dir instead of duplicating the
    # "./results" literal a second time in the open() call.
    file_path = os.path.join(
        save_dir, f"P_{config.explainer}_M_{config.model}_D_{config.dataset}_results.json")
    with open(file_path, "w") as fp:
        json.dump(json_dict, fp, indent=4)


def replication(config, extension=False, run_qual=True, results_store=True):
    """
    Perform the replication study: load a pre-trained model, train the
    explainer, obtain the generated explanations, and save the obtained AUC
    score in a json file.

    :param config: an object containing the config file values.
    :param extension: bool, whether to use all test indices instead of the
        ground-truth ones.
    :param run_qual: bool, whether to run the qualitative (plotting) experiment once.
    :param results_store: bool, whether to store the results on disk.
    :returns: ((mean AUC, AUC std), mean inference time).
    """
    # Load complete dataset
    graphs, features, labels, _, _, test_mask = load_dataset(config.dataset)
    task = get_classification_task(graphs)

    features = torch.tensor(features)
    labels = torch.tensor(labels)
    graphs = to_torch_graph(graphs, task)

    # Load pretrained model
    model, checkpoint = model_selector(config.model,
                                       config.dataset,
                                       pretrained=True,
                                       return_checkpoint=True)
    if config.eval_enabled:
        model.eval()

    # Get ground truth for every node
    explanation_labels, indices = load_dataset_ground_truth(config.dataset)
    if extension:
        indices = np.argwhere(test_mask).squeeze()

    # Build the explainer
    explainer = select_explainer(config.explainer,
                                 model=model,
                                 graphs=graphs,
                                 features=features,
                                 task=task,
                                 epochs=config.epochs,
                                 lr=config.lr,
                                 reg_coefs=[config.reg_size, config.reg_ent],
                                 temp=config.temps,
                                 sample_bias=config.sample_bias)

    # Get evaluation methods
    auc_evaluation = AUCEvaluation(task, explanation_labels, indices)
    inference_eval = EfficiencyEvluation()

    # Perform the evaluation once per seed
    auc_scores = []
    times = []

    for idx, s in enumerate(config.seeds):
        print(f"Run {idx} with seed {s}")
        # Set all seeds needed for reproducibility
        torch.manual_seed(s)
        torch.cuda.manual_seed(s)
        np.random.seed(s)

        inference_eval.reset()
        auc_score, time_score = run_experiment(inference_eval, auc_evaluation,
                                               explainer, indices)

        if idx == 0 and run_qual:  # We only run the qualitative experiment once
            run_qualitative_experiment(explainer, indices, labels, config, explanation_labels)

        auc_scores.append(auc_score)
        print("score:", auc_score)
        times.append(time_score)
        print("time_elased:", time_score)

    auc = np.mean(auc_scores)
    auc_std = np.std(auc_scores)
    # NOTE(review): the /10 presumably converts the score assuming 10 seeded
    # runs — confirm it still holds when config.seeds has a different length.
    inf_time = np.mean(times) / 10

    if results_store:
        store_results(auc, auc_std, inf_time, checkpoint, config)

    return (auc, auc_std), inf_time
13 | :params graphs: edge indecs of the graphs 14 | :params features: features for every node 15 | :params labels: ground truth labels 16 | :params mask: mask, used to filter the data 17 | :retuns: list; contains the dataset 18 | """ 19 | indices = np.argwhere(mask).squeeze() 20 | data_list = [] 21 | for i in indices: 22 | x = torch.tensor(features[i]) 23 | edge_index = torch.tensor(graphs[i]) 24 | y = torch.tensor(labels[i].argmax()) 25 | data = Data(x=x, edge_index=edge_index, y=y) 26 | data_list.append(data) 27 | return data_list 28 | 29 | 30 | def evaluate(out, labels): 31 | """ 32 | Calculates the accuracy between the prediction and the ground truth. 33 | :param out: predicted outputs of the explainer 34 | :param labels: ground truth of the data 35 | :returns: int accuracy 36 | """ 37 | preds = out.argmax(dim=1) 38 | correct = preds == labels 39 | acc = int(correct.sum()) / int(correct.size(0)) 40 | return acc 41 | 42 | 43 | def store_checkpoint(paper, dataset, model, train_acc, val_acc, test_acc, epoch=-1): 44 | """ 45 | Store the model weights at a predifined location. 
46 | :param paper: str, the paper 47 | :param dataset: str, the dataset 48 | :param model: the model who's parameters we whish to save 49 | :param train_acc: training accuracy obtained by the model 50 | :param val_acc: validation accuracy obtained by the model 51 | :param test_acc: test accuracy obtained by the model 52 | :param epoch: the current epoch of the training process 53 | :retunrs: None 54 | """ 55 | save_dir = f"./checkpoints/{paper}/{dataset}" 56 | checkpoint = {'model_state_dict': model.state_dict(), 57 | 'train_acc': train_acc, 58 | 'val_acc': val_acc, 59 | 'test_acc': test_acc} 60 | if not os.path.isdir(save_dir): 61 | os.makedirs(save_dir) 62 | 63 | if epoch == -1: 64 | torch.save(checkpoint, os.path.join(save_dir, f"best_model")) 65 | else: 66 | torch.save(checkpoint, os.path.join(save_dir, f"model_{epoch}")) 67 | 68 | 69 | def load_best_model(best_epoch, paper, dataset, model, eval_enabled): 70 | """ 71 | Load the model parameters from a checkpoint into a model 72 | :param best_epoch: the epoch which obtained the best result. 
use -1 to chose the "best model" 73 | :param paper: str, the paper 74 | :param dataset: str, the dataset 75 | :param model: the model who's parameters overide 76 | :param eval_enabled: wheater to activate evaluation mode on the model or not 77 | :return: model with pramaters taken from the checkpoint 78 | """ 79 | print(best_epoch) 80 | if best_epoch == -1: 81 | checkpoint = torch.load(f"./checkpoints/{paper}/{dataset}/best_model") 82 | else: 83 | checkpoint = torch.load(f"./checkpoints/{paper}/{dataset}/model_{best_epoch}") 84 | model.load_state_dict(checkpoint['model_state_dict']) 85 | 86 | if eval_enabled: model.eval() 87 | 88 | return model 89 | 90 | 91 | def train_node(_dataset, _paper, args): 92 | """ 93 | Train a explainer to explain node classifications 94 | :param _dataset: the dataset we wish to use for training 95 | :param _paper: the paper we whish to follow, chose from "GNN" or "PG" 96 | :param args: a dict containing the relevant model arguements 97 | """ 98 | graph, features, labels, train_mask, val_mask, test_mask = load_dataset(_dataset) 99 | model = model_selector(_paper, _dataset, False) 100 | 101 | x = torch.tensor(features) 102 | edge_index = torch.tensor(graph) 103 | labels = torch.tensor(labels) 104 | 105 | # Define graph 106 | print(model) 107 | optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) 108 | criterion = torch.nn.CrossEntropyLoss() 109 | 110 | best_val_acc = 0.0 111 | best_epoch = 0 112 | 113 | for epoch in range(0, args.epochs): 114 | model.train() 115 | optimizer.zero_grad() 116 | out = model(x, edge_index) 117 | loss = criterion(out[train_mask], labels[train_mask]) 118 | loss.backward() 119 | torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_max) 120 | optimizer.step() 121 | 122 | if args.eval_enabled: model.eval() 123 | with torch.no_grad(): 124 | out = model(x, edge_index) 125 | 126 | # Evaluate train 127 | train_acc = evaluate(out[train_mask], labels[train_mask]) 128 | test_acc = evaluate(out[test_mask], 
labels[test_mask]) 129 | val_acc = evaluate(out[val_mask], labels[val_mask]) 130 | 131 | print(f"Epoch: {epoch}, train_acc: {train_acc:.4f}, val_acc: {val_acc:.4f}, train_loss: {loss:.4f}") 132 | 133 | if val_acc > best_val_acc: # New best results 134 | print("Val improved") 135 | best_val_acc = val_acc 136 | best_epoch = epoch 137 | store_checkpoint(_paper, _dataset, model, train_acc, val_acc, test_acc, best_epoch) 138 | 139 | if epoch - best_epoch > args.early_stopping and best_val_acc > 0.99: 140 | break 141 | 142 | model = load_best_model(best_epoch, _paper, _dataset, model, args.eval_enabled) 143 | out = model(x, edge_index) 144 | 145 | # Train eval 146 | train_acc = evaluate(out[train_mask], labels[train_mask]) 147 | test_acc = evaluate(out[test_mask], labels[test_mask]) 148 | val_acc = evaluate(out[val_mask], labels[val_mask]) 149 | print(f"final train_acc:{train_acc}, val_acc: {val_acc}, test_acc: {test_acc}") 150 | 151 | store_checkpoint(_paper, _dataset, model, train_acc, val_acc, test_acc) 152 | 153 | 154 | def train_graph(_dataset, _paper, args): 155 | """ 156 | Train a explainer to explain graph classifications 157 | :param _dataset: the dataset we wish to use for training 158 | :param _paper: the paper we whish to follow, chose from "GNN" or "PG" 159 | :param args: a dict containing the relevant model arguements 160 | """ 161 | graphs, features, labels, train_mask, val_mask, test_mask = load_dataset(_dataset) 162 | train_set = create_data_list(graphs, features, labels, train_mask) 163 | val_set = create_data_list(graphs, features, labels, val_mask) 164 | test_set = create_data_list(graphs, features, labels, test_mask) 165 | 166 | train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True) 167 | val_loader = DataLoader(val_set, batch_size=len(val_set), shuffle=False) 168 | test_loader = DataLoader(test_set, batch_size=len(test_set), shuffle=False) 169 | 170 | model = model_selector(_paper, _dataset, False) 171 | 172 | # Define graph 
173 | optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) 174 | criterion = torch.nn.CrossEntropyLoss() 175 | 176 | best_val_acc = 0.0 177 | best_epoch = 0 178 | 179 | for epoch in range(0, args.epochs): 180 | model.train() 181 | 182 | # Use pytorch-geometric batching method 183 | for data in train_loader: 184 | optimizer.zero_grad() 185 | out = model(data.x, data.edge_index, data.batch) 186 | loss = criterion(out, data.y) 187 | loss.backward() 188 | torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_max) 189 | optimizer.step() 190 | 191 | model.eval() 192 | # Evaluate train 193 | with torch.no_grad(): 194 | train_sum = 0 195 | loss = 0 196 | for data in train_loader: 197 | out = model(data.x, data.edge_index, data.batch) 198 | loss += criterion(out, data.y) 199 | preds = out.argmax(dim=1) 200 | train_sum += (preds == data.y).sum() 201 | train_acc = int(train_sum) / int(len(train_set)) 202 | train_loss = float(loss) / int(len(train_loader)) 203 | 204 | eval_data = next(iter(test_loader)) # Loads all test samples 205 | out = model(eval_data.x, eval_data.edge_index, eval_data.batch) 206 | test_acc = evaluate(out, eval_data.y) 207 | 208 | eval_data = next(iter(val_loader)) # Loads all eval samples 209 | out = model(eval_data.x, eval_data.edge_index, eval_data.batch) 210 | val_acc = evaluate(out, eval_data.y) 211 | 212 | print(f"Epoch: {epoch}, train_acc: {train_acc:.4f}, val_acc: {val_acc:.4f}, train_loss: {loss:.4f}") 213 | 214 | if val_acc > best_val_acc: # New best results 215 | print("Val improved") 216 | best_val_acc = val_acc 217 | best_epoch = epoch 218 | store_checkpoint(_paper, _dataset, model, train_acc, val_acc, test_acc, best_epoch) 219 | 220 | # Early stopping 221 | if epoch - best_epoch > args.early_stopping: 222 | break 223 | 224 | model = load_best_model(best_epoch, _paper, _dataset, model, args.eval_enabled) 225 | 226 | with torch.no_grad(): 227 | train_sum = 0 228 | for data in train_loader: 229 | out = model(data.x, 
data.edge_index, data.batch) 230 | preds = out.argmax(dim=1) 231 | train_sum += (preds == data.y).sum() 232 | train_acc = int(train_sum) / int(len(train_set)) 233 | 234 | eval_data = next(iter(test_loader)) 235 | out = model(eval_data.x, eval_data.edge_index, eval_data.batch) 236 | test_acc = evaluate(out, eval_data.y) 237 | 238 | eval_data = next(iter(val_loader)) 239 | out = model(eval_data.x, eval_data.edge_index, eval_data.batch) 240 | val_acc = evaluate(out, eval_data.y) 241 | 242 | print(f"final train_acc:{train_acc}, val_acc: {val_acc}, test_acc: {test_acc}") 243 | 244 | store_checkpoint(_paper, _dataset, model, train_acc, val_acc, test_acc) -------------------------------------------------------------------------------- /ExplanationEvaluation/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LarsHoldijk/RE-ParameterizedExplainerForGraphNeuralNetworks/46870bddd8da22ebbd28c9eaff78514790753e64/ExplanationEvaluation/utils/__init__.py -------------------------------------------------------------------------------- /ExplanationEvaluation/utils/graph.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def index_edge(graph, pair): 4 | return torch.where((graph.T == pair).all(dim=1))[0] 5 | -------------------------------------------------------------------------------- /ExplanationEvaluation/utils/plotting.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import torch 3 | import numpy as np 4 | import matplotlib.pyplot as plt 5 | from pathlib import Path 6 | 7 | """ 8 | The function in this file is largely copied from the orginal PGExplainer codebase. The decision was made to largely copy this file to ensure 9 | that the graph visualization between the original and replicate results would be as similar as possible. 
Additional comments were added 10 | to clarify the code. 11 | """ 12 | 13 | def plot(graph, edge_weigths, labels, idx, thres_min, thres_snip, dataset, args=None, gt=None, show=False): 14 | """ 15 | Function that can plot an explanation (sub)graph and store the image. 16 | 17 | :param graph: graph provided by explainer 18 | :param edge_weigths: Mask of edge weights provided by explainer 19 | :param labels: Label of each node required for coloring of nodes 20 | :param idx: Node index of interesting node 21 | :param thresh_min: total number of edges 22 | :param thres_snip: number of top edges 23 | :param args: Object containing arguments from configuration 24 | :param gt: Ground Truth 25 | :param show: flag to show plot made 26 | """ 27 | # Set thresholds 28 | sorted_edge_weigths, _ = torch.sort(edge_weigths) 29 | 30 | thres_index = max(int(edge_weigths.shape[0]-thres_snip),0) 31 | 32 | thres = sorted_edge_weigths[thres_index] 33 | if thres_min == -1: 34 | filter_thres_index = 0 35 | else: 36 | filter_thres_index = min(thres_index, 37 | max(int(edge_weigths.shape[0]-edge_weigths.shape[0]/2), 38 | edge_weigths.shape[0]-thres_min)) 39 | filter_thres = sorted_edge_weigths[filter_thres_index] 40 | # Init edges 41 | filter_nodes = set() 42 | filter_edges = [] 43 | pos_edges = [] 44 | # Select all edges and nodes to plot 45 | for i in range(edge_weigths.shape[0]): 46 | # Select important edges 47 | if edge_weigths[i] >= thres and not graph[0][i] == graph[1][i]: 48 | pos_edges.append((graph[0][i].item(),graph[1][i].item())) 49 | # Select all edges to plot 50 | if edge_weigths[i] > filter_thres and not graph[0][i] == graph[1][i]: 51 | filter_edges.append((graph[0][i].item(),graph[1][i].item())) 52 | filter_nodes.add(graph[0][i].item()) 53 | filter_nodes.add(graph[1][i].item()) 54 | num_nodes = len(pos_edges) 55 | 56 | # Initialize graph object 57 | G = nx.Graph() 58 | 59 | if not (thres_min == -1): 60 | # Deal with plotting of node datasets 61 | G.add_edges_from(filter_edges) 
62 | pos = nx.kamada_kawai_layout(G) 63 | 64 | label = [] 65 | for node in filter_nodes: 66 | label.append(int(labels[node])) 67 | 68 | for cc in nx.connected_components(G): 69 | if idx in cc: 70 | G = G.subgraph(cc).copy() 71 | break 72 | 73 | pos_edges = [(u, v) for (u, v) in pos_edges if u in G.nodes() and v in G.nodes()] 74 | colors = ['orange', 'red', 'green', 'blue', 'maroon', 'brown', 'darkslategray', 'paleturquoise', 'darksalmon', 75 | 'slategray', 'mediumseagreen', 'mediumblue', 'orchid', ] 76 | if dataset=='syn3': 77 | colors = ['orange', 'blue'] 78 | 79 | 80 | if dataset=='syn4': 81 | colors = ['orange', 'black','black','black','blue'] 82 | 83 | # node coloring 84 | label2nodes= [] 85 | max_label = np.max(label)+1 # amount of labels to use 86 | nmb_nodes = len(filter_nodes) # amount of nodes that need coloring 87 | 88 | # Create empty lists of possible labels 89 | for i in range(max_label): 90 | label2nodes.append([]) 91 | 92 | # For each node add the node to it's assigned label 93 | for i in range(nmb_nodes): 94 | label2nodes[label[i]].append(list(filter_nodes)[i]) 95 | 96 | # actually draw the nodes 97 | for i in range(len(label2nodes)): 98 | node_list = [] 99 | # For each label that needs a color 100 | for j in range(len(label2nodes[i])): 101 | if label2nodes[i][j] in G.nodes(): 102 | node_list.append(label2nodes[i][j]) 103 | # Draw all nodes of a certain color 104 | nx.draw_networkx_nodes(G, 105 | pos, 106 | nodelist=node_list, 107 | node_color=colors[i % len(colors)], 108 | node_size=500) 109 | 110 | # Draw a base node 111 | if idx in pos.keys(): 112 | nx.draw_networkx_nodes(G, pos, 113 | nodelist=[idx], 114 | node_color=colors[labels[idx]], 115 | node_size=1000) 116 | 117 | # Deal with plotting of graph datasets 118 | else: 119 | # Format edges 120 | edges = [(pair[0], pair[1]) for pair in gt[0][idx].T] 121 | # Obtain all unique nodes 122 | nodes = np.unique(gt[0][idx]) 123 | # Add all unique nodes and all edges 124 | G.add_nodes_from(nodes) 125 | 
G.add_edges_from(edges) 126 | # Let the graph generate all positions 127 | pos = nx.kamada_kawai_layout(G) 128 | 129 | pos_edges = [(u, v) for (u, v) in pos_edges if u in G.nodes() and v in G.nodes()] 130 | 131 | nx.draw_networkx_nodes(G, 132 | pos, 133 | nodelist=nodes, 134 | node_color='red', 135 | node_size=500) 136 | 137 | 138 | # Draw an edge 139 | nx.draw_networkx_edges(G, 140 | pos, 141 | width=7, 142 | alpha=0.5, 143 | edge_color='grey') 144 | 145 | # Draw all pos edges 146 | nx.draw_networkx_edges(G, 147 | pos, 148 | edgelist=pos_edges, 149 | width=7, 150 | alpha=0.5) 151 | plt.axis('off') 152 | if show: 153 | plt.show() 154 | else: 155 | save_path = f'./qualitative/e_{args.explainer}/m_{args.model}/d_{args.dataset}/' 156 | 157 | # Generate folders if they do not exist 158 | Path(save_path).mkdir(parents=True, exist_ok=True) 159 | 160 | # Save figure 161 | plt.savefig(f'{save_path}{idx}.png') 162 | plt.clf() 163 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # [Re] Parameterized Explainer for Graph Neural Networks 2 | **Authors**: Maarten Boon, Stijn Henckens, [Lars Holdijk](https://www.larsholdijk.com/) and Lysander de Jong 3 | 4 | This repository contains all code required to replicate our replication study of the paper _Parameterized Explainer for Graph Neural Networks_[1]. This includes both a new implementation of the PGExplainer method introduced as well as a reimplementation of the earlier introduced GNNExplainer [2], which serves as a benchmark in the evaluation. In addition to this, the repository contains the datasets and pretrained models needed for a faithfull replication. 5 | 6 | In addition to ipython notebooks for replicating our study, the repository also contains an example that shows how our codebase can be used for your own experiments. 
This example also serves as a good starting point for understanding how the replication study is performed. 7 | 8 | **bibtex citation** 9 | ``` 10 | @inproceedings{holdijk2021re, 11 | title={[Re] Parameterized Explainer for Graph Neural Network}, 12 | author={Holdijk, Lars and Boon, Maarten and Henckens, Stijn and de Jong, Lysander}, 13 | booktitle={ML Reproducibility Challenge 2020}, 14 | year={2021} 15 | } 16 | ``` 17 | 18 | ## IPython Notebooks 19 | Four IPython Notebooks are available to replicate our experiments 20 | 21 | - **experiment_model_training**: Replicates the trained models used in the evaluations. Instead of retraining the models yourself, it is also possible to reuse the already trained models. 22 | - **experiment_replication**: Replicates the main replication study of our paper. By default the notebook uses the pretrained models. 23 | - **experiment_ablation**: Replicates the small ablation study found in the paper 24 | - **example_explain_your_model**: This notebook is not part of the replication study but instead serves as a starting point for reusing our code in your own project. 25 | 26 | ## Codebase 27 | All code required for the replication study can be found in the `ExplanationEvaluation` module. This also includes the required datasets and pretrained models. 28 | 29 | ## Configurations 30 | In the folder `ExplanationEvaluation` all configuration files needed to replicate the replication study can be found. A discussion of their setup can be found in the appendix of the corresponding paper. 31 | 32 | 33 | ## Installation 34 | Install required packages using 35 | ```pip install -r requirements.txt``` 36 | additionally follow the [instructions](https://pytorch-geometric.readthedocs.io/en/latest/notes/installation.html) in order to install PyTorch Geometric.
37 | 38 | 39 | ## Resources 40 | - [1] [PGExplainer](https://arxiv.org/pdf/2011.04573.pdf) 41 | - [2] [GNNExplainer](https://arxiv.org/pdf/1903.03894.pdf) 42 | -------------------------------------------------------------------------------- /experiment_replication.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Replication experiment\n", 8 | "\n", 9 | "This is the main notebook required to obtain the results of our replication study. The notebook is built around the concept of predefined configuration files. These configuration files can be found within the codebase. The configuration files for different datasets and different explainers can be chosen by changing the parameters in the second codeblock. \n", 10 | "\n", 11 | "When loaded, the configuration for a replication experiment is passed to the replication function. This function is responsible for running all parts of the evaluation; quantitative, qualitative and efficiency. The results for the quantitative and efficiency studies are returned by the replication method and also stored in the `results` folder. The results of the qualitative study are stored in the folder named `qualitative`. \n", 12 | "\n", 13 | "**Be aware that the replication function can take very long to complete**. This is caused by the method averaging all scores over ten runs. If speed is required over accuracy the last line of the 2nd codeblock can be uncommented. This will make the evaluation run over one run only. 
\n" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 1, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "from ExplanationEvaluation.configs.selector import Selector\n", 23 | "from ExplanationEvaluation.tasks.replication import replication" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "_dataset = 'bashapes' # One of: bashapes, bacommunity, treecycles, treegrids, ba2motifs, mutag\n", 33 | "_explainer = 'pgexplainer' # One of: pgexplainer, gnnexplainer\n", 34 | "\n", 35 | "# Parameters below should only be changed if you want to run any of the experiments in the supplementary\n", 36 | "_folder = 'replication' # One of: replication, extension\n", 37 | "\n", 38 | "# PGExplainer\n", 39 | "config_path = f\"./ExplanationEvaluation/configs/{_folder}/explainers/{_explainer}/{_dataset}.json\"\n", 40 | "\n", 41 | "config = Selector(config_path)\n", 42 | "extension = (_folder == 'extension')\n", 43 | "\n", 44 | "# config.args.explainer.seeds = [0]" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "metadata": {}, 51 | "outputs": [ 52 | { 53 | "name": "stderr", 54 | "output_type": "stream", 55 | "text": [ 56 | "\r", 57 | " 0%| | 0/10 [00:00