├── .gitignore ├── .gitmodules ├── All_scores_100.ods ├── All_scores_20.ods ├── CausalGEN ├── ratio_biotic_clusterGenie3_data.csv ├── ratio_biotic_clusterGenie3_target.csv └── results │ ├── ratio_biotic_117genes.csv_result.csv │ └── ratio_biotic_clusterGenie3.csv_result.csv ├── LICENSE.md ├── README.md ├── cyto ├── cyto_full_data.csv ├── cyto_full_log │ └── agsam_t_res_nh-200_dagp-0_dnh-200_line-False_dagp-0.001_dlr-0.01_samp-sigmoidproba_loss-fgan_dagl-True_lamb-0.01_filt-0.5_lamb-1e-05_lr-0.01_test-1000_trai-10000_dags-0.5_use_-False └── cyto_full_target.csv ├── sam ├── __init__.py ├── sam.py └── utils │ ├── batchnorm.py │ ├── graph.py │ ├── gumble_utils.py │ ├── linear3d.py │ ├── parlib.py │ ├── test_sigmoid.py │ └── treillis.py ├── setup.py ├── syntren ├── 100_probaComplexInter1 │ ├── syntrenHop1_100_0_data.csv │ ├── syntrenHop1_100_0_target.csv │ ├── syntrenHop1_100_1_data.csv │ ├── syntrenHop1_100_1_target.csv │ ├── syntrenHop1_100_2_data.csv │ ├── syntrenHop1_100_2_target.csv │ ├── syntrenHop1_100_3_data.csv │ ├── syntrenHop1_100_3_target.csv │ ├── syntrenHop1_100_4_data.csv │ ├── syntrenHop1_100_4_target.csv │ ├── syntrenHop1_100_5_data.csv │ ├── syntrenHop1_100_5_target.csv │ ├── syntrenHop1_100_6_data.csv │ ├── syntrenHop1_100_6_target.csv │ ├── syntrenHop1_100_7_data.csv │ ├── syntrenHop1_100_7_target.csv │ ├── syntrenHop1_100_8_data.csv │ ├── syntrenHop1_100_8_target.csv │ ├── syntrenHop1_100_9_data.csv │ └── syntrenHop1_100_9_target.csv └── 20_probaComplexInter1 │ ├── syntrenHop1_20_0_data.csv │ ├── syntrenHop1_20_0_target.csv │ ├── syntrenHop1_20_1_data.csv │ ├── syntrenHop1_20_1_target.csv │ ├── syntrenHop1_20_2_data.csv │ ├── syntrenHop1_20_2_target.csv │ ├── syntrenHop1_20_3_data.csv │ ├── syntrenHop1_20_3_target.csv │ ├── syntrenHop1_20_4_data.csv │ ├── syntrenHop1_20_4_target.csv │ ├── syntrenHop1_20_5_data.csv │ ├── syntrenHop1_20_5_target.csv │ ├── syntrenHop1_20_6_data.csv │ ├── syntrenHop1_20_6_target.csv │ ├── syntrenHop1_20_7_data.csv │ ├── 
syntrenHop1_20_7_target.csv │ ├── syntrenHop1_20_8_data.csv │ ├── syntrenHop1_20_8_target.csv │ ├── syntrenHop1_20_9_data.csv │ └── syntrenHop1_20_9_target.csv ├── test_sam.py └── train_graphs ├── artificial ├── NN-train-20_data.csv ├── NN-train-20_target.csv ├── gp_add-train-20_data.csv ├── gp_add-train-20_target.csv ├── gp_mix-train-20_data.csv ├── gp_mix-train-20_target.csv ├── linear-train-20_data.csv ├── linear-train-20_target.csv ├── polynomial-train-20_data.csv ├── polynomial-train-20_target.csv ├── sigmoid_add-train-20_data.csv ├── sigmoid_add-train-20_target.csv ├── sigmoid_mix-train-20_data.csv └── sigmoid_mix-train-20_target.csv ├── generate_graphs.py └── syntren ├── syntren_1_data.csv └── syntren_1_target.csv /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.xml 6 | *~ 7 | \#* 8 | *\# 9 | .idea 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | env/ 17 | build/ 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib/ 24 | lib64/ 25 | parts/ 26 | sdist/ 27 | var/ 28 | wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | _build/ 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *,cover 53 | .hypothesis/ 54 | 55 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "datasets"] 2 | path = datasets 3 | url = https://github.com/Diviyan-Kalainathan/SAM_datasets.git 4 | -------------------------------------------------------------------------------- /All_scores_100.ods: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diviyank/SAM/75b00b8354bd02f9a0a2613a22e781daf546645b/All_scores_100.ods -------------------------------------------------------------------------------- /All_scores_20.ods: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diviyank/SAM/75b00b8354bd02f9a0a2613a22e781daf546645b/All_scores_20.ods -------------------------------------------------------------------------------- /CausalGEN/ratio_biotic_clusterGenie3_target.csv: -------------------------------------------------------------------------------- 1 | "Cause","Effect" 2 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | Copyright 2018 Diviyan Kalainathan 179 | 180 | Licensed under the Apache License, Version 2.0 (the "License"); 181 | you may not use this file except in compliance with the License. 
## NB: This code is the code of the V2 of SAM; for the latest (V3), please check the CDT package (https://github.com/FenTechSolutions/CausalDiscoveryToolbox) for the time being
# As the model is small, we recommend using 2 jobs on each GPU
3.317390754818916321e-02,1.389614026993513107e-02,4.315310716629028320e-01,0.000000000000000000e+00,5.220479369163513184e-01,8.223679661750793457e-02,4.796565510332584381e-03,8.596456795930862427e-02,5.180053785443305969e-02,8.221364766359329224e-02,4.581386968493461609e-02 5 | 1.774623407982289791e-03,6.083586812019348145e-02,9.206546097993850708e-02,1.617215275764465332e-01,0.000000000000000000e+00,8.722565323114395142e-02,2.830184064805507660e-03,4.635705798864364624e-02,5.819371715188026428e-02,3.410364035516977310e-03,2.789567224681377411e-02 6 | 2.920021116733551025e-02,3.926421329379081726e-02,2.245445176959037781e-02,6.066459231078624725e-03,5.773724988102912903e-02,0.000000000000000000e+00,3.854452967643737793e-01,3.055457770824432373e-01,5.874022841453552246e-02,3.408959135413169861e-03,5.889128427952528000e-03 7 | 8.357542008161544800e-02,1.338996589183807373e-01,2.478347420692443848e-01,7.605372369289398193e-02,3.423921167850494385e-01,6.020032763481140137e-01,0.000000000000000000e+00,1.834429502487182617e-01,7.734293490648269653e-02,6.910391896963119507e-02,9.244306385517120361e-02 8 | 1.504389918409287930e-03,7.131003774702548981e-03,7.256183028221130371e-02,5.480586364865303040e-02,1.789093911647796631e-01,1.007396429777145386e-01,4.750127792358398438e-01,0.000000000000000000e+00,9.020160883665084839e-03,3.356086090207099915e-02,2.536493353545665741e-03 9 | 1.296536438167095184e-02,2.198208123445510864e-02,6.915286183357238770e-02,6.782517582178115845e-02,4.086985066533088684e-02,1.362641900777816772e-01,1.701991856098175049e-01,5.077517405152320862e-02,0.000000000000000000e+00,5.906547307968139648e-01,5.937905311584472656e-01 10 | 8.382007479667663574e-02,8.113323897123336792e-02,1.470714509487152100e-01,3.821973502635955811e-02,1.848944425582885742e-01,1.941089779138565063e-01,7.279218733310699463e-02,1.400239616632461548e-01,3.137533068656921387e-01,0.000000000000000000e+00,3.051249384880065918e-01 11 | 
2.359800413250923157e-02,6.141897290945053101e-02,1.819606572389602661e-01,1.850055297836661339e-03,2.461204379796981812e-01,1.528254002332687378e-01,1.134768873453140259e-01,4.231981933116912842e-02,9.413883090019226074e-02,8.859904110431671143e-02,0.000000000000000000e+00 12 | -------------------------------------------------------------------------------- /cyto/cyto_full_target.csv: -------------------------------------------------------------------------------- 1 | "Cause","Effect" 2 | "PIP2","PKC" 3 | "plcg","PIP2" 4 | "PIP3","plcg" 5 | "PIP2","PIP3" 6 | "plcg","PKC" 7 | "PKC","pjnk" 8 | "PKC","P38" 9 | "PKA","P38" 10 | "PKC","praf" 11 | "PKA","praf" 12 | "praf","pmek" 13 | "pmek","p44/42" 14 | "PKC","pmek" 15 | "PKA","pakts473" 16 | "PIP3","pakts473" 17 | "PKA","pmek" 18 | "PKA","p44/42" 19 | "PKA","pjnk" 20 | -------------------------------------------------------------------------------- /sam/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diviyank/SAM/75b00b8354bd02f9a0a2613a22e781daf546645b/sam/__init__.py -------------------------------------------------------------------------------- /sam/sam.py: -------------------------------------------------------------------------------- 1 | """Structural Agnostic Model. 
"""Structural Agnostic Model.

Adversarial learning of causal graphs: one conditional generator per
observed variable is trained against a discriminator, while structural
gates (a sampled adjacency matrix) and functional gates (sampled hidden
neurons) are learned jointly, optionally under a NOTEARS acyclicity
penalty.

Author: Diviyan Kalainathan, Olivier Goudet
Date: 09/3/2018
"""
import os
import numpy as np
import torch as th
import pandas as pd
from torch.utils.data import DataLoader
from tqdm import tqdm
from sklearn.preprocessing import scale
from .utils.linear3d import Linear3D
from .utils.graph import MatrixSampler, notears_constr
from .utils.batchnorm import ChannelBatchNorm1d
from .utils.parlib import parallel_run


class SAM_generators(th.nn.Module):
    """Ensemble of all the generators.

    Each variable is generated from (a gated subset of) the other
    variables plus one exogenous noise input, through per-variable
    slices of shared 3D linear layers.
    """

    def __init__(self, data_shape, nh, skeleton=None, cat_sizes=None,
                 linear=False):
        """Init the model.

        :param data_shape: (batch, nb_variables) shape of the input data
        :param nh: number of hidden units per generator
        :param skeleton: optional (nb_vars, nb_vars) prior adjacency mask
        :param cat_sizes: sizes of categorical variables (stored only;
            unused in this implementation)
        :param linear: if True, each generator is a single linear layer
        """
        super(SAM_generators, self).__init__()
        layers = []
        # Building skeleton
        self.sizes = cat_sizes
        self.linear = linear

        nb_vars = data_shape[1]
        self.nb_vars = nb_vars
        # The mask has one extra row for the noise input; the diagonal
        # is zeroed so that a variable cannot generate itself.
        if skeleton is None:
            skeleton = 1 - th.eye(nb_vars + 1, nb_vars)  # 1 row for noise
        else:
            # BUGFIX: the noise row must be appended along dim 0 (rows)
            # to obtain the same (nb_vars + 1, nb_vars) shape as the
            # default branch; the original `th.cat(..., 1)` raised a
            # size mismatch whenever a skeleton was provided.
            skeleton = th.cat([th.Tensor(skeleton), th.ones(1, nb_vars)], 0)
        if linear:
            self.input_layer = Linear3D((nb_vars, nb_vars + 1, 1))
        else:
            self.input_layer = Linear3D((nb_vars, nb_vars + 1, nh))
            layers.append(ChannelBatchNorm1d(nb_vars, nh))
            layers.append(th.nn.Tanh())
            self.output_layer = Linear3D((nb_vars, nh, 1))

        self.layers = th.nn.Sequential(*layers)
        self.register_buffer('skeleton', skeleton)

    def forward(self, data, noise, adj_matrix, drawn_neurons=None):
        """Forward through all the generators.

        :param data: observational batch, shape (batch, nb_vars)
        :param noise: exogenous noise, shape (batch, nb_vars)
        :param adj_matrix: sampled structural gates, including noise row
        :param drawn_neurons: sampled functional gates (ignored when
            the model is linear)
        :return: generated variables, shape (batch, nb_vars)
        """
        if self.linear:
            output = self.input_layer(data, noise,
                                      adj_matrix * self.skeleton)
        else:
            output = self.output_layer(
                self.layers(self.input_layer(data, noise,
                                             adj_matrix * self.skeleton)),
                drawn_neurons)

        return output.squeeze(2)

    def reset_parameters(self):
        """Reinitialize all learnable parameters."""
        if not self.linear:
            self.output_layer.reset_parameters()
            for layer in self.layers:
                if hasattr(layer, 'reset_parameters'):
                    layer.reset_parameters()
        self.input_layer.reset_parameters()


class SAM_discriminator(th.nn.Module):
    """SAM discriminator.

    A shared MLP that scores either a full observational sample, or a
    sample in which one variable has been substituted by its generated
    counterpart (one score per variable).
    """

    def __init__(self, nfeatures, dnh, hlayers=2, mask=None):
        """Init the discriminator.

        :param nfeatures: number of input variables
        :param dnh: number of hidden units per hidden layer
        :param hlayers: number of hidden layers
        :param mask: optional substitution mask; defaults to identity
            (substitute one variable at a time)
        """
        super(SAM_discriminator, self).__init__()
        self.nfeatures = nfeatures
        layers = []
        layers.append(th.nn.Linear(nfeatures, dnh))
        layers.append(th.nn.BatchNorm1d(dnh))
        layers.append(th.nn.LeakyReLU(.2))
        for i in range(hlayers - 1):
            layers.append(th.nn.Linear(dnh, dnh))
            layers.append(th.nn.BatchNorm1d(dnh))
            layers.append(th.nn.LeakyReLU(.2))

        layers.append(th.nn.Linear(dnh, 1))
        self.layers = th.nn.Sequential(*layers)

        if mask is None:
            mask = th.eye(nfeatures, nfeatures)
        self.register_buffer("mask", mask.unsqueeze(0))

    def forward(self, input, obs_data=None):
        """Score samples.

        With ``obs_data``, return one score per variable, each obtained
        by substituting that variable of the observational data with its
        generated value; without it, score ``input`` directly.
        """
        if obs_data is not None:
            return [self.layers(i) for i in
                    th.unbind(obs_data.unsqueeze(1) * (1 - self.mask)
                              + input.unsqueeze(1) * self.mask, 1)]
        else:
            return self.layers(input)

    def reset_parameters(self):
        """Reinitialize all learnable parameters."""
        for layer in self.layers:
            if hasattr(layer, 'reset_parameters'):
                layer.reset_parameters()


def run_SAM(in_data, skeleton=None, device="cpu",
            train=5000, test=1000,
            batch_size=-1, lr_gen=.001,
            lr_disc=.01, lambda1=0.001, lambda2=0.0000001, nh=None, dnh=None,
            verbose=True, losstype="fgan",
            dagstart=0, dagloss=False,
            dagpenalization=0.05, dagpenalization_increase=0.0,
            dag_threshold=0.5,
            linear=False, hlayers=2):
    """Execute one run of SAM on a dataset.

    :param in_data: pandas.DataFrame of observational data
    :param skeleton: optional prior adjacency matrix (numpy array)
    :param device: torch device to run on ("cpu" or "cuda:x")
    :param train: number of training epochs
    :param test: number of test epochs (causal filters are averaged)
    :param batch_size: batch size (-1: use the full dataset)
    :param lr_gen: learning rate of the generators and samplers
    :param lr_disc: learning rate of the discriminator
    :param lambda1: L1 penalty on the structural gates
    :param lambda2: L1 penalty on the functional gates
    :param nh: number of hidden units of the generators
    :param dnh: number of hidden units of the discriminator
    :param verbose: if True, show a progress bar with losses
    :param losstype: "fgan", "gan" or "mse"
    :param dagstart: fraction of training after which the DAG penalty starts
    :param dagloss: if True, add the NOTEARS acyclicity penalty
    :param dagpenalization: initial weight of the DAG penalty
    :param dagpenalization_increase: per-epoch increase of that weight
    :param dag_threshold: kept for interface compatibility (unused here)
    :param linear: if True, use linear generators
    :param hlayers: number of hidden layers of the discriminator
    :return: (nb_var, nb_var) numpy array of averaged causal filters
    """
    list_nodes = list(in_data.columns)
    data = scale(in_data[list_nodes].values)
    nb_var = len(list_nodes)
    data = data.astype('float32')
    data = th.from_numpy(data).to(device)
    if batch_size == -1:
        batch_size = data.shape[0]
    rows, cols = data.size()
    # Get the list of indexes to ignore
    if skeleton is not None:
        skeleton = th.from_numpy(skeleton.astype('float32'))

    sam = SAM_generators((batch_size, cols), nh, skeleton=skeleton,
                         linear=linear).to(device)

    sam.reset_parameters()
    g_optimizer = th.optim.Adam(list(sam.parameters()), lr=lr_gen)

    if losstype != "mse":
        discriminator = SAM_discriminator(cols, dnh, hlayers).to(device)
        discriminator.reset_parameters()
        d_optimizer = th.optim.Adam(discriminator.parameters(), lr=lr_disc)
        criterion = th.nn.BCEWithLogitsLoss()
    else:
        criterion = th.nn.MSELoss()
        disc_loss = th.zeros(1)

    # Structural gates: a Bernoulli sampler over the adjacency matrix.
    graph_sampler = MatrixSampler(nb_var, mask=skeleton,
                                  gumble=False).to(device)
    graph_sampler.weights.data.fill_(2)
    graph_optimizer = th.optim.Adam(graph_sampler.parameters(), lr=lr_gen)

    # Functional gates: a sampler over hidden neurons (non-linear only).
    if not linear:
        neuron_sampler = MatrixSampler((nh, nb_var), mask=False,
                                       gumble=True).to(device)
        neuron_optimizer = th.optim.Adam(list(neuron_sampler.parameters()),
                                         lr=lr_gen)

    _true = th.ones(1).to(device)
    _false = th.zeros(1).to(device)
    output = th.zeros(nb_var, nb_var).to(device)

    noise = th.randn(batch_size, nb_var).to(device)
    noise_row = th.ones(1, nb_var).to(device)
    data_iterator = DataLoader(data, batch_size=batch_size,
                               shuffle=True, drop_last=True)

    # RUN
    if verbose:
        pbar = tqdm(range(train + test))
    else:
        pbar = range(train + test)
    for epoch in pbar:
        for i_batch, batch in enumerate(data_iterator):
            g_optimizer.zero_grad()
            graph_optimizer.zero_grad()

            if losstype != "mse":
                d_optimizer.zero_grad()

            if not linear:
                neuron_optimizer.zero_grad()

            # Train the discriminator.
            # During the test phase (epoch > train) the last drawn gates
            # are kept fixed while filters keep being averaged.
            if not epoch > train:
                drawn_graph = graph_sampler()
                if not linear:
                    drawn_neurons = neuron_sampler()
                else:
                    drawn_neurons = None
            noise.normal_()
            generated_variables = sam(batch, noise,
                                      th.cat([drawn_graph, noise_row], 0),
                                      drawn_neurons)

            if losstype == "mse":
                gen_loss = criterion(generated_variables, batch)
            else:
                disc_vars_d = discriminator(generated_variables.detach(), batch)
                disc_vars_g = discriminator(generated_variables, batch)
                true_vars_disc = discriminator(batch)

                if losstype == "gan":
                    disc_loss = sum([criterion(gen, _false.expand_as(gen)) for gen in disc_vars_d]) / nb_var \
                        + criterion(true_vars_disc, _true.expand_as(true_vars_disc))
                    # Gen losses per generator: multiplied by the number of channels
                    gen_loss = sum([criterion(gen,
                                              _true.expand_as(gen))
                                    for gen in disc_vars_g])
                elif losstype == "fgan":
                    disc_loss = sum([th.mean(th.exp(gen - 1)) for gen in disc_vars_d]) / nb_var - th.mean(true_vars_disc)
                    gen_loss = -sum([th.mean(th.exp(gen - 1)) for gen in disc_vars_g])

                disc_loss.backward()
                d_optimizer.step()

            filters = graph_sampler.get_proba()

            struc_loss = lambda1 * drawn_graph.sum()

            func_loss = 0 if linear else lambda2 * drawn_neurons.sum()
            regul_loss = struc_loss + func_loss

            if dagloss and epoch > train * dagstart:
                dag_constraint = notears_constr(filters * filters)
                loss = gen_loss + regul_loss + (dagpenalization +
                                                (epoch - train * dagstart)
                                                * dagpenalization_increase) * dag_constraint
            else:
                loss = gen_loss + regul_loss
            if verbose and epoch % 20 == 0 and i_batch == 0:
                pbar.set_postfix(gen=gen_loss.item() / cols,
                                 disc=disc_loss.item(),
                                 regul_loss=regul_loss.item(),
                                 tot=loss.item())

            if epoch < train + test - 1:
                loss.backward(retain_graph=True)

            # Average the filter probabilities over the test epochs.
            if epoch >= train:
                output.add_(filters.data)

            g_optimizer.step()
            graph_optimizer.step()
            if not linear:
                neuron_optimizer.step()

    return output.div_(test).cpu().numpy()


class SAM(object):
    """Structural Agnostic Model."""

    def __init__(self, lr=0.01, dlr=0.01, lambda1=0.01, lambda2=0.00001,
                 nh=200, dnh=200,
                 train_epochs=10000, test_epochs=1000, batchsize=-1,
                 losstype="fgan", dagstart=0.5, dagloss=True, dagpenalization=0,
                 dagpenalization_increase=0.001, linear=False, hlayers=2,
                 dag_threshold=0.5):
        """Init and parametrize the SAM model.

        :param lr: Learning rate of the generators
        :param dlr: Learning rate of the discriminator
        :param lambda1: L1 penalization on the structural gates
        :param lambda2: L1 penalization on the functional gates
        :param nh: Number of hidden units in the generators' hidden layers
        :param dnh: Number of hidden units in the discriminator's hidden layers
        :param train_epochs: Number of training epochs
        :param test_epochs: Number of test epochs (saving and averaging
            the causal filters)
        :param batchsize: Size of the batches to be fed to the SAM model
        :param losstype: Adversarial loss: "fgan", "gan" or "mse"
        :param dagstart: Fraction of training after which the DAG penalty starts
        :param dagloss: If True, add the NOTEARS acyclicity penalty
        :param dagpenalization: Initial weight of the DAG penalty
        :param dagpenalization_increase: Per-epoch increase of that weight
        :param linear: If True, use linear generators
        :param hlayers: Number of hidden layers of the discriminator
        :param dag_threshold: Threshold on the causal filters.
            BUGFIX: this attribute was read by ``predict`` (``nruns=1``)
            but never set, which raised ``AttributeError``; it is now a
            regular, backward-compatible constructor parameter.
        """
        super(SAM, self).__init__()
        self.lr = lr
        self.dlr = dlr
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.nh = nh
        self.dnh = dnh
        self.train = train_epochs
        self.test = test_epochs
        self.batchsize = batchsize
        self.losstype = losstype
        self.dagstart = dagstart
        self.dagloss = dagloss
        self.dagpenalization = dagpenalization
        self.dagpenalization_increase = dagpenalization_increase
        self.linear = linear
        self.hlayers = hlayers
        self.dag_threshold = dag_threshold

    def predict(self, data, skeleton=None, mixed_data=False, nruns=6, njobs=1,
                gpus=0, verbose=True, log=None):
        """Execute SAM on a dataset given a skeleton or not.

        :param data: Observational data for estimation of causal
            relationships by SAM
        :param skeleton: A priori knowledge about the causal relationships
            as an adjacency matrix. Can be fed either directed or
            undirected links.
        :param mixed_data: Kept for interface compatibility (unused here)
        :param nruns: Number of runs to be made for causal estimation.
            Recommended: >5 for optimal performance.
        :param njobs: Number of jobs to be run in parallel.
            Recommended: 1 if no GPU available, 2*number of GPUs else.
        :param gpus: Number of available GPUs for the algorithm.
        :param verbose: verbose mode
        :param log: Path prefix of per-run result files; existing files
            are reloaded so interrupted experiments can be resumed.
        :return: Adjacency matrix (A) of the graph estimated by SAM,
            A[i,j] is the term of the ith variable for the jth generator.
        """
        assert nruns > 0
        if nruns == 1:
            return run_SAM(data, skeleton=skeleton,
                           lr_gen=self.lr,
                           lr_disc=self.dlr,
                           verbose=verbose,
                           lambda1=self.lambda1, lambda2=self.lambda2,
                           nh=self.nh, dnh=self.dnh,
                           train=self.train,
                           test=self.test, batch_size=self.batchsize,
                           dagstart=self.dagstart,
                           dagloss=self.dagloss,
                           dagpenalization=self.dagpenalization,
                           dagpenalization_increase=self.dagpenalization_increase,
                           losstype=self.losstype,
                           dag_threshold=self.dag_threshold,
                           linear=self.linear,
                           hlayers=self.hlayers,
                           device='cuda:0' if gpus else 'cpu')
        else:
            list_out = []
            # Reload results of previously completed runs, if any.
            if log is not None:
                idx = 0
                while os.path.isfile(log + str(idx)):
                    list_out.append(np.loadtxt(log + str(idx), delimiter=","))
                    idx += 1
            results = parallel_run(run_SAM, data, skeleton=skeleton,
                                   nruns=nruns - len(list_out),
                                   njobs=njobs, gpus=gpus, lr_gen=self.lr,
                                   lr_disc=self.dlr,
                                   verbose=verbose,
                                   lambda1=self.lambda1, lambda2=self.lambda2,
                                   nh=self.nh, dnh=self.dnh,
                                   train=self.train,
                                   test=self.test, batch_size=self.batchsize,
                                   dagstart=self.dagstart,
                                   dagloss=self.dagloss,
                                   dagpenalization=self.dagpenalization,
                                   dagpenalization_increase=self.dagpenalization_increase,
                                   losstype=self.losstype,
                                   dag_threshold=self.dag_threshold,
                                   linear=self.linear,
                                   hlayers=self.hlayers)
            list_out.extend(results)
            # Discard runs that diverged (NaN filters).
            list_out = [i for i in list_out if not np.isnan(i).any()]
            try:
                assert len(list_out) > 0
            except AssertionError as e:
                print("All solutions contain NaNs")
                raise(e)
            return sum(list_out) / len(list_out)
class ChannelBatchNorm1d(_BatchNorm):
    r"""Batch normalization over flattened (channel, feature) pairs.

    Behaves like :class:`torch.nn.BatchNorm1d` with
    ``num_channels * num_features`` independent features: the 2D or 3D input
    is flattened to ``(batch, num_channels * num_features)``, normalized with
    the usual batch-norm statistics and affine parameters, then reshaped to
    ``(batch, num_channels, num_features)``.  This gives the same behaviour
    whether the data arrives 2D or 3D.
    """

    def __init__(self, num_channels, num_features, *args, **kwargs):
        # One batch-norm feature per (channel, feature) pair.
        super(ChannelBatchNorm1d, self).__init__(
            num_channels * num_features, *args, **kwargs)
        self.num_channels = num_channels
        self.num_features = num_features

    def _check_input_dim(self, input):
        # Only (N, C) and (N, C, L) layouts are supported.
        if input.dim() not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(input.dim()))

    def forward(self, input):
        flat = input.contiguous().view(-1, self.num_channels * self.num_features)
        normed = super(ChannelBatchNorm1d, self).forward(flat)
        return normed.view(-1, self.num_channels, self.num_features)
5 | Implementation by Diviyan Kalainathan 6 | """ 7 | import math 8 | import torch as th 9 | from .gumble_utils import gumbel_softmax, gumbel_sigmoid 10 | 11 | class GraphSampler(th.nn.Module): 12 | """Ensemble of all the generators.""" 13 | 14 | def __init__(self, n_noises, gnh, graph_size, mask=None): 15 | """Init the model.""" 16 | super(GraphSampler, self).__init__() 17 | 18 | if not isinstance(graph_size, (list, tuple)): 19 | self.graph_size = (graph_size, graph_size) 20 | else: 21 | self.graph_size = graph_size 22 | 23 | if mask is None: 24 | mask = 1 - th.eye(*self.graph_size) 25 | if not (type(mask)==bool and not mask): 26 | self.register_buffer("mask", mask) 27 | 28 | ones_tensor = th.ones(*self.graph_size) 29 | self.register_buffer("ones_tensor", ones_tensor) 30 | 31 | zeros_tensor = th.zeros(*self.graph_size) 32 | self.register_buffer("zeros_tensor", zeros_tensor) 33 | 34 | 35 | self.register_buffer("noise_graph_sampler", th.Tensor(1, n_noises)) 36 | 37 | layers = [] 38 | layers.append(th.nn.Linear(n_noises, gnh)) 39 | #layers.append(th.nn.BatchNorm1d(gnh)) 40 | layers.append(th.nn.LeakyReLU(.2)) 41 | layers.append(th.nn.Linear(gnh, gnh)) 42 | #layers.append(th.nn.BatchNorm1d(gnh)) 43 | layers.append(th.nn.LeakyReLU(.2)) 44 | layers.append(th.nn.Linear(gnh, gnh)) 45 | #layers.append(th.nn.BatchNorm1d(gnh)) 46 | layers.append(th.nn.LeakyReLU(.2)) 47 | # layers.append(th.nn.Linear(gnh, gnh)) 48 | # layers.append(th.nn.BatchNorm1d(gnh)) 49 | # layers.append(th.nn.LeakyReLU(.2)) 50 | layers.append(th.nn.Linear(gnh, graph_size*graph_size)) 51 | self.layers = th.nn.Sequential(*layers) 52 | 53 | self.reset_parameters() 54 | 55 | def forward(self): 56 | 57 | self.noise_graph_sampler.normal_() 58 | 59 | output_sampler = self.layers(self.noise_graph_sampler).view(*self.graph_size) 60 | 61 | sample_soft = th.sigmoid(output_sampler) 62 | sample_hard = th.where(output_sampler > 0, self.ones_tensor, self.zeros_tensor) 63 | 64 | #print(output_sampler* self.mask) 65 | 
class MatrixSampler(th.nn.Module):
    """Matrix Sampler, following a Bernoulli distribution. Differenciable.

    Each entry is an independent Bernoulli variable with probability
    sigmoid(2 * weight); samples are drawn with a Gumbel reparametrization
    so gradients flow back to the logits.
    """

    def __init__(self, graph_size, mask=None, gumble=False):
        super(MatrixSampler, self).__init__()
        if isinstance(graph_size, (list, tuple)):
            self.graph_size = graph_size
        else:
            self.graph_size = (graph_size, graph_size)
        self.weights = th.nn.Parameter(th.FloatTensor(*self.graph_size))
        self.weights.data.zero_()
        if mask is None:
            # Default: forbid self-loops.
            mask = 1 - th.eye(*self.graph_size)
        if not (type(mask) == bool and not mask):
            self.register_buffer("mask", mask)
        self.gumble = gumble

        self.register_buffer("ones_tensor", th.ones(*self.graph_size))
        self.register_buffer("zeros_tensor", th.zeros(*self.graph_size))

    def forward(self, tau=1, drawhard=True):
        """Return a sampled graph."""
        if self.gumble:
            stacked = th.stack([self.weights.view(-1), -self.weights.view(-1)], 1)
            drawn_proba = gumbel_softmax(stacked, tau=tau,
                                         hard=drawhard)[:, 0].view(*self.graph_size)
        else:
            drawn_proba = gumbel_sigmoid(2 * self.weights, self.ones_tensor,
                                         self.zeros_tensor, tau=tau, hard=drawhard)
        if hasattr(self, "mask"):
            return self.mask * drawn_proba
        return drawn_proba

    def get_proba(self):
        proba = th.sigmoid(2 * self.weights)
        return proba * self.mask if hasattr(self, "mask") else proba

    def set_skeleton(self, mask):
        self.register_buffer("mask", mask)
class MatrixSampler3(th.nn.Module):
    """Matrix Sampler, following a Bernoulli distribution. Differenciable.

    Low-rank variant: the logit matrix is factored as
    ``in_weights @ out_weights`` with rank ``k`` (default:
    ``graph_size - 1``).  Samples use either the Gumbel-softmax trick or a
    straight-through thresholded sigmoid.
    """

    def __init__(self, graph_size, mask=None, gumbel=True, k=None):
        super(MatrixSampler3, self).__init__()
        if isinstance(graph_size, (list, tuple)):
            self.graph_size = graph_size
        else:
            self.graph_size = (graph_size, graph_size)
        self.k = self.graph_size[0] - 1 if k is None else k
        self.in_weights = th.nn.Parameter(th.FloatTensor(self.graph_size[0], self.k))
        self.out_weights = th.nn.Parameter(th.FloatTensor(self.k, self.graph_size[1]))
        self.in_weights.data.normal_()
        self.out_weights.data.normal_()
        self.gumbel_softmax = gumbel
        if not gumbel:
            # Constant templates for the straight-through hard threshold.
            self.register_buffer("ones_tensor", th.ones(*self.graph_size))
            self.register_buffer("zeros_tensor", th.zeros(*self.graph_size))

        if mask is None:
            mask = 1 - th.eye(*self.graph_size)
        if not (type(mask) == bool and not mask):
            self.register_buffer("mask", mask)

    def forward(self, tau=1, drawhard=True):
        """Return a sampled graph."""
        logits = self.in_weights @ self.out_weights
        if self.gumbel_softmax:
            stacked = th.stack([logits.view(-1), -logits.view(-1)], 1)
            out_sample = gumbel_softmax(stacked, tau=tau,
                                        hard=drawhard)[:, 0].view(*self.graph_size)
        else:
            soft = th.sigmoid(logits)
            hard = th.where(logits > 0, self.ones_tensor, self.zeros_tensor)
            # Straight-through: hard value forward, soft gradient backward.
            out_sample = hard - soft.data + soft

        if hasattr(self, "mask"):
            return self.mask * out_sample
        return out_sample

    def get_proba(self):
        proba = th.sigmoid(2 * (self.in_weights @ self.out_weights))
        return proba * self.mask if hasattr(self, "mask") else proba

    def set_skeleton(self, mask):
        self.register_buffer("mask", mask)
def notears_constr(adj_m, max_pow=None):
    """No Tears for binary adjacency matrixes.

    Accumulates the traces of successive scaled matrix powers and stops
    early as soon as a power vanishes or ``max_pow`` terms were summed;
    the result is zero iff no cycle (up to length ``max_pow``) exists.

    If adj_m is non binary: give adj_m * adj_m as input (Hadamard product).
    """
    if max_pow is None:
        max_pow = adj_m.shape[1]
    powers = [adj_m]
    # powers[k] == A^(k+1) / k!, built incrementally.
    while powers[-1].sum() > 0 and len(powers) < max_pow:
        powers.append(powers[-1] @ adj_m / len(powers))
    return sum(mat.diag().sum() for mat in powers)
26 | (https://github.com/pytorch/pytorch/blob/e4eee7c2cf43f4edba7a14687ad59d3ed61d9833/torch/nn/functional.py) 27 | Draw a sample from the Gumbel-Softmax distribution 28 | based on 29 | https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb 30 | (MIT license) 31 | """ 32 | dims = logits.dim() 33 | gumbel_noise = _sample_gumbel(logits.size(), eps=eps, out=logits.data.new()) 34 | y = logits + gumbel_noise 35 | return th.softmax(y / tau, dims-1) 36 | 37 | 38 | def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10): 39 | """ 40 | Implementation of pytorch. 41 | (https://github.com/pytorch/pytorch/blob/e4eee7c2cf43f4edba7a14687ad59d3ed61d9833/torch/nn/functional.py) 42 | Sample from the Gumbel-Softmax distribution and optionally discretize. 43 | Args: 44 | logits: `[batch_size, n_class]` unnormalized log-probs 45 | tau: non-negative scalar temperature 46 | hard: if ``True``, take `argmax`, but differentiate w.r.t. soft sample y 47 | Returns: 48 | [batch_size, n_class] sample from the Gumbel-Softmax distribution. 
def _sample_logistic(shape, out=None):
    """Draw Logistic(0, 1) noise of the given shape.

    When ``out`` is given, it is resized and filled in place (reuses the
    storage and keeps the type/device of ``out``).
    """
    if out is None:
        u = th.rand(shape)
    else:
        u = out.resize_(shape).uniform_()
    # Inverse CDF of the logistic distribution applied to U ~ Uniform(0, 1).
    return th.log(u) - th.log(1 - u)


def _sigmoid_sample(logits, tau=1):
    """
    Implementation of Bernouilli reparametrization based on Maddison et al. 2017

    Perturbs the logits with logistic noise and squashes with a
    temperature-``tau`` sigmoid, yielding a differentiable relaxed sample.
    """
    noise = _sample_logistic(logits.size(), out=logits.data.new())
    return th.sigmoid((logits + noise) / tau)


def gumbel_sigmoid(logits, ones_tensor, zeros_tensor, tau=1, hard=False):
    """Differentiable Bernoulli sample for each entry of ``logits``.

    :param ones_tensor: template of ones, same shape as ``logits``
    :param zeros_tensor: template of zeros, same shape as ``logits``
    :param tau: relaxation temperature
    :param hard: if True, threshold to {0, 1} with a straight-through gradient
    """
    relaxed = _sigmoid_sample(logits, tau=tau)
    if not hard:
        return relaxed
    discrete = th.where(relaxed > 0.5, ones_tensor, zeros_tensor)
    # Straight-through: discrete value forward, relaxed gradient backward.
    return discrete.data - relaxed.data + relaxed
if __name__ == "__main__":
    # Monte-Carlo sanity check: the empirical frequencies of the samplers
    # should converge to the analytic probabilities sigmoid(2 * logits).
    logits = th.tensor([0.1, 0.2, 0.3, 2])
    stacked_logit = th.stack([logits, -logits], 1)

    probability = th.sigmoid(2 * logits)

    print("probability")
    print(probability)
    print(th.softmax(stacked_logit, dim=1))

    nbrun = 100000

    print("Test gumble softmax")
    output_gumble_softmax = th.zeros(stacked_logit.shape)
    for i in range(nbrun):
        output_gumble_softmax.add_(gumbel_softmax(stacked_logit, eps=0))
    print(output_gumble_softmax.div_(nbrun))

    # Fix: the original called gumbel_sigmoid_v1 / gumbel_sigmoid_v2, which
    # are commented out above and raised NameError; exercise the live
    # gumbel_sigmoid implementation instead.
    ones_tensor = th.ones(logits.shape)
    zeros_tensor = th.zeros(logits.shape)

    print("Test gumble sigmoid (soft)")
    output_soft = th.zeros(logits.shape)
    for i in range(nbrun):
        output_soft.add_(gumbel_sigmoid(logits, ones_tensor, zeros_tensor))
    print(output_soft.div_(nbrun))

    print("Test gumble sigmoid (hard)")
    output_hard = th.zeros(logits.shape)
    for i in range(nbrun):
        output_hard.add_(gumbel_sigmoid(logits, ones_tensor, zeros_tensor, hard=True))
    print(output_hard.div_(nbrun))
def functional_linear3d(input, weight, bias=None):
    r"""
    Apply one linear transformation per channel: :math:`y_c = x_c A_c + b_c`.
    Shape:
        - Input: :math:`(N, C, in\_features)`
        - Weight: :math:`(C, in\_features, out\_features)`
        - Bias: :math:`(C, out\_features)`
        - Output: :math:`(N, C, out\_features)`
    """
    # (N, C, in) -> (C, N, in) so each channel hits its own weight matrix.
    result = input.transpose(0, 1).matmul(weight)
    if bias is not None:
        result += bias.unsqueeze(1)
    return result.transpose(0, 1)


class Linear3D(th.nn.Module):
    r"""Channel-wise linear layer: ``channels`` independent affine maps.

    Args:
        sizes: ``(channels, in_features, out_features)``
        bias: If set to False, the layer will not learn an additive bias.
            Default: ``True``
    Shape:
        - Input: :math:`(N, channels, in\_features)`, or :math:`(N, in\_features)`
          which is broadcast (optionally augmented with a noise column) to
          every channel
        - Output: :math:`(N, channels, out\_features)`
    Attributes:
        weight: learnable weights of shape `(channels, in_features, out_features)`
        bias: learnable bias of shape `(channels, out_features)`
    """

    def __init__(self, sizes, bias=True):
        super(Linear3D, self).__init__()
        self.channels = sizes[0]
        self.in_features = sizes[1]
        self.out_features = sizes[2]
        self.weight = Parameter(th.Tensor(self.channels,
                                          self.in_features,
                                          self.out_features))
        if bias:
            self.bias = Parameter(th.Tensor(self.channels, self.out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Fan-in based uniform init, as in torch.nn.Linear.
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, noise=None, adj_matrix=None):
        if input.dim() == 2:
            if noise is None:
                # Replicate the same 2D input across all channels.
                input = input.unsqueeze(1).expand(
                    [input.shape[0], self.channels, self.in_features])
            else:
                # Last feature of each channel is its own noise column.
                expanded = input.unsqueeze(1).expand(
                    [input.shape[0], self.channels, self.in_features - 1])
                input = th.cat([expanded, noise.unsqueeze(2)], 2)
        if adj_matrix is not None:
            # Column j of adj_matrix selects the inputs of channel j.
            input = input * adj_matrix.t().unsqueeze(0)

        return functional_linear3d(input, self.weight, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
def worker_subprocess(function, devices, lockd, results, lockr,
                      pids, lockp, args, kwargs, *others):
    """Run ``function`` on the first device that becomes available.

    Blocks until a device can be popped from the shared ``devices`` pool,
    executes ``function(*args, **kwargs, device=device)``, then returns the
    device to the pool and records the output and this worker's pid in the
    shared manager lists.
    """
    device = None
    while device is None:
        with lockd:
            try:
                device = devices.pop()
            except IndexError:
                pass
        if device is None:
            # Fix: back off only while waiting for a device; the original
            # slept one extra second even after successfully acquiring one.
            sleep(1)
    output = function(*args, **kwargs, device=device)
    with lockd:
        devices.append(device)
    with lockr:
        results.append(output)
    with lockp:
        pids.append(os.getpid())


def parallel_run(function, *args, nruns=1, njobs=1, gpus=1, **kwargs):
    r""" Multiprocessed execution of a function with parameters, with GPU management.
    This function is useful when the user wants to execute a bootstrap on a
    function on GPU devices, as joblib does not include such feature.
    Args:
        function (function): Function to execute.
        \*args: arguments going to be fed to the function.
        nruns (int): Total number of executions of the function.
        njobs (int): Number of parallel executions (defaults to ``cdt.SETTINGS.NJOBS``).
        gpus (int): Number of GPU devices allocated to the job (defaults to ``cdt.SETTINGS.GPU``)
        \**kwargs: Keyword arguments going to be fed to the function.
    Returns:
        list: concatenated list of outputs of executions. The order of elements
        does not correspond to the initial order.
    """
    if gpus == 0 and njobs > 1:
        # CPU-only fan-out: joblib handles it, no device pool needed.
        return Parallel(n_jobs=njobs)(delayed(function)(*args, **kwargs)
                                      for i in range(nruns))
    manager = Manager()
    # Pool of njobs device slots; workers busy-wait on it, which bounds
    # the effective concurrency even though nruns processes are spawned.
    devices = manager.list([f'cuda:{i%gpus}' if gpus != 0
                            else 'cpu' for i in range(njobs)])
    results = manager.list()
    pids = manager.list()
    lockd = manager.Lock()
    lockr = manager.Lock()
    lockp = manager.Lock()
    poll = [mp.Process(target=worker_subprocess,
                       args=(function, devices,
                             lockd, results, lockr,
                             pids, lockp, args,
                             kwargs))
            for i in range(nruns)]
    for p in poll:
        p.start()
    for p in poll:
        p.join()

    return list(results)
def decompress_list(l):
    """Flatten one level of nesting out of a list of paths.

    A list whose elements are ints is returned unchanged (it is a single
    path); lists of paths are concatenated into a single flat list of paths.
    """
    out = []
    for i in l:
        if type(i) == int:
            out = l
            break
        if type(i) == list:
            if type(i[0]) == list:
                for j in i:
                    out.append(j)
            else:
                out.append(i)
    return out


def treillis(adj_m, a, b, path=None, maxlength=5):
    """ Find all the acyclic paths between vars.

    :param adj_m: adjacency matrix; nonzero entry (i, j) denotes an edge i -> j
    :param a: source node index
    :param b: target node index
    :param path: partial path built so far (internal recursion parameter)
    :param maxlength: maximum number of nodes in a path
    :return: list of paths (lists of node indexes), a single path, or None
    """
    # Fix: the original declared a mutable default argument (path=[]),
    # which is shared across calls; use None as the sentinel instead.
    if path is None:
        path = []
    if len(path) != 0:
        cause = path[-1]
        if cause == b:
            return path
    else:
        cause = a
        path = [a]

    output = []

    for i in np.nonzero(adj_m[cause, :])[0]:
        if i not in path and len(path) < maxlength:
            output.append(treillis(adj_m, a, b, path + [i], maxlength))
    if len(output) == 0:
        return None
    output = [j for j in output if j is not None]

    return decompress_list(output) if len(output) > 0 else None


def compute_total_effect(adj_m, gradient, maxlength=5):
    """Aggregate per-edge gradients into total causal effects.

    For every pair (i, j), sums over all acyclic paths from i to j the
    product of the edge-wise gradients along the path.

    :param adj_m: (row, col) adjacency matrix
    :param gradient: (batch, row, col) per-sample edge gradients
    :return: (batch, row, col) array of total effects
    """
    row, col = adj_m.shape

    total_effect_m = np.zeros((gradient.shape[0], row, col))

    for i in range(adj_m.shape[0]):
        for j in range(adj_m.shape[1]):

            all_path = treillis(adj_m, i, j, [], maxlength)

            total_effect = np.zeros((gradient.shape[0], 1))

            if all_path is not None:
                for path in all_path:

                    path_effect = np.ones((gradient.shape[0], 1))

                    for k in range(len(path) - 1):

                        cause = path[k]
                        effect = path[k + 1]

                        path_effect = path_effect * np.reshape(
                            gradient[:, cause, effect], (gradient.shape[0], 1))

                    total_effect += path_effect

            # Fix: the original assigned list(total_effect) — a (batch, 1)
            # shaped value — into a (batch,) slice, which NumPy cannot
            # broadcast; take the column explicitly.
            total_effect_m[:, i, j] = total_effect[:, 0]

    return total_effect_m
license='Apache 2.0', 20 | packages=['gsam']) 21 | 22 | 23 | if __name__ == '__main__': 24 | setup_package() 25 | -------------------------------------------------------------------------------- /syntren/100_probaComplexInter1/syntrenHop1_100_0_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | lacI,lacZYA 3 | crp,lacZYA 4 | crp,ebgAC 5 | crp,epd_pgk 6 | crp,glpD 7 | crp,galETKM 8 | crp,dctA 9 | crp,ppiA 10 | crp,fadL 11 | crp,focA_pflB 12 | crp,tdcABCDEFG 13 | crp,caiTABCDE 14 | crp,gntT 15 | crp,tsx 16 | crp,caiF 17 | crp,glgCAP 18 | crp,nagE 19 | crp,dcuB_fumB 20 | crp,araJ 21 | crp,melAB 22 | crp,malXY 23 | crp,proP 24 | crp,fucAO 25 | crp,glnALG 26 | crp,udp 27 | crp,fixABCX 28 | crp,ansB 29 | crp,yiaKLMNOPQRS 30 | crp,nagBACD 31 | crp,nupG 32 | crp,melR 33 | crp,glpACB 34 | crp,dsdXA 35 | crp,rhaBAD 36 | crp,malI 37 | crp,acs 38 | crp,ompA 39 | crp,fucPIKUR 40 | crp,malS 41 | crp,malEFG 42 | crp,cyaA 43 | crp,sdhCDAB_b0725_sucABCD 44 | crp,araC 45 | crp,malT 46 | crp,speC 47 | crp,deoCABD 48 | crp,aldB 49 | crp,yhfA 50 | crp,glpFK 51 | crp,ptsHI_crr 52 | fadR,fadL 53 | fadR,fadBA 54 | fadR,iclMR 55 | caiF,caiTABCDE 56 | caiF,fixABCX 57 | narL,focA_pflB 58 | narL,caiF 59 | narL,frdABCD 60 | narL,dcuB_fumB 61 | narL,fdnGHI 62 | narL,narGHJI 63 | narL,torCAD 64 | narL,nirBDC_cysG 65 | narL,adhE 66 | narL,nuoABCEFGHIJKLMN 67 | narL,narK 68 | narL,dmsABC 69 | arcA,dctA 70 | arcA,focA_pflB 71 | arcA,aceBAK 72 | arcA,mdh 73 | arcA,fadBA 74 | arcA,betIBA 75 | arcA,glpACB 76 | arcA,icdA 77 | arcA,sdhCDAB_b0725_sucABCD 78 | arcA,nuoABCEFGHIJKLMN 79 | arcA,cyoABCDE 80 | arcA,sodA 81 | arcA,glcDEFGB 82 | arcA,fumA 83 | FruR,aceBAK 84 | FruR,icdA 85 | FruR,pykF 86 | FruR,adhE 87 | FruR,ptsHI_crr 88 | glnALG,glnHPQ 89 | glnALG,nac 90 | iclMR,aceBAK 91 | iclMR,acs 92 | nagBACD,nagE 93 | nagBACD,glmUS 94 | melR,melAB 95 | ebgR,ebgAC 96 | fnr,focA_pflB 97 | fnr,tdcABCDEFG 98 | fnr,caiF 99 | fnr,arcA 100 | 
fnr,frdABCD 101 | fnr,dcuB_fumB 102 | fnr,fdnGHI 103 | fnr,ansB 104 | fnr,glpACB 105 | fnr,aspA 106 | fnr,narGHJI 107 | fnr,acs 108 | fnr,icdA 109 | fnr,nirBDC_cysG 110 | fnr,sdhCDAB_b0725_sucABCD 111 | fnr,nuoABCEFGHIJKLMN 112 | fnr,cyoABCDE 113 | fnr,narK 114 | fnr,dmsABC 115 | himA,focA_pflB 116 | himA,tdcABCDEFG 117 | himA,caiTABCDE 118 | himA,aceBAK 119 | himA,ompR_envZ 120 | himA,narGHJI 121 | himA,ecpD_htrE 122 | himA,glnHPQ 123 | himA,carAB 124 | himA,nuoABCEFGHIJKLMN 125 | himA,sodA 126 | himA,glcDEFGB 127 | himA,narK 128 | himA,osmE 129 | himA,pspABCDE 130 | ompR_envZ,fadL 131 | ompR_envZ,csgBA 132 | ompR_envZ,csgDEFG 133 | malI,malXY 134 | fucPIKUR,fucAO 135 | yjdHG,dctA 136 | yjdHG,frdABCD 137 | yjdHG,dcuB_fumB 138 | deoR,tsx 139 | deoR,nupG 140 | deoR,deoCABD 141 | araC,araJ 142 | malT,malS 143 | malT,malEFG 144 | nac,putAP 145 | yhdG_fis,proP 146 | yhdG_fis,sdhCDAB_b0725_sucABCD 147 | yhdG_fis,adhE 148 | yhdG_fis,alaWX 149 | yhdG_fis,aldB 150 | yhdG_fis,nrdAB 151 | yhdG_fis,proL 152 | lysR,tdcABCDEFG 153 | glpR,glpD 154 | glpR,glpACB 155 | glpR,glpFK 156 | nlpD_rpoS,proP 157 | nlpD_rpoS,acs 158 | nlpD_rpoS,adhE 159 | nlpD_rpoS,aldB 160 | GalR,galETKM 161 | csgDEFG,csgBA 162 | -------------------------------------------------------------------------------- /syntren/100_probaComplexInter1/syntrenHop1_100_1_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | nlpD_rpoS,adhE 3 | nlpD_rpoS,narZYWV 4 | nlpD_rpoS,katG 5 | nlpD_rpoS,alkA 6 | nlpD_rpoS,osmY 7 | nlpD_rpoS,dps 8 | nlpD_rpoS,nhaA 9 | nlpD_rpoS,aldB 10 | nlpD_rpoS,acs 11 | nlpD_rpoS,appY 12 | nlpD_rpoS,proP 13 | narL,adhE 14 | narL,frdABCD 15 | narL,dcuB_fumB 16 | narL,narGHJI 17 | narL,focA_pflB 18 | narL,fdnGHI 19 | narL,narK 20 | narL,nuoABCEFGHIJKLMN 21 | narL,torCAD 22 | narL,nrfABCDEFG 23 | narL,caiF 24 | fnr,frdABCD 25 | fnr,dcuB_fumB 26 | fnr,ansB 27 | fnr,tdcABCDEFG 28 | fnr,aspA 29 | fnr,narGHJI 30 | fnr,cyoABCDE 31 | 
fnr,focA_pflB 32 | fnr,fdnGHI 33 | fnr,narK 34 | fnr,nuoABCEFGHIJKLMN 35 | fnr,ndh 36 | fnr,arcA 37 | fnr,glpACB 38 | fnr,sdhCDAB_b0725_sucABCD 39 | fnr,acs 40 | fnr,cydAB 41 | fnr,icdA 42 | fnr,caiF 43 | tdcAR,tdcABCDEFG 44 | yhdG_fis,adhE 45 | yhdG_fis,aldB 46 | yhdG_fis,lysT_valT_lysW 47 | yhdG_fis,thrU_tyrU_glyT_thrT 48 | yhdG_fis,pdhR_aceEF_lpdA 49 | yhdG_fis,pheU 50 | yhdG_fis,sdhCDAB_b0725_sucABCD 51 | yhdG_fis,proP 52 | yhdG_fis,nrdAB 53 | yhdG_fis,aspV 54 | yhdG_fis,thrW 55 | yhdG_fis,proL 56 | yhdG_fis,argU 57 | crp,dcuB_fumB 58 | crp,ansB 59 | crp,tdcABCDEFG 60 | crp,focA_pflB 61 | crp,aldB 62 | crp,nagBACD 63 | crp,araE 64 | crp,malI 65 | crp,deoCABD 66 | crp,araC 67 | crp,dctA 68 | crp,uhpT 69 | crp,ebgAC 70 | crp,glpACB 71 | crp,melR 72 | crp,fucPIKUR 73 | crp,sdhCDAB_b0725_sucABCD 74 | crp,acs 75 | crp,ivbL_ilvBN 76 | crp,nagE 77 | crp,dsdXA 78 | crp,proP 79 | crp,cpdB 80 | crp,fucAO 81 | crp,speC 82 | crp,glnALG 83 | crp,malEFG 84 | crp,lacZYA 85 | crp,ppiA 86 | crp,yiaKLMNOPQRS 87 | crp,gltA 88 | crp,galETKM 89 | crp,mglBAC 90 | crp,cyaA 91 | crp,caiF 92 | crp,glgCAP 93 | crp,gntKU 94 | crp,nupG 95 | crp,galS 96 | arcA,cyoABCDE 97 | arcA,focA_pflB 98 | arcA,nuoABCEFGHIJKLMN 99 | arcA,lctPRD 100 | arcA,fadBA 101 | arcA,appCBA 102 | arcA,dctA 103 | arcA,glpACB 104 | arcA,glcDEFGB 105 | arcA,sdhCDAB_b0725_sucABCD 106 | arcA,sodA 107 | arcA,appY 108 | arcA,cydAB 109 | arcA,gltA 110 | arcA,icdA 111 | arcA,aceBAK 112 | himA,tdcABCDEFG 113 | himA,narGHJI 114 | himA,focA_pflB 115 | himA,dps 116 | himA,narK 117 | himA,nuoABCEFGHIJKLMN 118 | himA,himD 119 | himA,ompC 120 | himA,glcDEFGB 121 | himA,sodA 122 | himA,ompR_envZ 123 | himA,aceBAK 124 | himA,pspABCDE 125 | nagBACD,glmUS 126 | nagBACD,nagE 127 | araC,araE 128 | fucPIKUR,fucAO 129 | torR,torCAD 130 | lrp,ompC 131 | lrp,gltBDF 132 | lrp,stpA 133 | lrp,lysU 134 | appY,appCBA 135 | oxyR,katG 136 | oxyR,dps 137 | oxyR,gorA 138 | ompR_envZ,ompC 139 | envY_ompT,ompC 140 | ebgR,ebgAC 141 | marRAB,sodA 142 | 
fadR,fadBA 143 | fadR,iclMR 144 | iclMR,acs 145 | iclMR,aceBAK 146 | galS,mglBAC 147 | yjdHG,frdABCD 148 | yjdHG,dcuB_fumB 149 | yjdHG,dctA 150 | -------------------------------------------------------------------------------- /syntren/100_probaComplexInter1/syntrenHop1_100_2_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | crp,udp 3 | crp,araFG_araH_1H_2 4 | crp,cyaA 5 | crp,uhpT 6 | crp,glpD 7 | crp,ppiA 8 | crp,caiF 9 | crp,dadAX 10 | crp,melR 11 | crp,ivbL_ilvBN 12 | crp,dcuB_fumB 13 | crp,tdcABCDEFG 14 | crp,araE 15 | crp,caiTABCDE 16 | crp,glpACB 17 | crp,fixABCX 18 | crp,mglBAC 19 | crp,dctA 20 | crp,gntKU 21 | crp,glpFK 22 | crp,rpoH 23 | crp,malK_lamB_malM 24 | crp,melAB 25 | crp,yhfA 26 | crp,deoCABD 27 | crp,flhDC 28 | crp,ptsHI_crr 29 | crp,aldB 30 | crp,focA_pflB 31 | crp,rhaBAD 32 | crp,araJ 33 | crp,tsx 34 | crp,fur 35 | crp,fadL 36 | crp,yiaKLMNOPQRS 37 | crp,ebgAC 38 | crp,araC 39 | crp,glgS 40 | crp,malT 41 | crp,sdhCDAB_b0725_sucABCD 42 | crp,ompA 43 | crp,malEFG 44 | crp,araBAD 45 | crp,glgCAP 46 | crp,galETKM 47 | caiF,caiTABCDE 48 | caiF,fixABCX 49 | melR,melAB 50 | uhpA,uhpT 51 | glpR,glpD 52 | glpR,glpACB 53 | glpR,glpFK 54 | hns,caiF 55 | hns,flhDC 56 | hns,nhaA 57 | hns,stpA 58 | hns,osmC 59 | himA,tdcABCDEFG 60 | himA,caiTABCDE 61 | himA,glcDEFGB 62 | himA,ompC 63 | himA,hycABCDEFGH 64 | himA,ecpD_htrE 65 | himA,focA_pflB 66 | himA,narGHJI 67 | himA,nuoABCEFGHIJKLMN 68 | himA,ompF 69 | himA,sodA 70 | himA,aceBAK 71 | himA,ompR_envZ 72 | himA,narK 73 | cspA,hns 74 | cspA,gyrA 75 | rpoH,mopA 76 | rpoH,lon 77 | rpoH,dnaKJ 78 | rpoH,htpY 79 | rpoH,hflB 80 | arcA,glpACB 81 | arcA,dctA 82 | arcA,glcDEFGB 83 | arcA,cyoABCDE 84 | arcA,cydAB 85 | arcA,focA_pflB 86 | arcA,nuoABCEFGHIJKLMN 87 | arcA,sodA 88 | arcA,aceBAK 89 | arcA,sdhCDAB_b0725_sucABCD 90 | arcA,mdh 91 | fnr,caiF 92 | fnr,dcuB_fumB 93 | fnr,tdcABCDEFG 94 | fnr,glpACB 95 | fnr,arcA 96 | fnr,cyoABCDE 97 | fnr,ndh 98 | 
fnr,cydAB 99 | fnr,focA_pflB 100 | fnr,narGHJI 101 | fnr,fdnGHI 102 | fnr,nirBDC_cysG 103 | fnr,frdABCD 104 | fnr,nuoABCEFGHIJKLMN 105 | fnr,sdhCDAB_b0725_sucABCD 106 | fnr,narK 107 | flhDC,fliE 108 | evgA,ompC 109 | lrp,ompC 110 | lrp,stpA 111 | lrp,oppABCDF 112 | lrp,livJ 113 | lrp,osmC 114 | lrp,sdaA 115 | lrp,ompF 116 | lrp,livKHMGF 117 | lrp,serA 118 | yjdHG,dcuB_fumB 119 | yjdHG,dctA 120 | yjdHG,frdABCD 121 | cytR,udp 122 | cytR,ppiA 123 | cytR,rpoH 124 | cytR,deoCABD 125 | cytR,tsx 126 | fur,fepB 127 | fur,sodA 128 | fur,fhuACDB 129 | araC,araFG_araH_1H_2 130 | araC,araE 131 | araC,araJ 132 | araC,araBAD 133 | glcC,glcDEFGB 134 | malT,malK_lamB_malM 135 | malT,malEFG 136 | soxS,sodA 137 | ompR_envZ,ompC 138 | ompR_envZ,flhDC 139 | ompR_envZ,fadL 140 | ompR_envZ,ompF 141 | narL,caiF 142 | narL,dcuB_fumB 143 | narL,focA_pflB 144 | narL,narGHJI 145 | narL,fdnGHI 146 | narL,nirBDC_cysG 147 | narL,frdABCD 148 | narL,nuoABCEFGHIJKLMN 149 | narL,narK 150 | narL,adhE 151 | yhdG_fis,aldB 152 | yhdG_fis,sdhCDAB_b0725_sucABCD 153 | yhdG_fis,adhE 154 | yhdG_fis,serX 155 | mlc,ptsHI_crr 156 | mlc,malT 157 | FruR,ptsHI_crr 158 | FruR,aceBAK 159 | FruR,adhE 160 | -------------------------------------------------------------------------------- /syntren/100_probaComplexInter1/syntrenHop1_100_3_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | fliAZY,flgKL 3 | fliAZY,flhBAE 4 | fliAZY,tarTapcheRBYZ 5 | fliAZY,fliE 6 | fliAZY,motABcheAW 7 | fliAZY,fliLMNOPQR 8 | fliAZY,fliC 9 | fliAZY,flgMN 10 | fliAZY,fliDST 11 | fliAZY,flgBCDEFGHIJK 12 | fliAZY,fliFGHIJK 13 | fliAZY,tsr 14 | flhDC,fliAZY 15 | flhDC,flhBAE 16 | flhDC,fliE 17 | flhDC,fliLMNOPQR 18 | flhDC,flgBCDEFGHIJK 19 | flhDC,fliFGHIJK 20 | hns,fliAZY 21 | hns,flhDC 22 | hns,osmC 23 | hns,caiF 24 | hns,rcsAB 25 | ompR_envZ,flhDC 26 | ompR_envZ,csgDEFG 27 | ompR_envZ,csgBA 28 | ompR_envZ,fadL 29 | ompR_envZ,ompC 30 | csgDEFG,csgBA 31 | crp,flhDC 32 | crp,cpdB 
33 | crp,araJ 34 | crp,glpD 35 | crp,aldB 36 | crp,nagBACD 37 | crp,glgS 38 | crp,galETKM 39 | crp,dctA 40 | crp,araE 41 | crp,cyaA 42 | crp,caiF 43 | crp,melAB 44 | crp,fixABCX 45 | crp,fadL 46 | crp,glpACB 47 | crp,caiTABCDE 48 | crp,lacZYA 49 | crp,manXYZ 50 | crp,dcuB_fumB 51 | crp,uhpT 52 | crp,araC 53 | crp,sdhCDAB_b0725_sucABCD 54 | crp,ansB 55 | crp,glnALG 56 | crp,proP 57 | crp,glgCAP 58 | crp,malK_lamB_malM 59 | crp,nagE 60 | crp,tsx 61 | crp,ebgAC 62 | crp,ivbL_ilvBN 63 | crp,rhaT 64 | crp,malXY 65 | crp,udp 66 | crp,malEFG 67 | crp,malT 68 | crp,speC 69 | nagBACD,manXYZ 70 | nagBACD,nagE 71 | cpxAR,motABcheAW 72 | cpxAR,xprB_dsbC_recJ 73 | cpxAR,skp_lpxDA_fabZ 74 | cpxAR,dsbA 75 | cpxAR,htrA 76 | cpxAR,tsr 77 | caiF,fixABCX 78 | caiF,caiTABCDE 79 | lrp,osmC 80 | lrp,livJ 81 | lrp,oppABCDF 82 | lrp,gltBDF 83 | lrp,ompC 84 | lrp,ilvIH 85 | lrp,kbl_tdh 86 | arcA,dctA 87 | arcA,glpACB 88 | arcA,icdA 89 | arcA,sdhCDAB_b0725_sucABCD 90 | arcA,fadBA 91 | arcA,cydAB 92 | arcA,fumA 93 | arcA,cyoABCDE 94 | arcA,glcDEFGB 95 | arcA,nuoABCEFGHIJKLMN 96 | yjdHG,dctA 97 | yjdHG,dcuB_fumB 98 | rpoE_rseABC,xprB_dsbC_recJ 99 | rpoE_rseABC,skp_lpxDA_fabZ 100 | rpoE_rseABC,ecfABC 101 | rpoE_rseABC,nlpB_purA 102 | rpoE_rseABC,ecfH 103 | rpoE_rseABC,ostA_surA_pdxA 104 | rpoE_rseABC,htrA 105 | rpoE_rseABC,mdoGH 106 | rpoE_rseABC,uppS_cdsA_ecfE 107 | rpoE_rseABC,ecfK 108 | fnr,caiF 109 | fnr,glpACB 110 | fnr,dcuB_fumB 111 | fnr,arcA 112 | fnr,icdA 113 | fnr,sdhCDAB_b0725_sucABCD 114 | fnr,ansB 115 | fnr,cydAB 116 | fnr,hypABCDE 117 | fnr,cyoABCDE 118 | fnr,nuoABCEFGHIJKLMN 119 | fnr,narK 120 | fadR,fadL 121 | fadR,fadBA 122 | fadR,iclMR 123 | fadR,uspA 124 | araC,araJ 125 | araC,araE 126 | lacI,lacZYA 127 | nlpD_rpoS,osmC 128 | nlpD_rpoS,aldB 129 | nlpD_rpoS,cpxAR 130 | nlpD_rpoS,proP 131 | nlpD_rpoS,adhE 132 | narL,caiF 133 | narL,dcuB_fumB 134 | narL,nuoABCEFGHIJKLMN 135 | narL,adhE 136 | narL,narK 137 | glpR,glpD 138 | glpR,glpACB 139 | envY_ompT,ompC 140 | 
malT,malK_lamB_malM 141 | malT,malEFG 142 | himA,ompR_envZ 143 | himA,caiTABCDE 144 | himA,hypABCDE 145 | himA,ompC 146 | himA,glcDEFGB 147 | himA,nuoABCEFGHIJKLMN 148 | himA,narK 149 | -------------------------------------------------------------------------------- /syntren/100_probaComplexInter1/syntrenHop1_100_4_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | fnr,dmsABC 3 | fnr,ansB 4 | fnr,frdABCD 5 | fnr,icdA 6 | fnr,sdhCDAB_b0725_sucABCD 7 | fnr,aspA 8 | fnr,arcA 9 | fnr,glpACB 10 | fnr,focA_pflB 11 | fnr,nuoABCEFGHIJKLMN 12 | fnr,cydAB 13 | fnr,nirBDC_cysG 14 | fnr,tdcABCDEFG 15 | fnr,narGHJI 16 | fnr,acs 17 | fnr,ndh 18 | fnr,caiF 19 | fnr,narK 20 | yjdHG,frdABCD 21 | yjdHG,dctA 22 | arcA,icdA 23 | arcA,sdhCDAB_b0725_sucABCD 24 | arcA,glpACB 25 | arcA,focA_pflB 26 | arcA,gltA 27 | arcA,nuoABCEFGHIJKLMN 28 | arcA,dctA 29 | arcA,cydAB 30 | arcA,betIBA 31 | arcA,lctPRD 32 | arcA,sodA 33 | arcA,aceBAK 34 | arcA,glcDEFGB 35 | arcA,fumC 36 | arcA,fadBA 37 | arcA,fumA 38 | narL,dmsABC 39 | narL,frdABCD 40 | narL,focA_pflB 41 | narL,adhE 42 | narL,nuoABCEFGHIJKLMN 43 | narL,nirBDC_cysG 44 | narL,narGHJI 45 | narL,caiF 46 | narL,narK 47 | crp,ansB 48 | crp,sdhCDAB_b0725_sucABCD 49 | crp,glpACB 50 | crp,focA_pflB 51 | crp,araJ 52 | crp,gltA 53 | crp,dctA 54 | crp,malXY 55 | crp,dadAX 56 | crp,ppiA 57 | crp,cyaA 58 | crp,manXYZ 59 | crp,fur 60 | crp,araC 61 | crp,gntKU 62 | crp,araFG_araH_1H_2 63 | crp,melR 64 | crp,tdcABCDEFG 65 | crp,caiTABCDE 66 | crp,srlAEBD_gutM_srlR_gutQ 67 | crp,malK_lamB_malM 68 | crp,ivbL_ilvBN 69 | crp,deoCABD 70 | crp,malI 71 | crp,acs 72 | crp,nagE 73 | crp,speC 74 | crp,glpTQ 75 | crp,araE 76 | crp,yiaKLMNOPQRS 77 | crp,rpoH 78 | crp,galS 79 | crp,caiF 80 | crp,mglBAC 81 | crp,glgS 82 | crp,cirA 83 | crp,nagBACD 84 | crp,ebgAC 85 | crp,tnaLAB 86 | crp,ompA 87 | crp,malEFG 88 | yhdG_fis,sdhCDAB_b0725_sucABCD 89 | yhdG_fis,adhE 90 | yhdG_fis,aspV 91 | yhdG_fis,serX 92 | 
yhdG_fis,argW 93 | yhdG_fis,metT_leuW_glnUW_metU_glnVX 94 | yhdG_fis,thrU_tyrU_glyT_thrT 95 | yhdG_fis,proL 96 | yhdG_fis,proK 97 | FruR,icdA 98 | FruR,adhE 99 | FruR,ppsA 100 | FruR,aceBAK 101 | fur,fhuACDB 102 | fur,fepDGC 103 | fur,fepA_entD 104 | fur,sodA 105 | fur,entCEBA 106 | fur,cirA 107 | araC,araJ 108 | araC,araFG_araH_1H_2 109 | araC,araE 110 | himA,focA_pflB 111 | himA,nuoABCEFGHIJKLMN 112 | himA,tdcABCDEFG 113 | himA,caiTABCDE 114 | himA,narGHJI 115 | himA,sodA 116 | himA,aceBAK 117 | himA,glnHPQ 118 | himA,osmE 119 | himA,glcDEFGB 120 | himA,pspABCDE 121 | himA,ompR_envZ 122 | himA,ompC 123 | himA,narK 124 | himA,himD 125 | malI,malXY 126 | rpoN,dctA 127 | rpoN,glnHPQ 128 | rpoN,pspABCDE 129 | gntR,gntKU 130 | rpoH,hflB 131 | rpoH,dnaKJ 132 | marRAB,sodA 133 | marRAB,fumC 134 | marRAB,zwf 135 | galS,mglBAC 136 | caiF,caiTABCDE 137 | rob,sodA 138 | rob,marRAB 139 | rob,fumC 140 | rob,ybaO 141 | rob,zwf 142 | ompR_envZ,csgDEFG 143 | ompR_envZ,ompC 144 | glcC,glcDEFGB 145 | cytR,ppiA 146 | cytR,deoCABD 147 | cytR,rpoH 148 | nlpD_rpoS,adhE 149 | nlpD_rpoS,acs 150 | nlpD_rpoS,alkA 151 | glpR,glpACB 152 | glpR,glpTQ 153 | nagBACD,manXYZ 154 | nagBACD,nagE 155 | -------------------------------------------------------------------------------- /syntren/100_probaComplexInter1/syntrenHop1_100_5_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | nhaR,nhaA 3 | hns,nhaA 4 | hns,stpA 5 | hns,fliAZY 6 | hns,rcsAB 7 | hns,flhDC 8 | hns,osmC 9 | hns,caiF 10 | cspA,hns 11 | cspA,gyrA 12 | fliAZY,flgKL 13 | fliAZY,fliLMNOPQR 14 | fliAZY,tsr 15 | fliAZY,fliE 16 | fliAZY,fliDST 17 | fliAZY,flgMN 18 | fliAZY,tarTapcheRBYZ 19 | fliAZY,motABcheAW 20 | fliAZY,fliFGHIJK 21 | flhDC,fliAZY 22 | flhDC,fliLMNOPQR 23 | flhDC,fliE 24 | flhDC,fliFGHIJK 25 | lrp,stpA 26 | lrp,kbl_tdh 27 | lrp,livJ 28 | lrp,ompF 29 | lrp,osmC 30 | lrp,livKHMGF 31 | lrp,ompC 32 | lrp,oppABCDF 33 | lrp,gltBDF 34 | lrp,gcvTHP 35 | 
ompR_envZ,flhDC 36 | ompR_envZ,ompF 37 | ompR_envZ,csgBA 38 | ompR_envZ,ompC 39 | ompR_envZ,csgDEFG 40 | ompR_envZ,fadL 41 | nlpD_rpoS,nhaA 42 | nlpD_rpoS,acs 43 | nlpD_rpoS,osmC 44 | nlpD_rpoS,dps 45 | nlpD_rpoS,appY 46 | nlpD_rpoS,aldB 47 | nlpD_rpoS,cpxAR 48 | nlpD_rpoS,alkA 49 | nlpD_rpoS,narZYWV 50 | nlpD_rpoS,proP 51 | nlpD_rpoS,ftsQAZ 52 | cpxAR,tsr 53 | cpxAR,cpxP 54 | cpxAR,htrA 55 | cpxAR,dsbA 56 | cpxAR,skp_lpxDA_fabZ 57 | cpxAR,motABcheAW 58 | cpxAR,ecfI 59 | iclMR,acs 60 | csgDEFG,csgBA 61 | envY_ompT,ompF 62 | envY_ompT,ompC 63 | oxyR,dps 64 | crp,flhDC 65 | crp,acs 66 | crp,aldB 67 | crp,fadL 68 | crp,rhaT 69 | crp,sdhCDAB_b0725_sucABCD 70 | crp,caiF 71 | crp,nagBACD 72 | crp,gntKU 73 | crp,glpTQ 74 | crp,araFG_araH_1H_2 75 | crp,caiTABCDE 76 | crp,ebgAC 77 | crp,manXYZ 78 | crp,ppiA 79 | crp,galS 80 | crp,glpACB 81 | crp,malS 82 | crp,proP 83 | crp,tdcABCDEFG 84 | crp,melAB 85 | crp,epd_pgk 86 | crp,glnALG 87 | crp,focA_pflB 88 | crp,yiaKLMNOPQRS 89 | crp,fucAO 90 | crp,ansB 91 | crp,tsx 92 | crp,ompA 93 | crp,fucPIKUR 94 | caiF,caiTABCDE 95 | fadR,iclMR 96 | fadR,fadL 97 | fadR,fadBA 98 | fadR,fabA 99 | nagBACD,manXYZ 100 | himA,ompR_envZ 101 | himA,ompF 102 | himA,dps 103 | himA,ompC 104 | himA,caiTABCDE 105 | himA,glcDEFGB 106 | himA,nuoABCEFGHIJKLMN 107 | himA,carAB 108 | himA,tdcABCDEFG 109 | himA,ecpD_htrE 110 | himA,focA_pflB 111 | himA,sodA 112 | himA,hycABCDEFGH 113 | himA,narGHJI 114 | himA,glnHPQ 115 | arcA,appY 116 | arcA,sdhCDAB_b0725_sucABCD 117 | arcA,fadBA 118 | arcA,glcDEFGB 119 | arcA,nuoABCEFGHIJKLMN 120 | arcA,glpACB 121 | arcA,focA_pflB 122 | arcA,sodA 123 | arcA,lctPRD 124 | arcA,icdA 125 | glpR,glpTQ 126 | glpR,glpACB 127 | evgA,ompC 128 | argR,carAB 129 | glcC,glcDEFGB 130 | glnALG,nac 131 | glnALG,glnHPQ 132 | narL,caiF 133 | narL,nuoABCEFGHIJKLMN 134 | narL,focA_pflB 135 | narL,narGHJI 136 | fnr,acs 137 | fnr,sdhCDAB_b0725_sucABCD 138 | fnr,caiF 139 | fnr,arcA 140 | fnr,nuoABCEFGHIJKLMN 141 | fnr,glpACB 142 | fnr,tdcABCDEFG 
143 | fnr,focA_pflB 144 | fnr,ansB 145 | fnr,narGHJI 146 | fnr,icdA 147 | nac,gdhA 148 | cytR,ppiA 149 | cytR,tsx 150 | fucPIKUR,fucAO 151 | -------------------------------------------------------------------------------- /syntren/100_probaComplexInter1/syntrenHop1_100_6_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | FruR,adhE 3 | FruR,icdA 4 | FruR,ppsA 5 | FruR,ptsHI_crr 6 | yhdG_fis,adhE 7 | yhdG_fis,pheV 8 | yhdG_fis,lysT_valT_lysW 9 | yhdG_fis,metT_leuW_glnUW_metU_glnVX 10 | yhdG_fis,proP 11 | yhdG_fis,argW 12 | yhdG_fis,proL 13 | yhdG_fis,alaWX 14 | yhdG_fis,argU 15 | yhdG_fis,pheU 16 | yhdG_fis,argX_hisR_leuT_proM 17 | yhdG_fis,serT 18 | yhdG_fis,sdhCDAB_b0725_sucABCD 19 | yhdG_fis,thrU_tyrU_glyT_thrT 20 | yhdG_fis,leuX 21 | yhdG_fis,aldB 22 | yhdG_fis,pdhR_aceEF_lpdA 23 | yhdG_fis,aspV 24 | yhdG_fis,serX 25 | narL,adhE 26 | narL,narGHJI 27 | narL,torCAD 28 | narL,caiF 29 | narL,dcuB_fumB 30 | narL,dmsABC 31 | narL,narK 32 | narL,nirBDC_cysG 33 | narL,fdnGHI 34 | narL,focA_pflB 35 | narL,nuoABCEFGHIJKLMN 36 | narL,frdABCD 37 | fnr,icdA 38 | fnr,narGHJI 39 | fnr,caiF 40 | fnr,dcuB_fumB 41 | fnr,acs 42 | fnr,sdhCDAB_b0725_sucABCD 43 | fnr,dmsABC 44 | fnr,narK 45 | fnr,hypABCDE 46 | fnr,nirBDC_cysG 47 | fnr,fdnGHI 48 | fnr,focA_pflB 49 | fnr,glpACB 50 | fnr,arcA 51 | fnr,tdcABCDEFG 52 | fnr,nuoABCEFGHIJKLMN 53 | fnr,ansB 54 | fnr,frdABCD 55 | fnr,cydAB 56 | caiF,fixABCX 57 | caiF,caiTABCDE 58 | himA,narGHJI 59 | himA,narK 60 | himA,ompR_envZ 61 | himA,hypABCDE 62 | himA,osmE 63 | himA,glcDEFGB 64 | himA,focA_pflB 65 | himA,ecpD_htrE 66 | himA,ompC 67 | himA,caiTABCDE 68 | himA,tdcABCDEFG 69 | himA,nuoABCEFGHIJKLMN 70 | himA,sodA 71 | himA,ompF 72 | himA,glnHPQ 73 | ompR_envZ,ompC 74 | ompR_envZ,flhDC 75 | ompR_envZ,ompF 76 | ompR_envZ,fadL 77 | yjdHG,dcuB_fumB 78 | yjdHG,frdABCD 79 | yjdHG,dctA 80 | nlpD_rpoS,adhE 81 | nlpD_rpoS,proP 82 | nlpD_rpoS,acs 83 | nlpD_rpoS,narZYWV 84 | nlpD_rpoS,aldB 
85 | nlpD_rpoS,osmC 86 | crp,proP 87 | crp,caiF 88 | crp,dcuB_fumB 89 | crp,fixABCX 90 | crp,acs 91 | crp,sdhCDAB_b0725_sucABCD 92 | crp,focA_pflB 93 | crp,glpACB 94 | crp,lacZYA 95 | crp,cpdB 96 | crp,malXY 97 | crp,ompA 98 | crp,ubiG 99 | crp,srlAEBD_gutM_srlR_gutQ 100 | crp,galS 101 | crp,epd_pgk 102 | crp,aldB 103 | crp,ptsHI_crr 104 | crp,gntT 105 | crp,flhDC 106 | crp,gltA 107 | crp,glpFK 108 | crp,fur 109 | crp,glgCAP 110 | crp,caiTABCDE 111 | crp,tdcABCDEFG 112 | crp,ansB 113 | crp,fucAO 114 | crp,mglBAC 115 | crp,malT 116 | crp,cyaA 117 | crp,malEFG 118 | crp,dctA 119 | crp,malS 120 | crp,malK_lamB_malM 121 | crp,gntKU 122 | crp,nagE 123 | crp,fadL 124 | iclMR,acs 125 | galS,mglBAC 126 | lacI,lacZYA 127 | flhDC,fliFGHIJK 128 | flhDC,fliLMNOPQR 129 | fur,fepA_entD 130 | fur,sodA 131 | fur,fecIR 132 | gntR,gntT 133 | gntR,gntKU 134 | fhlA,hypABCDE 135 | mlc,ptsHI_crr 136 | mlc,malT 137 | arcA,icdA 138 | arcA,sdhCDAB_b0725_sucABCD 139 | arcA,glcDEFGB 140 | arcA,focA_pflB 141 | arcA,glpACB 142 | arcA,gltA 143 | arcA,nuoABCEFGHIJKLMN 144 | arcA,sodA 145 | arcA,betIBA 146 | arcA,dctA 147 | arcA,cydAB 148 | lrp,ompC 149 | lrp,osmC 150 | lrp,ompF 151 | lrp,ilvIH 152 | lrp,kbl_tdh 153 | malT,malEFG 154 | malT,malS 155 | malT,malK_lamB_malM 156 | glcC,glcDEFGB 157 | tdcAR,tdcABCDEFG 158 | -------------------------------------------------------------------------------- /syntren/100_probaComplexInter1/syntrenHop1_100_7_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | rcsA,ftsQAZ 3 | rcsA,wza_wzb_b2060_wcaA_wcaB 4 | nlpD_rpoS,ftsQAZ 5 | nlpD_rpoS,nhaA 6 | nlpD_rpoS,osmC 7 | nlpD_rpoS,dps 8 | nlpD_rpoS,acs 9 | nlpD_rpoS,adhE 10 | nlpD_rpoS,aldB 11 | nlpD_rpoS,proP 12 | nlpD_rpoS,alkA 13 | nlpD_rpoS,appY 14 | nlpD_rpoS,cpxAR 15 | crp,acs 16 | crp,galS 17 | crp,cirA 18 | crp,dcuB_fumB 19 | crp,focA_pflB 20 | crp,epd_pgk 21 | crp,nagBACD 22 | crp,glpTQ 23 | crp,galETKM 24 | crp,tsx 25 | crp,gntKU 26 | 
crp,manXYZ 27 | crp,nupG 28 | crp,aldB 29 | crp,yhfA 30 | crp,uhpT 31 | crp,tdcABCDEFG 32 | crp,proP 33 | crp,nagE 34 | crp,malK_lamB_malM 35 | crp,dctA 36 | crp,melR 37 | crp,rhaT 38 | crp,glpFK 39 | crp,speC 40 | crp,glpACB 41 | crp,caiF 42 | crp,fadL 43 | crp,yiaKLMNOPQRS 44 | crp,araC 45 | crp,rpoH 46 | crp,malT 47 | crp,deoCABD 48 | crp,dadAX 49 | crp,caiTABCDE 50 | crp,gntT 51 | crp,ebgAC 52 | GalR,galS 53 | GalR,galETKM 54 | nagBACD,manXYZ 55 | nagBACD,nagE 56 | lrp,osmC 57 | lrp,ompC 58 | lrp,stpA 59 | lrp,gcvTHP 60 | lrp,ompF 61 | lrp,livKHMGF 62 | lrp,sdaA 63 | glpR,glpTQ 64 | glpR,glpFK 65 | glpR,glpACB 66 | hns,nhaA 67 | hns,osmC 68 | hns,stpA 69 | hns,caiF 70 | hns,rcsAB 71 | hns,fliAZY 72 | envY_ompT,ompC 73 | envY_ompT,ompF 74 | purR,gcvTHP 75 | purR,ycfC_purB 76 | purR,codBA 77 | purR,glyA 78 | purR,pyrC 79 | purR,pyrD 80 | FruR,adhE 81 | FruR,ppsA 82 | FruR,pykF 83 | FruR,aceBAK 84 | himA,dps 85 | himA,focA_pflB 86 | himA,ompC 87 | himA,tdcABCDEFG 88 | himA,narGHJI 89 | himA,hycABCDEFGH 90 | himA,hypABCDE 91 | himA,ompF 92 | himA,sodA 93 | himA,narK 94 | himA,aceBAK 95 | himA,glnHPQ 96 | himA,caiTABCDE 97 | himA,nuoABCEFGHIJKLMN 98 | gcvA,gcvTHP 99 | fnr,acs 100 | fnr,dcuB_fumB 101 | fnr,focA_pflB 102 | fnr,tdcABCDEFG 103 | fnr,narGHJI 104 | fnr,hypABCDE 105 | fnr,arcA 106 | fnr,glpACB 107 | fnr,caiF 108 | fnr,cydAB 109 | fnr,narK 110 | fnr,fdnGHI 111 | fnr,nuoABCEFGHIJKLMN 112 | cspA,hns 113 | arcA,focA_pflB 114 | arcA,dctA 115 | arcA,glpACB 116 | arcA,cydAB 117 | arcA,sodA 118 | arcA,appY 119 | arcA,aceBAK 120 | arcA,nuoABCEFGHIJKLMN 121 | fhlA,hycABCDEFGH 122 | fhlA,hypABCDE 123 | fhlA,fdhF 124 | caiF,caiTABCDE 125 | rpoN,dctA 126 | rpoN,hycABCDEFGH 127 | rpoN,fhlA 128 | rpoN,hypA 129 | rpoN,glnHPQ 130 | rpoN,fdhF 131 | oxyR,dps 132 | cytR,tsx 133 | cytR,nupG 134 | cytR,rpoH 135 | cytR,deoCABD 136 | evgA,ompC 137 | rpoH,lon 138 | fliAZY,fliDST 139 | fliAZY,flgKL 140 | malT,malK_lamB_malM 141 | yiaJ,yiaKLMNOPQRS 142 | deoR,tsx 143 | deoR,nupG 144 
| deoR,deoCABD 145 | narL,dcuB_fumB 146 | narL,focA_pflB 147 | narL,adhE 148 | narL,narGHJI 149 | narL,caiF 150 | narL,narK 151 | narL,fdnGHI 152 | narL,nuoABCEFGHIJKLMN 153 | tdcAR,tdcABCDEFG 154 | -------------------------------------------------------------------------------- /syntren/100_probaComplexInter1/syntrenHop1_100_8_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | hns,stpA 3 | hns,caiF 4 | hns,nhaA 5 | hns,osmC 6 | caiF,caiTABCDE 7 | crp,caiF 8 | crp,tsx 9 | crp,malK_lamB_malM 10 | crp,gltA 11 | crp,glnALG 12 | crp,dadAX 13 | crp,rhaT 14 | crp,udp 15 | crp,malI 16 | crp,acs 17 | crp,nagBACD 18 | crp,dctA 19 | crp,fucAO 20 | crp,tdcABCDEFG 21 | crp,rpoH 22 | crp,ptsHI_crr 23 | crp,melR 24 | crp,araJ 25 | crp,dsdXA 26 | crp,manXYZ 27 | crp,fucPIKUR 28 | crp,glpACB 29 | crp,araC 30 | crp,malT 31 | crp,gntKU 32 | crp,caiTABCDE 33 | crp,srlAEBD_gutM_srlR_gutQ 34 | crp,nupG 35 | crp,malS 36 | crp,ivbL_ilvBN 37 | crp,epd_pgk 38 | crp,dcuB_fumB 39 | crp,ppiA 40 | crp,gntT 41 | crp,galETKM 42 | crp,araFG_araH_1H_2 43 | crp,lacZYA 44 | crp,uhpT 45 | crp,galS 46 | crp,glpD 47 | crp,malEFG 48 | crp,glpTQ 49 | crp,cpdB 50 | crp,sdhCDAB_b0725_sucABCD 51 | crp,focA_pflB 52 | crp,mglBAC 53 | glnALG,glnHPQ 54 | glnALG,nac 55 | nagBACD,glmUS 56 | nagBACD,manXYZ 57 | rpoH,htpG 58 | rpoH,clpP 59 | rpoH,ibpAB 60 | rpoH,htpY 61 | yjdHG,dctA 62 | yjdHG,frdABCD 63 | yjdHG,dcuB_fumB 64 | mlc,ptsHI_crr 65 | mlc,manXYZ 66 | mlc,malT 67 | tdcAR,tdcABCDEFG 68 | rpoN,glnALG 69 | rpoN,dctA 70 | rpoN,nycA 71 | rpoN,glnHPQ 72 | rpoN,fdhF 73 | rpoN,nac 74 | rpoN,hycABCDEFGH 75 | rpoN,hypA 76 | fucPIKUR,fucAO 77 | araC,araJ 78 | araC,araFG_araH_1H_2 79 | malT,malK_lamB_malM 80 | malT,malS 81 | malT,malEFG 82 | himA,tdcABCDEFG 83 | himA,caiTABCDE 84 | himA,aceBAK 85 | himA,glnHPQ 86 | himA,himD 87 | himA,narGHJI 88 | himA,hypABCDE 89 | himA,ompF 90 | himA,ompC 91 | himA,ecpD_htrE 92 | himA,hycABCDEFGH 93 | himA,sodA 94 | 
himA,focA_pflB 95 | himA,narK 96 | nlpD_rpoS,acs 97 | nlpD_rpoS,nhaA 98 | nlpD_rpoS,appY 99 | nlpD_rpoS,osmC 100 | nlpD_rpoS,cpxAR 101 | nlpD_rpoS,alkA 102 | nlpD_rpoS,adhE 103 | fnr,caiF 104 | fnr,acs 105 | fnr,tdcABCDEFG 106 | fnr,frdABCD 107 | fnr,glpACB 108 | fnr,narGHJI 109 | fnr,hypABCDE 110 | fnr,dcuB_fumB 111 | fnr,cydAB 112 | fnr,fdnGHI 113 | fnr,arcA 114 | fnr,sdhCDAB_b0725_sucABCD 115 | fnr,focA_pflB 116 | fnr,narK 117 | iclMR,acs 118 | iclMR,aceBAK 119 | lrp,stpA 120 | lrp,ompF 121 | lrp,ompC 122 | lrp,serA 123 | lrp,osmC 124 | lrp,livJ 125 | lysR,tdcABCDEFG 126 | FruR,ptsHI_crr 127 | FruR,aceBAK 128 | FruR,fruBKA 129 | FruR,adhE 130 | cpxAR,tsr 131 | cpxAR,motABcheAW 132 | galS,mglBAC 133 | narL,caiF 134 | narL,frdABCD 135 | narL,narGHJI 136 | narL,dcuB_fumB 137 | narL,fdnGHI 138 | narL,adhE 139 | narL,focA_pflB 140 | narL,narK 141 | envY_ompT,ompF 142 | envY_ompT,ompC 143 | cytR,tsx 144 | cytR,udp 145 | cytR,rpoH 146 | cytR,nupG 147 | cytR,ppiA 148 | GalR,galETKM 149 | GalR,galS 150 | cspA,hns 151 | arcA,gltA 152 | arcA,dctA 153 | arcA,glpACB 154 | arcA,aceBAK 155 | arcA,appY 156 | arcA,cydAB 157 | arcA,sodA 158 | arcA,sdhCDAB_b0725_sucABCD 159 | arcA,focA_pflB 160 | -------------------------------------------------------------------------------- /syntren/100_probaComplexInter1/syntrenHop1_100_9_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | atoC,atoDAE 3 | atoC,atoDAB 4 | rpoN,atoC 5 | rpoN,hypA 6 | rpoN,glnALG 7 | rpoN,glnHPQ 8 | rpoN,nac 9 | rpoN,hycABCDEFGH 10 | rpoN,dctA 11 | rpoN,fdhF 12 | rpoN,nycA 13 | rpoN,rtcR 14 | rpoN,fhlA 15 | rpoN,pspABCDE 16 | glnALG,glnHPQ 17 | glnALG,nac 18 | nac,gdhA 19 | arcA,dctA 20 | arcA,nuoABCEFGHIJKLMN 21 | arcA,appCBA 22 | arcA,glpACB 23 | arcA,sdhCDAB_b0725_sucABCD 24 | arcA,focA_pflB 25 | arcA,sodA 26 | arcA,fadBA 27 | arcA,appY 28 | arcA,icdA 29 | crp,glnALG 30 | crp,dctA 31 | crp,fur 32 | crp,glpACB 33 | crp,sdhCDAB_b0725_sucABCD 34 | 
crp,cirA 35 | crp,araE 36 | crp,caiTABCDE 37 | crp,dsdXA 38 | crp,tnaLAB 39 | crp,glpD 40 | crp,speC 41 | crp,acs 42 | crp,focA_pflB 43 | crp,srlAEBD_gutM_srlR_gutQ 44 | crp,tdcABCDEFG 45 | crp,dadAX 46 | crp,caiF 47 | crp,melAB 48 | crp,malS 49 | crp,melR 50 | crp,gntT 51 | crp,galS 52 | crp,fixABCX 53 | crp,ebgAC 54 | crp,cyaA 55 | crp,tsx 56 | crp,ubiG 57 | crp,gntKU 58 | crp,fucPIKUR 59 | crp,glgCAP 60 | crp,dcuB_fumB 61 | crp,rhaT 62 | crp,araC 63 | crp,malEFG 64 | crp,ansB 65 | crp,ppiA 66 | crp,nagBACD 67 | crp,yiaKLMNOPQRS 68 | crp,malT 69 | fur,fhuACDB 70 | fur,cirA 71 | fur,fepB 72 | fur,sodA 73 | fur,fepA_entD 74 | fur,entCEBA 75 | fur,fepDGC 76 | fur,fecIR 77 | fhlA,hycABCDEFGH 78 | fhlA,fdhF 79 | fhlA,hypABCDE 80 | yjdHG,dctA 81 | yjdHG,dcuB_fumB 82 | caiF,caiTABCDE 83 | caiF,fixABCX 84 | himA,glnHPQ 85 | himA,hycABCDEFGH 86 | himA,nuoABCEFGHIJKLMN 87 | himA,caiTABCDE 88 | himA,focA_pflB 89 | himA,sodA 90 | himA,tdcABCDEFG 91 | himA,hypABCDE 92 | himA,himD 93 | himA,osmE 94 | himA,ecpD_htrE 95 | himA,carAB 96 | himA,ompC 97 | himA,ompF 98 | himA,narK 99 | himA,ompR_envZ 100 | himA,narGHJI 101 | himA,pspABCDE 102 | melR,melAB 103 | narL,nuoABCEFGHIJKLMN 104 | narL,focA_pflB 105 | narL,caiF 106 | narL,nrfABCDEFG 107 | narL,nirBDC_cysG 108 | narL,narK 109 | narL,narGHJI 110 | narL,dcuB_fumB 111 | glpR,glpACB 112 | glpR,glpD 113 | nlpD_rpoS,acs 114 | nlpD_rpoS,ftsQAZ 115 | nlpD_rpoS,appY 116 | gntR,gntT 117 | gntR,edd_eda 118 | gntR,gntKU 119 | fnr,arcA 120 | fnr,nuoABCEFGHIJKLMN 121 | fnr,glpACB 122 | fnr,sdhCDAB_b0725_sucABCD 123 | fnr,acs 124 | fnr,focA_pflB 125 | fnr,tdcABCDEFG 126 | fnr,caiF 127 | fnr,hypABCDE 128 | fnr,nirBDC_cysG 129 | fnr,narK 130 | fnr,narGHJI 131 | fnr,dcuB_fumB 132 | fnr,ansB 133 | fnr,icdA 134 | evgA,ompC 135 | iclMR,acs 136 | lrp,ompC 137 | lrp,ompF 138 | lrp,kbl_tdh 139 | ompR_envZ,ompC 140 | ompR_envZ,ompF 141 | argR,carAB 142 | fadR,fadBA 143 | fadR,iclMR 144 | GalR,galS 145 | appY,appCBA 146 | araC,araE 147 | rcsA,ftsQAZ 
148 | tdcAR,tdcABCDEFG 149 | deoR,tsx 150 | FruR,icdA 151 | malT,malS 152 | malT,malEFG 153 | -------------------------------------------------------------------------------- /syntren/20_probaComplexInter1/syntrenHop1_20_0_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | lacI,lacZYA 3 | crp,lacZYA 4 | crp,ebgAC 5 | crp,epd_pgk 6 | crp,glpD 7 | crp,galETKM 8 | crp,dctA 9 | crp,ppiA 10 | crp,fadL 11 | crp,focA_pflB 12 | crp,tdcABCDEFG 13 | crp,caiTABCDE 14 | crp,gntT 15 | crp,tsx 16 | crp,caiF 17 | crp,glgCAP 18 | fadR,fadL 19 | caiF,caiTABCDE 20 | narL,focA_pflB 21 | narL,caiF 22 | arcA,dctA 23 | arcA,focA_pflB 24 | -------------------------------------------------------------------------------- /syntren/20_probaComplexInter1/syntrenHop1_20_1_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | nlpD_rpoS,adhE 3 | nlpD_rpoS,narZYWV 4 | nlpD_rpoS,katG 5 | nlpD_rpoS,alkA 6 | nlpD_rpoS,osmY 7 | nlpD_rpoS,dps 8 | nlpD_rpoS,nhaA 9 | narL,adhE 10 | narL,frdABCD 11 | narL,dcuB_fumB 12 | narL,narGHJI 13 | narL,focA_pflB 14 | narL,fdnGHI 15 | narL,narK 16 | fnr,frdABCD 17 | fnr,dcuB_fumB 18 | fnr,ansB 19 | fnr,tdcABCDEFG 20 | fnr,aspA 21 | fnr,narGHJI 22 | fnr,cyoABCDE 23 | fnr,focA_pflB 24 | fnr,fdnGHI 25 | fnr,narK 26 | -------------------------------------------------------------------------------- /syntren/20_probaComplexInter1/syntrenHop1_20_2_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | crp,udp 3 | crp,araFG_araH_1H_2 4 | crp,cyaA 5 | crp,uhpT 6 | crp,glpD 7 | crp,ppiA 8 | crp,caiF 9 | crp,dadAX 10 | crp,melR 11 | crp,ivbL_ilvBN 12 | crp,dcuB_fumB 13 | crp,tdcABCDEFG 14 | crp,araE 15 | crp,caiTABCDE 16 | crp,glpACB 17 | crp,fixABCX 18 | caiF,caiTABCDE 19 | caiF,fixABCX 20 | uhpA,uhpT 21 | glpR,glpD 22 | glpR,glpACB 23 | hns,caiF 24 | 
-------------------------------------------------------------------------------- /syntren/20_probaComplexInter1/syntrenHop1_20_3_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | fliAZY,flgKL 3 | fliAZY,flhBAE 4 | fliAZY,tarTapcheRBYZ 5 | fliAZY,fliE 6 | fliAZY,motABcheAW 7 | fliAZY,fliLMNOPQR 8 | fliAZY,fliC 9 | fliAZY,flgMN 10 | fliAZY,fliDST 11 | fliAZY,flgBCDEFGHIJK 12 | fliAZY,fliFGHIJK 13 | flhDC,fliAZY 14 | flhDC,flhBAE 15 | flhDC,fliE 16 | flhDC,fliLMNOPQR 17 | flhDC,flgBCDEFGHIJK 18 | flhDC,fliFGHIJK 19 | hns,fliAZY 20 | hns,flhDC 21 | hns,osmC 22 | ompR_envZ,flhDC 23 | ompR_envZ,csgDEFG 24 | ompR_envZ,csgBA 25 | csgDEFG,csgBA 26 | crp,flhDC 27 | crp,cpdB 28 | -------------------------------------------------------------------------------- /syntren/20_probaComplexInter1/syntrenHop1_20_4_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | fnr,dmsABC 3 | fnr,ansB 4 | fnr,frdABCD 5 | fnr,icdA 6 | fnr,sdhCDAB_b0725_sucABCD 7 | fnr,aspA 8 | fnr,arcA 9 | fnr,glpACB 10 | fnr,focA_pflB 11 | fnr,nuoABCEFGHIJKLMN 12 | yjdHG,frdABCD 13 | yjdHG,dctA 14 | arcA,icdA 15 | arcA,sdhCDAB_b0725_sucABCD 16 | arcA,glpACB 17 | arcA,focA_pflB 18 | arcA,gltA 19 | arcA,nuoABCEFGHIJKLMN 20 | arcA,dctA 21 | narL,dmsABC 22 | narL,frdABCD 23 | narL,focA_pflB 24 | narL,adhE 25 | narL,nuoABCEFGHIJKLMN 26 | crp,ansB 27 | crp,sdhCDAB_b0725_sucABCD 28 | crp,glpACB 29 | crp,focA_pflB 30 | crp,araJ 31 | crp,gltA 32 | crp,dctA 33 | yhdG_fis,sdhCDAB_b0725_sucABCD 34 | yhdG_fis,adhE 35 | yhdG_fis,aspV 36 | -------------------------------------------------------------------------------- /syntren/20_probaComplexInter1/syntrenHop1_20_5_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | nhaR,nhaA 3 | hns,nhaA 4 | hns,stpA 5 | hns,fliAZY 6 | hns,rcsAB 7 | hns,flhDC 8 | cspA,hns 9 | cspA,gyrA 10 | fliAZY,flgKL 11 | 
fliAZY,fliLMNOPQR 12 | fliAZY,tsr 13 | fliAZY,fliE 14 | flhDC,fliAZY 15 | flhDC,fliLMNOPQR 16 | flhDC,fliE 17 | lrp,stpA 18 | lrp,kbl_tdh 19 | lrp,livJ 20 | lrp,ompF 21 | ompR_envZ,flhDC 22 | ompR_envZ,ompF 23 | nlpD_rpoS,nhaA 24 | nlpD_rpoS,acs 25 | -------------------------------------------------------------------------------- /syntren/20_probaComplexInter1/syntrenHop1_20_6_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | FruR,adhE 3 | FruR,icdA 4 | yhdG_fis,adhE 5 | yhdG_fis,pheV 6 | yhdG_fis,lysT_valT_lysW 7 | yhdG_fis,metT_leuW_glnUW_metU_glnVX 8 | yhdG_fis,proP 9 | yhdG_fis,argW 10 | yhdG_fis,proL 11 | yhdG_fis,alaWX 12 | yhdG_fis,argU 13 | yhdG_fis,pheU 14 | yhdG_fis,argX_hisR_leuT_proM 15 | narL,adhE 16 | narL,narGHJI 17 | narL,torCAD 18 | narL,caiF 19 | narL,dcuB_fumB 20 | fnr,icdA 21 | fnr,narGHJI 22 | fnr,caiF 23 | fnr,dcuB_fumB 24 | -------------------------------------------------------------------------------- /syntren/20_probaComplexInter1/syntrenHop1_20_7_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | rcsA,ftsQAZ 3 | rcsA,wza_wzb_b2060_wcaA_wcaB 4 | nlpD_rpoS,ftsQAZ 5 | nlpD_rpoS,nhaA 6 | nlpD_rpoS,osmC 7 | nlpD_rpoS,dps 8 | nlpD_rpoS,acs 9 | crp,acs 10 | crp,galS 11 | crp,cirA 12 | crp,dcuB_fumB 13 | crp,focA_pflB 14 | crp,epd_pgk 15 | crp,nagBACD 16 | crp,glpTQ 17 | crp,galETKM 18 | crp,tsx 19 | crp,gntKU 20 | GalR,galS 21 | GalR,galETKM 22 | -------------------------------------------------------------------------------- /syntren/20_probaComplexInter1/syntrenHop1_20_8_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | hns,stpA 3 | hns,caiF 4 | crp,caiF 5 | crp,tsx 6 | crp,malK_lamB_malM 7 | crp,gltA 8 | crp,glnALG 9 | crp,dadAX 10 | crp,rhaT 11 | crp,udp 12 | crp,malI 13 | crp,acs 14 | crp,nagBACD 15 | crp,dctA 16 | crp,fucAO 17 | crp,tdcABCDEFG 18 | 
crp,rpoH 19 | crp,ptsHI_crr 20 | nagBACD,glmUS 21 | -------------------------------------------------------------------------------- /syntren/20_probaComplexInter1/syntrenHop1_20_9_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | atoC,atoDAE 3 | atoC,atoDAB 4 | rpoN,atoC 5 | rpoN,hypA 6 | rpoN,glnALG 7 | rpoN,glnHPQ 8 | rpoN,nac 9 | rpoN,hycABCDEFGH 10 | rpoN,dctA 11 | rpoN,fdhF 12 | rpoN,nycA 13 | rpoN,rtcR 14 | glnALG,glnHPQ 15 | glnALG,nac 16 | arcA,dctA 17 | arcA,nuoABCEFGHIJKLMN 18 | arcA,appCBA 19 | arcA,glpACB 20 | crp,glnALG 21 | crp,dctA 22 | crp,fur 23 | crp,glpACB 24 | fur,fhuACDB 25 | -------------------------------------------------------------------------------- /test_sam.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import cdt 4 | from sam.sam import SAM 5 | 6 | d, g = cdt.data.load_dataset('sachs') 7 | m = SAM() 8 | m.predict(d, nruns=1) 9 | -------------------------------------------------------------------------------- /train_graphs/artificial/NN-train-20_target.csv: -------------------------------------------------------------------------------- 1 | 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 2 | 0.0,1.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 3 | 0.0,0.0,1.0,0.0,1.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0 4 | 0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0,0.0,1.0,1.0,0.0,0.0,0.0 5 | 0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 6 | 0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 7 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0 8 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,1.0,1.0,0.0,0.0 9 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0 10 | 
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 11 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0 12 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0 13 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0 14 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,1.0,0.0 15 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0 16 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0 17 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 18 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 19 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 20 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 21 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 22 | -------------------------------------------------------------------------------- /train_graphs/artificial/gp_add-train-20_target.csv: -------------------------------------------------------------------------------- 1 | 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 2 | 0.0,0.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0 3 | 0.0,0.0,0.0,1.0,0.0,1.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0 4 | 0.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,0.0 5 | 0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0 6 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0 7 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0 8 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0 9 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0 10 | 
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0 11 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 12 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0 13 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 14 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 15 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 16 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 17 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0 18 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 19 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0 20 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 21 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 22 | -------------------------------------------------------------------------------- /train_graphs/artificial/gp_mix-train-20_target.csv: -------------------------------------------------------------------------------- 1 | 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 2 | 0.0,1.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 3 | 0.0,0.0,1.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,0.0 4 | 0.0,0.0,0.0,1.0,1.0,1.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0 5 | 0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0 6 | 0.0,0.0,0.0,0.0,0.0,1.0,1.0,1.0,0.0,0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0,0.0,0.0,0.0 7 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0 8 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0 9 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0 10 | 
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0 11 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0 12 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0 13 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0 14 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 15 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 16 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 17 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 18 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 19 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 20 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 21 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 22 | -------------------------------------------------------------------------------- /train_graphs/artificial/linear-train-20_target.csv: -------------------------------------------------------------------------------- 1 | 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 2 | 0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 3 | 0.0,0.0,0.0,0.0,0.0,1.0,1.0,1.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 4 | 0.0,0.0,0.0,0.0,1.0,0.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0 5 | 0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,1.0,1.0,1.0,0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0,1.0 6 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0 7 | 0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 8 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0 9 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 10 | 
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0 11 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 12 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 13 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 14 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0 15 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0 16 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 17 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0 18 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 19 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0 20 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0 21 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 22 | -------------------------------------------------------------------------------- /train_graphs/artificial/polynomial-train-20_target.csv: -------------------------------------------------------------------------------- 1 | 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 2 | 0.0,0.0,1.0,0.0,1.0,0.0,0.0,1.0,1.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0 3 | 0.0,0.0,1.0,1.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0 4 | 0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 5 | 0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 6 | 0.0,0.0,0.0,0.0,0.0,1.0,1.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 7 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0 8 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0 9 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0 10 | 
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 11 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0 12 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0 13 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 14 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0 15 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0 16 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0 17 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 18 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 19 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 20 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 21 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 22 | -------------------------------------------------------------------------------- /train_graphs/artificial/sigmoid_add-train-20_target.csv: -------------------------------------------------------------------------------- 1 | 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 2 | 0.0,1.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 3 | 0.0,0.0,0.0,1.0,0.0,1.0,1.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 4 | 0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0 5 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 6 | 0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0 7 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,1.0,0.0 8 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0 9 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0,1.0,0.0,0.0,0.0,0.0 10 | 
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0 11 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 12 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,1.0,0.0 13 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,1.0 14 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 15 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0 16 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0 17 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 18 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 19 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 20 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 21 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 22 | -------------------------------------------------------------------------------- /train_graphs/artificial/sigmoid_mix-train-20_target.csv: -------------------------------------------------------------------------------- 1 | 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 2 | 0.0,1.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0 3 | 0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0,0.0 4 | 0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 5 | 0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0 6 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0 7 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 8 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0 9 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0 10 | 
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 11 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0 12 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,1.0,1.0,0.0,0.0 13 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 14 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0 15 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.0,0.0 16 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0 17 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 18 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 19 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 20 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 21 | 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 22 | -------------------------------------------------------------------------------- /train_graphs/generate_graphs.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | from cdt.generators import AcyclicGraphGenerator 4 | 5 | for mechanism in ['polynomial','NN','sigmoid_add', 'sigmoid_mix', 6 | 'gp_add', 'gp_mix', 'linear']:# 'linear', 'sigmoid_add', 'sigmoid_mix', 7 | a = AcyclicGraphGenerator(mechanism, nodes=20) 8 | a.generate() 9 | a.to_csv("-".join([str(j) for j in [mechanism,'train', str(20)]]), index=False) 10 | -------------------------------------------------------------------------------- /train_graphs/syntren/syntren_1_target.csv: -------------------------------------------------------------------------------- 1 | Cause,Effect 2 | nlpD_rpoS,adhE 3 | nlpD_rpoS,narZYWV 4 | nlpD_rpoS,katG 5 | nlpD_rpoS,alkA 6 | nlpD_rpoS,osmY 7 | nlpD_rpoS,dps 8 | nlpD_rpoS,nhaA 9 | narL,adhE 10 | narL,frdABCD 11 | narL,dcuB_fumB 12 | 
narL,narGHJI 13 | narL,focA_pflB 14 | narL,fdnGHI 15 | narL,narK 16 | fnr,frdABCD 17 | fnr,dcuB_fumB 18 | fnr,ansB 19 | fnr,tdcABCDEFG 20 | fnr,aspA 21 | fnr,narGHJI 22 | fnr,cyoABCDE 23 | fnr,focA_pflB 24 | fnr,fdnGHI 25 | fnr,narK 26 | --------------------------------------------------------------------------------