├── .gitignore
├── requirements.txt
├── datasets
│   └── 7SYNTHETICS
│       └── logistic_3.mat
├── calc_f_stat.py
├── normalize_0_mean_1_std.py
├── recovery_performance.py
├── LICENSE
├── multivariate_split.py
├── README.md
├── utils.py
└── Demo_lsNGC.ipynb

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
__pycache__/

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
numpy==1.18.5
torch==1.5.1
scipy==1.4.1
matplotlib==3.2.1
scikit-learn
networkx==2.4

--------------------------------------------------------------------------------
/datasets/7SYNTHETICS/logistic_3.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Large-scale-causality-inference/Large-scale-nonlinear-causality/HEAD/datasets/7SYNTHETICS/logistic_3.mat

--------------------------------------------------------------------------------
/calc_f_stat.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Input: RSS_R, RSS_U, n, pu, pr
Output: f_GC, the values of the f-statistic.

- "RSS_R" and "RSS_U" are the residual sums of squares of the restricted and unrestricted models, respectively.
- "pu" and "pr" are the number of parameters to be estimated for the unrestricted and restricted model, respectively.
- "n" is the number of time-delayed vectors.

@Reference:
Wismüller, A., Dsouza, A.M., Vosoughi, M.A., and Abidin, A.
Large-scale nonlinear Granger causality for inferring directed dependence from short multivariate time-series data. Sci Rep 11, 7817 (2021).
"""


def calc_f_stat(RSS_R, RSS_U, n, pu, pr):
    # F-statistic comparing the restricted and unrestricted regression models.
    f_GC = ((RSS_R - RSS_U) / RSS_U) * ((n - pu - 1) / (pu - pr))
    return f_GC

--------------------------------------------------------------------------------
/normalize_0_mean_1_std.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Normalizes each row (time series) of the input array to zero mean and unit standard deviation.

@Reference:
Wismüller, A., Dsouza, A.M., Vosoughi, M.A. et al.
Large-scale nonlinear Granger causality for inferring directed dependence from short multivariate time-series data. Sci Rep 11, 7817 (2021).
"""
import numpy as np


def normalize_0_mean_1_std(inp_series):
    inp_series = inp_series.copy()
    # Subtract the per-series mean.
    mean_ts = np.array([inp_series.mean(axis=1)]).transpose()
    mean_ts_mtrx = mean_ts * np.ones((1, inp_series.shape[1]))
    unb_data_mtrx = inp_series - mean_ts_mtrx
    # Divide by the per-series (population) standard deviation.
    p = np.power(unb_data_mtrx, 2)
    s = np.array([p.sum(axis=1)]).transpose()
    sc = np.sqrt(s / p.shape[1])
    sc2 = sc * np.ones((1, p.shape[1]))
    nrm = np.divide(unb_data_mtrx, sc2)
    return nrm
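

# A minimal usage sketch (not part of the original module): checks that each
# row of the normalized output has zero mean and unit standard deviation.
# The 3x100 random array is illustrative only.
if __name__ == "__main__":
    x = np.random.randn(3, 100) * 5.0 + 2.0
    nrm = normalize_0_mean_1_std(x)
    print(nrm.mean(axis=1))  # approximately 0 for every series
    print(nrm.std(axis=1))   # approximately 1 for every series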

--------------------------------------------------------------------------------
/recovery_performance.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The code returns performance measures (in terms of AUROC) given a list of
"Adjs": recovered adjacency matrices and
"label": ground-truth label matrices.

@Reference:
Wismüller, A., Dsouza, A.M., Vosoughi, M.A. et al.
Large-scale nonlinear Granger causality for inferring directed dependence from short multivariate time-series data. Sci Rep 11, 7817 (2021).
"""
import numpy as np
from sklearn import metrics


def recovery_performance(Adjs, label):
    Adjs = Adjs.copy()
    label = label.copy()
    N = len(Adjs)
    auc_all = np.zeros((N), dtype=np.float32)
    for i in range(N):
        # Score each recovered matrix against its ground truth, edge by edge.
        auc_all[i] = metrics.roc_auc_score(label[i].flatten(), Adjs[i].flatten())
    return auc_all
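

# A minimal usage sketch (not part of the original module): two hypothetical
# 3-node ground-truth graphs and matching recovered score matrices.
if __name__ == "__main__":
    label = [np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])] * 2
    Adjs = [np.array([[0.1, 0.9, 0.2], [0.0, 0.2, 0.8], [0.3, 0.1, 0.0]])] * 2
    print(recovery_performance(Adjs, label))  # per-network AUROC values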
9 | """ 10 | import torch 11 | import numpy as np 12 | 13 | 14 | def multivariate_split(X,ar_order, valid_percent=0): 15 | X=X.copy() 16 | TS=np.shape(X)[1] 17 | n_vars=np.shape(X)[0] 18 | val_num=int(valid_percent*TS) 19 | my_data_train=torch.zeros((TS-ar_order-val_num,ar_order,n_vars)) 20 | my_data_y_train=torch.zeros((TS-ar_order-val_num,1,n_vars)) 21 | my_data_val=torch.zeros((val_num,ar_order,n_vars)) 22 | my_data_y_val=torch.zeros((val_num,1,n_vars)) 23 | for i in range(TS-ar_order-val_num): 24 | my_data_train[i]=torch.from_numpy(X.transpose()[i:i+ar_order,:]) 25 | my_data_y_train[i]=torch.from_numpy(X.transpose()[i+ar_order,:]) 26 | 27 | for i in range(TS-ar_order-val_num, TS-ar_order,1): 28 | my_data_val[i-(TS-ar_order-val_num)]=torch.from_numpy(X.transpose()[i:i+ar_order,:]) 29 | my_data_y_val[i-(TS-ar_order-val_num)]=torch.from_numpy(X.transpose()[i+ar_order,:]) 30 | return my_data_train, my_data_y_train, my_data_val, my_data_y_val 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Large-Scale Nonlinear Granger Causality (lsNGC) 2 | 3 | ## Overview 4 | Large-Scale Nonlinear Granger Causality (lsNGC) is a computational framework designed for inferring directed dependencies from short multivariate time-series data. It addresses the challenge of deriving causal graphs that represent the underlying generative processes of large-scale multivariate data, particularly when the relationships among the data are nonlinear and the observational time series are brief. 5 | 6 | ## Getting Started 7 | To quickly experiment with lsNGC, access our interactive Colab notebook: 8 | [Demo_lsNGC.ipynb](https://colab.research.google.com/github/ali-vosoughi/Large-scale-nonlinear-causality/blob/main/Demo_lsNGC.ipynb) 9 | 10 | ## Background 11 | The task of identifying nonlinear and directed relations among components of complex systems from simultaneous time-series observations is a critical and expanding area of research. lsNGC efficiently identifies causal relations through nonlinear state-space transformations of limited observational data, without relying on explicit a priori assumptions about the functional interdependencies among component time series. 12 | 13 | ## Key Contributions 14 | - Introduction of the lsNGC framework for identifying large-scale nonlinear Granger causality. 15 | - Implementation of conditional Granger causality analysis between two multivariate time series, taking into account a large number of confounding time series. 16 | - Adaptation for scenarios with a limited number of time-series samples but large spatial resolution. 17 | - Conversion of multivariate time-series data into a graph adjacency matrix to represent causal relationships. 18 | 19 | ## Workflow 20 | lsNGC leverages theoretical concepts from Granger causality analysis, focusing on the predictability and precedence of time series. It estimates causal relationships by creating a nonlinear transformation of the state-space representation for each time series, facilitating the measurement of its influence on the system. Detailed theoretical concepts are discussed in the supplementary material available [here](https://static-content.springer.com/esm/art%3A10.1038%2Fs41598-021-87316-6/MediaObjects/41598_2021_87316_MOESM1_ESM.pdf). 

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Large-Scale Nonlinear Granger Causality (lsNGC)

## Overview
Large-Scale Nonlinear Granger Causality (lsNGC) is a computational framework for inferring directed dependencies from short multivariate time-series data. It addresses the challenge of deriving causal graphs that represent the underlying generative processes of large-scale multivariate data, particularly when the relationships among the data are nonlinear and the observational time series are short.

## Getting Started
To quickly experiment with lsNGC, access our interactive Colab notebook:
[Demo_lsNGC.ipynb](https://colab.research.google.com/github/ali-vosoughi/Large-scale-nonlinear-causality/blob/main/Demo_lsNGC.ipynb)

## Background
The task of identifying nonlinear and directed relations among components of complex systems from simultaneous time-series observations is a critical and expanding area of research. lsNGC efficiently identifies causal relations through nonlinear state-space transformations of limited observational data, without relying on explicit a priori assumptions about the functional interdependencies among component time series.

## Key Contributions
- Introduction of the lsNGC framework for identifying large-scale nonlinear Granger causality.
- Implementation of conditional Granger causality analysis between two multivariate time series, taking into account a large number of confounding time series.
- Adaptation to scenarios with few time-series samples but high spatial resolution.
- Conversion of multivariate time-series data into a graph adjacency matrix that represents causal relationships.

## Workflow
lsNGC leverages theoretical concepts from Granger causality analysis, focusing on the predictability and precedence of time series. It estimates causal relationships by creating a nonlinear transformation of the state-space representation of each time series, facilitating the measurement of its influence on the system. Detailed theoretical concepts are discussed in the supplementary material available [here](https://static-content.springer.com/esm/art%3A10.1038%2Fs41598-021-87316-6/MediaObjects/41598_2021_87316_MOESM1_ESM.pdf).

## Results
The lsNGC approach has been evaluated on several benchmark simulations, demonstrating its performance in comparison with four state-of-the-art methods. Additional implementation details and results are provided in the supplementary material.

## Citation
If you use this code (in whole or in part) for your research, please cite our paper:

Wismüller, A., Dsouza, A.M., Vosoughi, M.A., et al. Large-scale nonlinear Granger causality for inferring directed dependence from short multivariate time-series data. Scientific Reports 11, 7817 (2021). [DOI: 10.1038/s41598-021-87316-6](https://doi.org/10.1038/s41598-021-87316-6)

Public access to the paper is available [here](https://www.nature.com/articles/s41598-021-87316-6).

### BibTeX
```bibtex
@article{wismuller2021large,
  title={Large-scale nonlinear Granger causality for inferring directed dependence from short multivariate time-series data},
  author={Wism{\"u}ller, Axel and Dsouza, Adora M and Vosoughi, M Ali and Abidin, Anas},
  journal={Scientific Reports},
  volume={11},
  number={1},
  pages={7817},
  year={2021},
  publisher={Nature Publishing Group UK London}
}
```
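
## Example Usage
A minimal sketch of how the pieces fit together after cloning the repository (the array shape and parameter values below are illustrative, not prescriptive):

```python
import numpy as np
from utils import lsNGC
from recovery_performance import recovery_performance

X = np.random.randn(5, 200)  # 5 variables, 200 time samples
Aff, f_stat = lsNGC(X, ar_order=1, k_f=3, k_g=2, normalize=1)
# Aff[s, t] scores the nonlinear Granger-causal influence of series s on series t.
# Given a known ground-truth adjacency matrix `truth`, recovery can be scored with:
# auc = recovery_performance([Aff], [truth])
```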
24 | """ 25 | 26 | def lsNGC(inp_series, ar_order=1, k_f=3, k_g=2, normalize=1): 27 | if normalize: 28 | X_normalized=normalize_0_mean_1_std(inp_series) 29 | else: 30 | X_normalized=inp_series.copy() 31 | 32 | X_train, Y_train , X_test, Y_test=multivariate_split(X=X_normalized,ar_order=ar_order) 33 | 34 | X_train=torch.flatten(X_train, start_dim=1) 35 | 36 | km= KMeans(n_clusters= k_f, max_iter= 100, random_state=123) 37 | km.fit(X_train) 38 | cent= km.cluster_centers_ 39 | 40 | 41 | max=0 42 | 43 | for i in range(k_f): 44 | for j in range(k_f): 45 | d= np.linalg.norm(cent[i]-cent[j]) 46 | if(d> max): 47 | max= d 48 | d= max 49 | 50 | sigma= d/math.sqrt(2*k_f) 51 | 52 | sig_d=np.zeros((np.shape(X_normalized)[0],np.shape(X_normalized)[0])); 53 | sig=np.zeros((np.shape(X_normalized)[0],np.shape(X_normalized)[0])); 54 | 55 | # Z_train_label=Y_train 56 | for i in range(X_normalized.shape[0]): 57 | Z_temp=X_normalized.copy() 58 | Z_train, Z_train_label , _ , _=multivariate_split(X=Z_temp,ar_order=ar_order) 59 | Z_train=torch.flatten(Z_train, start_dim=1) 60 | Z_train_label=torch.flatten(Z_train_label, start_dim=1) 61 | 62 | # Obtain phase space Z_s by exclusing time series of of x_s 63 | Z_s_train, Z_s_train_label , _ , _=multivariate_split(X=np.delete(Z_temp,[i],axis=0),ar_order=ar_order) 64 | # Obtain phase space reconstruction of x_s 65 | W_s_train, W_s_train_label , _ , _=multivariate_split(X=np.array([Z_temp[i]]),ar_order=ar_order) 66 | 67 | # Flatten data 68 | Z_s_train=torch.flatten(Z_s_train, start_dim=1) 69 | Z_s_train_label=torch.flatten(Z_s_train_label, start_dim=1) 70 | 71 | W_s_train=torch.flatten(W_s_train, start_dim=1) 72 | W_s_train_label=torch.flatten(W_s_train_label, start_dim=1) 73 | # Obtain k_g number of cluster centers in the phase space W_s with k-means clustering, will have dim=(k_g * d) 74 | kmg= KMeans(n_clusters= k_g, max_iter= 100, random_state=123) 75 | kmg.fit(W_s_train) 76 | cent_W_s= kmg.cluster_centers_ 77 | # Calculate activations for each of the k_g neurons 78 | shape= W_s_train.shape 79 | row= shape[0] 80 | column= k_g 81 | G= np.empty((row,column), dtype= float) 82 | maxg=0 83 | 84 | for ii in range(k_g): 85 | for jj in range(k_g): 86 | dg= np.linalg.norm(cent_W_s[ii]-cent_W_s[jj]) 87 | if(dg> maxg): 88 | maxg= dg 89 | dg= maxg 90 | 91 | sigmag= dg/math.sqrt(2*k_g) 92 | if sigmag==0: 93 | sigmag=1 94 | for ii in range(row): 95 | for jj in range(column): 96 | dist= np.linalg.norm(W_s_train[ii]-cent_W_s[jj]) 97 | G[ii][jj]= math.exp(-math.pow(dist,2)/math.pow(2*sigmag,2)) 98 | # Generalized radial basis function 99 | g_ws=np.array([G[ii]/sum(G[ii]) for ii in range(len(G))]) 100 | # Calculate activations for each of the k_f neurons 101 | shape= Z_s_train.shape 102 | row= shape[0] 103 | column= k_f 104 | F= np.empty((row,column), dtype= float) 105 | for ii in range(row): 106 | for jj in range(column): 107 | cent_temp=cent.copy() 108 | cent_temp=np.delete(cent_temp,np.arange(jj,jj+ar_order),axis=1) 109 | dist= np.linalg.norm(Z_s_train[ii]-cent_temp) 110 | F[ii][jj]= math.exp(-math.pow(dist,2)/math.pow(2*sigma,2)) 111 | # Generalized radial basis function 112 | f_zs=np.array([F[ii]/sum(F[ii]) for ii in range(len(F))]) 113 | 114 | # Prediction in the presence of x_s 115 | num_samples=f_zs.shape[0] 116 | 117 | f_new=np.concatenate((0.5*f_zs,0.5*g_ws),axis=1) 118 | GTG= np.dot(f_new.T,f_new) 119 | GTG_inv= np.linalg.pinv(GTG) 120 | fac= np.dot(GTG_inv,f_new.T) 121 | W_presence= np.dot(fac,Z_train_label) 122 | 123 | prediction_presence= np.dot(f_new,W_presence) 124 | 
        GTG = np.dot(f_new.T, f_new)
        GTG_inv = np.linalg.pinv(GTG)
        fac = np.dot(GTG_inv, f_new.T)
        W_presence = np.dot(fac, Z_train_label)

        prediction_presence = np.dot(f_new, W_presence)
        error_presence = prediction_presence - np.array(Z_train_label)
        sig[i, :] = np.diag(np.cov(error_presence.T))

        # Prediction without x_s
        GTG = np.dot(f_zs.T, f_zs)
        GTG_inv = np.linalg.pinv(GTG)
        fac = np.dot(GTG_inv, f_zs.T)
        W_absence = np.dot(fac, Z_train_label)

        prediction_absence = np.dot(f_zs, W_absence)
        error_absence = prediction_absence - np.array(Z_train_label)
        sig_d[i, :] = np.diag(np.cov(error_absence.T))

    # Compute the Granger causality index: log ratio of prediction-error
    # variances without and with x_s; keep only positive (causal) entries.
    Aff = np.log(np.divide(sig_d, sig))
    Aff = (Aff > 0) * Aff
    np.fill_diagonal(Aff, 0)
    f_stat = calc_f_stat(sig_d, sig, n=num_samples + 1, pu=k_f + k_g, pr=k_f)
    np.fill_diagonal(f_stat, 0)

    return Aff, f_stat
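

# A minimal usage sketch (not part of the original module): runs lsNGC on a
# random toy series and thresholds the f-statistics with the F-distribution.
# The 5x200 array, the parameters, and the 0.05 significance level are
# illustrative only; the degrees of freedom mirror the calc_f_stat call above.
if __name__ == "__main__":
    np.random.seed(0)
    toy = np.random.randn(5, 200)
    Aff, f_stat = lsNGC(toy, ar_order=1, k_f=3, k_g=2, normalize=1)
    pu, pr, n = 3 + 2, 3, toy.shape[1]  # k_f + k_g, k_f, num_samples + 1
    crit = stats.f.ppf(0.95, dfn=pu - pr, dfd=n - pu - 1)
    print((f_stat > crit).astype(int))  # candidate adjacency at alpha = 0.05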

--------------------------------------------------------------------------------
/Demo_lsNGC.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "view-in-github",
    "colab_type": "text"
   },
   "source": [
    "<a href=\"https://colab.research.google.com/github/ali-vosoughi/Large-scale-nonlinear-causality/blob/main/Demo_lsNGC.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "nLhq_irZ77qg"
   },
   "source": [
    "# Demo for lsNGC"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "SG1k61Y677qh"
   },
   "source": [
    "## The following is a demo of lsNGC on a small synthetic toy network."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "uiNkAHOe77qi"
   },
   "source": [
    "For now, let's assume that we have a causality algorithm, which we want to use for topology inference. We name it \"$granger$\", and we put it inside the following function:\n",
    "\n",
    "X: input time-series data of shape $N\\\\times T$\n",
    "\n",
    "N=X.shape[0]: the number of variables $N$, and\n",
    "\n",
    "T=X.shape[1]: the number of time samples $T$\n",
    "\n",
    "Adj: adjacency matrix of the multivariate input time-series data"
   ]
  },
  {
   "cell_type": "code",
   "source": [
    "!git clone https://github.com/ali-vosoughi/Large-scale-nonlinear-causality/"
   ],
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "wQPCrNzu7-bj",
    "outputId": "550a7977-53b4-4c42-de2f-8aaa91ac08fc"
   },
   "execution_count": 7,
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "fatal: destination path 'Large-scale-nonlinear-causality' already exists and is not an empty directory.\n"
     ]
    }
   ]
  },
  {
   "cell_type": "code",
   "source": [
    "%cd Large-scale-nonlinear-causality/"
   ],
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "Fr6lWYYp8h5z",
    "outputId": "b68a911f-32e0-4295-9d33-c4f4d61cd8ee"
   },
   "execution_count": 10,
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "/content/Large-scale-nonlinear-causality\n"
     ]
    }
   ]
  },
  {
   "cell_type": "code",
   "source": [
    "!pwd"
   ],
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "LCrfcUgm8peM",
    "outputId": "5e22ef89-bb88-449e-bdb0-bf9f99a2709d"
   },
   "execution_count": 11,
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "/content/Large-scale-nonlinear-causality\n"
     ]
    }
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "id": "4dluyXic77qi"
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "from scipy import stats\n",
    "from utils import lsNGC as granger\n",
    "import matplotlib.pyplot as plt\n",
    "from scipy.io import loadmat\n",
    "import networkx as nx\n",
    "from recovery_performance import recovery_performance"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "SQIVk3jq77qj"
   },
   "source": [
    "# Synthetic benchmark, from our Scientific Reports paper"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "id": "XXm_qvfZ77qj"
   },
   "outputs": [],
   "source": [
    "benchmark_dataset=['logistic3']\n",
    "benchmark_titles={'logistic3':'3-Fan In'}\n",
    "benchmark_TS_set=np.array([50,100,200,500])\n",
    "benchmark_algorithms=[]\n",
    "\n",
    "algorithms_title={}\n",
    "benchmark_algorithms_title=[algorithms_title[i] for i in benchmark_algorithms]\n",
    "\n",
    "benchmark_label=dict()\n",
    "AUC=dict()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "mexXRUbl77qk"
   },
   "source": [
    "## Logistic 3 dataset, from our Scientific Reports paper\n",
    "- Contains 50 network realizations with time series of length up to 1000.\n",
    "- Each network has 3 nodes."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "l0S-GvfO77qk"
   },
   "source": [
    "- Novel algorithm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "dQbjd01k77qk",
    "outputId": "fa0c98bf-d072-4cc7-f61a-bc6208d5fceb"
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "logistic3, auc lsgc for TS=50: 1.0 + 0.0\n",
      "logistic3, auc lsgc for TS=100: 1.0 + 0.0\n",
      "logistic3, auc lsgc for TS=200: 1.0 + 0.0\n",
      "logistic3, auc lsgc for TS=500: 1.0 + 0.0\n"
     ]
    }
   ],
   "source": [
    "in_data_name='datasets/7SYNTHETICS/logistic_3.mat'\n",
    "\n",
    "for TS in benchmark_TS_set:\n",
    "    benchmark_label['logistic3']= [loadmat(in_data_name)['Adj'] for i in range(50)]\n",
    "    ts_logistic=[loadmat(in_data_name)['pt_N'][i,:,-TS:] for i in range(50)]\n",
    "    Adj_lsgc=[granger(ts_logistic[i], k_f=2, k_g=2, ar_order=1, normalize=1)[0] for i in range(50)]\n",
    "    AUC['granger_logistic3_'+str(TS)]=recovery_performance(Adj_lsgc,benchmark_label['logistic3'])\n",
    "    print(\"logistic3, auc lsgc for TS=\"+str(TS)+\":\", AUC['granger_logistic3_'+str(TS)].mean(),\n",
    "          \"+\",AUC['granger_logistic3_'+str(TS)].std())"
   ]
  }
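,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As an illustrative sketch (assuming the `Adj_lsgc` list computed in the cell above), one recovered network can be rendered with `networkx`; the 0.5 threshold is arbitrary and used only for display."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "A = Adj_lsgc[0]  # affinity matrix of the first realization\n",
    "G = nx.from_numpy_array((A > 0.5).astype(int), create_using=nx.DiGraph)\n",
    "nx.draw_circular(G, with_labels=True, node_color='lightblue', arrowsize=20)\n",
    "plt.show()"
   ]
  }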
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  },
  "colab": {
   "name": "Demo_lsNGC.ipynb",
   "provenance": [],
   "include_colab_link": true
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
--------------------------------------------------------------------------------