├── FFRLS.m
├── data_for_ultra.m
├── ffols_gui.mexw64
├── fmri_net_built.m
├── highorder_net_built.m
├── net_built_ultar_lasso_OLS.m
├── spatial_feature_extraction.m
├── temporal_feature_extraction.m
├── README.md
└── MMD-AE
    ├── model.py
    ├── main.py
    └── train.py

/FFRLS.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/l675451097/Adaptive-Dynamic-Functional-Connectivity/HEAD/FFRLS.m
--------------------------------------------------------------------------------
/data_for_ultra.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/l675451097/Adaptive-Dynamic-Functional-Connectivity/HEAD/data_for_ultra.m
--------------------------------------------------------------------------------
/ffols_gui.mexw64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/l675451097/Adaptive-Dynamic-Functional-Connectivity/HEAD/ffols_gui.mexw64
--------------------------------------------------------------------------------
/fmri_net_built.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/l675451097/Adaptive-Dynamic-Functional-Connectivity/HEAD/fmri_net_built.m
--------------------------------------------------------------------------------
/highorder_net_built.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/l675451097/Adaptive-Dynamic-Functional-Connectivity/HEAD/highorder_net_built.m
--------------------------------------------------------------------------------
/net_built_ultar_lasso_OLS.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/l675451097/Adaptive-Dynamic-Functional-Connectivity/HEAD/net_built_ultar_lasso_OLS.m
--------------------------------------------------------------------------------
/spatial_feature_extraction.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/l675451097/Adaptive-Dynamic-Functional-Connectivity/HEAD/spatial_feature_extraction.m
--------------------------------------------------------------------------------
/temporal_feature_extraction.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/l675451097/Adaptive-Dynamic-Functional-Connectivity/HEAD/temporal_feature_extraction.m
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Adaptive-Dynamic-Functional-Connectivity

Code for "Deep Spatial-Temporal Feature Fusion from Adaptive Dynamic Functional Connectivity for MCI Classification", doi.org/10.1109/tmi.2020.2976825

- `fmri_net_built` performs the multimodal fusion; its output `net` is the `low_net` used by the subsequent scripts (Section III-A).
- `highorder_net_built` performs the RLS-based adaptive dynamic connectivity computation plus the high-order network construction (Section III-B and part of Section III-D).
- `temporal_feature_extraction` corresponds to the paper's Spatial Feature Extraction (Section III-C).
- `spatial_feature_extraction` corresponds to the paper's Temporal Feature Extraction (part of Section III-D).
- `data_for_ultra`, `ffols_gui.mexw64`, `FFRLS`, and `net_built_ultar_lasso_OLS` are helper routines called by the main scripts.
- The `MMD-AE` folder contains the MMD-AE deep fusion model (part of Section III-E).
--------------------------------------------------------------------------------
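The pipeline above centers on a forgetting-factor recursive least-squares (RLS) estimator (`FFRLS.m`, with `ffols_gui.mexw64` as a compiled helper), whose source is only linked here. For orientation, below is a minimal NumPy sketch of the textbook forgetting-factor RLS update; the function name `ffrls_step`, the variable names, and the scalar-output formulation are illustrative assumptions, not the repo's MATLAB implementation.

```python
import numpy as np

def ffrls_step(theta, P, x, y, lam=0.98):
    """One textbook forgetting-factor RLS update (illustrative, not FFRLS.m).

    theta : (d,) current coefficient estimate
    P     : (d, d) inverse-correlation matrix
    x     : (d,) regressor vector at time t
    y     : scalar target at time t
    lam   : forgetting factor in (0, 1]; smaller values adapt faster
    """
    Px = P @ x
    k = Px / (lam + x @ Px)            # gain vector
    e = y - theta @ x                  # a priori prediction error
    theta = theta + k * e              # coefficient update
    P = (P - np.outer(k, Px)) / lam    # inverse-correlation update
    return theta, P

# Running the update over a time series yields a time-varying coefficient
# sequence -- the sense in which the estimated connectivity is "dynamic".
d = 4
theta, P = np.zeros(d), 1e3 * np.eye(d)
rng = np.random.default_rng(0)
for t in range(200):
    x = rng.standard_normal(d)
    y = x @ np.array([1.0, -0.5, 0.3, 0.0]) + 0.01 * rng.standard_normal()
    theta, P = ffrls_step(theta, P, x, y)
```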
/MMD-AE/model.py:
--------------------------------------------------------------------------------
from torch import nn
import torch

# Latent sizes of the two autoencoders. The classifier input is the two
# latent codes (16 + 8) concatenated with two handcrafted feature blocks
# (45 + 65), i.e. 134 dimensions in total.
z_dim1, z_dim2 = 8, 16
inner_1 = 32
inner_2 = 128
z_dim = z_dim1 + z_dim2 + 45 + 65


class model2(nn.Module):
    """Autoencoder for the 65-dimensional feature block (8-d latent code)."""
    def __init__(self):
        super(model2, self).__init__()
        self.f1, self.f2 = nn.Linear(65, inner_1, bias=False), nn.Linear(inner_1, z_dim1, bias=False)
        self.f3, self.f4 = nn.Linear(z_dim1, inner_1, bias=False), nn.Linear(inner_1, 65, bias=False)
        self.relu = nn.ReLU(inplace=True)

    def encode(self, x):
        return self.f2(self.relu(self.f1(x)))

    def decode(self, z):
        return torch.sigmoid(self.f4(self.relu(self.f3(z))))

    def forward(self, x):
        z = self.encode(x)
        return z, self.decode(z)


class model1(nn.Module):
    """Autoencoder for the 292-dimensional feature block (16-d latent code)."""
    def __init__(self):
        super(model1, self).__init__()
        self.f1, self.f2 = nn.Linear(292, inner_2, bias=False), nn.Linear(inner_2, z_dim2, bias=False)
        self.f3, self.f4 = nn.Linear(z_dim2, inner_2, bias=False), nn.Linear(inner_2, 292, bias=False)
        self.relu = nn.ReLU(inplace=True)

    def encode(self, x):
        return self.f2(self.relu(self.f1(x)))

    def decode(self, z):
        return torch.sigmoid(self.f4(self.relu(self.f3(z))))

    def forward(self, x):
        z = self.encode(x)
        return z, self.decode(z)


class classD(nn.Module):
    """Two-layer perceptron classifier on the fused 134-dimensional feature."""
    def __init__(self):
        super(classD, self).__init__()
        self.f1 = nn.Linear(z_dim, z_dim // 2, bias=False)
        self.f2 = nn.Linear(z_dim // 2, 2, bias=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.f2(self.relu(self.f1(x)))
--------------------------------------------------------------------------------
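To make the tensor bookkeeping concrete, here is a small hypothetical driver (not part of the repo) that runs both autoencoders on a dummy batch and assembles the fused feature the way `train.py` does; the 467-dimensional input layout (292 + 65 + 45 + 65) is inferred from the slicing in `train.py` and the `z_dim` definition above.

```python
import torch
from model import model1, model2, classD  # assumes we run from the MMD-AE folder

x = torch.randn(4, 467)                 # [292-d AE-1 block | 65-d AE-2 block | 110-d handcrafted]
m1, m2, clf = model1(), model2(), classD()
z1, recon1 = m1(x[:, :292])             # z1: (4, 16), recon1: (4, 292)
z2, recon2 = m2(x[:, 292:357])          # z2: (4, 8),  recon2: (4, 65)
f = torch.cat((z1, z2, x[:, 357:]), 1)  # fused feature: (4, 134)
logits = clf(f)                         # (4, 2) class scores
```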
/MMD-AE/main.py:
--------------------------------------------------------------------------------
from __future__ import print_function
import argparse
import random

import numpy as np
import scipy.io as io
import torch
import torch.utils.data as Data
from sklearn import svm

from Dataset_73 import dataset, datasettest
from model import *
from train import Trainer

train_num = 72   # training subjects per leave-one-out fold
test_num = 1     # one held-out test subject
index = 10       # fold to run
epochs = 600


def test_svm(X, y, x_test):
    """Fit a linear SVM on the fused training features; return the test
    prediction and the number of correctly classified training samples."""
    SVM = svm.SVC(kernel='linear')
    SVM.fit(X, y)
    predicted = SVM.predict(x_test)
    y_predict = SVM.predict(X)
    return predicted, sum(y_predict == y)


def test(n):
    """Reload the fused features saved for fold n and classify with the SVM."""
    loader = io.loadmat('./feature/feature{}.mat'.format(n))
    train_X = loader['trainData']
    train_Y = loader['trainLabel'].squeeze()
    test_X = loader['testData']
    predict_Y, _ = test_svm(train_X, train_Y, test_X)
    return predict_Y


def best_epoch(epoch_list):
    """Among logged epochs with classifier loss in (1.1, 3), return the
    epoch with the smallest loss. Rows are [epoch, class_loss, acc_svm]."""
    epoch_list = np.array(epoch_list)
    epoch_list = epoch_list[epoch_list[:, 1] > 1.1, :]
    epoch_list = epoch_list[epoch_list[:, 1] < 3, :]
    return epoch_list[np.argmin(epoch_list[:, 1]), 0]


def main(n):
    print(n)
    parser = argparse.ArgumentParser(description='MMD-AE training')
    parser.add_argument('--batch_size', type=int, default=72, metavar='N',
                        help='input batch size for training (default: 72)')
    parser.add_argument('--learning_rate', type=float, default=0.005, metavar='N',
                        help='learning rate for training (default: 0.005)')
    parser.add_argument('--test_every', type=int, default=5, metavar='N',
                        help='test every N epochs while training')
    parser.add_argument('--learning_rate_decay', type=float, default=0.99, metavar='N',
                        help='learning rate decay per epoch')
    parser.add_argument('--weight_decay', type=float, default=5e-4, metavar='N',
                        help='weight decay for training (default: 5e-4)')
    parser.add_argument('--epochs', type=int, default=epochs, metavar='N',
                        help='number of epochs to train (default: 600)')
    parser.add_argument('--checkpoint_dir', type=str, default='./model_state/', metavar='N',
                        help='directory for saving model state_dicts')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--to_train', action='store_true', default=True,
                        help='whether to train or not')
    parser.add_argument('--seed', type=int, default=21, metavar='S',
                        help='random seed (default: 21)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()
    # args.cuda = not args.no_cuda and torch.cuda.is_available()
    args.cuda = False

    def setup_seed(seed):
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        np.random.seed(seed)
        random.seed(seed)
        torch.backends.cudnn.deterministic = True
    setup_seed(5)

    data1 = dataset(n)
    data2 = dataset(n)
    test_data = datasettest(n)  # held-out test set for fold n
    model_1 = model1()
    model_2 = model2()
    class_D = classD()
    if args.cuda:
        model_1, model_2, class_D = model_1.cuda(), model_2.cuda(), class_D.cuda()
    # Shuffled loader for optimization; unshuffled full-batch loaders for
    # extracting the SVM features in a fixed subject order.
    trainloader = Data.DataLoader(dataset=data1, batch_size=args.batch_size, shuffle=True)
    trainloaderT = Data.DataLoader(dataset=data2, batch_size=train_num, shuffle=False)
    testloader = Data.DataLoader(dataset=test_data, batch_size=test_num, shuffle=False)

    trainer = Trainer(trainloader, model_1, model_2, class_D, args, testloader, trainloaderT)
    model_1, model_2, class_D, train_XX, test_XX, train_YY, epoch_list = trainer.train()
    print(best_epoch(epoch_list))
    torch.save(model_1.state_dict(), args.checkpoint_dir + 'checkpoint_1_{}.pth'.format(n))
    torch.save(model_2.state_dict(), args.checkpoint_dir + 'checkpoint_2_{}.pth'.format(n))
    torch.save(class_D.state_dict(), args.checkpoint_dir + 'checkpoint_3_{}.pth'.format(n))

    io.savemat('./feature/feature{}.mat'.format(n),
               {'trainData': train_XX, 'trainLabel': train_YY, 'testData': test_XX})


if __name__ == "__main__":
    main(index)
    xx = test(index)
    print(xx)
--------------------------------------------------------------------------------
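`main(n)` trains a single leave-one-out fold (72 training subjects, 1 held-out subject) and saves its fused features to `./feature/feature{n}.mat`; the repo itself only runs the fold selected by `index`. A hypothetical driver for a full leave-one-out sweep, assuming `dataset(n)`/`datasettest(n)` index the 73 folds from 0 to 72, could look like this:

```python
# Hypothetical leave-one-out driver (not in the repo).
from main import main, test

predictions = []
for n in range(73):
    main(n)                       # train the fold; saves ./feature/feature{n}.mat
    predictions.append(test(n))   # SVM prediction for the held-out subject
print(predictions)
```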
/MMD-AE/train.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import numpy as np
import scipy.io as io
import torch
import torch.nn as nn
from sklearn import svm
from torch import optim
from torch.autograd import Variable

epoch_list = []  # per-epoch log of [epoch, classifier loss, SVM training-accuracy count]


def test_svm(X, x_test, y):
    """Fit a linear SVM on the fused training features; return the test
    prediction and the number of correctly classified training samples."""
    SVM = svm.SVC(kernel='linear')
    SVM.fit(X, y)
    predicted = SVM.predict(x_test)
    y_predict = SVM.predict(X)
    return predicted, sum(y_predict == y)


def compute_kernel(x, y):
    """RBF kernel matrices between x and y for a fixed list of bandwidths."""
    bandlist = [0.25, 0.5, 1, 2, 4]
    x_size = x.size(0)
    y_size = y.size(0)
    dim = x.size(1)
    x = x.unsqueeze(1)  # (x_size, 1, dim)
    y = y.unsqueeze(0)  # (1, y_size, dim)
    tiled_x = x.expand(x_size, y_size, dim)
    tiled_y = y.expand(x_size, y_size, dim)
    kernel_input = [torch.exp(-(tiled_x - tiled_y).pow(2).sum(2) / float(bandwidth))
                    for bandwidth in bandlist]
    return kernel_input  # list of five (x_size, y_size) kernel matrices


def compute_mmd(x, y):
    """Squared maximum mean discrepancy between samples x and y under the
    multi-bandwidth RBF kernel."""
    x_kernel = sum(compute_kernel(x, x))
    y_kernel = sum(compute_kernel(y, y))
    xy_kernel = sum(compute_kernel(x, y))
    mmd = x_kernel.mean() + y_kernel.mean() - 2 * xy_kernel.mean()
    return mmd
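# The two functions above form a biased (V-statistic) estimate of the squared
# maximum mean discrepancy with a mixture of RBF kernels,
#
#     MMD^2(X, Y) = E[k(x, x')] + E[k(y, y')] - 2 E[k(x, y)],
#     k(u, v)     = sum_b exp(-||u - v||^2 / b),  b in {0.25, 0.5, 1, 2, 4},
#
# where each expectation is approximated by the mean of the corresponding
# kernel matrix (diagonal included). In Trainer.train() below, y is a
# standard-normal sample of the same shape as the latent code z, so mmd_loss
# regularizes the distribution of z toward an isotropic Gaussian prior.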
def forward_parallel(net, input, ngpu=2):
    """Optionally run a forward pass across multiple GPUs."""
    if ngpu > 1:
        return nn.parallel.data_parallel(net, input, range(ngpu))
    else:
        return net(input)


class Trainer:
    def __init__(self, trainloader, model1, model2, classD, args, testloader, trainloaderT):
        self.trainloader = trainloader
        self.model1 = model1
        self.model2 = model2
        self.classD = classD
        self.args = args
        self.testloader = testloader
        self.trainloaderT = trainloaderT

    def train(self):
        trainloader = self.trainloader
        model1 = self.model1
        model2 = self.model2
        testloader = self.testloader
        args = self.args
        classD = self.classD
        trainloaderT = self.trainloaderT
        optimizer1 = optim.Adam(model1.parameters(), lr=args.learning_rate, betas=(0.5, 0.9))
        optimizer2 = optim.Adam(model2.parameters(), lr=args.learning_rate, betas=(0.5, 0.9))
        optimizerC = optim.Adam(classD.parameters(), lr=args.learning_rate * 0.1, betas=(0.0, 0.9))

        scheduler1 = optim.lr_scheduler.ExponentialLR(optimizer1, gamma=0.99)
        scheduler2 = optim.lr_scheduler.ExponentialLR(optimizer2, gamma=0.99)
        schedulerC = optim.lr_scheduler.ExponentialLR(optimizerC, gamma=0.99)

        ae_criterion = nn.MSELoss(reduction='sum')           # summed squared reconstruction error
        crossEntropy = nn.CrossEntropyLoss(reduction='sum')  # summed cross-entropy
        xxxx = torch.zeros(600, 2)  # per-epoch log: [classifier loss, total loss]

        for epoch in range(args.epochs):
            for i, (data, targets) in enumerate(trainloader):
                optimizer1.zero_grad()
                optimizer2.zero_grad()
                optimizerC.zero_grad()
                data, targets = Variable(data), Variable(targets)
                if args.cuda:
                    data = data.cuda()
                    targets = targets.cuda()

                encoding1, fake1 = model1(data[:, :292])
                ae_loss1 = ae_criterion(fake1, data[:, :292])     # reconstruction error of the 292-d block

                encoding2, fake2 = model2(data[:, 292:357])
                ae_loss2 = ae_criterion(fake2, data[:, 292:357])  # reconstruction error of the 65-d block

                z = torch.cat((encoding1, encoding2), 1)  # concatenate the 16-d and 8-d latent codes
                f = torch.cat((z, data[:, 357:]), 1)      # append the remaining 45-d and 65-d feature blocks
                a = classD(f)                             # perceptron (classifier) output
                acc = a.cpu().data.numpy().argmax(1) == targets.cpu().data.numpy()  # per-sample training correctness
                class_loss = crossEntropy(a, targets.squeeze())  # classifier cross-entropy loss
                trueSample = Variable(torch.randn(z.size()[0], z.size()[1]))  # sample from the standard-normal prior
                if args.cuda:
                    trueSample = trueSample.cuda()
                mmd_loss = compute_mmd(z, trueSample)  # discrepancy between the (8+16)-d code distribution and the prior
                l = ae_loss1 + ae_loss2 + 0.5 * class_loss + 1.5 * 73 * mmd_loss  # weighted sum of the three loss terms
                l.backward()       # compute gradients for all three networks
                optimizer1.step()  # update parameters
                optimizer2.step()
                optimizerC.step()

            if epoch % 100 == 0:  # decay the learning rates every 100 epochs
                scheduler1.step()
                scheduler2.step()
                schedulerC.step()

            # Switch to eval mode: parameters are frozen here, so the fused
            # features and test results below never influence training.
            model1.eval()
            model2.eval()
            classD.eval()

            for i, (data, targets) in enumerate(trainloaderT):
                data, targets = Variable(data), Variable(targets)
                if args.cuda:
                    data = data.cuda()
                    targets = targets.cuda()
                encoding1, fake1 = model1(data[:, :292])
                encoding2, fake2 = model2(data[:, 292:357])

                z = torch.cat((encoding1, encoding2), 1)
                f = torch.cat((z, data[:, 357:]), 1)  # fused training features for the SVM

            for i, (data_test, targets_test) in enumerate(testloader):
                data_test, targets_test = Variable(data_test), Variable(targets_test)
                if args.cuda:
                    data_test = data_test.cuda()
                    targets_test = targets_test.cuda()
                encoding1, fake1 = model1(data_test[:, :292])
                encoding2, fake2 = model2(data_test[:, 292:357])

                z_test = torch.cat((encoding1, encoding2), 1)
                f_test = torch.cat((z_test, data_test[:, 357:]), 1)  # fused test features for the SVM
                a_test = classD(f_test)  # classifier output on the held-out subject
                class_losst = crossEntropy(a_test, targets_test)
                predict, acc_svm = test_svm(f.cpu().data.numpy(), f_test.cpu().data.numpy(),
                                            targets.cpu().data.numpy())  # evaluate with the SVM

            model1.train()
            model2.train()
            classD.train()
            if predict == targets_test.cpu().data.numpy():
                print(epoch, class_losst.cpu().data.numpy(), a_test.cpu().data.numpy().argmax(),
                      predict, sum(acc), acc_svm, class_loss.cpu().data.numpy())

            xxxx[epoch, 0] = class_loss.item()  # .item() detaches the scalars before logging
            xxxx[epoch, 1] = l.item()

            if epoch > 340 and epoch < 400:  # log candidate epochs for best_epoch() in main.py
                epoch_list.append([epoch, class_loss.item(), acc_svm])

        result11 = xxxx.detach().numpy()
        io.savemat('save.mat', {'result11': result11})
        print('final_epoch', class_losst.cpu().data.numpy(), a_test.cpu().data.numpy().argmax(),
              predict, sum(acc), acc_svm, class_loss.cpu().data.numpy())
        return (model1, model2, classD, f.cpu().data.numpy(), f_test.cpu().data.numpy(),
                targets.cpu().data.numpy(), epoch_list)
--------------------------------------------------------------------------------
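`main.py` saves the three trained networks per fold under `./model_state/`. Reloading them later for feature extraction follows the standard PyTorch pattern; the fold index below mirrors the repo's default `index = 10`, and the paths match the `torch.save` calls in `main.py`.

```python
import torch
from model import model1, model2, classD

n = 10  # fold index; matches `index` in main.py
m1, m2, clf = model1(), model2(), classD()
m1.load_state_dict(torch.load('./model_state/checkpoint_1_{}.pth'.format(n)))
m2.load_state_dict(torch.load('./model_state/checkpoint_2_{}.pth'.format(n)))
clf.load_state_dict(torch.load('./model_state/checkpoint_3_{}.pth'.format(n)))
m1.eval(); m2.eval(); clf.eval()  # freeze for inference
```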