├── Dataset ├── data.npy └── label.npy ├── Create_dataset.py ├── Package_dataset.py ├── Models ├── README.md ├── LeNet.py ├── MobileNetV1.py ├── AlexNet.py ├── ZFNet.py ├── SqueezeNet.py ├── ResNet50.py ├── MobileNetV2.py ├── shuffuleNetV1.py ├── VGG19.py ├── DenseNet.py ├── shuffuleNetV2.py ├── Mnasnet.py ├── Xception.py ├── EfficientNet.py ├── GoogLeNet.py └── MobileNetV3.py ├── train.py └── README.md /Dataset/data.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StChenHaoGitHub/1D_Pytorch_Train_demo/HEAD/Dataset/data.npy -------------------------------------------------------------------------------- /Dataset/label.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StChenHaoGitHub/1D_Pytorch_Train_demo/HEAD/Dataset/label.npy -------------------------------------------------------------------------------- /Create_dataset.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | numbers = 100 4 | channels = 3 5 | length = 224 6 | classes = 2 7 | 8 | data = np.random.randn(numbers,channels,length) 9 | label = np.random.randint(0,classes,numbers) 10 | 11 | np.save('Dataset/data.npy',data,allow_pickle=True) 12 | np.save('Dataset/label.npy',label,allow_pickle=True) 13 | -------------------------------------------------------------------------------- /Package_dataset.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def package_dataset(data, label): 5 | dataset = [[i, j] for i, j in zip(data, label)] 6 | # channel number 7 | channels = data[0].shape[0] 8 | # data length 9 | length = data[0].shape[1] 10 | # data classes 11 | classes = len(np.unique(label)) 12 | return dataset, channels, length, classes 13 | 14 | 15 | if __name__ == '__main__': 16 | data = np.load('Dataset/data.npy') 17 | label = np.load('Dataset/label.npy') 18 | dataset, channels, length, classes = package_dataset(data, label) 19 | print(channels, length, classes) -------------------------------------------------------------------------------- /Models/README.md: -------------------------------------------------------------------------------- 1 | # One-dimensional deep learning models 2 | 3 | For a deep learning project on ECG, I used PyTorch to reproduce the classic CNN models in one-dimensional form, including LeNet, AlexNet, ZFNet, VGG, GoogLeNet, ResNet, DenseNet, MobileNetV1-3, ShuffleNet, EfficientNet-B0, and more. To make the models easy to understand, I did not copy the official implementations but reproduced the models myself, so it is easy to carry out secondary development on the code. If you can use a VPN in China, the website below gives detailed step-by-step write-ups of how these models were reproduced; the posts are written by me in Chinese.
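Every model in this folder follows the same constructor and calling pattern, so each one can be sanity-checked the way the `__main__` block at the bottom of its file does. A minimal sketch, mirroring the self-test in `LeNet.py` (the channel count, sample-point count, and class count here are arbitrary example values):

```python
import torch
from Models.LeNet import LeNet

# Arbitrary example values: 1 input channel, 224 sample points, 5 classes
model = LeNet(in_channels=1, input_sample_points=224, classes=5)
dummy = torch.randn(1, 1, 224)  # shape: (batch, channels, length)
print(model(dummy).shape)       # expected: torch.Size([1, 5])
```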
6 | 7 | 8 | [chen-hao.blog.csdn.net](https://chen-hao.blog.csdn.net) 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /Models/LeNet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class LeNet(torch.nn.Module): 4 | def __init__(self, in_channels, input_sample_points, classes): 5 | super(LeNet, self).__init__() 6 | 7 | self.input_channels = in_channels 8 | self.input_sample_points = input_sample_points 9 | 10 | self.features = torch.nn.Sequential( 11 | torch.nn.Conv1d(in_channels, 20, kernel_size=5), 12 | torch.nn.BatchNorm1d(20), 13 | torch.nn.MaxPool1d(2), 14 | torch.nn.Conv1d(20, 50, kernel_size=5), 15 | torch.nn.BatchNorm1d(50), 16 | torch.nn.MaxPool1d(2), 17 | ) 18 | 19 | self.After_features_channels = 50 20 | # Compute the number of sample points per channel after all the conv and pooling layers 21 | # self.After_features_sample_points = ((input_sample_points - 5 + 1) // 2 - 5 + 1) // 2 22 | self.After_features_sample_points = ((input_sample_points-4)//2-4) // 2 23 | 24 | 25 | self.classifier = torch.nn.Sequential( 26 | torch.nn.Linear(self.After_features_channels * self.After_features_sample_points, 512), 27 | torch.nn.ReLU(), 28 | torch.nn.Linear(512, classes), 29 | torch.nn.ReLU() 30 | ) 31 | 32 | def forward(self, x): 33 | # check the dimensionality 34 | # and raise an error if the input shape does not match 35 | if x.size(1) != self.input_channels or x.size(2) != self.input_sample_points: 36 | raise Exception( 37 | 'Wrong input dimensions: expected [batch_size, {}, {}], got {}'.format(self.input_channels, self.input_sample_points,x.size())) 38 | 39 | x = self.features(x) 40 | x = x.view(-1, self.After_features_channels * self.After_features_sample_points) 41 | x = self.classifier(x) 42 | return x 43 | 44 | 45 | if __name__ == '__main__': 46 | model = LeNet(in_channels=1, input_sample_points=224, classes=5) 47 | input = torch.randn(size=(1, 1, 224)) 48 | output = model(input) 49 | print(output.shape) 50 | 51 | -------------------------------------------------------------------------------- /Models/MobileNetV1.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class DepthwiseSeparableConv(nn.Module): 5 | def __init__(self, in_channels, out_channels, stride): 6 | super(DepthwiseSeparableConv, self).__init__() 7 | self.depthwise = nn.Conv1d(in_channels, in_channels, kernel_size=3, stride=stride, padding=1, groups=in_channels) 8 | self.pointwise = nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) 9 | self.relu = nn.ReLU(inplace=True) 10 | 11 | def forward(self, x): 12 | x = self.depthwise(x) 13 | x = self.pointwise(x) 14 | x = self.relu(x) 15 | return x 16 | 17 | class MobileNetV1(nn.Module): 18 | def __init__(self, in_channels = 3,classes=1000): 19 | super(MobileNetV1, self).__init__() 20 | self.model = nn.Sequential( 21 | nn.Conv1d(in_channels, 32, kernel_size=3, stride=2, padding=1), 22 | nn.ReLU(inplace=True), 23 | DepthwiseSeparableConv(32, 64, stride=1), 24 | DepthwiseSeparableConv(64, 128, stride=2), 25 | DepthwiseSeparableConv(128, 128, stride=1), 26 | DepthwiseSeparableConv(128, 256, stride=2), 27 | DepthwiseSeparableConv(256, 256, stride=1), 28 | DepthwiseSeparableConv(256, 512, stride=2), 29 | DepthwiseSeparableConv(512, 512, stride=1), 30 | DepthwiseSeparableConv(512, 512, stride=1), 31 | DepthwiseSeparableConv(512, 512, stride=1), 32 | DepthwiseSeparableConv(512, 512, stride=1), 33 | DepthwiseSeparableConv(512, 512,
stride=1), 34 | DepthwiseSeparableConv(512, 1024, stride=2), 35 | DepthwiseSeparableConv(1024, 1024, stride=1), 36 | nn.AdaptiveAvgPool1d(1) 37 | ) 38 | self.classifier = nn.Linear(1024, classes) 39 | 40 | def forward(self, x): 41 | x = self.model(x) 42 | x = x.view(x.size(0), -1) 43 | x = self.classifier(x) 44 | return x 45 | 46 | 47 | if __name__ == "__main__": 48 | # Create a MobileNetV1 model instance 49 | model = MobileNetV1(in_channels=3,classes=5) 50 | # Check the output shape with a dummy input 51 | input = torch.randn(1, 3, 224) 52 | output = model(input) 53 | print(output.shape) 54 | 55 | -------------------------------------------------------------------------------- /Models/AlexNet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class AlexNet(torch.nn.Module): 4 | def __init__(self,in_channels,classes,input_sample_points): 5 | super(AlexNet, self).__init__() 6 | 7 | self.input_channels = in_channels 8 | self.input_sample_points = input_sample_points 9 | 10 | self.features = torch.nn.Sequential( 11 | 12 | torch.nn.Conv1d(in_channels,64,kernel_size=11,stride=4,padding=2), 13 | torch.nn.BatchNorm1d(64), 14 | torch.nn.ReLU(inplace=True), 15 | #torch.nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2), 16 | torch.nn.MaxPool1d(kernel_size=3,stride=2), 17 | 18 | torch.nn.Conv1d(64, 192, kernel_size=5, padding=2), 19 | torch.nn.BatchNorm1d(192), 20 | torch.nn.ReLU(inplace=True), 21 | #torch.nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2), 22 | torch.nn.MaxPool1d(kernel_size=3, stride=2), 23 | 24 | torch.nn.Conv1d(192, 384, kernel_size=3, padding=1), 25 | torch.nn.BatchNorm1d(384), 26 | torch.nn.ReLU(inplace=True), 27 | torch.nn.Conv1d(384, 256, kernel_size=3, padding=1), 28 | torch.nn.ReLU(inplace=True), 29 | torch.nn.BatchNorm1d(256), 30 | torch.nn.Conv1d(256, 256, kernel_size=3, padding=1), 31 | torch.nn.BatchNorm1d(256), 32 | torch.nn.ReLU(inplace=True), 33 | torch.nn.MaxPool1d(kernel_size=3, stride=2), 34 | # Adaptive average pooling always outputs 6 sample points regardless of input length 35 | torch.nn.AdaptiveAvgPool1d(6), 36 | ) 37 | 38 | self.classifier = torch.nn.Sequential( 39 | 40 | torch.nn.Dropout(0.5), 41 | torch.nn.Linear(1536,1024), 42 | torch.nn.ReLU(inplace=True), 43 | 44 | torch.nn.Dropout(0.5), 45 | torch.nn.Linear(1024, 1024), 46 | torch.nn.ReLU(inplace=True), 47 | torch.nn.Linear(1024,classes), 48 | 49 | ) 50 | 51 | def forward(self,x): 52 | 53 | x = self.features(x) 54 | x = x.view(-1,1536) 55 | x = self.classifier(x) 56 | return x 57 | 58 | 59 | if __name__ == '__main__': 60 | model = AlexNet(in_channels=1, input_sample_points=224, classes=5) 61 | input = torch.randn(size=(1, 1, 224)) 62 | output = model(input) 63 | print(output.shape) 64 | -------------------------------------------------------------------------------- /Models/ZFNet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class ZFNet(torch.nn.Module): 4 | def __init__(self, in_channels, input_sample_points, classes): 5 | super(ZFNet, self).__init__() 6 | 7 | self.input_channels = in_channels 8 | self.input_sample_points = input_sample_points 9 | 10 | self.features = torch.nn.Sequential( 11 | torch.nn.Conv1d(in_channels, 96, kernel_size=7, stride=2), 12 | torch.nn.BatchNorm1d(96), 13 | torch.nn.MaxPool1d(kernel_size=3,stride=2), 14 | torch.nn.Conv1d(96, 256, kernel_size=5, stride=2), 15 | torch.nn.BatchNorm1d(256), 16 | torch.nn.MaxPool1d(kernel_size=3, stride=2), 17 | 18 | torch.nn.Conv1d(256, 384, kernel_size=3, padding=1), 19 | torch.nn.BatchNorm1d(384), 20 |
torch.nn.Conv1d(384, 384, kernel_size=3, padding=1), 21 | torch.nn.BatchNorm1d(384), 22 | torch.nn.Conv1d(384, 256, kernel_size=3, padding=1), 23 | torch.nn.BatchNorm1d(256), 24 | torch.nn.MaxPool1d(kernel_size=3, stride=2), 25 | ) 26 | 27 | self.After_features_channels = 256 28 | self.After_features_sample_points = (((((((((input_sample_points-7)//2 + 1)-3)//2+1)-5)//2+1)-3)//2+1)-3)//2+1 29 | self.classifier = torch.nn.Sequential( 30 | 31 | torch.nn.Linear(self.After_features_channels*self.After_features_sample_points,1024), 32 | torch.nn.ReLU(inplace=True), 33 | torch.nn.Dropout(0.5), 34 | 35 | torch.nn.Linear(1024, 1024), 36 | torch.nn.ReLU(inplace=True), 37 | torch.nn.Dropout(0.5), 38 | 39 | torch.nn.Linear(1024,classes), 40 | ) 41 | 42 | def forward(self,x): 43 | if x.size(1)!=self.input_channels or x.size(2)!=self.input_sample_points: 44 | raise Exception('Wrong input dimensions: expected [batch_size,{},{}], got {}'.format(self.input_channels,self.input_sample_points,x.size())) 45 | 46 | x = self.features(x) 47 | x = x.view(-1,self.After_features_channels*self.After_features_sample_points) 48 | x = self.classifier(x) 49 | return x 50 | 51 | 52 | if __name__ == '__main__': 53 | model = ZFNet(in_channels=1, input_sample_points=224, classes=5) 54 | input = torch.randn(size=(1, 1, 224)) 55 | output = model(input) 56 | print(output.shape) 57 | -------------------------------------------------------------------------------- /Models/SqueezeNet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | # from torchsummary import summary 3 | class FireModule(torch.nn.Module): 4 | def __init__(self, in_channels, squeeze_channels, expand1x1_channels, expand1x3_channels): 5 | super(FireModule, self).__init__() 6 | self.squeeze = torch.nn.Conv1d(in_channels, squeeze_channels, kernel_size=1) 7 | self.relu = torch.nn.ReLU(inplace=True) 8 | self.expand1x1 = torch.nn.Conv1d(squeeze_channels, expand1x1_channels, kernel_size=1) 9 | self.expand1x3 = torch.nn.Conv1d(squeeze_channels, expand1x3_channels, kernel_size=3, padding=1) 10 | 11 | def forward(self, x): 12 | x = self.squeeze(x) 13 | x = self.relu(x) 14 | out1x1 = self.expand1x1(x) 15 | out1x3 = self.expand1x3(x) 16 | out = torch.cat([out1x1, out1x3], dim=1) 17 | return self.relu(out) 18 | 19 | 20 | class SqueezeNet(torch.nn.Module): 21 | def __init__(self,in_channels,classes): 22 | super(SqueezeNet, self).__init__() 23 | self.features = torch.nn.Sequential( 24 | # conv1 25 | torch.nn.Conv1d(in_channels, 96, kernel_size=7, stride=2), 26 | torch.nn.ReLU(inplace=True), 27 | # maxpool1 28 | torch.nn.MaxPool1d(kernel_size=3, stride=2), 29 | # Fire2 30 | FireModule(96, 16, 64, 64), 31 | # Fire3 32 | FireModule(128, 16, 64, 64), 33 | # Fire4 34 | FireModule(128, 32, 128, 128), 35 | # maxpool4 36 | torch.nn.MaxPool1d(kernel_size=3, stride=2), 37 | # Fire5 38 | FireModule(256, 32, 128, 128), 39 | # Fire6 40 | FireModule(256, 48, 192, 192), 41 | # Fire7 42 | FireModule(384, 48, 192, 192), 43 | # Fire8 44 | FireModule(384, 64, 256, 256), 45 | # maxpool8 46 | torch.nn.MaxPool1d(kernel_size=3, stride=2), 47 | # Fire9 48 | FireModule(512, 64, 256, 256) 49 | ) 50 | self.classifier = torch.nn.Sequential( 51 | # conv10 52 | torch.nn.Conv1d(512, classes, kernel_size=1), 53 | torch.nn.ReLU(inplace=True), 54 | # avgpool10 55 | torch.nn.AdaptiveAvgPool1d((1)) 56 | ) 57 | 58 | def forward(self, x): 59 | x = self.features(x) 60 | x = self.classifier(x) 61 | x = torch.flatten(x, 1) 62 | return x 63 | 64 | 65 | 66 | 67 | if __name__ ==
"__main__": 68 | # 创建一个SqueezeNet实例 69 | model = SqueezeNet(in_channels=3,classes=10) 70 | # model = FireModule(96,16,64,64) 71 | # 打印模型结构 72 | input = torch.randn(1,3,224) 73 | output = model(input) 74 | print(output.shape) 75 | -------------------------------------------------------------------------------- /Models/ResNet50.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class Bottlrneck(torch.nn.Module): 4 | def __init__(self,In_channel,Med_channel,Out_channel,downsample=False): 5 | super(Bottlrneck, self).__init__() 6 | self.stride = 1 7 | if downsample == True: 8 | self.stride = 2 9 | 10 | self.layer = torch.nn.Sequential( 11 | torch.nn.Conv1d(In_channel, Med_channel, 1, self.stride), 12 | torch.nn.BatchNorm1d(Med_channel), 13 | torch.nn.ReLU(), 14 | torch.nn.Conv1d(Med_channel, Med_channel, 3, padding=1), 15 | torch.nn.BatchNorm1d(Med_channel), 16 | torch.nn.ReLU(), 17 | torch.nn.Conv1d(Med_channel, Out_channel, 1), 18 | torch.nn.BatchNorm1d(Out_channel), 19 | torch.nn.ReLU(), 20 | ) 21 | 22 | if In_channel != Out_channel: 23 | self.res_layer = torch.nn.Conv1d(In_channel, Out_channel,1,self.stride) 24 | else: 25 | self.res_layer = None 26 | 27 | def forward(self,x): 28 | if self.res_layer is not None: 29 | residual = self.res_layer(x) 30 | else: 31 | residual = x 32 | return self.layer(x)+residual 33 | 34 | 35 | class ResNet50(torch.nn.Module): 36 | def __init__(self,in_channels=2,classes=125): 37 | super(ResNet50, self).__init__() 38 | self.features = torch.nn.Sequential( 39 | torch.nn.Conv1d(in_channels,64,kernel_size=7,stride=2,padding=3), 40 | torch.nn.MaxPool1d(3,2,1), 41 | 42 | Bottlrneck(64,64,256,False), 43 | Bottlrneck(256,64,256,False), 44 | Bottlrneck(256,64,256,False), 45 | # 46 | Bottlrneck(256,128,512, True), 47 | Bottlrneck(512,128,512, False), 48 | Bottlrneck(512,128,512, False), 49 | Bottlrneck(512,128,512, False), 50 | # 51 | Bottlrneck(512,256,1024, True), 52 | Bottlrneck(1024,256,1024, False), 53 | Bottlrneck(1024,256,1024, False), 54 | Bottlrneck(1024,256,1024, False), 55 | Bottlrneck(1024,256,1024, False), 56 | Bottlrneck(1024,256,1024, False), 57 | # 58 | Bottlrneck(1024,512,2048, True), 59 | Bottlrneck(2048,512,2048, False), 60 | Bottlrneck(2048,512,2048, False), 61 | 62 | torch.nn.AdaptiveAvgPool1d(1) 63 | ) 64 | self.classifer = torch.nn.Sequential( 65 | torch.nn.Linear(2048,classes) 66 | ) 67 | 68 | def forward(self,x): 69 | x = self.features(x) 70 | x = x.view(-1,2048) 71 | x = self.classifer(x) 72 | return x 73 | 74 | if __name__ == '__main__': 75 | x = torch.randn(size=(1,1,224)) 76 | # x = torch.randn(size=(1,64,224)) 77 | # model = Bottlrneck(64,64,256,True) 78 | model = ResNet50(in_channels=1,classes=5) 79 | 80 | output = model(x) 81 | print(output.shape) 82 | 83 | 84 | -------------------------------------------------------------------------------- /Models/MobileNetV2.py: -------------------------------------------------------------------------------- 1 | # 全文注释 2 | import torch 3 | 4 | 5 | class conv(torch.nn.Module): 6 | def __init__(self, in_channels, out_channels, keral,stride=1, groups=1): 7 | super().__init__() 8 | padding = 0 if keral==1 else 1 9 | 10 | self.conv = torch.nn.Conv1d(in_channels, out_channels, keral, stride,padding, groups=groups) 11 | self.bath = torch.nn.BatchNorm1d(out_channels) 12 | self.relu6 = torch.nn.ReLU6() 13 | 14 | 15 | 16 | def forward(self,x): 17 | x = self.conv(x) 18 | if x.size()[-1] != 1: 19 | x = self.bath(x) 20 | x = self.relu6(x) 21 | return x 22 | 
23 | 24 | class bottleneck(torch.nn.Module): 25 | def __init__(self,in_channels,out_channels,stride,t): 26 | super().__init__() 27 | self.conv = conv(in_channels,in_channels*t,1) 28 | self.conv1 = conv(in_channels*t,in_channels*t,3,stride=stride,groups=in_channels*t) 29 | self.conv2 = conv(in_channels*t,out_channels,1) 30 | 31 | self.stride = stride 32 | self.in_channels = in_channels 33 | self.out_channels = out_channels 34 | 35 | def forward(self,x): 36 | x1 = self.conv(x) 37 | x1 = self.conv1(x1) 38 | x1 = self.conv2(x1) 39 | 40 | if self.stride == 1 and self.in_channels == self.out_channels: 41 | x1 += x 42 | 43 | return x1 44 | 45 | 46 | 47 | class MobileNetV2(torch.nn.Module): 48 | def __init__(self,in_channels,classes): 49 | super().__init__() 50 | 51 | self.fearures = torch.nn.Sequential( 52 | conv(in_channels,32,keral=3,stride=2), 53 | bottleneck(32,16,stride=1,t=1), 54 | 55 | bottleneck(16,24,stride=2,t=6), 56 | bottleneck(24,24,stride=1,t=6), 57 | 58 | bottleneck(24, 32,stride=2, t=6), 59 | bottleneck(32, 32,stride=1, t=6), 60 | bottleneck(32, 32,stride=1, t=6), 61 | 62 | bottleneck(32, 64,stride=2, t=6), 63 | bottleneck(64, 64,stride=1, t=6), 64 | bottleneck(64, 64,stride=1, t=6), 65 | bottleneck(64, 64,stride=1, t=6), 66 | 67 | bottleneck(64, 96,stride=1, t=6), 68 | bottleneck(96, 96,stride=1, t=6), 69 | bottleneck(96, 96,stride=1, t=6), 70 | 71 | bottleneck(96, 160,stride=2, t=6), 72 | bottleneck(160, 160,stride=1, t=6), 73 | bottleneck(160, 160,stride=1, t=6), 74 | 75 | bottleneck(160, 320,stride=1, t=6), 76 | conv(320,1280,1,stride=1), 77 | torch.nn.AdaptiveAvgPool1d(1) 78 | 79 | ) 80 | 81 | self.classifier = torch.nn.Sequential( 82 | conv(1280,out_channels=classes,keral=1), 83 | torch.nn.Flatten() 84 | 85 | ) 86 | 87 | def forward(self,x): 88 | x = self.fearures(x) 89 | x = self.classifier(x) 90 | return x 91 | 92 | 93 | 94 | if __name__ == "__main__": 95 | # model = conv(3,20,1) 96 | # model = bottleneck(32,32,1) 97 | model = MobileNetV2(32,5) 98 | input = torch.randn((1,32,224)) 99 | output = model(input) 100 | print(output.size()) 101 | -------------------------------------------------------------------------------- /Models/shuffuleNetV1.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class channel_shuffle(torch.nn.Module): 4 | def __init__(self, groups): 5 | super().__init__() 6 | self.groups = groups 7 | 8 | def forward(self, x): 9 | b, c, l = x.size() 10 | group_channel = c // self.groups 11 | x = x.reshape(b, self.groups, group_channel, l) 12 | x = x.permute(0, 2, 1, 3).contiguous() 13 | x = x.reshape(b, c, l) 14 | return x 15 | 16 | class shuffuleBlock(torch.nn.Module): 17 | def __init__(self, In_channel, Med_channel, Out_channel, stride=2, group=3): 18 | super(shuffuleBlock, self).__init__() 19 | self.stride = stride # Added to store the stride value 20 | 21 | if stride == 2: 22 | self.res_layer = torch.nn.AvgPool1d(3, stride, 1) 23 | 24 | self.layer = torch.nn.Sequential( 25 | torch.nn.Conv1d(In_channel, Med_channel, 1), 26 | torch.nn.BatchNorm1d(Med_channel), 27 | torch.nn.ReLU(), 28 | channel_shuffle(groups=group), 29 | torch.nn.Conv1d(Med_channel, Med_channel, 3, stride, padding=1, groups=group), 30 | torch.nn.BatchNorm1d(Med_channel), 31 | torch.nn.ReLU(), 32 | torch.nn.Conv1d(Med_channel, Out_channel, 1), 33 | torch.nn.BatchNorm1d(Out_channel), 34 | torch.nn.ReLU(), 35 | ) 36 | 37 | def forward(self, x): 38 | if self.stride == 2: 39 | return torch.cat((self.res_layer(x), self.layer(x)), 1) 40 | 
else: 41 | return self.layer(x) 42 | 43 | 44 | 45 | class shuffuleNetV1_G3(torch.nn.Module): 46 | def __init__(self,in_channels,classes): 47 | super().__init__() 48 | 49 | self.features = torch.nn.Sequential( 50 | torch.nn.Conv1d(in_channels,24,3,2,1), 51 | torch.nn.Conv1d(24,120,3,2,1), 52 | torch.nn.MaxPool1d(3,2,1), 53 | # 54 | shuffuleBlock(120,120//4,120,2,3), 55 | # 56 | shuffuleBlock(240,240//4,240,1,3), 57 | shuffuleBlock(240,240//4,240,1,3), 58 | shuffuleBlock(240,240//4,240,1,3), 59 | # 60 | shuffuleBlock(240, 240 // 4, 240, 2,3), 61 | # 62 | shuffuleBlock(480, 480 // 4, 480, 1,3), 63 | shuffuleBlock(480, 480 // 4, 480, 1,3), 64 | shuffuleBlock(480, 480 // 4, 480, 1,3), 65 | shuffuleBlock(480, 480 // 4, 480, 1,3), 66 | shuffuleBlock(480, 480 // 4, 480, 1,3), 67 | shuffuleBlock(480, 480 // 4, 480, 1,3), 68 | shuffuleBlock(480, 480 // 4, 480, 1,3), 69 | 70 | shuffuleBlock(480, 480 // 4, 480, 2,3), 71 | 72 | shuffuleBlock(960, 960 // 4, 960, 1,3), 73 | shuffuleBlock(960, 960 // 4, 960, 1,3), 74 | shuffuleBlock(960, 960 // 4, 960, 1,3), 75 | 76 | torch.nn.AdaptiveAvgPool1d(1) 77 | 78 | ) 79 | 80 | self.classifier = torch.nn.Sequential( 81 | torch.nn.Flatten(), 82 | torch.nn.Linear(960,classes) 83 | ) 84 | 85 | 86 | def forward(self,x): 87 | x = self.features(x) 88 | x = self.classifier(x) 89 | return x 90 | if __name__ == "__main__": 91 | x = torch.randn(1, 2, 200) 92 | # model = shuffuleBlock(300, 300 // 4, 300, 2, 3) 93 | model = shuffuleNetV1_G3(2, 125) 94 | output = model(x) 95 | print(output.size()) 96 | -------------------------------------------------------------------------------- /Models/VGG19.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class VGG19(torch.nn.Module): 4 | def __init__(self,in_channels=1,classes=5): 5 | super(VGG19, self).__init__() 6 | self.feature = torch.nn.Sequential( 7 | 8 | torch.nn.Conv1d(in_channels, 64, kernel_size=3, padding=1), 9 | torch.nn.BatchNorm1d(64), 10 | torch.nn.ReLU(), 11 | torch.nn.Conv1d(64, 64, kernel_size=3, padding=1), 12 | torch.nn.BatchNorm1d(64), 13 | torch.nn.ReLU(), 14 | torch.nn.MaxPool1d(2), 15 | 16 | torch.nn.Conv1d(64, 128, kernel_size=3, padding=1), 17 | torch.nn.BatchNorm1d(128), 18 | torch.nn.ReLU(), 19 | torch.nn.Conv1d(128, 128, kernel_size=3, padding=1), 20 | torch.nn.BatchNorm1d(128), 21 | torch.nn.ReLU(), 22 | torch.nn.MaxPool1d(2), 23 | 24 | torch.nn.Conv1d(128, 256, kernel_size=3, padding=1), 25 | torch.nn.BatchNorm1d(256), 26 | torch.nn.ReLU(), 27 | torch.nn.Conv1d(256, 256, kernel_size=3, padding=1), 28 | torch.nn.BatchNorm1d(256), 29 | torch.nn.ReLU(), 30 | torch.nn.Conv1d(256, 256, kernel_size=3, padding=1), 31 | torch.nn.BatchNorm1d(256), 32 | torch.nn.ReLU(), 33 | torch.nn.Conv1d(256, 256, kernel_size=3, padding=1), 34 | torch.nn.BatchNorm1d(256), 35 | torch.nn.ReLU(), 36 | torch.nn.MaxPool1d(2), 37 | 38 | torch.nn.Conv1d(256, 512, kernel_size=3, padding=1), 39 | torch.nn.BatchNorm1d(512), 40 | torch.nn.ReLU(), 41 | torch.nn.Conv1d(512, 512, kernel_size=3, padding=1), 42 | torch.nn.BatchNorm1d(512), 43 | torch.nn.ReLU(), 44 | torch.nn.Conv1d(512, 512, kernel_size=3, padding=1), 45 | torch.nn.BatchNorm1d(512), 46 | torch.nn.ReLU(), 47 | torch.nn.Conv1d(512, 512, kernel_size=3, padding=1), 48 | torch.nn.BatchNorm1d(512), 49 | torch.nn.ReLU(), 50 | torch.nn.MaxPool1d(2), 51 | 52 | torch.nn.Conv1d(512, 512, kernel_size=3, padding=1), 53 | torch.nn.BatchNorm1d(512), 54 | torch.nn.ReLU(), 55 | torch.nn.Conv1d(512, 512, kernel_size=3, padding=1), 56 | 
torch.nn.BatchNorm1d(512), 57 | torch.nn.ReLU(), 58 | torch.nn.Conv1d(512, 512, kernel_size=3, padding=1), 59 | torch.nn.BatchNorm1d(512), 60 | torch.nn.ReLU(), 61 | torch.nn.Conv1d(512, 512, kernel_size=3, padding=1), 62 | torch.nn.BatchNorm1d(512), 63 | torch.nn.ReLU(), 64 | torch.nn.MaxPool1d(2), 65 | 66 | torch.nn.AdaptiveAvgPool1d(7) 67 | ) 68 | self.classifer = torch.nn.Sequential( 69 | torch.nn.Linear(3584,1024), 70 | torch.nn.ReLU(), 71 | torch.nn.Dropout(0.5), 72 | torch.nn.Linear(1024,1024), 73 | torch.nn.ReLU(), 74 | torch.nn.Dropout(0.5), 75 | torch.nn.Linear(1024, 512), 76 | torch.nn.ReLU(), 77 | torch.nn.Linear(512, classes), 78 | ) 79 | 80 | def forward(self, x): 81 | x = self.feature(x) 82 | x = x.view(-1, 3584) 83 | x = self.classifer(x) 84 | return x 85 | 86 | 87 | if __name__ == '__main__': 88 | model = VGG19(in_channels=1,classes=5) 89 | input = torch.randn(size=(1,1,224)) 90 | output = model(input) 91 | print(output.shape) 92 | 93 | -------------------------------------------------------------------------------- /Models/DenseNet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class DenseLayer(torch.nn.Module): 4 | def __init__(self,in_channels,middle_channels=128,out_channels=32): 5 | super(DenseLayer, self).__init__() 6 | self.layer = torch.nn.Sequential( 7 | torch.nn.BatchNorm1d(in_channels), 8 | torch.nn.ReLU(inplace=True), 9 | torch.nn.Conv1d(in_channels,middle_channels,1), 10 | torch.nn.BatchNorm1d(middle_channels), 11 | torch.nn.ReLU(inplace=True), 12 | torch.nn.Conv1d(middle_channels,out_channels,3,padding=1) 13 | ) 14 | def forward(self,x): 15 | return torch.cat([x,self.layer(x)],dim=1) 16 | 17 | 18 | class DenseBlock(torch.nn.Sequential): 19 | def __init__(self,layer_num,growth_rate,in_channels,middele_channels=128): 20 | super(DenseBlock, self).__init__() 21 | for i in range(layer_num): 22 | layer = DenseLayer(in_channels+i*growth_rate,middele_channels,growth_rate) 23 | self.add_module('denselayer%d'%(i),layer) 24 | 25 | class Transition(torch.nn.Sequential): 26 | def __init__(self,channels): 27 | super(Transition, self).__init__() 28 | self.add_module('norm',torch.nn.BatchNorm1d(channels)) 29 | self.add_module('relu',torch.nn.ReLU(inplace=True)) 30 | self.add_module('conv',torch.nn.Conv1d(channels,channels//2,3,padding=1)) 31 | self.add_module('Avgpool',torch.nn.AvgPool1d(2)) 32 | 33 | 34 | class DenseNet(torch.nn.Module): 35 | def __init__(self,layer_num=(6,12,24,16),growth_rate=32,init_features=64,in_channels=1,middele_channels=128,classes=5): 36 | super(DenseNet, self).__init__() 37 | self.feature_channel_num=init_features 38 | self.conv=torch.nn.Conv1d(in_channels,self.feature_channel_num,7,2,3) 39 | self.norm=torch.nn.BatchNorm1d(self.feature_channel_num) 40 | self.relu=torch.nn.ReLU() 41 | self.maxpool=torch.nn.MaxPool1d(3,2,1) 42 | 43 | self.DenseBlock1=DenseBlock(layer_num[0],growth_rate,self.feature_channel_num,middele_channels) 44 | self.feature_channel_num=self.feature_channel_num+layer_num[0]*growth_rate 45 | self.Transition1=Transition(self.feature_channel_num) 46 | 47 | self.DenseBlock2=DenseBlock(layer_num[1],growth_rate,self.feature_channel_num//2,middele_channels) 48 | self.feature_channel_num=self.feature_channel_num//2+layer_num[1]*growth_rate 49 | self.Transition2 = Transition(self.feature_channel_num) 50 | 51 | self.DenseBlock3 = DenseBlock(layer_num[2],growth_rate,self.feature_channel_num//2,middele_channels) 52 | 
self.feature_channel_num=self.feature_channel_num//2+layer_num[2]*growth_rate 53 | self.Transition3 = Transition(self.feature_channel_num) 54 | 55 | self.DenseBlock4 = DenseBlock(layer_num[3],growth_rate,self.feature_channel_num//2,middele_channels) 56 | self.feature_channel_num=self.feature_channel_num//2+layer_num[3]*growth_rate 57 | 58 | self.avgpool=torch.nn.AdaptiveAvgPool1d(1) 59 | 60 | self.classifer = torch.nn.Sequential( 61 | torch.nn.Linear(self.feature_channel_num, self.feature_channel_num//2), 62 | torch.nn.ReLU(), 63 | torch.nn.Dropout(0.5), 64 | torch.nn.Linear(self.feature_channel_num//2, classes), 65 | 66 | ) 67 | 68 | 69 | def forward(self,x): 70 | x = self.conv(x) 71 | x = self.norm(x) 72 | x = self.relu(x) 73 | x = self.maxpool(x) 74 | 75 | x = self.DenseBlock1(x) 76 | x = self.Transition1(x) 77 | 78 | x = self.DenseBlock2(x) 79 | x = self.Transition2(x) 80 | 81 | x = self.DenseBlock3(x) 82 | x = self.Transition3(x) 83 | 84 | x = self.DenseBlock4(x) 85 | x = self.avgpool(x) 86 | x = x.view(-1,self.feature_channel_num) 87 | x = self.classifer(x) 88 | 89 | return x 90 | 91 | 92 | 93 | if __name__ == '__main__': 94 | input = torch.randn(size=(1,1,224)) 95 | model = DenseNet(layer_num=(6,12,24,16),growth_rate=32,in_channels=1,classes=5) 96 | output = model(input) 97 | print(output.shape) 98 | 99 | 100 | -------------------------------------------------------------------------------- /Models/shuffuleNetV2.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class channel_shuffle(torch.nn.Module): 4 | def __init__(self, groups): 5 | super().__init__() 6 | self.groups = groups 7 | 8 | def forward(self, x): 9 | b, c, l = x.size() 10 | group_channel = c // self.groups 11 | x = x.reshape(b, self.groups, group_channel, l) 12 | x = x.permute(0, 2, 1, 3).contiguous() 13 | x = x.reshape(b, c, l) 14 | return x 15 | 16 | class shuffuleV2Block(torch.nn.Module): 17 | def __init__(self, In_channel, Med_channel, Out_channel, stride=2): 18 | super(shuffuleV2Block, self).__init__() 19 | self.stride = stride # Added to store the stride value 20 | self.In_channel = In_channel 21 | self.Out_channel = Out_channel 22 | 23 | if self.stride == 2: 24 | self.left = torch.nn.Sequential( 25 | torch.nn.Conv1d(self.In_channel, self.In_channel, 3, self.stride, padding=1, groups=self.In_channel), 26 | torch.nn.BatchNorm1d(self.In_channel), 27 | torch.nn.ReLU(), 28 | torch.nn.Conv1d(self.In_channel, Out_channel, 1), 29 | torch.nn.BatchNorm1d(Out_channel), 30 | torch.nn.ReLU(), 31 | ) 32 | else: 33 | self.In_channel = self.In_channel//2 34 | self.Out_channel = self.Out_channel//2 35 | 36 | self.right = torch.nn.Sequential( 37 | torch.nn.Conv1d(self.In_channel, Med_channel, 1), 38 | torch.nn.BatchNorm1d(Med_channel), 39 | torch.nn.ReLU(), 40 | torch.nn.Conv1d(Med_channel, Med_channel, 3, self.stride, padding=1, groups=Med_channel), 41 | torch.nn.BatchNorm1d(Med_channel), 42 | torch.nn.ReLU(), 43 | torch.nn.Conv1d(Med_channel, self.Out_channel, 1), 44 | torch.nn.BatchNorm1d(self.Out_channel), 45 | torch.nn.ReLU(), 46 | ) 47 | self.shuffule = channel_shuffle(2) 48 | 49 | def forward(self, x): 50 | if self.stride == 2: 51 | xl = self.left(x) 52 | xr = self.right(x) 53 | x_out = torch.cat((xl, xr), 1) 54 | 55 | else: 56 | xl,xr = x.chunk(2,dim=1) 57 | xr = self.right(xr) 58 | x_out = torch.cat((xl, xr), 1) 59 | 60 | return self.shuffule(x_out) 61 | 62 | 63 | 64 | 65 | class shuffuleNetV2(torch.nn.Module): 66 | def __init__(self,in_channels = 2, classes = 
125): 67 | super().__init__() 68 | self.feature = torch.nn.Sequential( 69 | torch.nn.Conv1d(in_channels,24,3,2,1), 70 | torch.nn.MaxPool1d(3,2,1), 71 | shuffuleV2Block(24,24,58,2), 72 | 73 | shuffuleV2Block(116,116//4,116,1), 74 | shuffuleV2Block(116,116//4,116,1), 75 | shuffuleV2Block(116,116//4,116,1), 76 | 77 | shuffuleV2Block(116, 116, 116, 2), 78 | 79 | shuffuleV2Block(232, 232//4, 232, 1), 80 | shuffuleV2Block(232, 232//4, 232, 1), 81 | shuffuleV2Block(232, 232//4, 232, 1), 82 | shuffuleV2Block(232, 232//4, 232, 1), 83 | shuffuleV2Block(232, 232//4, 232, 1), 84 | shuffuleV2Block(232, 232//4, 232, 1), 85 | shuffuleV2Block(232, 232//4, 232, 1), 86 | 87 | shuffuleV2Block(232, 232, 232, 2), 88 | shuffuleV2Block(464, 464//4, 464, 1), 89 | shuffuleV2Block(464, 464//4, 464, 1), 90 | shuffuleV2Block(464, 464//4, 464, 1), 91 | 92 | torch.nn.Conv1d(464,1024,1), 93 | torch.nn.AdaptiveAvgPool1d(1) 94 | ) 95 | 96 | self.classifier = torch.nn.Sequential( 97 | torch.nn.Flatten(), 98 | torch.nn.Linear(1024,classes) 99 | 100 | ) 101 | 102 | def forward(self,x): 103 | x = self.feature(x) 104 | x = self.classifier(x) 105 | return x 106 | if __name__ == "__main__": 107 | x = torch.randn(1, 2, 200) 108 | # model = shuffuleV2Block(300, 300 // 4, 300, 1) 109 | model = shuffuleNetV2(2, 125) 110 | output = model(x) 111 | print(output.size()) 112 | -------------------------------------------------------------------------------- /Models/Mnasnet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class SE_block(torch.nn.Module): 4 | def __init__(self,in_channel,ratio=1): 5 | super(SE_block, self).__init__() 6 | self.avepool = torch.nn.AdaptiveAvgPool1d(1) 7 | self.linear1 = torch.nn.Linear(in_channel,in_channel//ratio) 8 | self.linear2 = torch.nn.Linear(in_channel//ratio,in_channel) 9 | self.Hardsigmoid = torch.nn.Hardsigmoid(inplace=True) 10 | self.Relu = torch.nn.ReLU(inplace=True) 11 | 12 | def forward(self,input): 13 | b,c,_ = input.shape 14 | x = self.avepool(input) 15 | x = x.view([b,c]) 16 | x = self.linear1(x) 17 | x = self.Relu(x) 18 | x = self.linear2(x) 19 | x = self.Hardsigmoid(x) 20 | x = x.view([b,c,1]) 21 | 22 | return input*x 23 | 24 | 25 | 26 | 27 | class conv(torch.nn.Module): 28 | def __init__(self, in_channels, out_channels, keral,stride=1, groups=1,use_activation=True): 29 | super().__init__() 30 | self.use_activation = use_activation 31 | padding = keral//2 32 | self.conv = torch.nn.Conv1d(in_channels, out_channels, keral, stride,padding, groups=groups) 33 | self.bath = torch.nn.BatchNorm1d(out_channels) 34 | self.activation = torch.nn.ReLU(inplace=True) 35 | 36 | def forward(self,x): 37 | x = self.conv(x) 38 | if x.size()[-1] != 1: 39 | x = self.bath(x) 40 | if self.use_activation: 41 | x = self.activation(x) 42 | return x 43 | 44 | 45 | 46 | 47 | class SepConv(torch.nn.Module): 48 | def __init__(self,in_channels,out_channels,stride): 49 | super().__init__() 50 | self.conv = conv(in_channels,in_channels,3,stride,out_channels,True) 51 | self.conv1 = conv(in_channels,out_channels,1,use_activation=False) 52 | 53 | def forward(self,x): 54 | x = self.conv(x) 55 | x = self.conv1(x) 56 | return x 57 | 58 | 59 | 60 | 61 | class MBConv(torch.nn.Module): 62 | def __init__(self,in_channels,out_channels,keral,stride,t=3,use_attention = False): 63 | super().__init__() 64 | self.use_attention = use_attention 65 | self.conv = conv(in_channels,in_channels*t,1) 66 | self.conv1 = 
conv(in_channels*t,in_channels*t,keral,stride=stride,groups=in_channels*t) 67 | self.attention = SE_block(in_channels*t) 68 | self.conv2 = conv(in_channels*t,out_channels,1,use_activation=False) 69 | 70 | def forward(self,x): 71 | x = self.conv(x) 72 | x = self.conv1(x) 73 | if self.use_attention: 74 | x = self.attention(x) 75 | x = self.conv2(x) 76 | 77 | return x 78 | 79 | class MnasNetA1(torch.nn.Module): 80 | def __init__(self,in_channels,classes): 81 | super().__init__() 82 | 83 | self.fearures = torch.nn.Sequential( 84 | conv(in_channels,32,3,stride=2,use_activation=False), 85 | SepConv(32,16,1), 86 | MBConv(16,16,3,2,6), 87 | MBConv(16,24,3,1,6), 88 | MBConv(24,24,5,2,3,True), 89 | MBConv(24,24,5,2,3,True), 90 | MBConv(24,40,5,2,3,True), 91 | MBConv(40, 40, 3, 2,6), 92 | MBConv(40, 40, 3, 1,6 ), 93 | MBConv(40, 40, 3, 1,6), 94 | MBConv(40, 80, 3, 1,6), 95 | MBConv(80, 80, 3, 1,6,True), 96 | MBConv(80, 112, 3, 1,6,True), 97 | MBConv(112, 112, 5, 2,6,True), 98 | MBConv(112, 112, 5, 1,6,True), 99 | MBConv(112, 160, 5, 1,6,True), 100 | MBConv(160, 160, 3, 2,6), 101 | torch.nn.AdaptiveAvgPool1d(1) 102 | ) 103 | self.classifier = torch.nn.Sequential( 104 | torch.nn.Flatten(), 105 | torch.nn.Linear(160,80), 106 | torch.nn.ReLU(inplace=True), 107 | torch.nn.Linear(80, classes), 108 | ) 109 | def forward(self,x): 110 | x = self.fearures(x) 111 | x = self.classifier(x) 112 | return x 113 | 114 | 115 | if __name__ == "__main__": 116 | model = MnasNetA1(3,5) 117 | input = torch.randn(1,3,224) 118 | output = model(input) 119 | print(output.size()) 120 | 121 | 122 | 123 | -------------------------------------------------------------------------------- /Models/Xception.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class SeparableConv1d(torch.nn.Module): 5 | def __init__(self, in_channels, out_channels, kernel_size, stride, padding): 6 | super(SeparableConv1d, self).__init__() 7 | 8 | # Depthwise convolution 9 | self.depthwise = torch.nn.Conv1d(in_channels, in_channels, kernel_size, stride, padding, groups=in_channels) 10 | # Pointwise convolution 11 | self.pointwise = torch.nn.Conv1d(in_channels, out_channels, kernel_size=1) 12 | 13 | def forward(self, x): 14 | x = self.depthwise(x) 15 | x = self.pointwise(x) 16 | return x 17 | 18 | 19 | class Entry(torch.nn.Module): 20 | def __init__(self,in_channels): 21 | super().__init__() 22 | self.beforeresidual = torch.nn.Sequential( 23 | torch.nn.Conv1d(in_channels,32,3,2,1), 24 | torch.nn.ReLU(), 25 | torch.nn.Conv1d(32, 64, 3, 2, 1), 26 | torch.nn.ReLU() 27 | ) 28 | 29 | self.residual_branch1 = torch.nn.Conv1d(64, 128, 1, 2) 30 | self.residual_model1 = torch.nn.Sequential( 31 | SeparableConv1d(64,128,3,1,1), 32 | torch.nn.ReLU(), 33 | SeparableConv1d(128, 128, 3, 1, 1), 34 | torch.nn.MaxPool1d(3,2,1) 35 | ) 36 | 37 | self.residual_branch2 = torch.nn.Conv1d(256, 256, 1, 2) 38 | self.residual_model2 = torch.nn.Sequential( 39 | torch.nn.ReLU(), 40 | SeparableConv1d(256,256,3,1,1), 41 | torch.nn.ReLU(), 42 | SeparableConv1d(256, 256, 3, 1, 1), 43 | torch.nn.MaxPool1d(3,2,1) 44 | ) 45 | 46 | self.residual_branch3 = torch.nn.Conv1d(512, 728, 1, 2) 47 | self.residual_model3 = torch.nn.Sequential( 48 | torch.nn.ReLU(), 49 | SeparableConv1d(512,728,3,1,1), 50 | torch.nn.ReLU(), 51 | SeparableConv1d(728, 728, 3, 1, 1), 52 | torch.nn.MaxPool1d(3,2,1) 53 | ) 54 | 55 | 56 | def forward(self,x): 57 | x = self.beforeresidual(x) 58 | 59 | x1 = self.residual_branch1(x) 60 | x = self.residual_model1(x) 61 | x = torch.cat([x,x1],dim=1) 62
| 63 | x1 = self.residual_branch2(x) 64 | x = self.residual_model2(x) 65 | x = torch.cat([x,x1],dim=1) 66 | 67 | x1 = self.residual_branch3(x) 68 | x = self.residual_model3(x) 69 | # x = torch.cat([x,x1],dim=1) 70 | x = x+x1 71 | 72 | return x 73 | 74 | 75 | class Middleflow(torch.nn.Module): 76 | def __init__(self): 77 | super().__init__() 78 | self.layers = torch.nn.Sequential( 79 | torch.nn.ReLU(), 80 | SeparableConv1d(728,728,3,1,1), 81 | torch.nn.ReLU(), 82 | SeparableConv1d(728, 728, 3, 1, 1), 83 | torch.nn.ReLU(), 84 | SeparableConv1d(728, 728, 3, 1, 1), 85 | ) 86 | 87 | def forward(self,x): 88 | return x + self.layers(x) 89 | 90 | 91 | class Exitflow(torch.nn.Module): 92 | def __init__(self,classes): 93 | super().__init__() 94 | 95 | self.residual = torch.nn.Conv1d(728,1024,1,2) 96 | self.residual_model = torch.nn.Sequential( 97 | torch.nn.ReLU(), 98 | SeparableConv1d(728,728,3,1,1), 99 | torch.nn.ReLU(), 100 | SeparableConv1d(728, 1024, 3, 1, 1), 101 | torch.nn.MaxPool1d(3,2,1) 102 | ) 103 | self.last_layer = torch.nn.Sequential( 104 | SeparableConv1d(1024,1536,3,1,1), 105 | torch.nn.ReLU(), 106 | SeparableConv1d(1536, 2048, 3, 1, 1), 107 | torch.nn.ReLU(), 108 | torch.nn.AdaptiveAvgPool1d(1), 109 | torch.nn.Flatten(), 110 | torch.nn.Linear(2048,classes) 111 | ) 112 | 113 | 114 | def forward(self,x): 115 | x = self.residual_model(x) + self.residual(x) 116 | x = self.last_layer(x) 117 | 118 | return x 119 | 120 | 121 | 122 | class Xception(torch.nn.Module): 123 | def __init__(self,in_channels,classes): 124 | super().__init__() 125 | self.layers = torch.nn.Sequential( 126 | Entry(in_channels), 127 | Middleflow(), 128 | Middleflow(), 129 | Middleflow(), 130 | Middleflow(), 131 | Middleflow(), 132 | Middleflow(), 133 | Middleflow(), 134 | Middleflow(), 135 | Exitflow(classes) 136 | ) 137 | def forward(self,x): 138 | return self.layers(x) 139 | 140 | 141 | 142 | if __name__ == '__main__': 143 | input = torch.randn((1,3,224)) 144 | # model = SeparableConv1d(12,12,3,1,1) 145 | # model = Entry(12) 146 | # model = Middleflow() 147 | model = Xception(3,5) 148 | output = model(input) 149 | print(output.size()) 150 | 151 | -------------------------------------------------------------------------------- /Models/EfficientNet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | # from thop import profile 3 | 4 | class SEModule(torch.nn.Module): 5 | def __init__(self,in_channel,ratio=4): 6 | super(SEModule, self).__init__() 7 | self.avepool = torch.nn.AdaptiveAvgPool1d(1) 8 | self.linear1 = torch.nn.Linear(in_channel,in_channel//ratio) 9 | self.linear2 = torch.nn.Linear(in_channel//ratio,in_channel) 10 | self.Hardsigmoid = torch.nn.Hardsigmoid(inplace=True) 11 | self.Relu = torch.nn.ReLU(inplace=True) 12 | 13 | def forward(self,input): 14 | b,c,_ = input.shape 15 | x = self.avepool(input) 16 | x = x.view([b,c]) 17 | x = self.linear1(x) 18 | x = self.Relu(x) 19 | x = self.linear2(x) 20 | x = self.Hardsigmoid(x) 21 | x = x.view([b,c,1]) 22 | 23 | return input*x 24 | 25 | 26 | class MBConvBlock(torch.nn.Module): 27 | def __init__(self, in_channels, out_channels, expand_ratio, kernel_size, stride, se_ratio=4): 28 | super(MBConvBlock, self).__init__() 29 | # Expansion phase 30 | expanded_channels = int(in_channels * expand_ratio) 31 | self.expand_conv = torch.nn.Conv1d(in_channels, expanded_channels, kernel_size=1, stride=1, padding=0, bias=False) 32 | self.bn1 = torch.nn.BatchNorm1d(expanded_channels) 33 | # Depthwise convolution 34 | 
self.depthwise_conv = torch.nn.Conv1d(expanded_channels, expanded_channels, kernel_size=kernel_size, stride=stride, 35 | padding=kernel_size // 2, groups=expanded_channels, bias=False) 36 | self.bn2 = torch.nn.BatchNorm1d(expanded_channels) 37 | # Squeeze and Excitation (SE) phase 38 | self.se = SEModule(expanded_channels, se_ratio) 39 | # Linear Bottleneck 40 | self.linear_bottleneck = torch.nn.Conv1d(expanded_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False) 41 | self.bn3 = torch.nn.BatchNorm1d(out_channels) 42 | # Skip connection if input and output channels are the same and stride is 1 43 | self.use_skip_connection = (stride == 1) and (in_channels == out_channels) 44 | self.leakyrelu = torch.nn.LeakyReLU(0.02) 45 | 46 | def forward(self, x): 47 | identity = x 48 | # Expansion phase 49 | x = self.leakyrelu(self.bn1(self.expand_conv(x))) 50 | # Depthwise convolution phase 51 | x = self.leakyrelu(self.bn2(self.depthwise_conv(x))) 52 | # Squeeze and Excitation phase 53 | x = self.se(x) 54 | # Linear Bottleneck phase 55 | x = self.bn3(self.linear_bottleneck(x)) 56 | 57 | # Skip connection 58 | if self.use_skip_connection: 59 | x = identity + x 60 | 61 | return x 62 | 63 | 64 | class EfficientNetB0(torch.nn.Module): 65 | def __init__(self, in_channels=3,classes=1000): 66 | super(EfficientNetB0, self).__init__() 67 | 68 | # Initial stem convolution 69 | self.stem = torch.nn.Sequential( 70 | torch.nn.Conv1d(in_channels, 32, kernel_size=3, stride=2, padding=1, bias=False), 71 | torch.nn.BatchNorm1d(32), 72 | torch.nn.LeakyReLU(0.02) 73 | ) 74 | 75 | # Building blocks 76 | self.blocks = torch.nn.Sequential( 77 | MBConvBlock(32, 16, 1, 3, 1), 78 | 79 | MBConvBlock(16, 24, 6, 3, 2), 80 | MBConvBlock(24, 24, 6, 3, 1), 81 | 82 | MBConvBlock(24, 40, 6, 5, 2), 83 | MBConvBlock(40, 40, 6, 5, 1), 84 | 85 | MBConvBlock(40, 80, 6, 3, 2), 86 | MBConvBlock(80, 80, 6, 3, 1), 87 | MBConvBlock(80, 80, 6, 3, 1), 88 | 89 | 90 | MBConvBlock(80, 112, 6, 5, 1), 91 | MBConvBlock(112, 112, 6, 5, 1), 92 | MBConvBlock(112, 112, 6, 5, 1), 93 | 94 | MBConvBlock(112, 192, 6, 5, 2), 95 | MBConvBlock(192, 192, 6, 5, 1), 96 | MBConvBlock(192, 192, 6, 5, 1), 97 | MBConvBlock(192, 192, 6, 5, 1), 98 | 99 | MBConvBlock(192, 320, 6, 3, 1), 100 | ) 101 | 102 | # Head 103 | self.head = torch.nn.Sequential( 104 | torch.nn.Conv1d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False), 105 | torch.nn.BatchNorm1d(1280), 106 | torch.nn.LeakyReLU(0.02) 107 | ) 108 | 109 | # Global average pooling and classifier 110 | self.avg_pool = torch.nn.AdaptiveAvgPool1d(1) 111 | self.fc = torch.nn.Linear(1280, classes) 112 | 113 | def forward(self, x): 114 | x = self.stem(x) 115 | x = self.blocks(x) 116 | x = self.head(x) 117 | x = self.avg_pool(x) 118 | x = x.view(x.size(0), -1) 119 | x = self.fc(x) 120 | return x 121 | 122 | 123 | if __name__ == '__main__': 124 | # input = torch.randn((1,1,224)) 125 | model = EfficientNetB0(2,200) 126 | 127 | # input = torch.randn(1, 2, 200) 128 | # flops, params = profile(model, inputs=(input,)) 129 | 130 | # print("FLOPs=", str(flops / 1e6) + '{}'.format("M")) 131 | # print("params=", str(params / 1e6) + '{}'.format("M")) 132 | -------------------------------------------------------------------------------- /Models/GoogLeNet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class Inception(torch.nn.Module): 5 | def __init__(self, in_channels=56, ch1=64, ch3_reduce=96, ch3=128, ch5_reduce=16, ch5=32, pool_proj=32): 6 | 
super(Inception, self).__init__() 7 | 8 | self.branch1 = torch.nn.Sequential( 9 | torch.nn.Conv1d(in_channels, ch1, kernel_size=1), 10 | torch.nn.BatchNorm1d(ch1) 11 | ) 12 | 13 | self.branch3 = torch.nn.Sequential( 14 | torch.nn.Conv1d(in_channels, ch3_reduce, kernel_size=1), 15 | torch.nn.BatchNorm1d(ch3_reduce), 16 | torch.nn.Conv1d(ch3_reduce, ch3, kernel_size=3, padding=1), 17 | torch.nn.BatchNorm1d(ch3), 18 | ) 19 | 20 | self.branch5 = torch.nn.Sequential( 21 | torch.nn.Conv1d(in_channels, ch5_reduce, kernel_size=1), 22 | torch.nn.BatchNorm1d(ch5_reduce), 23 | torch.nn.Conv1d(ch5_reduce, ch5, kernel_size=5, padding=2), 24 | torch.nn.BatchNorm1d(ch5), 25 | ) 26 | 27 | self.branch_pool = torch.nn.Sequential( 28 | torch.nn.MaxPool1d(kernel_size=3, stride=1, padding=1), 29 | torch.nn.Conv1d(in_channels, pool_proj, kernel_size=1) 30 | ) 31 | 32 | def forward(self, x): 33 | return torch.cat([self.branch1(x), self.branch3(x), self.branch5(x), self.branch_pool(x)], 1) 34 | 35 | 36 | class GoogLeNet(torch.nn.Module): 37 | def __init__(self, in_channels=2, classes=5, in_sample_points=224): 38 | super(GoogLeNet, self).__init__() 39 | 40 | self.features = torch.nn.Sequential( 41 | torch.nn.Linear(in_sample_points, 224), 42 | torch.nn.Conv1d(in_channels, 64, kernel_size=7, 43 | stride=2, padding=3), 44 | torch.nn.MaxPool1d(3, 2, padding=1), 45 | torch.nn.Conv1d(64, 192, 3, padding=1), 46 | torch.nn.MaxPool1d(3, 2, padding=1), 47 | Inception(192, 64, 96, 128, 16, 32, 32), 48 | Inception(256, 128, 128, 192, 32, 96, 64), 49 | torch.nn.MaxPool1d(3, 2, padding=1), 50 | Inception(480, 192, 96, 208, 16, 48, 64), 51 | ) 52 | 53 | self.classifer_max_pool = torch.nn.MaxPool1d(5, 3) 54 | 55 | self.classifer = torch.nn.Sequential( 56 | torch.nn.Linear(2048, 1024), 57 | torch.nn.Dropout(0.5), 58 | torch.nn.ReLU(), 59 | torch.nn.Linear(1024, 512), 60 | torch.nn.Dropout(0.5), 61 | torch.nn.ReLU(), 62 | torch.nn.Linear(512, classes), 63 | ) 64 | 65 | self.Inception_4b = Inception(512, 160, 112, 224, 24, 64, 64) 66 | self.Inception_4c = Inception(512, 128, 128, 256, 24, 64, 64) 67 | self.Inception_4d = Inception(512, 112, 144, 288, 32, 64, 64) 68 | 69 | self.classifer1 = torch.nn.Sequential( 70 | torch.nn.Linear(2112, 1056), 71 | torch.nn.Dropout(0.5), 72 | torch.nn.ReLU(), 73 | torch.nn.Linear(1056, 528), 74 | torch.nn.Dropout(0.5), 75 | torch.nn.ReLU(), 76 | torch.nn.Linear(528, classes), 77 | ) 78 | 79 | self.Inception_4e = Inception(528, 256, 160, 320, 32, 128, 128) 80 | self.max_pool = torch.nn.MaxPool1d(3, 2, 1) 81 | 82 | self.Inception_5a = Inception(832, 256, 160, 320, 32, 128, 128) 83 | self.Inception_5b = Inception(832, 384, 192, 384, 48, 128, 128) 84 | 85 | self.avg_pool = torch.nn.AvgPool1d(7, stride=1) 86 | self.dropout = torch.nn.Dropout(0.4) 87 | self.classifer2 = torch.nn.Sequential( 88 | torch.nn.Linear(1024, 512), 89 | torch.nn.Dropout(0.5), 90 | torch.nn.ReLU(), 91 | torch.nn.Linear(512, classes), 92 | ) 93 | 94 | def forward(self, x): 95 | x = self.features(x) 96 | 97 | y = self.classifer(self.classifer_max_pool(x).view(-1, 2048)) 98 | 99 | x = self.Inception_4b(x) 100 | x = self.Inception_4c(x) 101 | x = self.Inception_4d(x) 102 | 103 | y1 = self.classifer1(self.classifer_max_pool(x).view(-1, 2112)) 104 | 105 | x = self.Inception_4e(x) 106 | x = self.max_pool(x) 107 | x = self.Inception_5a(x) 108 | x = self.Inception_5b(x) 109 | x = self.avg_pool(x) 110 | x = self.dropout(x) 111 | x = x.view(-1, 1024) 112 | x = self.classifer2(x) 113 | 114 | # return x, y, y1 115 | return x 116 | 117 | 118 | 
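# The two extra heads computed in forward() (y and y1) are GoogLeNet's auxiliary
# classifiers. The loss module below combines all three outputs, weighting the two
# auxiliary losses by 0.3 as in the original paper; it assumes forward() returns
# (x, y, y1), i.e. the commented-out return statement above.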
class GoogLeNetLoss(torch.nn.Module): 119 | def __init__(self): 120 | super(GoogLeNetLoss, self).__init__() 121 | self.CrossEntropyLoss = torch.nn.CrossEntropyLoss() 122 | 123 | def forward(self, data, label): 124 | c2_loss = self.CrossEntropyLoss(data[0], label) 125 | c0_loss = self.CrossEntropyLoss(data[1], label) 126 | c1_loss = self.CrossEntropyLoss(data[2], label) 127 | 128 | loss = c2_loss + 0.3*(c0_loss+c1_loss) 129 | 130 | return loss 131 | 132 | 133 | if __name__ == '__main__': 134 | model = GoogLeNet(in_channels=2, classes=2) 135 | input = torch.randn(size=(2, 2, 224)) 136 | # GoogLeNetLoss is meant for a forward() that also returns the auxiliary outputs: [c2,c0,c1] = model(input) 137 | output = model(input) 138 | criterion = torch.nn.CrossEntropyLoss()  # forward() currently returns only the final output x 139 | label = torch.tensor([1, 0]) 140 | print(f"Loss:{criterion(output,label)}") 141 | print(f"result:{output}") 142 | -------------------------------------------------------------------------------- /Models/MobileNetV3.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class conv(torch.nn.Module): 5 | def __init__(self, in_channels, out_channels, keral,stride=1, groups=1,activation = None): 6 | super().__init__() 7 | 8 | padding = keral//2 9 | self.use_activation = activation 10 | self.conv = torch.nn.Conv1d(in_channels, out_channels, keral, stride,padding, groups=groups) 11 | self.bath = torch.nn.BatchNorm1d(out_channels) 12 | if self.use_activation == 'Relu': 13 | self.activation = torch.nn.ReLU6() 14 | elif self.use_activation == 'H_swish': 15 | self.activation = torch.nn.Hardswish() 16 | 17 | def forward(self,x): 18 | x = self.conv(x) 19 | if x.size()[-1] != 1: 20 | x = self.bath(x) 21 | if self.use_activation != None: 22 | x = self.activation(x) 23 | return x 24 | 25 | 26 | class bottleneck(torch.nn.Module): 27 | def __init__(self,in_channels,keral_size,expansion_size,out_channels,use_attenton = False,activation = 'Relu',stride=1): 28 | super().__init__() 29 | 30 | self.stride = stride 31 | self.in_channels = in_channels 32 | self.out_channels = out_channels 33 | self.use_attenton = use_attenton 34 | 35 | self.conv = conv(in_channels,expansion_size,1,activation=activation) 36 | self.conv1 = conv(expansion_size,expansion_size,keral_size,stride=stride,groups=expansion_size,activation=activation) 37 | 38 | if self.use_attenton: 39 | self.attenton = SE_block(expansion_size) 40 | 41 | self.conv2 = conv(expansion_size,out_channels,1,activation=activation) 42 | 43 | def forward(self,x): 44 | 45 | x1 = self.conv(x) 46 | x1 = self.conv1(x1) 47 | if self.use_attenton: 48 | x1 = self.attenton(x1) 49 | x1 = self.conv2(x1) 50 | 51 | if self.stride == 1 and self.in_channels == self.out_channels: 52 | x1 += x 53 | 54 | return x1 55 | 56 | class SE_block(torch.nn.Module): 57 | def __init__(self,in_channel,ratio=1): 58 | super(SE_block, self).__init__() 59 | self.avepool = torch.nn.AdaptiveAvgPool1d(1) 60 | self.linear1 = torch.nn.Linear(in_channel,in_channel//ratio) 61 | self.linear2 = torch.nn.Linear(in_channel//ratio,in_channel) 62 | self.Hardsigmoid = torch.nn.Hardsigmoid(inplace=True) 63 | self.Relu = torch.nn.ReLU(inplace=True) 64 | 65 | def forward(self,input): 66 | b,c,_ = input.shape 67 | x = self.avepool(input) 68 | x = x.view([b,c]) 69 | x = self.linear1(x) 70 | x = self.Relu(x) 71 | x = self.linear2(x) 72 | x = self.Hardsigmoid(x) 73 | x = x.view([b,c,1]) 74 | 75 | return input*x 76 | 77 | 78 | class MobileNetV3_small(torch.nn.Module): 79 | def __init__(self,in_channels,classes): 80 | super().__init__() 81 | self.fearures = torch.nn.Sequential( 82 |
conv(in_channels,16,3,2,activation='H_swish'), 83 | bottleneck(16,3,16,16,True,'Relu',2), 84 | bottleneck(16,3,72,24,False,'Relu',2), 85 | bottleneck(24,3,88,24,False,'Relu',1), 86 | bottleneck(24,5,96,40,False,'H_swish',2), 87 | bottleneck(40,5,240,40,True,'H_swish',1), 88 | bottleneck(40,5,240,40,True,'H_swish',1), 89 | bottleneck(40,5,120,48,True,'H_swish',1), 90 | bottleneck(48,5,144,48,True,'H_swish',1), 91 | bottleneck(48,5,288,96,True,'H_swish',2), 92 | bottleneck(96,5,576,96,True,'H_swish',1), 93 | bottleneck(96,5,576,96,True,'H_swish',1), 94 | conv(96, 576, 1, 1, activation='H_swish'), 95 | torch.nn.AdaptiveAvgPool1d(1), 96 | ) 97 | self.classifier = torch.nn.Sequential( 98 | conv(576, 1024, 1, 1, activation='H_swish'), 99 | conv(1024, classes, 1, 1, activation='H_swish'), 100 | torch.nn.Flatten() 101 | 102 | ) 103 | def forward(self,x): 104 | x = self.fearures(x) 105 | x = self.classifier(x) 106 | 107 | return x 108 | 109 | class MobileNetV3_large(torch.nn.Module): 110 | def __init__(self,in_channels,classes): 111 | super().__init__() 112 | self.fearures = torch.nn.Sequential( 113 | conv(in_channels,16,3,2,activation='H_swish'), 114 | bottleneck(16,3,16,16,False,'Relu',1), 115 | bottleneck(16,3,64,24,False,'Relu',2), 116 | bottleneck(24,3,72,24,False,'Relu',1), 117 | bottleneck(24,5,72,40,True,'Relu',2), 118 | bottleneck(40,5,120,40,True,'Relu',1), 119 | bottleneck(40,5,120,40,True,'Relu',1), 120 | bottleneck(40,3,240,80,False,'H_swish',2), 121 | bottleneck(80,3,200,80,False,'H_swish',1), 122 | bottleneck(80,3,184,80,False,'H_swish',1), 123 | bottleneck(80,3,184,80,False,'H_swish',1), 124 | bottleneck(80,3,480,112,True,'H_swish',1), 125 | bottleneck(112,3,672,112,True,'H_swish',1), 126 | bottleneck(112,5,672,160,True,'H_swish',2), 127 | bottleneck(160,5,960,160,True,'H_swish',2), 128 | bottleneck(160,5,960,160,True,'H_swish',2), 129 | 130 | 131 | 132 | conv(160, 960, 1, 1, activation='H_swish'), 133 | torch.nn.AdaptiveAvgPool1d(1), 134 | ) 135 | self.classifier = torch.nn.Sequential( 136 | conv(960, 1280, 1, 1, activation='H_swish'), 137 | conv(1280, classes, 1, 1, activation='H_swish'), 138 | torch.nn.Flatten() 139 | 140 | ) 141 | def forward(self,x): 142 | x = self.fearures(x) 143 | x = self.classifier(x) 144 | 145 | return x 146 | 147 | 148 | 149 | if __name__ == "__main__": 150 | input = torch.randn((1,112,224)) 151 | # model = MobileV3_small(in_channels=112, classes=5) 152 | model = MobileNetV3_large(in_channels=112, classes=5) 153 | output = model(input) 154 | print(output.shape) 155 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import matplotlib.pyplot as plt 4 | from torch.utils.data import DataLoader, Dataset,random_split 5 | from Package_dataset import package_dataset 6 | 7 | 8 | # import os 9 | # os.environ['KMP_DUPLICATE_LIB_OK']='True' 10 | 11 | from Models.LeNet import LeNet 12 | from Models.AlexNet import AlexNet 13 | from Models.ZFNet import ZFNet 14 | from Models.VGG19 import VGG19 15 | from Models.GoogLeNet import GoogLeNet 16 | from Models.ResNet50 import ResNet50 17 | from Models.DenseNet import DenseNet 18 | from Models.SqueezeNet import SqueezeNet 19 | from Models.Mnasnet import MnasNetA1 20 | from Models.MobileNetV1 import MobileNetV1 21 | from Models.MobileNetV2 import MobileNetV2 22 | from Models.MobileNetV3 import MobileNetV3_large, MobileNetV3_small 23 | from Models.shuffuleNetV1 
import shuffuleNetV1_G3 24 | from Models.shuffuleNetV2 import shuffuleNetV2 25 | from Models.Xception import Xception 26 | from Models.EfficientNet import EfficientNetB0 27 | 28 | data = np.load('Dataset/data.npy') 29 | label = np.load('Dataset/label.npy') 30 | 31 | dataset_partition_rate = 0.7 32 | epoch_number = 1000 33 | show_result_epoch = 10 34 | 35 | dataset, channels, length, classes = package_dataset(data, label) 36 | 37 | # partition dataset 38 | train_len = int(len(dataset) * dataset_partition_rate) 39 | test_len = int(len(dataset)) - train_len 40 | train_dataset, test_dataset = random_split(dataset=dataset, lengths=[train_len, test_len]) 41 | 42 | 43 | # Dataset wrapper 44 | class Dataset(Dataset): 45 | def __init__(self, data): 46 | self.len = len(data) 47 | self.x_data = torch.from_numpy(np.array(list(map(lambda x: x[0], data)), dtype=np.float32)) 48 | self.y_data = torch.from_numpy(np.array(list(map(lambda x: x[-1], data)))).squeeze().long() 49 | 50 | def __getitem__(self, index): 51 | return self.x_data[index], self.y_data[index] 52 | 53 | def __len__(self): 54 | return self.len 55 | 56 | 57 | # Build the DataLoaders 58 | Train_dataset = Dataset(train_dataset) 59 | Test_dataset = Dataset(test_dataset) 60 | dataloader = DataLoader(Train_dataset, shuffle=True, batch_size=50) 61 | testloader = DataLoader(Test_dataset, shuffle=True, batch_size=50) 62 | # Select the training device: GPU if available, otherwise CPU 63 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 64 | 65 | # Model initialization 66 | model = LeNet(in_channels=channels, input_sample_points=length, classes=classes) 67 | # model = AlexNet(in_channels=channels, input_sample_points=length, classes=classes) 68 | # model = AlexNet(in_channels=channels, input_sample_points=length, classes=classes) 69 | # model = ZFNet(in_channels=channels, input_sample_points=length, classes=classes) 70 | # model = VGG19(in_channels=channels, classes=classes) 71 | # model = GoogLeNet(in_channels=channels, classes=classes) 72 | # model =ResNet50(in_channels=channels, classes=classes) 73 | # model =DenseNet(in_channels=channels, classes=classes) 74 | # model =SqueezeNet(in_channels=channels, classes=classes) 75 | # model =MobileNetV1(in_channels=channels, classes=classes) 76 | # model =MobileNetV2(in_channels=channels, classes=classes) 77 | # model =MobileNetV3_small(in_channels=channels, classes=classes) 78 | # model =MobileNetV3_large(in_channels=channels, classes=classes) 79 | # model =shuffuleNetV1_G3(in_channels=channels, classes=classes) 80 | # model =shuffuleNetV2(in_channels=channels, classes=classes) 81 | # model =Xception(in_channels=channels, classes=classes) 82 | # model =EfficientNetB0(in_channels=channels, classes=classes) 83 | model.to(device) 84 | 85 | # Loss function 86 | criterion = torch.nn.CrossEntropyLoss() 87 | criterion.to(device) 88 | # Optimizer 89 | # optimizer = torch.optim.Adam(model.parameters(), lr=0.001) 90 | optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) 91 | 92 | train_acc_list = [] 93 | test_acc_list = [] 94 | 95 | 96 | # Training function 97 | def train(epoch): 98 | model.train() 99 | train_correct = 0 100 | train_total = 0 101 | 102 | for data in dataloader: 103 | train_data_value, train_data_label = data 104 | train_data_value, train_data_label = train_data_value.to(device), train_data_label.to(device) 105 | train_data_label_pred = model(train_data_value) 106 | loss = criterion(train_data_label_pred, train_data_label) 107 | optimizer.zero_grad() 108 | loss.backward() 109 | optimizer.step() 110 | if epoch % show_result_epoch == 0: 111 | probability,
        probability, predicted = torch.max(train_data_label_pred.data, dim=1)
        train_total += train_data_label_pred.size(0)
        train_correct += (predicted == train_data_label).sum().item()
        train_acc = round(100 * train_correct / train_total, 4)
        train_acc_list.append(train_acc)
        print('=' * 10, epoch // 10, '=' * 10)
        print('loss:', loss.item())
        print(f'Train accuracy:{train_acc}%')
        test()


# test function
def test():
    model.eval()
    test_correct = 0
    test_total = 0
    with torch.no_grad():
        for testdata in testloader:
            test_data_value, test_data_label = testdata
            test_data_value, test_data_label = test_data_value.to(device), test_data_label.to(device)
            test_data_label_pred = model(test_data_value)
            test_probability, test_predicted = torch.max(test_data_label_pred.data, dim=1)
            test_total += test_data_label_pred.size(0)
            test_correct += (test_predicted == test_data_label).sum().item()

    test_acc = round(100 * test_correct / test_total, 3)
    test_acc_list.append(test_acc)
    print(f'Test accuracy:{test_acc}%')


for epoch in range(1, epoch_number + 1):
    train(epoch)

plt.plot(np.array(range(epoch_number // show_result_epoch)) * show_result_epoch, train_acc_list)
plt.plot(np.array(range(epoch_number // show_result_epoch)) * show_result_epoch, test_acc_list)
plt.legend(['train', 'test'])
plt.title('Result')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.show()

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# 1D_Pytorch_Train_demo
A PyTorch training demo with classical CNN models.

## 1.Generate simulation datasets - Create_dataset.py

First of all, in case you have no dataset of your own, we generate one in the simplest possible way. The dataset consists of two parts: the training values and the labels.
The training values are represented by `data` and the labels by `label`.

The code in `Create_dataset.py` is as follows:

```python
import numpy as np

# number of simulated samples
numbers = 100
# number of simulated channels
channels = 3
# length of each simulated sample
length = 224
# number of simulated categories
classes = 2

# generate random data
data = np.random.randn(numbers,channels,length)
# generate random labels
label = np.random.randint(0,classes,numbers)

# save data and label to the Dataset folder
np.save('Dataset/data.npy',data,allow_pickle=True)
np.save('Dataset/label.npy',label,allow_pickle=True)
```
`np.random.randn` demo
```python
import numpy as np
data = np.random.randn(100, 3, 224)
print(data.shape)
# (100, 3, 224)
```

We use `np.random.randint` to generate the labels. This call produces `numbers` integers in the range [0, classes):

```python
label = np.random.randint(0,classes,numbers)
```
`np.random.randint` demo
```python
import numpy as np

label = np.random.randint(0, 2, 10)
print(label)
# [0 0 0 1 1 0 1 0 0 0]
```

After running this code, two npy files appear under the `Dataset` folder.


## 2.Packaging the dataset - Package_dataset.py
Before training our model, we need a function that packages each value with its label in the form `[value,label]`, so the dataset is easy to split and to train on. This is a default form; if you are new to this, you can simply follow it as-is.

```python
import numpy as np


def package_dataset(data, label):
    dataset = [[i, j] for i, j in zip(data, label)]
    # channel number
    channels = data[0].shape[0]
    # data length
    length = data[0].shape[1]
    # number of classes
    classes = len(np.unique(label))
    return dataset, channels, length, classes


if __name__ == '__main__':
    data = np.load('Dataset/data.npy')
    label = np.load('Dataset/label.npy')
    dataset, channels, length, classes = package_dataset(data, label)
    print(channels, length, classes)
    # 3 224 2
```
This function takes `data` and `label` as input and returns `dataset`, `channels`, `length`, and `classes`.
The following line packages the data and labels:
```python
dataset = [[i, j] for i, j in zip(data, label)]
```

If this looks a little abstract to a novice, here is a simple example. Using `zip()` and a list comprehension, we obtain a dataset consisting of multiple `[value,label]` pairs:

```python
data = [[1, 2, 3],
        [3, 1, 3],
        [1, 2, 3]]

label = [0, 1, 0]

dataset = [[i, j] for i, j in zip(data, label)]

print(dataset)
# [[[1, 2, 3], 0], [[3, 1, 3], 1], [[1, 2, 3], 0]]
```
## 3.Training code decomposition explanation

### 3.1 Import the required packages
Now we will walk through each part of the code step by step.
```python
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, Dataset, random_split
```
Import our packaging API:
```python
from Package_dataset import package_dataset
```
Import the models; you can pick whichever model you need:
```python
from Models.LeNet import LeNet
from Models.AlexNet import AlexNet
from Models.ZFNet import ZFNet
from Models.VGG19 import VGG19
from Models.GoogLeNet import GoogLeNet
from Models.ResNet50 import ResNet50
from Models.DenseNet import DenseNet
from Models.SqueezeNet import SqueezeNet
from Models.Mnasnet import MnasNetA1
from Models.MobileNetV1 import MobileNetV1
from Models.MobileNetV2 import MobileNetV2
from Models.MobileNetV3 import MobileNetV3_large, MobileNetV3_small
from Models.shuffuleNetV1 import shuffuleNetV1_G3
from Models.shuffuleNetV2 import shuffuleNetV2
from Models.Xception import Xception
from Models.EfficientNet import EfficientNetB0
```
### 3.2 load dataset
Load the dataset and read the values and labels.
```python
data = np.load('Dataset/data.npy')
label = np.load('Dataset/label.npy')
```
### 3.3 split train dataset and test dataset

`dataset_partition_rate` is the fraction of samples used for training, `epoch_number` is how many epochs we train the model for, and `show_result_epoch` controls how many epochs pass between tests.
Why do we need `show_result_epoch`? Because the model sometimes trains so fast that, efficiency aside, printing a result after every epoch floods the console faster than you can read it.
```python
dataset_partition_rate = 0.7
epoch_number = 1000
show_result_epoch = 10
```
Use our `package_dataset` API to obtain `dataset`, `channels`, `length`, and `classes`:
```python
dataset, channels, length, classes = package_dataset(data, label)
```
To split the dataset we use `torch.utils.data.random_split`. `random_split` takes two parameters: the `dataset` to be split, and a list of the lengths of the resulting parts.
```python
# partition dataset
train_len = int(len(dataset) * dataset_partition_rate)
test_len = int(len(dataset)) - train_len
train_dataset, test_dataset = random_split(dataset=dataset, lengths=[train_len, test_len])
```
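`random_split` demo (a minimal sketch on a toy list of 10 items split 7/3; the sizes here are made up):
```python
import torch
from torch.utils.data import random_split

toy_dataset = list(range(10))
train_part, test_part = random_split(dataset=toy_dataset, lengths=[7, 3])

# each part is a randomly drawn, non-overlapping subset of the input
print(len(train_part), len(test_part))
# 7 3
```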
Next we code the dataset loading class. This class transforms the raw dataset into a form PyTorch understands, so the data can be consumed by tools such as PyTorch's `DataLoader` to train models.
### 3.4 Dataset loading class implementation
```python
class Dataset(Dataset):
    def __init__(self, data):
        self.len = len(data)
        self.x_data = torch.from_numpy(np.array(list(map(lambda x: x[0], data)), dtype=np.float32))
        self.y_data = torch.from_numpy(np.array(list(map(lambda x: x[-1], data)))).squeeze().long()

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len
```
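A quick demo of this wrapper, assuming the `Dataset` class above is defined; the toy pairs imitate the `[value,label]` form produced by `package_dataset`:
```python
import numpy as np
import torch

# two fake samples with 3 channels and 224 points each
toy_pairs = [[np.random.randn(3, 224), 0], [np.random.randn(3, 224), 1]]
toy_dataset = Dataset(toy_pairs)
value, label = toy_dataset[0]

print(len(toy_dataset), value.shape, label)
# 2 torch.Size([3, 224]) tensor(0)
```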
`train_dataset` and `test_dataset` are the result of `random_split` dividing the input `dataset` into a training set and a test set according to the ratio we set. Apart from the number of samples they contain, the format of each sample is still `[value, label]`.
The most precise way to describe `Train_dataset = Dataset(train_dataset)` is that it instantiates `Dataset` to create a concrete dataset object that prepares the `train_dataset` data.
There is no need to overthink it: we wrap `train_dataset` and `test_dataset` separately with `Dataset`, then feed the resulting `Train_dataset` and `Test_dataset` into `DataLoader`. Setting the commonly used `shuffle` argument of `DataLoader` to `True` shuffles the input training or test set, and `batch_size` is the batch size of the mini-batch strategy used during training.
### 3.5 construct dataloader
```python
Train_dataset = Dataset(train_dataset)
Test_dataset = Dataset(test_dataset)
dataloader = DataLoader(Train_dataset, shuffle=True, batch_size=50)
testloader = DataLoader(Test_dataset, shuffle=True, batch_size=50)
```
### 3.6 choose device

Choose the training device: CPU or GPU. If CUDA is available, which generally means you installed the GPU build of PyTorch and have a graphics card, the device defaults to the GPU; otherwise `torch.cuda.is_available()` returns `False` and the CPU is selected. A laptop or desktop usually has only one graphics card; if your machine is a server with several graphics cards installed, `cuda:0` here can be changed to another numbered device such as `cuda:1`, `cuda:2`, and so on.
```python
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
```
### 3.7 choose model
Earlier, `package_dataset` returned `channels`, `length`, and `classes`; these parameters are needed when instantiating a model. If adaptive pooling layers are used when building the model, there is generally no need to pass in the number of sample points, i.e. the length of the value. So there are two cases: initialization either requires the number of input channels, the number of classes, and the length of the value (the number of one-dimensional sample points), or only the number of input channels and the number of classes.
`model.to(device)` deploys the model to the `device` we selected in the previous step.
```python
# Initialize
# model = LeNet(in_channels=channels, input_sample_points=length, classes=classes)
# model = AlexNet(in_channels=channels, input_sample_points=length, classes=classes)
# model = ZFNet(in_channels=channels, input_sample_points=length, classes=classes)
# model = VGG19(in_channels=channels, classes=classes)
# model = GoogLeNet(in_channels=channels, classes=classes)
# model = ResNet50(in_channels=channels, classes=classes)
# model = DenseNet(in_channels=channels, classes=classes)
# model = SqueezeNet(in_channels=channels, classes=classes)
# model = MnasNetA1(in_channels=channels, classes=classes)
# model = MobileNetV1(in_channels=channels, classes=classes)
# model = MobileNetV2(in_channels=channels, classes=classes)
# model = MobileNetV3_small(in_channels=channels, classes=classes)
# model = MobileNetV3_large(in_channels=channels, classes=classes)
# model = shuffuleNetV1_G3(in_channels=channels, classes=classes)
# model = shuffuleNetV2(in_channels=channels, classes=classes)
# model = Xception(in_channels=channels, classes=classes)
model = EfficientNetB0(in_channels=channels, classes=classes)
model.to(device)
```
### 3.8 choose loss function
After instantiating the model, we need a way to measure how inconsistent the model's outputs are with the actual results, i.e. the size of the loss. The code below uses the cross-entropy loss function. A loss function can be treated like a model, so you can move it to the GPU the same way you move the model. For a multi-class classification task, cross-entropy can be used directly as the loss function.
```python
criterion = torch.nn.CrossEntropyLoss()
criterion.to(device)
```
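`torch.nn.CrossEntropyLoss` demo (made-up logits for two samples and two classes; it takes raw model outputs and integer class labels):
```python
import torch

criterion = torch.nn.CrossEntropyLoss()

# two samples, two classes: raw outputs (logits) and the true labels
pred = torch.tensor([[2.0, 0.5],
                     [0.1, 1.5]])
label = torch.tensor([0, 1])

print(criterion(pred, label))
# tensor(0.2109)
```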
### 3.9 choose optimizer
Since the core of deep learning is backpropagation and gradient descent, the optimizer's job is to apply the loss to the model's learnable parameters according to specified update rules. Two optimizers are worth using: `Adam`, and `SGD` with momentum; the others are rarely worth trying here.
```python
# optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
```
### 3.10 Initialize the lists that store training and test accuracy
Initialize two lists; they store the accuracy on the training set and on the test set.
```python
train_acc_list = []
test_acc_list = []
```
### 3.11 Detailed training function (key)
Next comes the hardest part, the training function. I will explain it in detail and dismantle the `train` function piece by piece. If you are a beginner, don't be scared off: read this and you will surpass the vast majority of people who can only copy and paste.
First of all, the `train` function takes one parameter, `epoch`, the current round number. Its role is to decide whether the current training round is an integer multiple of `show_result_epoch`; if it is, we compute the accuracy on the training set and on the test set.
```python
def train(epoch):
```
Set the model to `train` mode; when predicting on the test set we call `model.eval()` instead. The two modes mainly affect `Dropout` and `BatchNorm`. Take `Dropout` as an example: in `train` mode the nodes it drops are random on every forward pass, while in `eval` mode dropout is disabled, so the behaviour is deterministic. **If you want to reproduce test-set results, you must add these mode-switching calls during training and testing; otherwise the results will not be reproducible.**
```python
model.train()
```

`train_correct` stores the number of correctly predicted training samples; `train_total` stores the total number of samples counted.
```python
train_correct = 0
train_total = 0
```

The main loop then iterates over the dataloader:

```python
for data in dataloader:
    train_data_value, train_data_label = data
    train_data_value, train_data_label = train_data_value.to(device), train_data_label.to(device)
    train_data_label_pred = model(train_data_value)
    loss = criterion(train_data_label_pred, train_data_label)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```

> dataloader = DataLoader(Train_dataset, shuffle=True, batch_size=50)

The `dataloader` comes from this line, where `batch_size` is set to 50. Suppose the `Train_dataset` we passed in has length 70: iterating over the dataloader then yields one batch of length 50 and one batch of length 20. If `batch_size` were set to 10, it would yield 7 batches of length 10. If `batch_size` were set to 100, it would yield a single batch of length 70, not a full batch of 100. To sum up, there are three scenarios (see the `DataLoader` demo after this list):
* When `batch_size` is greater than the length of the `Train_dataset` or `Test_dataset` passed as the first argument of `DataLoader()`, the iterator yields the data only once, and its size is the full length of the input dataset; the data is merely shuffled, and the mini-batch strategy is effectively not used.
* When `batch_size` is smaller than the length of the input `Train_dataset` or `Test_dataset`, and that length is an integer multiple of `batch_size`: suppose the dataset length is n and `batch_size` is b; then the number of iterations is t = n / b (exact division), and each of the t iterations yields a batch of length b.
* When `batch_size` is smaller than the length of the input `Train_dataset` or `Test_dataset`, and that length is not an integer multiple of `batch_size`: again let the dataset length be n and `batch_size` be b. The number of iterations is t + 1, where t = ⌊n / b⌋; the first t iterations yield batches of length b, and the last iteration yields a batch of length n − t×b.
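`DataLoader` demo (a sketch of the third scenario above: 70 toy samples with `batch_size=50` yield batches of 50 and 20):
```python
import torch
from torch.utils.data import DataLoader

toy_dataset = list(range(70))
loader = DataLoader(toy_dataset, shuffle=True, batch_size=50)

for batch in loader:
    print(batch.shape)
# torch.Size([50])
# torch.Size([20])
```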
```python
for data in dataloader:
```

We do the following with the data from each iteration.
First, `data` contains two values: the values of this batch and the labels of this batch.

```python
train_data_value, train_data_label = data
```
The resulting values and labels are then moved to `device`:
```python
train_data_value, train_data_label = train_data_value.to(device), train_data_label.to(device)
```
Then we call the model to predict; `train_data_label_pred` is the prediction. It is worth mentioning that the dimension of this result is `(len(data), classes)`, where `len(data)` is the data length of this iteration's batch and `classes` is the number of classes of the classification task.
```python
train_data_label_pred = model(train_data_value)
```
Next we compute the loss by passing the predictions and the true labels to the criterion; `loss` is a scalar tensor measuring how far the predictions deviate from the labels.
```python
loss = criterion(train_data_label_pred, train_data_label)
```
Gradient clearing: calling the optimizer's `zero_grad` method clears the gradient stored for every learnable parameter. The reason this is a separate step is said to be that some tasks need to accumulate backpropagated gradients.
```python
optimizer.zero_grad()
```
Backpropagation: assigns a gradient to every learnable parameter by backpropagating the loss.
```python
loss.backward()
```

Parameter update: uses the gradients and the current parameters to obtain the parameters after this round of learning, i.e. it performs the gradient descent step; the exact update rule depends on the chosen optimizer.
```python
optimizer.step()
```
The following is the accuracy-calculation and testing part. `epoch` is the current training round, and `show_result_epoch` controls how many rounds pass between each check of training and test accuracy and loss; the recorded accuracies are later used to draw the accuracy curves.
```python
if epoch % show_result_epoch == 0:
```
`torch.max` gives us the predicted labels. It returns two values: the maximum value (stored here as `probability`, though these are raw scores rather than true probabilities, since the outputs are not softmaxed) and the index of the maximum, `predicted`, which is what we regard as the label.
```python
probability, predicted = torch.max(train_data_label_pred.data, dim=1)
```
`torch.max` demo

```python
import torch

data = torch.Tensor([[0.6, 0.4],
                     [0.3, 0.7]])


probability, predicted = torch.max(data.data, dim=1)

print(probability)
# tensor([0.6000, 0.7000])
print(predicted)
# tensor([0, 1])
```

Record the number of training samples in `train_total`, the variable holding the total number of training samples; `size(0)` reads the first value of the tensor's shape. For example, if `train_data_label_pred` has dimension `(20, 2)`, then 20 is this batch's size and 2 is the number of classes, so `train_data_label_pred.size(0)` extracts the 20.
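`size(0)` demo:

```python
import torch

pred = torch.randn(20, 2)

print(pred.size())
# torch.Size([20, 2])
print(pred.size(0))
# 20
```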
```python
train_total += train_data_label_pred.size(0)
```
Record the number of correct predictions. `predicted == train_data_label` compares the predicted labels with the true labels: given two tensors of the same length, it returns a tensor of `True` and `False`, with `True` where the predicted label at that position matches the real one and `False` where it differs. Appending `.sum()` to `(predicted == train_data_label)` then returns the number of correctly predicted samples, and `.item()` extracts the plain Python value from the tensor. A simple example follows below.

```python
train_correct += (predicted == train_data_label).sum().item()
```

```python
import torch

predicted = torch.Tensor([0, 1, 1, 0, 1])
train_data_label = torch.Tensor([0, 0, 1, 0, 1])

print(predicted == train_data_label)
# tensor([ True, False,  True,  True,  True])
print((predicted == train_data_label).sum())
# tensor(4)
print((predicted == train_data_label).sum().item())
# 4
```
Compute the training-set accuracy and append it to `train_acc_list`. `train_correct` is the number of correct predictions, `train_total` is the total number of predicted samples, and `round` limits the floating-point number to the given number of decimal places.
```python
train_acc = round(100 * train_correct / train_total, 4)
train_acc_list.append(train_acc)
```
Print the accuracy and the loss:

```python
print('=' * 10, epoch // 10, '=' * 10)
print('loss:', loss.item())
print(f'Train accuracy:{train_acc}%')
```
The complete training section code:
```python
def train(epoch):
    model.train()
    train_correct = 0
    train_total = 0
    for data in dataloader:
        train_data_value, train_data_label = data
        train_data_value, train_data_label = train_data_value.to(device), train_data_label.to(device)
        train_data_label_pred = model(train_data_value)
        loss = criterion(train_data_label_pred, train_data_label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if epoch % show_result_epoch == 0:
        probability, predicted = torch.max(train_data_label_pred.data, dim=1)
        train_total += train_data_label_pred.size(0)
        train_correct += (predicted == train_data_label).sum().item()
        train_acc = round(100 * train_correct / train_total, 4)
        train_acc_list.append(train_acc)
        print('=' * 10, epoch // 10, '=' * 10)
        print('loss:', loss.item())
        print(f'Train accuracy:{train_acc}%')
        test()
```
### 3.12 Test function interpretation
The test function is basically the same, except there is no loss backpropagation or training process. The only addition is that before iterating over the test set, `with torch.no_grad()` is used to switch off gradient tracking. When I first read this I also wondered why the gradients need to be locked here when no backpropagation is performed. The answer I found is that even when gradients are not used to update parameters, PyTorch by default tracks and stores intermediate values for automatic differentiation, which hurts efficiency; it also guards against gradient changes that could occur in certain situations even though the optimizer performs no update. So using `torch.no_grad()` is necessary.
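`torch.no_grad` demo (tensors computed inside the block do not require gradients, so no computation graph is tracked):
```python
import torch

x = torch.ones(2, requires_grad=True)

y = x * 2
print(y.requires_grad)
# True

with torch.no_grad():
    z = x * 2
print(z.requires_grad)
# False
```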
```python
def test():
    model.eval()
    test_correct = 0
    test_total = 0
    with torch.no_grad():
        for testdata in testloader:
            test_data_value, test_data_label = testdata
            test_data_value, test_data_label = test_data_value.to(device), test_data_label.to(device)
            test_data_label_pred = model(test_data_value)
            test_probability, test_predicted = torch.max(test_data_label_pred.data, dim=1)
            test_total += test_data_label_pred.size(0)
            test_correct += (test_predicted == test_data_label).sum().item()

    test_acc = round(100 * test_correct / test_total, 3)
    test_acc_list.append(test_acc)
    print(f'Test accuracy:{test_acc}%')
```
### 3.13 Let's train
The loop is written as `range(1, epoch_number+1)` so that a round `0` is not printed. This is not difficult, but if you are confused or want to know why, change it to `range(epoch_number)`, run it, and see.
```python
for epoch in range(1, epoch_number+1):
    train(epoch)
```
### 3.14 plot accuracy curve

```python
plt.plot(np.array(range(epoch_number//show_result_epoch)) * show_result_epoch, train_acc_list)
plt.plot(np.array(range(epoch_number//show_result_epoch)) * show_result_epoch, test_acc_list)
plt.legend(['train', 'test'])
plt.title('Result')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.show()
```

The number of training rounds is 1000, and the final results are shown below. **Since our data is a randomly generated binary classification dataset, the test accuracy ends up fluctuating around 50%.**
![Accuracy curves](https://img-blog.csdnimg.cn/direct/04b46eda59c142978643a1ec7e50cee2.png)

## 4.Complete Code
I hope you read the content above before looking at the complete code below; then the code will no longer intimidate you, and it will feel transparent.
```python
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, Dataset, random_split
from Package_dataset import package_dataset

from Models.LeNet import LeNet
from Models.AlexNet import AlexNet
from Models.ZFNet import ZFNet
from Models.VGG19 import VGG19
from Models.GoogLeNet import GoogLeNet
from Models.ResNet50 import ResNet50
from Models.DenseNet import DenseNet
from Models.SqueezeNet import SqueezeNet
from Models.Mnasnet import MnasNetA1
from Models.MobileNetV1 import MobileNetV1
from Models.MobileNetV2 import MobileNetV2
from Models.MobileNetV3 import MobileNetV3_large, MobileNetV3_small
from Models.shuffuleNetV1 import shuffuleNetV1_G3
from Models.shuffuleNetV2 import shuffuleNetV2
from Models.Xception import Xception
from Models.EfficientNet import EfficientNetB0

data = np.load('Dataset/data.npy')
label = np.load('Dataset/label.npy')

dataset_partition_rate = 0.7
epoch_number = 1000
show_result_epoch = 10

dataset, channels, length, classes = package_dataset(data, label)

# partition dataset
train_len = int(len(dataset) * dataset_partition_rate)
test_len = int(len(dataset)) - train_len
train_dataset, test_dataset = random_split(dataset=dataset, lengths=[train_len, test_len])


# dataset loading class
class Dataset(Dataset):
    def __init__(self, data):
        self.len = len(data)
        self.x_data = torch.from_numpy(np.array(list(map(lambda x: x[0], data)), dtype=np.float32))
        self.y_data = torch.from_numpy(np.array(list(map(lambda x: x[-1], data)))).squeeze().long()

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len


# build the dataloaders
Train_dataset = Dataset(train_dataset)
Test_dataset = Dataset(test_dataset)
dataloader = DataLoader(Train_dataset, shuffle=True, batch_size=50)
testloader = DataLoader(Test_dataset, shuffle=True, batch_size=50)
# choose GPU or CPU as the training device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# model initialization
# model = LeNet(in_channels=channels, input_sample_points=length, classes=classes)
# model = AlexNet(in_channels=channels, input_sample_points=length, classes=classes)
# model = ZFNet(in_channels=channels, input_sample_points=length, classes=classes)
# model = VGG19(in_channels=channels, classes=classes)
# model = GoogLeNet(in_channels=channels, classes=classes)
# model = ResNet50(in_channels=channels, classes=classes)
# model = DenseNet(in_channels=channels, classes=classes)
# model = SqueezeNet(in_channels=channels, classes=classes)
# model = MnasNetA1(in_channels=channels, classes=classes)
# model = MobileNetV1(in_channels=channels, classes=classes)
# model = MobileNetV2(in_channels=channels, classes=classes)
# model = MobileNetV3_small(in_channels=channels, classes=classes)
# model = MobileNetV3_large(in_channels=channels, classes=classes)
# model = shuffuleNetV1_G3(in_channels=channels, classes=classes)
model = shuffuleNetV2(in_channels=channels, classes=classes)
# model = Xception(in_channels=channels, classes=classes)
# model = EfficientNetB0(in_channels=channels, classes=classes)
model.to(device)

# loss function selection
criterion = torch.nn.CrossEntropyLoss()
criterion.to(device)
# optimizer selection
# optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

train_acc_list = []
test_acc_list = []


# training function
def train(epoch):
    model.train()
    train_correct = 0
    train_total = 0
    for data in dataloader:
        train_data_value, train_data_label = data
        train_data_value, train_data_label = train_data_value.to(device), train_data_label.to(device)
        train_data_label_pred = model(train_data_value)
        loss = criterion(train_data_label_pred, train_data_label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if epoch % show_result_epoch == 0:
        probability, predicted = torch.max(train_data_label_pred.data, dim=1)
        train_total += train_data_label_pred.size(0)
        train_correct += (predicted == train_data_label).sum().item()
        train_acc = round(100 * train_correct / train_total, 4)
        train_acc_list.append(train_acc)
        print('=' * 10, epoch // 10, '=' * 10)
        print('loss:', loss.item())
        print(f'Train accuracy:{train_acc}%')
        test()


# test function
def test():
    model.eval()
    test_correct = 0
    test_total = 0
    with torch.no_grad():
        for testdata in testloader:
            test_data_value, test_data_label = testdata
            test_data_value, test_data_label = test_data_value.to(device), test_data_label.to(device)
            test_data_label_pred = model(test_data_value)
            test_probability, test_predicted = torch.max(test_data_label_pred.data, dim=1)
            test_total += test_data_label_pred.size(0)
            test_correct += (test_predicted == test_data_label).sum().item()

    test_acc = round(100 * test_correct / test_total, 3)
    test_acc_list.append(test_acc)
    print(f'Test accuracy:{test_acc}%')


for epoch in range(1, epoch_number+1):
    train(epoch)

plt.plot(np.array(range(epoch_number//show_result_epoch)) * show_result_epoch, train_acc_list)
plt.plot(np.array(range(epoch_number//show_result_epoch)) * show_result_epoch, test_acc_list)
plt.legend(['train', 'test'])
plt.title('Result')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.show()
```
## Summary of experience
* For the optimizer, only Adam and SGD with momentum are worth trying; SGD momentum is generally set to 0.9.
* The fastest way to improve accuracy is to normalize the data during preprocessing (see the sketch after this list), followed by adding BatchNorm after the convolution layers.
* If accuracy stops changing, i.e. the gradient vanishes, change the ReLU activations to LeakyReLU.
* For one-dimensional data, ResNet tends to have the highest accuracy among the base models.
* Piling attention mechanisms and recurrent neural networks on top of a model has a high probability of improving accuracy, but it is not recommended if you are trying to build something genuinely good.
* A CUDA memory error usually means `batch_size` is set too large or GPU memory is already heavily occupied; if a restart does not help, reduce `batch_size`.
* Hyperparameters such as `batch_size` and the number of convolution kernels are best set to an integer multiple of the processor's core count, usually a multiple of 32.
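A minimal sketch of the normalization tip above: per-channel standardization of the simulated `data` array from `Create_dataset.py` (the small epsilon guard against division by zero is our addition):
```python
import numpy as np

data = np.load('Dataset/data.npy')  # shape (numbers, channels, length)

# standardize each channel to zero mean and unit variance over the whole dataset
mean = data.mean(axis=(0, 2), keepdims=True)
std = data.std(axis=(0, 2), keepdims=True)
data_normalized = (data - mean) / (std + 1e-8)

print(data_normalized.mean(), data_normalized.std())
# approximately 0.0 and 1.0
```
--------------------------------------------------------------------------------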