├── MIXCNN.py
├── README.md
├── cwru_data.npy
├── cwru_label.npy
└── model_2_view.py

--------------------------------------------------------------------------------
/MIXCNN.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F


class DepthwiseConv1D(nn.Module):
    """Depthwise 1-D convolution: each input channel is filtered independently."""
    def __init__(self, dim_in, kernel_size, dilation_rate, depth_multiplier, padding="same",
                 use_bias=False):
        super().__init__()
        self.conv = nn.Conv1d(in_channels=dim_in, out_channels=dim_in * depth_multiplier,
                              kernel_size=kernel_size, stride=1, padding=padding,
                              groups=dim_in, bias=use_bias, dilation=dilation_rate)

    def forward(self, x):
        return self.conv(x)


class Mixconv(nn.Module):
    def __init__(self, channel=64, kernel_size=64, m=1, c=1, dim_in=128):
        super(Mixconv, self).__init__()
        # Depthwise conv with dilation m and depth multiplier c. The residual
        # addition in forward() assumes c == 1 so the channel counts match.
        self.depth_conv_1 = DepthwiseConv1D(dim_in=dim_in, kernel_size=kernel_size,
                                            dilation_rate=m, depth_multiplier=c,
                                            padding="same", use_bias=False)
        self.act_2 = nn.ReLU()
        self.bn_2 = nn.BatchNorm1d(dim_in * c)  # depthwise output has dim_in * c channels
        self.conv_1 = nn.Conv1d(dim_in * c, channel, kernel_size=1, stride=1, padding="same")
        self.act_3 = nn.ReLU()
        self.bn_3 = nn.BatchNorm1d(channel)

    def forward(self, x):
        identity = x
        x = self.depth_conv_1(x)
        x = self.act_2(x)
        x = self.bn_2(x)
        x = torch.add(x, identity)  # residual connection
        x = self.conv_1(x)          # pointwise conv mixes channel information
        x = self.act_3(x)
        x = self.bn_3(x)
        return x


######################################################################################################################
class MIXCNN(nn.Module):
    def __init__(self):
        super(MIXCNN, self).__init__()
        self.conv_1 = nn.Conv1d(1, 128, kernel_size=32, stride=4)
        self.bn_1 = nn.BatchNorm1d(128)
        self.act_1 = nn.ReLU()
        self.mix_1 = Mixconv(dim_in=128, channel=128, kernel_size=64, m=1, c=1)
        self.mix_2 = Mixconv(dim_in=128, channel=128, kernel_size=64, m=1, c=1)
        self.mix_3 = Mixconv(dim_in=128, channel=128, kernel_size=64, m=1, c=1)
        self.bn_2 = nn.BatchNorm1d(128)
        self.act_2 = nn.ReLU()
        self.pool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv_1(x)
        # Zero-pad the 249-point feature map back to length 1024; the amounts
        # (387, 388) are hard-coded for an input length of 1024.
        x = F.pad(x, (387, 388), "constant", 0)
        x = self.bn_1(x)
        x = self.act_1(x)
        x = self.mix_1(x)
        x = self.mix_2(x)
        x = self.mix_3(x)
        x = self.bn_2(x)
        x = self.act_2(x)
        x = self.pool(x).squeeze(-1)  # squeeze only the length dim so batch size 1 survives
        x = self.fc(x)
        return x


if __name__ == '__main__':
    x = torch.rand(2, 1, 1024).cuda()
    model = MIXCNN().cuda()
    output = model(x)
    print(output.size())  # torch.Size([2, 10])
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# MIXCNN


### Official Materials:
- TF: https://github.com/zhiqan/MIXCNN
- Paper: [A Fault Diagnosis Method for Rotating Machinery Based on CNN With Mixed Information](https://doi.org/10.1109/TII.2022.3224979)

### Citation:
```bibtex
@ARTICLE{9964316,
  author={Zhao, Zhiqian and Jiao, Yinghou},
  journal={IEEE Transactions on Industrial Informatics},
  title={A Fault Diagnosis Method for Rotating Machinery Based on CNN With Mixed Information},
  year={2023},
  volume={19},
  number={8},
  pages={9091-9101},
  doi={10.1109/TII.2022.3224979}}
```


# Contact
- **Chao He**
- **22110435#bjtu.edu.cn (please replace # by @)**
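
### Usage
Install the dependencies the scripts import (`torch`, `scikit-learn`, `timm`, `numpy`), then train on the bundled CWRU arrays with `python model_2_view.py --batch_size 32 --epochs 80` (these values are also the defaults). For a quick smoke test of the network alone, here is a minimal sketch mirroring the `__main__` block of `MIXCNN.py`:

```python
import torch
from MIXCNN import MIXCNN

model = MIXCNN()            # 10-class classifier for 1-D vibration signals
x = torch.rand(2, 1, 1024)  # (batch, channels, length); the model expects length 1024
print(model(x).size())      # torch.Size([2, 10])
```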
--------------------------------------------------------------------------------
/cwru_data.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liguge/MIXCNN_pytorch/651a627a891b755399a4668d9f16fa9d2d165122/cwru_data.npy
--------------------------------------------------------------------------------
/cwru_label.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liguge/MIXCNN_pytorch/651a627a891b755399a4668d9f16fa9d2d165122/cwru_label.npy
--------------------------------------------------------------------------------
/model_2_view.py:
--------------------------------------------------------------------------------
import numpy as np
import torch
from torch.utils import data as da
from timm.loss import LabelSmoothingCrossEntropy
import argparse
from MIXCNN import MIXCNN
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

args = None

def parse_args():
    parser = argparse.ArgumentParser(description='Train')
    # --data_dir, --step_len, --sample_len and --rate are kept from the original
    # experiment scripts; they are not used below.
    parser.add_argument('--data_dir', type=str, default="data\\5HP", help='the directory of the data')
    parser.add_argument('--pretrained', type=bool, default=True, help='whether to load the pretrained model')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size of the training process')
    parser.add_argument('--step_len', type=list, default=range(210, 430, 10), help='sliding-window step lengths')
    parser.add_argument('--sample_len', type=int, default=420, help='length of each sample')
    parser.add_argument('--rate', type=list, default=[0.7, 0.15, 0.15], help='train/validation/test split ratio')
    parser.add_argument('--acces', type=list, default=[], help='initialization list')
    parser.add_argument('--epochs', type=int, default=80, help='max number of epochs')
    parser.add_argument('--losses', type=list, default=[], help='initialization list')
    args = parser.parse_args()
    return args

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

class Dataset(da.Dataset):
    def __init__(self, X, y):
        self.Data = X
        self.Label = y

    def __getitem__(self, index):
        txt = self.Data[index]
        label = self.Label[index]
        return txt, label

    def __len__(self):
        return len(self.Data)

# STEP 1: load the data
def load_data(args):
    source_data = np.load('cwru_data.npy')    # paths relative to the repository root
    source_label = np.load('cwru_label.npy')
    # Standardize each sample to zero mean and unit variance along the time axis
    # (fit_transform works column-wise, hence the double transpose).
    source_data = StandardScaler().fit_transform(source_data.T).T
    source_data = np.expand_dims(source_data, axis=1)  # (N, 1, L) for Conv1d
    # 70/30 train/test split, then 20% of the training part held out for validation.
    x_train, x_test, y_train, y_test = train_test_split(source_data, source_label, test_size=0.3,
                                                        random_state=0, stratify=source_label)
    x_train, x_validate, y_train, y_validate = train_test_split(x_train, y_train, test_size=0.2,
                                                                random_state=0, stratify=y_train)
    Train = Dataset(x_train, y_train)
    Validate = Dataset(x_validate, y_validate)
    Test = Dataset(x_test, y_test)
    train_loader = da.DataLoader(Train, batch_size=args.batch_size, shuffle=True)
    validate_loader = da.DataLoader(Validate, batch_size=args.batch_size, shuffle=True)
    test_loader = da.DataLoader(Test, batch_size=args.batch_size, shuffle=False)
    return train_loader, validate_loader, test_loader


# STEP 2: build the model
model = MIXCNN().to(device)

def train(args, train_loader, validate_loader, test_loader):
    res = []
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    criterion = LabelSmoothingCrossEntropy()
    for epoch in range(args.epochs):
        train_loss = 0.0
        train_acc = 0.0
        model.train()  # re-enable training mode; test() switches the model to eval mode
        for step, (img, label) in enumerate(train_loader):
            img = img.float().to(device)
            label = label.long().to(device)
            out = model(img)
            out = torch.squeeze(out).float()
            loss = criterion(out, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            _, pred = out.max(1)
            num_correct = (pred == label).sum().item()
            acc = num_correct / img.shape[0]
            train_acc += acc
            if step % 37 == 0:
                print(epoch, step, 'loss:', float(loss))
        res.append(test(test_loader))  # record test accuracy after each epoch
    result.append(res)
    return test(test_loader)
    # torch.save(model.state_dict(), './cnn_save_weights_400.pt')

def test(sample_data):
    test_acc = 0.
    model.eval()
    with torch.no_grad():  # no gradients needed during evaluation
        for img, label in sample_data:
            img = img.float().to(device)
            label = label.long().to(device)
            out = model(img)
            out = torch.squeeze(out).float()
            _, pred = out.max(1)
            num_correct = (pred == label).sum().item()
            acc = num_correct / img.shape[0]
            test_acc += acc
    acc = test_acc / len(sample_data)  # mean of the per-batch accuracies
    return acc

def run_step(args):
    sample1, sample1_validate, sample1_test = load_data(args)
    acc = train(args, sample1, sample1_validate, sample1_test)
    print(acc)


# How the experimental results change with the step length for a fixed number of epochs
if __name__ == '__main__':
    args = parse_args()
    result = []
    run_step(args)
    print(result)
--------------------------------------------------------------------------------