├── .DS_Store
├── .gitignore
├── .idea
├── Deep-CNN-for-Estimation-of-Remaining-Useful-Life.iml
├── inspectionProfiles
│ └── profiles_settings.xml
├── misc.xml
├── modules.xml
├── other.xml
├── vcs.xml
└── workspace.xml
├── CMAPSSData
├── Damage Propagation Modeling.pdf
├── RUL_FD001.txt
├── RUL_FD002.txt
├── RUL_FD003.txt
├── RUL_FD004.txt
├── readme.txt
├── test_FD001.txt
├── test_FD002.txt
├── test_FD003.txt
├── test_FD004.txt
├── train_FD001.txt
├── train_FD002.txt
├── train_FD003.txt
└── train_FD004.txt
├── LICENSE
├── README.md
├── __pycache__
├── dataset_prepare.cpython-38.pyc
├── evaluation.cpython-38.pyc
├── load_data.cpython-38.pyc
├── loss_visualization.cpython-38.pyc
├── main.cpython-38.pyc
├── model.cpython-38.pyc
├── target_rul.cpython-38.pyc
└── test_prediction.cpython-38.pyc
├── _trials
├── .DS_Store
└── RUL Prediction with CNN (RMSE = 32.839).png
├── dataset_prepare.py
├── evaluation.py
├── import_survival.py
├── load_data.py
├── load_data.pyc
├── loss_visualization.py
├── main.py
├── model.py
├── requirements.txt
├── target_rul.py
└── test_prediction.py
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .Rproj.user
2 | .Rhistory
3 | .RData
4 | .Ruserdata
5 |
--------------------------------------------------------------------------------
/.idea/Deep-CNN-for-Estimation-of-Remaining-Useful-Life.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/other.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/workspace.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | <<<<<<< Updated upstream
6 |
7 |
8 |
9 | =======
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 | >>>>>>> Stashed changes
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 | 1619153715806
142 |
143 |
144 | 1619153715806
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 |
154 |
155 |
156 |
157 |
158 | <<<<<<< Updated upstream
159 | =======
160 |
161 |
162 |
163 | >>>>>>> Stashed changes
164 |
165 |
166 |
167 |
168 |
169 |
170 |
171 |
172 |
173 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 |
199 |
200 |
201 |
202 |
203 |
204 |
205 |
206 |
207 |
208 |
209 |
210 |
211 |
212 |
213 |
214 |
215 | <<<<<<< Updated upstream
216 |
217 | =======
218 |
219 |
220 |
221 |
222 |
223 | >>>>>>> Stashed changes
224 |
225 |
226 |
227 |
228 |
229 |
230 |
231 |
232 |
233 |
234 |
235 |
236 |
237 |
238 |
239 |
240 |
--------------------------------------------------------------------------------
/CMAPSSData/Damage Propagation Modeling.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/CMAPSSData/Damage Propagation Modeling.pdf
--------------------------------------------------------------------------------
/CMAPSSData/RUL_FD001.txt:
--------------------------------------------------------------------------------
1 | 112
2 | 98
3 | 69
4 | 82
5 | 91
6 | 93
7 | 91
8 | 95
9 | 111
10 | 96
11 | 97
12 | 124
13 | 95
14 | 107
15 | 83
16 | 84
17 | 50
18 | 28
19 | 87
20 | 16
21 | 57
22 | 111
23 | 113
24 | 20
25 | 145
26 | 119
27 | 66
28 | 97
29 | 90
30 | 115
31 | 8
32 | 48
33 | 106
34 | 7
35 | 11
36 | 19
37 | 21
38 | 50
39 | 142
40 | 28
41 | 18
42 | 10
43 | 59
44 | 109
45 | 114
46 | 47
47 | 135
48 | 92
49 | 21
50 | 79
51 | 114
52 | 29
53 | 26
54 | 97
55 | 137
56 | 15
57 | 103
58 | 37
59 | 114
60 | 100
61 | 21
62 | 54
63 | 72
64 | 28
65 | 128
66 | 14
67 | 77
68 | 8
69 | 121
70 | 94
71 | 118
72 | 50
73 | 131
74 | 126
75 | 113
76 | 10
77 | 34
78 | 107
79 | 63
80 | 90
81 | 8
82 | 9
83 | 137
84 | 58
85 | 118
86 | 89
87 | 116
88 | 115
89 | 136
90 | 28
91 | 38
92 | 20
93 | 85
94 | 55
95 | 128
96 | 137
97 | 82
98 | 59
99 | 117
100 | 20
101 |
--------------------------------------------------------------------------------
/CMAPSSData/RUL_FD002.txt:
--------------------------------------------------------------------------------
1 | 18
2 | 79
3 | 106
4 | 110
5 | 15
6 | 155
7 | 6
8 | 90
9 | 11
10 | 79
11 | 6
12 | 73
13 | 30
14 | 11
15 | 37
16 | 67
17 | 68
18 | 99
19 | 22
20 | 54
21 | 97
22 | 10
23 | 142
24 | 77
25 | 88
26 | 163
27 | 126
28 | 138
29 | 83
30 | 78
31 | 75
32 | 11
33 | 53
34 | 173
35 | 63
36 | 100
37 | 151
38 | 55
39 | 48
40 | 37
41 | 44
42 | 27
43 | 18
44 | 6
45 | 15
46 | 112
47 | 131
48 | 13
49 | 122
50 | 13
51 | 98
52 | 53
53 | 52
54 | 106
55 | 103
56 | 152
57 | 123
58 | 26
59 | 178
60 | 73
61 | 169
62 | 39
63 | 39
64 | 14
65 | 11
66 | 121
67 | 86
68 | 56
69 | 115
70 | 17
71 | 148
72 | 104
73 | 78
74 | 86
75 | 98
76 | 36
77 | 94
78 | 52
79 | 91
80 | 15
81 | 141
82 | 74
83 | 146
84 | 17
85 | 47
86 | 194
87 | 21
88 | 79
89 | 97
90 | 8
91 | 9
92 | 73
93 | 183
94 | 97
95 | 73
96 | 49
97 | 31
98 | 97
99 | 9
100 | 14
101 | 106
102 | 8
103 | 8
104 | 106
105 | 116
106 | 120
107 | 61
108 | 168
109 | 35
110 | 80
111 | 9
112 | 50
113 | 151
114 | 78
115 | 91
116 | 7
117 | 181
118 | 150
119 | 106
120 | 15
121 | 67
122 | 145
123 | 180
124 | 7
125 | 179
126 | 124
127 | 82
128 | 108
129 | 79
130 | 121
131 | 120
132 | 39
133 | 38
134 | 9
135 | 167
136 | 87
137 | 88
138 | 7
139 | 51
140 | 55
141 | 155
142 | 47
143 | 81
144 | 43
145 | 98
146 | 10
147 | 92
148 | 11
149 | 165
150 | 34
151 | 115
152 | 59
153 | 99
154 | 103
155 | 108
156 | 83
157 | 171
158 | 15
159 | 9
160 | 42
161 | 13
162 | 41
163 | 88
164 | 14
165 | 155
166 | 188
167 | 96
168 | 82
169 | 135
170 | 182
171 | 36
172 | 107
173 | 14
174 | 95
175 | 142
176 | 23
177 | 6
178 | 144
179 | 35
180 | 97
181 | 68
182 | 14
183 | 67
184 | 191
185 | 19
186 | 10
187 | 158
188 | 183
189 | 43
190 | 12
191 | 148
192 | 13
193 | 37
194 | 122
195 | 80
196 | 93
197 | 132
198 | 32
199 | 103
200 | 174
201 | 111
202 | 68
203 | 192
204 | 121
205 | 134
206 | 48
207 | 85
208 | 8
209 | 23
210 | 8
211 | 6
212 | 57
213 | 83
214 | 172
215 | 101
216 | 81
217 | 86
218 | 165
219 | 73
220 | 121
221 | 139
222 | 75
223 | 151
224 | 145
225 | 11
226 | 108
227 | 14
228 | 126
229 | 61
230 | 85
231 | 8
232 | 101
233 | 153
234 | 89
235 | 190
236 | 12
237 | 62
238 | 134
239 | 101
240 | 121
241 | 167
242 | 17
243 | 161
244 | 181
245 | 16
246 | 152
247 | 148
248 | 56
249 | 111
250 | 23
251 | 84
252 | 12
253 | 43
254 | 48
255 | 122
256 | 191
257 | 56
258 | 131
259 | 51
260 |
--------------------------------------------------------------------------------
/CMAPSSData/RUL_FD003.txt:
--------------------------------------------------------------------------------
1 | 44
2 | 51
3 | 27
4 | 120
5 | 101
6 | 99
7 | 71
8 | 55
9 | 55
10 | 66
11 | 77
12 | 115
13 | 115
14 | 31
15 | 108
16 | 56
17 | 136
18 | 132
19 | 85
20 | 56
21 | 18
22 | 119
23 | 78
24 | 9
25 | 58
26 | 11
27 | 88
28 | 144
29 | 124
30 | 89
31 | 79
32 | 55
33 | 71
34 | 65
35 | 87
36 | 137
37 | 145
38 | 22
39 | 8
40 | 41
41 | 131
42 | 115
43 | 128
44 | 69
45 | 111
46 | 7
47 | 137
48 | 55
49 | 135
50 | 11
51 | 78
52 | 120
53 | 87
54 | 87
55 | 55
56 | 93
57 | 88
58 | 40
59 | 49
60 | 128
61 | 129
62 | 58
63 | 117
64 | 28
65 | 115
66 | 87
67 | 92
68 | 103
69 | 100
70 | 63
71 | 35
72 | 45
73 | 99
74 | 117
75 | 45
76 | 27
77 | 86
78 | 20
79 | 18
80 | 133
81 | 15
82 | 6
83 | 145
84 | 104
85 | 56
86 | 25
87 | 68
88 | 144
89 | 41
90 | 51
91 | 81
92 | 14
93 | 67
94 | 10
95 | 127
96 | 113
97 | 123
98 | 17
99 | 8
100 | 28
101 |
--------------------------------------------------------------------------------
/CMAPSSData/RUL_FD004.txt:
--------------------------------------------------------------------------------
1 | 22
2 | 39
3 | 107
4 | 75
5 | 149
6 | 78
7 | 94
8 | 14
9 | 99
10 | 162
11 | 143
12 | 7
13 | 71
14 | 105
15 | 12
16 | 160
17 | 162
18 | 104
19 | 194
20 | 82
21 | 91
22 | 11
23 | 26
24 | 142
25 | 39
26 | 92
27 | 76
28 | 124
29 | 64
30 | 118
31 | 6
32 | 22
33 | 147
34 | 126
35 | 36
36 | 73
37 | 89
38 | 11
39 | 151
40 | 10
41 | 97
42 | 30
43 | 42
44 | 60
45 | 85
46 | 134
47 | 34
48 | 45
49 | 24
50 | 86
51 | 119
52 | 151
53 | 142
54 | 176
55 | 157
56 | 67
57 | 97
58 | 8
59 | 154
60 | 139
61 | 51
62 | 33
63 | 184
64 | 46
65 | 12
66 | 133
67 | 46
68 | 46
69 | 12
70 | 33
71 | 15
72 | 176
73 | 23
74 | 89
75 | 124
76 | 163
77 | 25
78 | 74
79 | 78
80 | 114
81 | 96
82 | 10
83 | 172
84 | 166
85 | 115
86 | 70
87 | 94
88 | 56
89 | 86
90 | 96
91 | 50
92 | 73
93 | 154
94 | 129
95 | 171
96 | 71
97 | 105
98 | 113
99 | 37
100 | 7
101 | 13
102 | 22
103 | 9
104 | 120
105 | 100
106 | 107
107 | 41
108 | 153
109 | 126
110 | 59
111 | 18
112 | 66
113 | 13
114 | 14
115 | 139
116 | 13
117 | 75
118 | 8
119 | 109
120 | 137
121 | 41
122 | 192
123 | 23
124 | 86
125 | 184
126 | 15
127 | 195
128 | 126
129 | 120
130 | 165
131 | 101
132 | 116
133 | 126
134 | 36
135 | 7
136 | 122
137 | 159
138 | 88
139 | 173
140 | 146
141 | 130
142 | 108
143 | 53
144 | 162
145 | 59
146 | 100
147 | 56
148 | 145
149 | 76
150 | 57
151 | 31
152 | 88
153 | 173
154 | 34
155 | 7
156 | 133
157 | 172
158 | 6
159 | 22
160 | 83
161 | 82
162 | 84
163 | 95
164 | 174
165 | 111
166 | 72
167 | 109
168 | 87
169 | 179
170 | 158
171 | 126
172 | 12
173 | 8
174 | 10
175 | 123
176 | 103
177 | 12
178 | 106
179 | 12
180 | 32
181 | 37
182 | 116
183 | 15
184 | 10
185 | 46
186 | 142
187 | 24
188 | 135
189 | 56
190 | 43
191 | 178
192 | 71
193 | 104
194 | 15
195 | 166
196 | 89
197 | 36
198 | 11
199 | 92
200 | 96
201 | 59
202 | 13
203 | 167
204 | 151
205 | 154
206 | 109
207 | 116
208 | 91
209 | 11
210 | 88
211 | 108
212 | 76
213 | 14
214 | 89
215 | 145
216 | 17
217 | 66
218 | 154
219 | 41
220 | 182
221 | 73
222 | 39
223 | 58
224 | 14
225 | 145
226 | 88
227 | 162
228 | 189
229 | 120
230 | 98
231 | 33
232 | 184
233 | 110
234 | 68
235 | 24
236 | 75
237 | 18
238 | 16
239 | 166
240 | 98
241 | 176
242 | 81
243 | 118
244 | 35
245 | 131
246 | 194
247 | 112
248 | 26
249 |
--------------------------------------------------------------------------------
/CMAPSSData/readme.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/CMAPSSData/readme.txt
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deep CNN for Estimation of Remaining Useful Life
2 | Inspired by Babu, G. S., Zhao, P., & Li, X. L. (2016, April). Deep convolutional neural network
3 | based regression approach for estimation of remaining useful life. In _International conference on database systems for
4 | advanced applications_ (pp. 214-228). Springer, Cham.
5 | _Author: Jiaxiang Cheng, Nanyang Technological University, Singapore_
6 |
7 |
8 |
9 | ## Environment
10 | ```
11 | python==3.8.8
12 | pytorch==1.8.1
13 | pandas==1.2.4
14 | scikit-learn==0.23.2
15 | numpy==1.20.1
16 | matplotlib==3.3.4
17 | scipy==1.6.2
18 | ```
19 |
20 | ## Usage
21 | You may simply give the following command for both training and evaluation:
22 | ```
23 | python main.py
24 | ```
25 | Then you will get the following running information:
26 | ```
27 | ...
28 |
29 | Epoch : 30 loss : 3.285 RMSE = 34.636 Score = 14473
30 | Epoch : 31 loss : 3.277 RMSE = 34.599 Score = 14815
31 | Epoch : 32 loss : 3.269 RMSE = 34.95 Score = 12690
32 | Epoch : 33 loss : 3.259 RMSE = 32.885 Score = 6656
33 | Epoch : 34 loss : 3.25 RMSE = 32.354 Score = 5344
34 | Epoch : 35 loss : 3.241 RMSE = 32.318 Score = 4898
35 |
36 | ...
37 | ```
38 | As the model and data sets are lightweight, evaluation is run after every
39 | training epoch so that performance can be tracked closely.
40 | The prediction results will be saved in the folder ```_trials```.
41 |
42 | ## Citation
43 | [](https://zenodo.org/badge/latestdoi/360762936)
44 |
45 | ## License
46 | [](https://opensource.org/licenses/Apache-2.0)
47 |
--------------------------------------------------------------------------------
/__pycache__/dataset_prepare.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/__pycache__/dataset_prepare.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/evaluation.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/__pycache__/evaluation.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/load_data.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/__pycache__/load_data.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/loss_visualization.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/__pycache__/loss_visualization.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/main.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/__pycache__/main.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/model.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/__pycache__/model.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/target_rul.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/__pycache__/target_rul.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/test_prediction.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/__pycache__/test_prediction.cpython-38.pyc
--------------------------------------------------------------------------------
/_trials/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/_trials/.DS_Store
--------------------------------------------------------------------------------
/_trials/RUL Prediction with CNN (RMSE = 32.839).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/_trials/RUL Prediction with CNN (RMSE = 32.839).png
--------------------------------------------------------------------------------
/dataset_prepare.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from target_rul import target_rul
3 | import torch
4 | from sklearn.model_selection import train_test_split
5 |
6 |
def train_val_prepare(max_cycle, idx, X_ss, calcul, nf, ns):
    """Build the training set of 15-cycle sliding-window "images" and RUL targets.

    Parameters
    ----------
    max_cycle : sequence of int
        Total number of recorded cycles for each engine, indexed by engine id - 1.
    idx : ndarray
        Per-row index columns; column 0 is the engine id, column 1 the cycle number.
    X_ss : ndarray, shape (ns, nf)
        Sensor readings, one row per cycle, engines concatenated in id order.
    calcul :
        Passed through unchanged to ``target_rul`` — presumably selects how the
        target RUL is computed (TODO: confirm against target_rul.py).
    nf : int
        Number of feature (sensor) columns per cycle.
    ns : int
        Total number of rows (cycles) across all engines.

    Returns
    -------
    (train_x, train_y) : tuple of torch tensors
        ``train_x`` has shape (n_windows, 1, 15, nf) in float32;
        ``train_y`` holds one integer RUL target per window.
    """
    train_img = []
    train_y = []
    id_engine = 1
    i = 0

    # Slide a 15-cycle window over the concatenated engine histories.
    while i <= ns - 15:
        img = X_ss[i:i + 15, ].astype('float32')
        train_img.append(img)

        # Target is the RUL at the last cycle covered by this window.
        train_y.append(target_rul(max_cycle[id_engine - 1], idx[i + 14, 1], calcul))

        i = i + 1
        # When the next window's last row would belong to a different engine,
        # jump ahead so no window ever straddles two engines.
        if i + 14 <= ns - 1 and int(idx[i + 14, 0]) != id_engine:
            i = i + 14
            id_engine += 1

    # Stack windows into (n_windows, 1, 15, nf) torch format.
    train_x = np.array(train_img)
    train_x = train_x.reshape(train_x.shape[0], 1, 15, nf)
    train_x = torch.from_numpy(train_x)

    # Integer targets in torch format.
    train_y = np.array(train_y).astype(int)
    train_y = torch.from_numpy(train_y)

    return train_x, train_y
67 |
68 |
69 | def test_prepare(Xt_ss, idx_t, nf, ns_t):
70 | test_img = []
71 | id_engine_t = 1
72 | i_t = 0
73 | while i_t <= ns_t - 15:
74 | img_t = Xt_ss[i_t:i_t + 15, ]
75 | img_t = img_t.astype('float32')
76 | test_img.append(img_t)
77 | i_t = i_t + 1
78 | if i_t + 14 <= ns_t - 1:
79 | if int(idx_t[i_t + 14, 0]) != id_engine_t:
80 | i_t = i_t + 14
81 | id_engine_t += 1
82 | # print("No.", id_engine_t, "No.", i_t, "th instance")
83 |
84 | # converting the list to numpy array
85 | test_x = np.array(test_img)
86 | # test_x.shape
87 |
88 | # converting training images into torch format
89 | test_x = test_x.reshape(test_x.shape[0], 1, 15, nf)
90 | test_x = torch.from_numpy(test_x)
91 | # test_x.shape
92 | return test_x
93 |
--------------------------------------------------------------------------------
/evaluation.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 |
4 |
def scoring(predictions, max_cycle_t, y_test):
    """Pick each engine's final-window prediction and score it.

    ``predictions`` holds one predicted RUL per sliding window, with all
    engines concatenated; an engine observed for m cycles contributes
    m - 14 windows. For every engine the prediction of its LAST window
    (i.e. at the final observed cycle) is compared against the true RUL.

    :param predictions: 1-D array-like of per-window predictions
    :param max_cycle_t: observed cycle count per test engine
    :param y_test: true RUL per engine, shape (n,) or (n, 1)
    :return: (per-engine predictions, RMSE rounded to 3 d.p.,
              rounded asymmetric PHM08-style score)
    """
    n_engines = len(y_test)  # generalized from the hard-coded 100 engines
    result = []
    ix = 1
    for k in range(n_engines):
        # index of the last window of engine k in the flat prediction array
        result.append(predictions[max_cycle_t[k] + ix - 15 - 1])
        ix = max_cycle_t[k] + ix - 14  # running offset of the next engine

    y_flat = np.asarray(y_test, dtype=float).ravel()
    rmse = 0.0
    score = 0.0
    for j in range(n_engines):
        h = float(result[j]) - y_flat[j]
        rmse += h ** 2
        if h < 0:
            score += np.exp(-h / 13) - 1  # early prediction: milder penalty
        else:
            score += np.exp(h / 10) - 1   # late prediction: harsher penalty

    rmse = np.sqrt(rmse / n_engines)
    # np.asscalar was removed in NumPy 1.23 -- use plain float conversion
    return result, round(float(rmse), 3), round(float(score))
30 |
31 |
def visualization(y_test, result, root_mse):
    """Plot actual vs. predicted RUL and save the figure under ./_trials/.

    The image name embeds the RMSE; the figure is also shown interactively.
    """
    plt.figure(figsize=(15, 6))
    plt.axvline(x=100, c='r', linestyle='--')  # marker at sample 100

    # draw both curves with their legend labels
    for series, tag in ((y_test, 'Actual Data'), (result, 'Predicted Data')):
        plt.plot(series, label=tag)

    plt.title('Remaining Useful Life Prediction')
    plt.legend()
    plt.xlabel("Samples")
    plt.ylabel("Remaining Useful Life")
    plt.savefig('./_trials/RUL Prediction with CNN (RMSE = {}).png'.format(root_mse))
    plt.show()
44 |
--------------------------------------------------------------------------------
/import_survival.py:
--------------------------------------------------------------------------------
1 | # 4. import_survival.py
2 | # %matplotlib inline
3 | import pandas as pd
4 | import numpy as np
5 | import matplotlib.pyplot as plt
6 | import seaborn as sns;
7 |
8 | sns.set()
9 |
10 | from lifelines import KaplanMeierFitter, CoxTimeVaryingFitter
11 |
12 | from scipy.optimize import curve_fit
13 |
14 |
def exponential_model(z, a, b):
    """Exponential decay ``a * exp(-b * z)``, used to map hazard to RUL."""
    return a * np.exp(-z * b)
17 |
18 |
def add_remaining_useful_life(df):
    """Return ``df`` with a ``RUL`` column appended.

    For each row, RUL = (last observed cycle of that unit) - (current cycle),
    so the final cycle of every unit gets RUL 0.
    """
    result_frame = df.copy()
    # per-row maximum cycle of the owning unit, aligned to df's index
    last_cycle = result_frame.groupby("unit_nr")["time_cycles"].transform("max")
    result_frame["RUL"] = last_cycle - result_frame["time_cycles"]
    return result_frame
34 |
35 |
def evaluate(y_true, y_hat, label='test'):
    """Print RMSE and R^2 of predictions against the ground truth.

    Implemented directly with NumPy: the original called
    ``mean_squared_error`` / ``r2_score``, which are never imported in this
    file and raised NameError at call time.

    :param y_true: true values (any array-like; flattened to 1-D)
    :param y_hat: predicted values (same length as ``y_true``)
    :param label: tag printed in front of the metrics
    """
    y_true = np.asarray(y_true, dtype=float).ravel()
    y_hat = np.asarray(y_hat, dtype=float).ravel()
    rmse = np.sqrt(np.mean((y_true - y_hat) ** 2))
    ss_res = np.sum((y_true - y_hat) ** 2)
    ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
    variance = 1.0 - ss_res / ss_tot  # coefficient of determination R^2
    print('{} set RMSE:{}, R2:{}'.format(label, rmse, variance))
41 |
42 |
# ---------------------------------------------------------------------------
# Script: survival analysis of FD001 (Kaplan-Meier + Cox time-varying model),
# then regression of the log-partial hazard onto RUL.
# ---------------------------------------------------------------------------

# define filepath to read data
dir_path = './CMAPSSData/'

# define column names for easy indexing
index_names = ['unit_nr', 'time_cycles']
setting_names = ['setting_1', 'setting_2', 'setting_3']
sensor_names = ['s_{}'.format(i) for i in range(1, 22)]
col_names = index_names + setting_names + sensor_names

# read data (raw strings for the regex separator avoid the
# invalid-escape-sequence warning on Python 3.12+)
train = pd.read_csv((dir_path + 'train_FD001.txt'), sep=r'\s+', header=None, names=col_names)
test = pd.read_csv((dir_path + 'test_FD001.txt'), sep=r'\s+', header=None, names=col_names)
y_test = pd.read_csv((dir_path + 'RUL_FD001.txt'), sep=r'\s+', header=None, names=['RUL'])

# inspect first few rows
train.head()

train = add_remaining_useful_life(train)
train[index_names + ['RUL']].head()

# clip RUL as discussed in SVR and problem framing analysis
# (assignment instead of Series.clip(inplace=True), which is deprecated)
train['RUL'] = train['RUL'].clip(upper=125)

# drop non-informative features, derived from EDA
drop_sensors = ['s_1', 's_5', 's_6', 's_10', 's_16', 's_18', 's_19']
drop_labels = setting_names + drop_sensors
train.drop(labels=drop_labels, axis=1, inplace=True)

remaining_sensors = ['s_2', 's_3', 's_4', 's_7', 's_8', 's_9',
                     's_11', 's_12', 's_13', 's_14', 's_15', 's_17', 's_20', 's_21']

# mark the breakdown event: engines fail at their last recorded cycle
train['breakdown'] = 0
idx_last_record = train.reset_index().groupby(by='unit_nr')['index'].last()
train.at[idx_last_record, 'breakdown'] = 1

# interval start for the time-varying Cox model
train['start'] = train['time_cycles'] - 1
train.tail()  # check results

# right-censor every engine still running at the cut-off cycle
cut_off = 200
train_censored = train[train['time_cycles'] <= cut_off].copy()

data = train_censored[index_names + ['breakdown']].groupby('unit_nr').last()

# Kaplan-Meier survival curve of the censored training population
plt.figure(figsize=(15, 7))
survival = KaplanMeierFitter()
survival.fit(data['time_cycles'], data['breakdown'])
survival.plot()
plt.ylabel("Probability of survival")
plt.show()
plt.close()

# Cox Proportional Hazards models

train_cols = index_names + remaining_sensors + ['start', 'breakdown']
predict_cols = ['time_cycles'] + remaining_sensors + ['start', 'breakdown']  # breakdown value will be 0

ctv = CoxTimeVaryingFitter()
ctv.fit(train_censored[train_cols], id_col="unit_nr", event_col='breakdown',
        start_col='start', stop_col='time_cycles', show_progress=True, step_size=1)

ctv.print_summary()

plt.figure(figsize=(10, 5))
ctv.plot()
plt.show()
plt.close()

# engines from the dataset which are still functioning at the cut-off,
# so we can predict their RUL (single filter; the original filtered twice)
df_last_censored = train_censored.groupby("unit_nr").last()
df_to_predict = df_last_censored[df_last_censored['breakdown'] == 0].copy()

predictions = ctv.predict_log_partial_hazard(df_to_predict[predict_cols])
# NOTE(review): assumes predict_log_partial_hazard returns a DataFrame with
# column 0 (older lifelines); newer versions return a Series -- verify.
predictions.rename(columns={0: "predictions"}, inplace=True)

predictions['RUL'] = df_to_predict['RUL']
predictions.head(10)

# higher log-partial hazard should correspond to lower RUL
plt.figure(figsize=(15, 5))
plt.plot(predictions['RUL'], predictions['predictions'], '.b')
xlim = plt.gca().get_xlim()
plt.xlim(xlim[1], xlim[0])
plt.xlabel('RUL')
plt.ylabel('log_partial_hazard')

plt.show()

# trace hazard over time for every other censored engine
# (assumes unit_nr values are contiguous integers starting at 1)
X = train.loc[train['unit_nr'].isin(df_to_predict.index)]
X_unique = len(X['unit_nr'].unique())

plt.figure(figsize=(15, 5))

for i in range(1, X_unique, 2):
    X_sub = X.loc[X['unit_nr'] == i]
    predictions = ctv.predict_partial_hazard(X_sub)[0].values
    plt.plot(X_sub['time_cycles'].values, np.log(predictions))

plt.xlabel('time_cycles')
plt.ylabel('log_partial_hazard')
plt.show()

# Regressing log-partial hazard to RUL

df_hazard = train_censored.copy()
df_hazard['hazard'] = ctv.predict_log_partial_hazard(df_hazard)

df_hazard.plot('hazard', 'RUL', 'scatter', figsize=(15, 5))
plt.xlabel('hazard')
plt.ylabel('RUL')
plt.show()

# fit RUL ~ a * exp(-b * hazard)
popt, pcov = curve_fit(exponential_model, df_hazard['hazard'], df_hazard['RUL'])
print(popt)

# prep test set (fixed: the original referenced an undefined `X_test`)
test = test.drop(labels=drop_labels, axis=1)
test['breakdown'] = 0
test['start'] = test['time_cycles'] - 1

# predict and evaluate
y_hat = exponential_model(df_hazard['hazard'], *popt)
evaluate(df_hazard['RUL'], y_hat, 'train')

y_pred = ctv.predict_log_partial_hazard(test.groupby('unit_nr').last())
y_hat = exponential_model(y_pred, *popt)
evaluate(y_test, y_hat)

# Repeat on complete dataset
ctv2 = CoxTimeVaryingFitter()
ctv2.fit(train[train_cols], id_col="unit_nr", event_col='breakdown',
         start_col='start', stop_col='time_cycles', show_progress=True)

train['hazard'] = ctv2.predict_log_partial_hazard(train)
popt2, pcov2 = curve_fit(exponential_model, train['hazard'], train['RUL'])

y_hat = exponential_model(train['hazard'], *popt2)
evaluate(train['RUL'], y_hat, 'train')

y_pred = ctv2.predict_log_partial_hazard(test.groupby('unit_nr').last())
y_hat = exponential_model(y_pred, *popt2)
evaluate(y_test, y_hat)
--------------------------------------------------------------------------------
/load_data.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from sklearn.preprocessing import StandardScaler, MinMaxScaler
3 |
4 |
def load_data_FD001():
    """Load the C-MAPSS FD001 split with non-informative columns removed.

    :return: (train_raw, test_raw, max_cycle, max_cycle_t, y_test) where
        max_cycle / max_cycle_t hold the per-engine observed cycle counts
        and y_test is the true RUL per test engine as an (n, 1) ndarray.
    """
    dir_path = './CMAPSSData/'

    # define column names for easy indexing
    index_names = ['unit_nr', 'time_cycles']
    setting_names = ['setting_1', 'setting_2', 'setting_3']
    sensor_names = ['s_{}'.format(i) for i in range(1, 22)]
    col_names = index_names + setting_names + sensor_names

    # read data (raw strings for the regex separator avoid the
    # invalid-escape-sequence warning on Python 3.12+)
    train_raw = pd.read_csv((dir_path + 'train_FD001.txt'), sep=r'\s+', header=None, names=col_names)
    test_raw = pd.read_csv((dir_path + 'test_FD001.txt'), sep=r'\s+', header=None, names=col_names)
    y_test = pd.read_csv((dir_path + 'RUL_FD001.txt'), sep=r'\s+', header=None, names=['RUL']).to_numpy()

    # number of observed cycles per engine (train and test)
    max_cycle = train_raw.groupby(by="unit_nr")["time_cycles"].max().to_numpy()
    max_cycle_t = test_raw.groupby(by="unit_nr")["time_cycles"].max().to_numpy()

    # drop non-informative features, derived from EDA
    # (the unused `remaining_sensors` list of the original was removed)
    drop_sensors = ['s_1', 's_5', 's_10', 's_16', 's_18', 's_19']
    drop_labels = setting_names + drop_sensors
    train_raw.drop(labels=drop_labels, axis=1, inplace=True)
    test_raw.drop(labels=drop_labels, axis=1, inplace=True)

    return train_raw, test_raw, max_cycle, max_cycle_t, y_test
35 |
36 |
def load_data_FD002():
    """Load the C-MAPSS FD002 split (all 21 sensors kept, settings dropped).

    :return: (train_raw, test_raw, max_cycle, max_cycle_t, y_test) where
        max_cycle / max_cycle_t hold the per-engine observed cycle counts
        and y_test is the true RUL per test engine as an (n, 1) ndarray.
    """
    dir_path = './CMAPSSData/'

    # define column names for easy indexing
    index_names = ['unit_nr', 'time_cycles']
    setting_names = ['setting_1', 'setting_2', 'setting_3']
    sensor_names = ['s_{}'.format(i) for i in range(1, 22)]
    col_names = index_names + setting_names + sensor_names

    # read data (raw strings for the regex separator avoid the
    # invalid-escape-sequence warning on Python 3.12+)
    train_raw = pd.read_csv((dir_path + 'train_FD002.txt'), sep=r'\s+', header=None, names=col_names)
    test_raw = pd.read_csv((dir_path + 'test_FD002.txt'), sep=r'\s+', header=None, names=col_names)
    y_test = pd.read_csv((dir_path + 'RUL_FD002.txt'), sep=r'\s+', header=None, names=['RUL']).to_numpy()

    # number of observed cycles per engine (train and test)
    max_cycle = train_raw.groupby(by="unit_nr")["time_cycles"].max().to_numpy()
    max_cycle_t = test_raw.groupby(by="unit_nr")["time_cycles"].max().to_numpy()

    # only the operating-setting columns are dropped for FD002
    drop_labels = setting_names
    train_raw.drop(labels=drop_labels, axis=1, inplace=True)
    test_raw.drop(labels=drop_labels, axis=1, inplace=True)

    return train_raw, test_raw, max_cycle, max_cycle_t, y_test
64 |
65 |
def load_data_FD003():
    """Load the C-MAPSS FD003 split with non-informative columns removed.

    :return: (train_raw, test_raw, max_cycle, max_cycle_t, y_test) where
        max_cycle / max_cycle_t hold the per-engine observed cycle counts
        and y_test is the true RUL per test engine as an (n, 1) ndarray.
    """
    dir_path = './CMAPSSData/'

    # define column names for easy indexing
    index_names = ['unit_nr', 'time_cycles']
    setting_names = ['setting_1', 'setting_2', 'setting_3']
    sensor_names = ['s_{}'.format(i) for i in range(1, 22)]
    col_names = index_names + setting_names + sensor_names

    # read data (raw strings for the regex separator avoid the
    # invalid-escape-sequence warning on Python 3.12+)
    train_raw = pd.read_csv((dir_path + 'train_FD003.txt'), sep=r'\s+', header=None, names=col_names)
    test_raw = pd.read_csv((dir_path + 'test_FD003.txt'), sep=r'\s+', header=None, names=col_names)
    y_test = pd.read_csv((dir_path + 'RUL_FD003.txt'), sep=r'\s+', header=None, names=['RUL']).to_numpy()

    # number of observed cycles per engine (train and test)
    max_cycle = train_raw.groupby(by="unit_nr")["time_cycles"].max().to_numpy()
    max_cycle_t = test_raw.groupby(by="unit_nr")["time_cycles"].max().to_numpy()

    # drop non-informative features, derived from EDA
    drop_sensors = ['s_1', 's_5', 's_10', 's_16', 's_18', 's_19']
    drop_labels = setting_names + drop_sensors
    train_raw.drop(labels=drop_labels, axis=1, inplace=True)
    test_raw.drop(labels=drop_labels, axis=1, inplace=True)

    return train_raw, test_raw, max_cycle, max_cycle_t, y_test
93 |
94 |
def load_data_FD004():
    """Load the C-MAPSS FD004 split (all 21 sensors kept, settings dropped).

    :return: (train_raw, test_raw, max_cycle, max_cycle_t, y_test) where
        max_cycle / max_cycle_t hold the per-engine observed cycle counts
        and y_test is the true RUL per test engine as an (n, 1) ndarray.
    """
    dir_path = './CMAPSSData/'

    # define column names for easy indexing
    index_names = ['unit_nr', 'time_cycles']
    setting_names = ['setting_1', 'setting_2', 'setting_3']
    sensor_names = ['s_{}'.format(i) for i in range(1, 22)]
    col_names = index_names + setting_names + sensor_names

    # read data (raw strings for the regex separator avoid the
    # invalid-escape-sequence warning on Python 3.12+)
    train_raw = pd.read_csv((dir_path + 'train_FD004.txt'), sep=r'\s+', header=None, names=col_names)
    test_raw = pd.read_csv((dir_path + 'test_FD004.txt'), sep=r'\s+', header=None, names=col_names)
    y_test = pd.read_csv((dir_path + 'RUL_FD004.txt'), sep=r'\s+', header=None, names=['RUL']).to_numpy()

    # number of observed cycles per engine (train and test)
    max_cycle = train_raw.groupby(by="unit_nr")["time_cycles"].max().to_numpy()
    max_cycle_t = test_raw.groupby(by="unit_nr")["time_cycles"].max().to_numpy()

    # only the operating-setting columns are dropped for FD004
    drop_labels = setting_names
    train_raw.drop(labels=drop_labels, axis=1, inplace=True)
    test_raw.drop(labels=drop_labels, axis=1, inplace=True)

    return train_raw, test_raw, max_cycle, max_cycle_t, y_test
122 |
123 |
def get_info(train_raw, test_raw):
    """Standardize the sensor features and return index/shape info.

    The scaler is fitted on the TRAIN features only and the same statistics
    are applied to the test features. (The original re-ran ``fit_transform``
    on the test set, which leaks test statistics and puts train and test on
    inconsistent scales; the unused MinMaxScaler was removed.)

    :param train_raw: training frame; columns 0-1 are (unit_nr, time_cycles)
    :param test_raw: test frame with the same layout
    :return: (X_ss, idx, Xt_ss, idx_t, nf, ns, ns_t)
    """
    ss = StandardScaler()

    # columns 0-1 are the (unit_nr, time_cycles) index; the rest are features
    X = train_raw.iloc[:, 2:]
    idx = train_raw.iloc[:, 0:2].to_numpy()
    X_ss = ss.fit_transform(X)

    X_t = test_raw.iloc[:, 2:]
    idx_t = test_raw.iloc[:, 0:2].to_numpy()
    Xt_ss = ss.transform(X_t)  # reuse train statistics; do NOT refit

    nf = X_ss.shape[1]     # number of features
    ns = X_ss.shape[0]     # number of training rows
    ns_t = Xt_ss.shape[0]  # number of test rows

    return X_ss, idx, Xt_ss, idx_t, nf, ns, ns_t
141 |
--------------------------------------------------------------------------------
/load_data.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiaxiang-cheng/PyTorch-CNN-for-RUL-Prediction/047d2ff262875232978bd6c6718ba305e03cc6e7/load_data.pyc
--------------------------------------------------------------------------------
/loss_visualization.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 |
def loss_visualization(train_losses, val_losses):
    """Plot the training and validation loss curves on a single figure."""
    for losses, tag in ((train_losses, 'Training loss'), (val_losses, 'Validation loss')):
        plt.plot(losses, label=tag)
    plt.legend()
    plt.show()
10 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | """RUL Prediction with CNN"""
2 |
3 | from torch import nn, optim
4 | from torch.autograd import Variable
5 |
6 | import evaluation
7 | from dataset_prepare import *
8 | from load_data import *
9 | from model import *
10 | from test_prediction import *
11 |
12 | N_EPOCH = 250
13 |
14 |
def train(n_epochs, model, train_x, train_y, test_x, max_cycle_t, y_test):
    """Full-batch training loop with per-epoch test-set evaluation.

    NOTE(review): relies on module-level ``optimizer`` and ``criterion``
    (created under ``if __name__ == "__main__"``) -- confirm they exist
    before calling this from elsewhere.

    Changes vs. the original: the deprecated ``torch.autograd.Variable``
    wrapper (a no-op since PyTorch 0.4) and the always-true
    ``epoch % 1 == 0`` guard were removed; behavior is unchanged.

    :param n_epochs: number of training epochs
    :param model: the CNN to train
    :param train_x: training windows tensor
    :param train_y: integer class targets for CrossEntropyLoss
    :param test_x: test windows tensor
    :param max_cycle_t: observed cycle count per test engine
    :param y_test: true RUL per test engine
    :return: list of per-epoch test RMSE values
    """
    rmse_history = []
    for epoch in range(1, n_epochs + 1):
        model.train()

        # clearing the gradients of the model parameters
        optimizer.zero_grad()

        # full-batch forward pass, loss, backward pass, parameter update
        output_train = model(train_x)
        loss_train = criterion(output_train, train_y)
        loss_train.backward()
        optimizer.step()

        # evaluate on the test set every epoch
        pred = test_prediction(model, test_x)
        _, rmse, score = evaluation.scoring(pred, max_cycle_t, y_test)
        print('Epoch :', epoch, '\t', 'loss :', round(loss_train.item(), 3), '\t',
              "RMSE =", rmse, '\t', "Score =", score)
        rmse_history.append(rmse)

    return rmse_history
44 |
45 |
if __name__ == "__main__":
    # loading data: raw frames, per-engine cycle counts, and true test RUL
    train_raw, test_raw, max_cycle, max_cycle_t, y_test = load_data_FD001()
    # standardized features, (unit, cycle) index arrays, and dataset sizes
    X_ss, idx, Xt_ss, idx_t, nf, ns, ns_t = get_info(train_raw, test_raw)

    # prepare training and validation dataset (15-cycle windows, linear RUL target)
    train_x, train_y = train_val_prepare(max_cycle, idx, X_ss, "linear", nf, ns)
    # prepare testing dataset
    test_x = test_prepare(Xt_ss, idx_t, nf, ns_t)

    # initialize the cnn model
    model = CNN1(nf)
    # defining the optimizer
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    # defining the loss function: RUL estimation is framed as classification
    # over 131 integer classes (0..130), matching the model's output width
    criterion = nn.CrossEntropyLoss()

    # train the model (train() reads `optimizer`/`criterion` from module scope)
    rmse_history = train(N_EPOCH, model, train_x, train_y, test_x, max_cycle_t, y_test)

    # prediction on testing dataset
    predictions = test_prediction(model, test_x)
    # evaluate the prediction accuracy and plot actual vs. predicted RUL
    result, rmse, score = evaluation.scoring(predictions, max_cycle_t, y_test)
    evaluation.visualization(y_test, result, rmse)
    # best test RMSE observed across all epochs
    print(min(rmse_history))
72 |
--------------------------------------------------------------------------------
/model.py:
--------------------------------------------------------------------------------
1 | from torch.nn import Linear, ReLU, Sequential, Conv2d, AvgPool2d, Module, BatchNorm2d
2 |
3 |
class CNN1(Module):
    """Two-stage 2D CNN mapping a (1, 15, nf) window to 131 class logits.

    The first convolution spans all ``nf`` features at once, collapsing the
    feature axis to width 1; the remaining layers operate along time only.
    The 131 outputs correspond to integer RUL classes.
    """

    def __init__(self, nf):
        super(CNN1, self).__init__()

        # spatial trace: 15 x nf -> conv(4,nf) -> 12x1 -> pool -> 6x1
        #                         -> conv(3,1) -> 4x1  -> pool -> 2x1
        self.cnn_layers = Sequential(
            Conv2d(1, 8, kernel_size=(4, nf), stride=1),
            BatchNorm2d(8),
            ReLU(inplace=True),
            AvgPool2d(kernel_size=(2, 1), stride=2),
            Conv2d(8, 14, kernel_size=(3, 1), stride=1),
            BatchNorm2d(14),
            ReLU(inplace=True),
            AvgPool2d(kernel_size=(2, 1), stride=2),
        )

        # 14 channels x 2 x 1 spatial elements remain after the conv stack
        self.linear_layers = Sequential(Linear(14 * 2 * 1, 131))

    def forward(self, x):
        """Run the conv stack, flatten per sample, and emit class logits."""
        features = self.cnn_layers(x)
        flat = features.view(features.size(0), -1)
        return self.linear_layers(flat)
31 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | python==3.8.8
2 | pytorch==1.8.1
3 | pandas==1.2.5
4 | scikit-learn==0.23.2
5 | numpy==1.20.2
6 | matplotlib==3.3.4
7 | scipy==1.6.2
--------------------------------------------------------------------------------
/target_rul.py:
--------------------------------------------------------------------------------
def target_rul(max_cycle, cycle, func):
    """Target RUL label for one cycle of one engine.

    With ``func == "linear"`` the label is the remaining cycle count
    (``max_cycle - cycle``) capped at 130 (piece-wise linear target);
    any other ``func`` yields the constant 100.
    """
    if func != "linear":
        return 100
    return min(max_cycle - cycle, 130)
9 |
--------------------------------------------------------------------------------
/test_prediction.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 |
5 | def test_prediction(model, test_x):
6 | # generating predictions for test set
7 | with torch.no_grad():
8 | output = model(test_x)
9 |
10 | softmax = torch.exp(output).cpu()
11 | prob = list(softmax.numpy())
12 | predictions = np.argmax(prob, axis=1)
13 |
14 | return predictions
15 |
--------------------------------------------------------------------------------