├── .DS_Store
├── .gitignore
├── .idea
│   ├── CSI.iml
│   ├── misc.xml
│   ├── modules.xml
│   ├── vcs.xml
│   └── workspace.xml
├── DataProcessing
│   ├── CsiToAmplitudeAndPhase.py
│   ├── CsiToAmplitudeAndPhase.pyc
│   ├── DataCalculate.py
│   ├── DataCalculate.pyc
│   ├── DataProcess.py
│   ├── Normalize.py
│   ├── Normalize.pyc
│   ├── PhaseSanitization.py
│   ├── PhaseSanitization.pyc
│   ├── SpliceData.py
│   ├── Split.py
│   ├── SplitAandP.py
│   ├── WeightedMovingAverage.py
│   ├── WeightedMovingAverage.pyc
│   ├── __init__.py
│   ├── __init__.pyc
│   └── __pycache__
│       ├── CsiToAmplitudeAndPhase.cpython-36.pyc
│       ├── PhaseSanitization.cpython-36.pyc
│       ├── WeightedMovingAverage.cpython-36.pyc
│       └── __init__.cpython-36.pyc
├── DlTrain
│   ├── CNN.py
│   ├── CNN.pyc
│   ├── Data.py
│   ├── Data.pyc
│   ├── GpusTrain.py
│   ├── GpusTrain.pyc
│   ├── LSTM.py
│   ├── LSTM.pyc
│   ├── Parameters.py
│   ├── Parameters.pyc
│   ├── Train.py
│   ├── Train.pyc
│   ├── Train2.py
│   ├── __init__.py
│   ├── __init__.pyc
│   ├── __pycache__
│   │   ├── CNN.cpython-36.pyc
│   │   ├── Data.cpython-36.pyc
│   │   ├── LSTM.cpython-36.pyc
│   │   ├── Parameters.cpython-36.pyc
│   │   └── __init__.cpython-36.pyc
│   └── a.png
├── Experiment Data Location.md
├── ExperimentalReport.md
├── Net
│   ├── CNN_Init.py
│   ├── CNN_Init.pyc
│   ├── __init__.py
│   ├── __init__.pyc
│   └── __pycache__
│       ├── CNN.cpython-36.pyc
│       └── __init__.cpython-36.pyc
├── Plot
│   ├── Draw.py
│   └── __init__.py
├── Readme.md
├── Test
│   ├── DataProcessingTest.py
│   └── __init__.py
├── Util
│   ├── Matrix.py
│   ├── Matrix.pyc
│   ├── MergeMat.py
│   ├── OpenMatUtil.py
│   ├── OpenTfRecordsUtil.py
│   ├── ReadAndDecodeUtil.py
│   ├── ReadAndDecodeUtil.pyc
│   ├── Readtxt.py
│   ├── WriteAndCodeUtil.py
│   ├── WriteHd5Util.py
│   ├── WriteHd5Util.pyc
│   ├── __init__.py
│   ├── __init__.pyc
│   ├── __pycache__
│   │   ├── ReadAndDecodeUtil.cpython-36.pyc
│   │   ├── Readtxt.cpython-36.pyc
│   │   ├── WriteHd5Util.cpython-36.pyc
│   │   ├── __init__.cpython-36.pyc
│   │   └── loadHd.cpython-36.pyc
│   ├── loadHd.py
│   ├── loadHd.pyc
│   └── open_val_sf.h5
├── auto_update_data.sh
└── 不同数据集对比实验记录.md
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | Data/
3 | Model/
4 | EResult/
5 | ExperientalRecord/
6 | Log/
7 | venv/
8 |
--------------------------------------------------------------------------------
/.idea/CSI.iml:
--------------------------------------------------------------------------------
[XML content not captured in this export]
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
[XML content not captured in this export]
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
[XML content not captured in this export]
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
[XML content not captured in this export]
--------------------------------------------------------------------------------
/.idea/workspace.xml:
--------------------------------------------------------------------------------
[IDE workspace state; the XML markup was not captured in this export. The surviving
fragments show unresolved git merge-conflict markers (<<<<<<< HEAD ... >>>>>>>
c46530fe609db5bf4ed75b0dd4fcc32c8c0ac34c), editor timestamps (1529146872241), and
two breakpoints in file://$PROJECT_DIR$/Train/TrainNet.py at lines 158 and 91.]
--------------------------------------------------------------------------------
/DataProcessing/CsiToAmplitudeAndPhase.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | from math import sqrt
3 | import numpy as np
4 | import scipy.io as scio
5 |
6 | '''
7 | Nrx: 3   number of receive antennas
8 | Ntx: 2   number of transmit antennas
9 | rate: csi[rate]; the rate differs from packet to packet
10 | csi: [2x3x30 double] -> 180 complex values per packet
11 |
12 | '''
13 |
14 |
15 | def getAmplitudesAndPhases(Csi_Mat_Path):  # N is the number of 180-value CSI packets
16 |
17 |     data = scio.loadmat(Csi_Mat_Path)
18 |     csi_data = data['csi']
19 |     N = len(csi_data)
20 |
21 |     '''
22 |     Set N to a small value (e.g. 10) to make debugging easier
23 |     '''
24 |     # N = 30000
25 |
26 |     """
27 |     Compute amplitude and phase from the complex CSI values
28 |     """
29 |     amplitudes = np.ndarray(shape=(N, 30, 6))
30 |     phases = np.ndarray(shape=(N, 30, 6))
31 |
32 |     for m in range(N):
33 |         for i in range(0, 6):
34 |             for j in range(0, 30):
35 |                 index = j + i * 30
36 |                 amplitudes[m][j][i] = sqrt(csi_data[m][index].real ** 2 + csi_data[m][index].imag ** 2)
37 |                 phases[m][j][i] = np.angle(csi_data[m][index])
38 |
39 |     return amplitudes, phases, N
40 |
41 |
42 | def getAmplitudesAndPhasesLength(Csi_Mat_Path):  # returns only N, the packet count
43 |
44 |     data = scio.loadmat(Csi_Mat_Path)
45 |     csi_data = data['csi']
46 |     N = len(csi_data)
47 |     return N
48 |
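49 |
50 | # A minimal vectorized sketch of the same computation (editorial addition, not in
51 | # the original file); it assumes csi_data is an (N, 180) complex array as loaded
52 | # above, so np.abs/np.angle can replace the per-element loops:
53 | #
54 | #     csi = scio.loadmat(Csi_Mat_Path)['csi']
55 | #     csi = csi.reshape(-1, 6, 30).transpose(0, 2, 1)   # (N, 30, 6); index = j + i * 30
56 | #     amplitudes, phases = np.abs(csi), np.angle(csi)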
--------------------------------------------------------------------------------
/DataProcessing/CsiToAmplitudeAndPhase.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DataProcessing/CsiToAmplitudeAndPhase.pyc
--------------------------------------------------------------------------------
/DataProcessing/DataCalculate.py:
--------------------------------------------------------------------------------
1 |
2 | # -*-coding:utf-8-*-
3 |
4 | from DataProcessing.CsiToAmplitudeAndPhase import getAmplitudesAndPhases
5 | from DataProcessing.PhaseSanitization import PhaseSanitization
6 | from DataProcessing.WeightedMovingAverage import weightMoveAverage
7 |
8 |
9 | def DataCalculate(Csi_Mat_Path):
10 |     # compute amplitude and phase from the complex values in the raw CSI data
11 |     amplitudes_and_phases = getAmplitudesAndPhases(Csi_Mat_Path)
12 |     N = amplitudes_and_phases[2]
13 |
14 |     amplitudes = amplitudes_and_phases[0]
15 |     phases = amplitudes_and_phases[1]
16 |     phases2 = phases.copy()  # copy, because PhaseSanitization unwraps its input in place
17 |
18 |     amplitudes2 = weightMoveAverage(amplitudes.copy(), N)  # copy, because weightMoveAverage smooths in place
19 |
20 |     for k in range(0, N):
21 |         phases2[k] = PhaseSanitization(phases2[k], 30, 6)
22 |
23 |
24 |     return amplitudes, phases, amplitudes2, phases2, N
25 |     # amplitudes and amplitudes2 are both n*180; they are written out as n*360, but take care to attach the labels
26 |
27 |
28 | # DataCalculate('/media/xue/软件/CSI/RawMatData/fixed/eating/1/eating_1_1.mat')
29 |
--------------------------------------------------------------------------------
/DataProcessing/DataCalculate.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DataProcessing/DataCalculate.pyc
--------------------------------------------------------------------------------
/DataProcessing/DataProcess.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | import os
3 | import time
4 |
5 | from threading import Thread
6 | import scipy.io as scio
7 |
8 | import numpy as np
9 |
10 | from DataProcessing.CsiToAmplitudeAndPhase import getAmplitudesAndPhasesLength
11 | from DataProcessing.DataCalculate import DataCalculate
12 | from DataProcessing.Normalize import Normalize
13 |
14 |
15 | def dataProcessOfFixedFiles(path, path_p, path_ap, path_p2, path_ap2, doneN, totalN,
16 | begin_time): # path:.../new targetpath:../demo.tfrecords
17 |
18 | files2 = os.listdir(path)
19 |
20 |
21 |
22 | for f2 in files2:
23 |         # f2: eating, setting ...
24 |
25 | files3 = os.listdir(path + '/' + f2)
26 | for f3 in files3:
27 | # f3:1 2 3...
28 | l = int(f3) - 1
29 | files4 = os.listdir(path + '/' + f2 + '/' + f3)
30 |
31 | for f4 in files4:
32 | if '.mat' in f4:
33 | # f4:1_1.mat 2_2.mat...
34 |
35 | result = DataCalculate(path + '/' + f2 + '/' + f3 + '/' + f4)
36 |
37 |
38 |                     '''
39 |                     Data after the denoising and related processing steps
40 |                     '''
41 | ap = result[0]
42 | p = result[1]
43 | ap2 = result[2]
44 | p2 = result[3]
45 |
46 |
47 |
48 | N = result[4]
49 | label = [l] * N
50 |
51 | ap = ap.reshape(N, -1)
52 | ap2 = ap2.reshape(N, -1)
53 | p = p.reshape(N, -1)
54 | p2 = p2.reshape(N, -1)
55 |
56 |                     # concatenated data
57 |                     # x = np.concatenate((ap.reshape(N, -1), p.reshape(N, -1)), axis=1)
58 |
59 |                     # length = x.shape[0]
60 |
61 | for i in range(N):
62 | ap[i] = Normalize(ap[i])
63 | p[i] = Normalize(p[i])
64 | ap2[i] = Normalize(ap2[i])
65 | p2[i] = Normalize(p2[i])
66 |
67 | scio.savemat(path_ap + '/' + f2 + '-' + f3 + '-' + f4, {'x': ap, 'y': label})
68 | scio.savemat(path_ap2 + '/' + f2 + '-' + f3 + '-' + f4, {'x': ap2, 'y': label})
69 | scio.savemat(path_p + '/' + f2 + '-' + f3 + '-' + f4, {'x': p, 'y': label})
70 | scio.savemat(path_p2 + '/' + f2 + '-' + f3 + '-' + f4, {'x': p2, 'y': label})
71 |
72 | doneN += N
73 |
74 |                     now_time = time.time()
75 |                     elapsed = round(now_time - begin_time, 2)
76 |                     secod = elapsed
77 |                     fenzhong = int(secod / 60)
78 |                     secod = round(secod - fenzhong * 60, 2)
79 |                     xiaoshi = int(fenzhong / 60)
80 |                     fenzhong = fenzhong - xiaoshi * 60
81 |                     persent = round((float(doneN) / totalN) * 100, 2)
82 |                     str1 = ('Progress: ' + str(persent) + '%, elapsed: ' + str(xiaoshi) + 'h ' +
83 |                             str(fenzhong) + 'm ' + str(secod) + 's, estimated remaining: ')
84 |
85 |                     secod = int(elapsed * (totalN - doneN) / doneN)
86 |                     fenzhong = int(secod / 60)
87 |                     secod = secod - fenzhong * 60
88 |                     xiaoshi = int(fenzhong / 60)
89 |                     fenzhong = fenzhong - xiaoshi * 60
90 |
91 |                     str2 = str(xiaoshi) + 'h ' + str(fenzhong) + 'm ' + str(secod) + 's'
92 |                     print(str1 + str2 + ' dataProcessOfFixedFiles-' + f2 + '-' + f3 + '-' + f4)
93 |
94 |     print('success:' + path)
95 |
96 |
97 | def dataProcessOfOpenAndSemiFiles(path, path_p, path_ap, path_p2,
98 | path_ap2, doneN, totalN,
99 | begin_time): # path:.../new targetpath:../demo.tfrecords
100 |
101 | files3 = os.listdir(path)
102 |
103 | for f3 in files3:
104 | # f3:1 2 3...
105 |
106 | files4 = os.listdir(path + '/' + f3)
107 |
108 | l = int(f3) - 1
109 | for f4 in files4:
110 | if '.mat' in f4:
111 | # f4:1_1.mat 2_2.mat...
112 | result = DataCalculate(path + '/' + f3 + '/' + f4)
113 |
114 | ap = result[0]
115 | p = result[1]
116 | ap2 = result[2]
117 | p2 = result[3]
118 |
119 |
120 |
121 | N = result[4]
122 | label = [l] * N
123 |
124 | ap = ap.reshape(N, -1)
125 | ap2 = ap2.reshape(N, -1)
126 | p = p.reshape(N, -1)
127 | p2 = p2.reshape(N, -1)
128 |
129 |                 # concatenated data
130 |                 # x = np.concatenate((ap.reshape(N, -1), p.reshape(N, -1)), axis=1)
131 |
132 |                 # length = x.shape[0]
133 |
134 | for i in range(N):
135 | ap[i] = Normalize(ap[i])
136 | p[i] = Normalize(p[i])
137 | ap2[i] = Normalize(ap2[i])
138 | p2[i] = Normalize(p2[i])
139 |
140 | scio.savemat(path_ap + '/' + f3 + '-' + f4, {'x': ap, 'y': label})
141 | scio.savemat(path_ap2 + '/' + f3 + '-' + f4, {'x': ap2, 'y': label})
142 | scio.savemat(path_p + '/' + f3 + '-' + f4, {'x': p, 'y': label})
143 | scio.savemat(path_p2 + '/' + f3 + '-' + f4, {'x': p2, 'y': label})
144 |
145 | doneN += N
146 |
147 |                 now_time = time.time()
148 |                 elapsed = round(now_time - begin_time, 2)
149 |                 secod = elapsed
150 |                 fenzhong = int(secod / 60)
151 |                 secod = round(secod - fenzhong * 60, 2)
152 |                 xiaoshi = int(fenzhong / 60)
153 |                 fenzhong = fenzhong - xiaoshi * 60
154 |                 persent = round((float(doneN) / totalN) * 100, 2)
155 |
156 |                 str1 = ('Progress: ' + str(persent) + '%, elapsed: ' + str(xiaoshi) + 'h ' +
157 |                         str(fenzhong) + 'm ' + str(secod) + 's, estimated remaining: ')
158 |                 secod = int(elapsed * (totalN - doneN) / doneN)
159 |                 fenzhong = int(secod / 60)
160 |                 secod = secod - fenzhong * 60
161 |                 xiaoshi = int(fenzhong / 60)
162 |                 fenzhong = fenzhong - xiaoshi * 60
163 |
164 |                 str2 = str(xiaoshi) + 'h ' + str(fenzhong) + 'm ' + str(secod) + 's'
165 |                 print(str1 + str2 + ' dataProcessOfOpenAndSemiFiles-' + path + '-' + f3 + '-' + f4)
166 |
167 | print('success:' + path)
168 |
169 |
170 | def getSemiOrOpenLength(path):
171 | N = 0
172 | files3 = os.listdir(path)
173 |
174 | for f3 in files3:
175 | # f3:1 2 3...
176 |
177 | files4 = os.listdir(path + '/' + f3)
178 |
179 | l = int(f3) - 1
180 | for f4 in files4:
181 | if '.mat' in f4:
182 | N += getAmplitudesAndPhasesLength(path + '/' + f3 + '/' + f4)
183 |
184 | return N
185 |
186 |
187 | def getDataLength(fixed_path, open_path, semi_path):
188 | N = 0
189 |
190 | files2 = os.listdir(fixed_path)
191 |
192 | for f2 in files2:
193 |         # f2: eating, setting ...
194 |
195 | files3 = os.listdir(fixed_path + '/' + f2)
196 | for f3 in files3:
197 | # f3:1 2 3...
198 | l = int(f3) - 1
199 | files4 = os.listdir(fixed_path + '/' + f2 + '/' + f3)
200 |
201 | for f4 in files4:
202 | if '.mat' in f4:
203 | # print('dataProcessOfFixedFiles-' + f2 + '-' + f3 + '-' + f4)
204 | # f4:1_1.mat 2_2.mat...
205 | N += getAmplitudesAndPhasesLength(fixed_path + '/' + f2 + '/' + f3 + '/' + f4)
206 |     print('N of fixed:' + str(N))
207 |
208 |     N2 = getSemiOrOpenLength(open_path)
209 |     print('N of open:' + str(N2))
210 |     N3 = getSemiOrOpenLength(semi_path)
211 |     print('N of semi:' + str(N3))
212 |
213 | return N + N2 + N3
214 |
215 |
216 | if __name__ == '__main__':
217 | '''
218 | N of fixed:39600968
219 | N of open:7476236
220 | N of semi:12373101
221 | total N:59450305
222 | '''
223 | path_fixed = '/media/xue/软件/CSI/RawMatData/fixed'
224 | path_open = '/media/xue/软件/CSI/RawMatData/open'
225 | path_semi = '/media/xue/软件/CSI/RawMatData/semi'
226 |
227 | # N = getDataLength(path_fixed, path_open, path_semi)
228 |     # print('total N:' + str(N))
229 |
230 |     begin_time = time.time()
231 |
232 | '''
233 |
234 | dataProcessOfFixedFiles(path='/media/xue/软件/CSI/RawMatData/fixed',
235 | path_ap='/media/xue/软件/CSI/MatData/AmplitudeWithOutNoiseRemoval',
236 | path_ap2='/media/xue/软件/CSI/MatData/AmplitudeWithNoiseRemoval',
237 | path_p='/media/xue/软件/CSI/MatData/PhaseWithOutNoiseRemoval',
238 | path_p2='/media/xue/软件/CSI/MatData/PhaseWithNoiseRemoval',
239 |                             begin_time=begin_time, doneN=0, totalN=59450305)
240 | '''
241 |
242 | '''
243 | dataProcessOfOpenAndSemiFiles(path='/media/xue/Data Storage/CSI/RawMatData/open',
244 | path_ap='/media/xue/Data Storage/CSI/MatData/AmplitudeWithOutNoiseRemoval',
245 | path_ap2='/media/xue/Data Storage/CSI/MatData/AmplitudeWithNoiseRemoval',
246 | path_p='/media/xue/Data Storage/CSI/MatData/PhaseWithOutNoiseRemoval',
247 | path_p2='/media/xue/Data Storage/CSI/MatData/PhaseWithNoiseRemoval',
248 |                                   begin_time=begin_time, doneN=39600968, totalN=59450305
249 | )
250 | '''
251 |
252 | dataProcessOfOpenAndSemiFiles(path='/media/xue/Data Storage/CSI/RawMatData/semi',
253 | path_ap='/media/xue/Data Storage/CSI/MatData/AmplitudeWithOutNoiseRemoval',
254 | path_ap2='/media/xue/Data Storage/CSI/MatData/AmplitudeWithNoiseRemoval',
255 | path_p='/media/xue/Data Storage/CSI/MatData/PhaseWithOutNoiseRemoval',
256 | path_p2='/media/xue/Data Storage/CSI/MatData/PhaseWithNoiseRemoval',
257 |                                   begin_time=begin_time, doneN=39600968 + 7476236, totalN=59450305
258 | )
259 |
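260 |
261 | # Editorial sketch (not part of the original file): the hour/minute/second
262 | # formatting above is duplicated in both processing functions; a helper like the
263 | # hypothetical format_hms below would factor it out:
264 | #
265 | #     def format_hms(seconds):
266 | #         minutes, seconds = divmod(int(seconds), 60)
267 | #         hours, minutes = divmod(minutes, 60)
268 | #         return str(hours) + 'h ' + str(minutes) + 'm ' + str(seconds) + 's'
269 | #
270 | #     remaining = elapsed * (totalN - doneN) / doneN
271 | #     print('Progress: ' + str(persent) + '%, elapsed: ' + format_hms(elapsed) + ', remaining: ' + format_hms(remaining))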
--------------------------------------------------------------------------------
/DataProcessing/Normalize.py:
--------------------------------------------------------------------------------
1 | import os
2 | import scipy.io as scio
3 | import numpy as np
4 |
5 |
6 | def Normalize(data):
7 |
8 |     mx = max(data)
9 |     mn = min(data)
10 |
11 |     if mx <= 1 and mn >= -1:
12 |         return data  # already within [-1, 1]
13 |
14 |     m = np.mean(data)
15 |     # mean normalization: center on the mean, scale by the range
16 |     return [(float(i) - m) / (mx - mn) for i in data]
17 |
18 |
19 | def NormalizeFiles(path, target_path):
20 |     files1 = os.listdir(path)
21 |     for f1 in files1:
22 |         data = scio.loadmat(os.path.join(path, f1))
23 |         x = data['x']
24 |         y = data['y']
25 |         l = len(x)
26 |
27 |         for i in range(0, l):
28 |             x[i] = Normalize(x[i])
29 |             print(i)
30 |
31 |         print('success:' + path)
32 |         scio.savemat(os.path.join(target_path, f1),
33 |                      {'x': x, 'y': y})
34 |
35 |
36 |
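37 |
38 | # Worked example (editorial addition): Normalize leaves data already inside
39 | # [-1, 1] untouched and mean-centers/range-scales anything else, e.g.
40 | #
41 | #     >>> Normalize(np.array([0.2, -0.5, 1.0]))   # already in range -> returned as-is
42 | #     >>> Normalize(np.array([0.0, 5.0, 10.0]))   # -> [-0.5, 0.0, 0.5]
43 | #
44 | # (mean 5, range 10: (0-5)/10, (5-5)/10, (10-5)/10)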
--------------------------------------------------------------------------------
/DataProcessing/Normalize.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DataProcessing/Normalize.pyc
--------------------------------------------------------------------------------
/DataProcessing/PhaseSanitization.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | '''
6 | Input:
7 |     raw phase: PM (180 ---> 6*30)
8 |     number of subcarriers: Sub (30)
9 |     number of Tx-Rx pairs: M (6)
10 |
11 | Output:
12 |     calibrated phase values: PC
13 |
14 | Algorithm:
15 |     1: unwrap
16 |     2: polyfit
17 | '''
18 |
19 |
20 | def plotPhase(data, name, xlable='', ylable=''):
21 | plt.plot(data)
22 |
23 | plt.xlabel(xlable)
24 | plt.ylabel(ylable)
25 |
26 | plt.savefig('../EResult/' + name + '.png')
27 | plt.close()
28 |
29 |
30 | def PhaseSanitization(pm, sub=30, m=6):
31 |
32 |     for i in range(0, m):
33 |         pm[:, i] = np.unwrap(pm[:, i])  # note: modifies pm in place
34 |
35 |
36 |     y = np.mean(pm, 1)
37 |     pc = np.ndarray(shape=(sub, m))
38 |
39 |     x = range(0, sub)
40 |     p = np.polyfit(x, y, 1)
41 |     yf = [p[0] * tx for tx in x]  # slope term of the linear fit; the offset p[1] is left in
42 |
43 |     for t in range(0, m):
44 |         for s in range(0, sub):
45 |             pc[s][t] = pm[s][t] - yf[s]
46 |
47 |     return pc
48 |
49 |
50 |
51 |
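52 |
53 | # Minimal usage sketch (editorial addition; synthetic data, not from the project):
54 | #
55 | #     raw = np.random.uniform(-np.pi, np.pi, size=(30, 6))   # one packet: 30 subcarriers x 6 Tx-Rx pairs
56 | #     calibrated = PhaseSanitization(raw.copy(), 30, 6)       # pass a copy: the input is unwrapped in place
57 | #     plotPhase(calibrated[:, 0], 'sanitized_pair0', 'subcarrier', 'phase (rad)')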
--------------------------------------------------------------------------------
/DataProcessing/PhaseSanitization.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DataProcessing/PhaseSanitization.pyc
--------------------------------------------------------------------------------
/DataProcessing/SpliceData.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | import os
3 | import time
4 |
5 | import scipy.io as scio
6 | import numpy as np
7 | import tensorflow as tf
8 | import shutil
9 |
10 |
11 | # build an int64 feature for a TFRecord Example
12 | def _int64_feature(value):
13 | return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
14 |
15 |
16 | # build a bytes (string) feature for a TFRecord Example
17 | def _bytes_feature(value):
18 | return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
19 |
20 |
21 | def spliceList(pathA, pathP, targetPath, files, folderName, fraction, start_time, fragmentLength=200):
22 | if not os.path.exists(targetPath + '/' + folderName):
23 | os.mkdir(targetPath + '/' + folderName)
24 | else:
25 | shutil.rmtree(targetPath + '/' + folderName)
26 | os.mkdir(targetPath + '/' + folderName)
27 |
28 | writer_train = tf.python_io.TFRecordWriter(targetPath + '/' + folderName + '/' + 'train.tfrecords')
29 | writer_val = tf.python_io.TFRecordWriter(targetPath + '/' + folderName + '/' + 'val.tfrecords')
30 |
31 | files_size = float(len(files))
32 |     count = float(0)  # incremented once per processed file
33 |
34 | for file in files:
35 | data_A = scio.loadmat(pathA + '/' + file)
36 | data_P = scio.loadmat(pathP + '/' + file)
37 |
38 | data_A_x = data_A['x']
39 | data_A_y = data_A['y']
40 | data_P_x = data_P['x']
41 |
42 | x = np.concatenate((data_A_x, data_P_x), axis=1)
43 |
44 | length = data_A_x.shape[0]
45 | y = data_A_y[0][0]
46 |
47 | index = int(length / fragmentLength)
48 |
49 | trainindex = int(index * 0.8)
50 |
51 | traincount = 0
52 | valcount = 0
53 |
54 | for i in range(index):
55 | localx = np.array(x[i * fragmentLength:(i + 1) * fragmentLength]) # 200*360
56 | data_raw = localx.tostring()
57 |
58 | example = tf.train.Example(features=tf.train.Features(feature={
59 | 'label': _int64_feature(y),
60 | 'data_raw': _bytes_feature(data_raw)
61 | }))
62 |
63 | if i <= trainindex:
64 | traincount += 1
65 | writer_train.write(example.SerializeToString())
66 | else:
67 | valcount += 1
68 | writer_val.write(example.SerializeToString())
69 |
70 |         count += 1
71 |         t_fraction = fraction + round(count / files_size, 5) / 6  # approximate overall progress (this pass ~1/6 of the run)
72 |         now_time = time.time()
73 |         elapsed = round(now_time - start_time, 2)
74 |         secod = elapsed
75 |         fenzhong = int(secod / 60)
76 |         secod = round(secod - fenzhong * 60, 2)
77 |         xiaoshi = int(fenzhong / 60)
78 |         fenzhong = fenzhong - xiaoshi * 60
79 |         str1 = ('Progress: ' + str(round(t_fraction * 100, 2)) + '%, elapsed: ' + str(xiaoshi) + 'h ' +
80 |                 str(fenzhong) + 'm ' + str(secod) + 's, estimated remaining: ')
81 |
82 |         secod = round(elapsed * (1 - t_fraction) / t_fraction, 2)
83 |         fenzhong = int(secod / 60)
84 |         secod = round(secod - fenzhong * 60, 2)
85 |         xiaoshi = int(fenzhong / 60)
86 |         fenzhong = fenzhong - xiaoshi * 60
87 |
88 |         str1 = str1 + str(xiaoshi) + 'h ' + str(fenzhong) + 'm ' + str(secod) + 's'
89 |         print(str1)
90 |
91 | writer_train.close()
92 | writer_val.close()
93 |
94 |
95 | def spliceListSingle(pathA, targetPath, files, folderName, fraction, start_time, fragmentLength=200):
96 | if not os.path.exists(targetPath + '/' + folderName):
97 | os.mkdir(targetPath + '/' + folderName)
98 | else:
99 | shutil.rmtree(targetPath + '/' + folderName)
100 | os.mkdir(targetPath + '/' + folderName)
101 |
102 | writer_train = tf.python_io.TFRecordWriter(targetPath + '/' + folderName + '/' + 'train.tfrecords')
103 | writer_val = tf.python_io.TFRecordWriter(targetPath + '/' + folderName + '/' + 'val.tfrecords')
104 |
105 | files_size = float(len(files))
106 |     count = float(0)  # incremented once per processed file
107 |
108 | for file in files:
109 | data_A = scio.loadmat(pathA + '/' + file)
110 |
111 | x = data_A['x']
112 | data_A_y = data_A['y']
113 |
114 | length = x.shape[0]
115 | y = data_A_y[0][0]
116 |
117 | index = int(length / fragmentLength)
118 |
119 | trainindex = int(index * 0.8)
120 |
121 | traincount = 0
122 | valcount = 0
123 |
124 | for i in range(index):
125 | localx = np.array(x[i * fragmentLength:(i + 1) * fragmentLength]) # 200*360
126 | data_raw = localx.tostring()
127 |
128 | example = tf.train.Example(features=tf.train.Features(feature={
129 | 'label': _int64_feature(y),
130 | 'data_raw': _bytes_feature(data_raw)
131 | }))
132 |
133 | if i <= trainindex:
134 | traincount += 1
135 | writer_train.write(example.SerializeToString())
136 | else:
137 | valcount += 1
138 | writer_val.write(example.SerializeToString())
139 |
140 |         count += 1
141 |         t_fraction = fraction + round(count / files_size, 5) / 3  # approximate overall progress (this pass ~1/3 of this job)
142 |         now_time = time.time()
143 |         elapsed = round(now_time - start_time, 2)
144 |         secod = elapsed
145 |         fenzhong = int(secod / 60)
146 |         secod = round(secod - fenzhong * 60, 2)
147 |         xiaoshi = int(fenzhong / 60)
148 |         fenzhong = fenzhong - xiaoshi * 60
149 |         str1 = ('Progress: ' + str(round(t_fraction * 100, 2)) + '%, elapsed: ' + str(xiaoshi) + 'h ' +
150 |                 str(fenzhong) + 'm ' + str(secod) + 's, estimated remaining: ')
151 |
152 |         secod = round(elapsed * (1 - t_fraction) / t_fraction, 2)
153 |         fenzhong = int(secod / 60)
154 |         secod = round(secod - fenzhong * 60, 2)
155 |         xiaoshi = int(fenzhong / 60)
156 |         fenzhong = fenzhong - xiaoshi * 60
157 |         str1 = str1 + str(xiaoshi) + 'h ' + str(fenzhong) + 'm ' + str(secod) + 's'
158 |         print(str1)
159 |
160 | writer_train.close()
161 | writer_val.close()
162 |
163 |
164 | def spliceProcessSingle(pathA, targetPath, fraction, start_time):
165 | filesA = os.listdir(pathA)
166 |
167 | files_open = []
168 | files_semi = []
169 | files_fixed = []
170 |
171 | for file in filesA:
172 | if 'open' in file:
173 | files_open.append(file)
174 | elif 'semi' in file:
175 | files_semi.append(file)
176 | else:
177 | files_fixed.append(file)
178 |
179 |     spliceListSingle(pathA, targetPath, files_open, 'open', fraction + 0, start_time)
180 |     spliceListSingle(pathA, targetPath, files_semi, 'semi', fraction + 1.0 / 3, start_time)
181 |     spliceListSingle(pathA, targetPath, files_fixed, 'fixed', fraction + 2.0 / 3, start_time)
182 |
183 |     print('success single:' + targetPath)
184 |
185 |
186 | def spliceProcess(pathA, pathP, targetPath, fraction, start_time):
187 | filesA = os.listdir(pathA)
188 |
189 | files_open = []
190 | files_semi = []
191 | files_fixed = []
192 |
193 | for file in filesA:
194 |
195 | if 'open' in file:
196 | files_open.append(file)
197 | elif 'semi' in file:
198 | files_semi.append(file)
199 | else:
200 | files_fixed.append(file)
201 |
202 |     spliceList(pathA, pathP, targetPath, files_open, 'open', fraction + 0, start_time)
203 |     spliceList(pathA, pathP, targetPath, files_semi, 'semi', fraction + 1.0 / 3, start_time)
204 |     spliceList(pathA, pathP, targetPath, files_fixed, 'fixed', fraction + 2.0 / 3, start_time)
205 |
206 |     print('success:' + targetPath)
207 |
208 |
209 | if __name__ == '__main__':
210 | AmplitudeWithNoiseRemoval = '/media/xue/Data Storage/CSI/MatData/AmplitudeWithNoiseRemoval'
211 | AmplitudeWithOutNoiseRemoval = '/media/xue/Data Storage/CSI/MatData/AmplitudeWithOutNoiseRemoval'
212 | PhaseWithNoiseRemoval = '/media/xue/Data Storage/CSI/MatData/PhaseWithNoiseRemoval'
213 | PhaseWithOutNoiseRemoval = '/media/xue/Data Storage/CSI/MatData/PhaseWithOutNoiseRemoval'
214 |
215 | AmplitudeWithout_PhaseWith = '/media/xue/Data Storage/CSI/TfRecordsData/AmplitudeWithout_PhaseWith'
216 | AmplitudeWithOut_PhaseWithout = '/media/xue/Data Storage/CSI/TfRecordsData/AmplitudeWithOut_PhaseWithout'
217 | AmplitudeWith_PhaseWith = '/media/xue/Data Storage/CSI/TfRecordsData/AmplitudeWith_PhaseWith'
218 | AmplitudeWith_PhaseWithout = '/media/xue/Data Storage/CSI/TfRecordsData/AmplitudeWith_PhaseWithout'
219 |
220 | OnlyAmplitude = '/media/xue/Data Storage/CSI/TfRecordsData/OnlyAmplitude'
221 | OnlyPhase = '/media/xue/Data Storage/CSI/TfRecordsData/OnlyPhase'
222 |
223 | start_time = time.time()
224 |
225 | # spliceProcess(AmplitudeWithOutNoiseRemoval, PhaseWithNoiseRemoval, AmplitudeWithout_PhaseWith, 0, start_time)
226 | # spliceProcess(AmplitudeWithOutNoiseRemoval, PhaseWithOutNoiseRemoval, AmplitudeWithOut_PhaseWithout, 1 / 6,
227 | # start_time)
228 | # spliceProcess(AmplitudeWithNoiseRemoval, PhaseWithNoiseRemoval, AmplitudeWith_PhaseWith, 2 / 6, start_time)
229 | # spliceProcess(AmplitudeWithNoiseRemoval, PhaseWithOutNoiseRemoval, AmplitudeWith_PhaseWithout, 3 / 6, start_time)
230 |
231 |
232 |     spliceProcessSingle(AmplitudeWithNoiseRemoval, OnlyAmplitude, 4.0 / 6, start_time)
233 |     spliceProcessSingle(PhaseWithNoiseRemoval, OnlyPhase, 5.0 / 6, start_time)
234 |
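235 |
236 | # Editorial sketch of the matching reader side (assumes the feature keys written
237 | # above; the repo's Util/ReadAndDecodeUtil.py is the authoritative version):
238 | #
239 | #     def parse_example(serialized):
240 | #         features = tf.parse_single_example(serialized, features={
241 | #             'label': tf.FixedLenFeature([], tf.int64),
242 | #             'data_raw': tf.FixedLenFeature([], tf.string)})
243 | #         x = tf.decode_raw(features['data_raw'], tf.float64)  # dtype must match the array written by tostring()
244 | #         return x, features['label']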
--------------------------------------------------------------------------------
/DataProcessing/Split.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import os
3 |
4 | import scipy.io as scio
5 |
6 | import tensorflow as tf
7 |
8 | from Util.WriteHd5Util import writeToH5
9 |
10 | import numpy as np
11 |
12 | import random
13 |
14 | class bean:
15 |
16 | def __init__(self, x, y):
17 | self.x = x
18 | self.y = y
19 |
20 |
21 | # build an int64 feature for a TFRecord Example
22 | def _int64_feature(value):
23 | return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
24 |
25 |
26 | # build a bytes (string) feature for a TFRecord Example
27 | def _bytes_feature(value):
28 | return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
29 |
30 |
31 | '''
32 | The incoming data has shape N*360 (x) and 1*N (y).
33 | Slice it into fragments and return the pieces.
34 | '''
35 |
36 |
37 | def split(x, y, fragmentLength=200):
38 |     length = len(y[0])
39 |     index = int(length / fragmentLength)
40 |
41 | tempx = []
42 | tempy = []
43 |
44 | for i in range(index):
45 | localx = x[i * fragmentLength:(i + 1) * fragmentLength] # 200*360
46 | # localx = np.reshape(localx, newshape=(-1, fragmentLength * 360))
47 | tempx.append(localx)
48 | tempy.append(y[0][i])
49 |
50 | return (tempx, tempy)
51 |
52 |
53 | def getshuffefilelist(files):
54 | fileslist = []
55 |
56 | for i in range(1, 6):
57 | for file in files:
58 | if file[0] == str(i):
59 | fileslist.append(file)
60 | files.remove(file)
61 | break
62 |
63 | return fileslist, files
64 |
65 |
66 | '''
67 | Read the .mat files in the folder one by one, split (slice) each file, buffer the pieces in memory, then shuffle them and write them out (as TFRecord files here)
68 | '''
69 |
70 |
71 | def SplitProcess(sourcePath, targetPath, fragmentLength=200):
72 | files = os.listdir(sourcePath)
73 | writer_train = tf.python_io.TFRecordWriter(targetPath + '/' + 'train.tfrecords')
74 | writer_val = tf.python_io.TFRecordWriter(targetPath + '/' + 'val.tfrecords')
75 |
76 | traincount=0
77 | valcount=0
78 |
79 | for file in files:
80 |         print(file)
81 | data = scio.loadmat(sourcePath + '/' + file)
82 | tempx = data['x']
83 | tempy = data['y']
84 |
85 |         length = len(tempy[0])
86 |         index = int(length / fragmentLength)
87 |
88 | trainindex = int(index * 0.8)
89 |
90 | for i in range(index):
91 | localx = np.array(tempx[i * fragmentLength:(i + 1) * fragmentLength]) # 200*360
92 | data_raw = localx.tostring()
93 | label = int(tempy[0][i])
94 | example = tf.train.Example(features=tf.train.Features(feature={
95 | 'label': _int64_feature(label),
96 | 'data_raw': _bytes_feature(data_raw)
97 | }))
98 |
99 | if i <= trainindex:
100 | traincount+=1
101 | writer_train.write(example.SerializeToString())
102 | else:
103 | valcount+=1
104 | writer_val.write(example.SerializeToString())
105 |
106 | writer_train.close()
107 | writer_val.close()
108 |
109 |     print(sourcePath + '-traincount: ' + str(traincount))
110 |     print(sourcePath + '-valcount: ' + str(valcount))
111 |
112 |
113 |
114 | if __name__ == '__main__':
115 | fragmentLength=1000
116 | fixed_path = '/data/after-dataprocess/fixed/'
117 | fixed_hd_target = '/data/after-split'+str(fragmentLength)+'/Fixed'
118 |
119 | SplitProcess(fixed_path, fixed_hd_target, fragmentLength=fragmentLength)
120 |
--------------------------------------------------------------------------------
/DataProcessing/SplitAandP.py:
--------------------------------------------------------------------------------
1 | import os
2 | import scipy.io as scio
3 |
4 |
5 | def splitAandP(path, pathA, pathP):
6 |     files1 = os.listdir(path)
7 |
8 |     for f1 in files1:  # fixed open semi
9 |         files2 = os.listdir(path + '/' + f1)
10 |         os.makedirs(pathA + '/' + f1)
11 |         os.makedirs(pathP + '/' + f1)
12 |         for f2 in files2:  # .mat
13 |             data = scio.loadmat(path + '/' + f1 + '/' + f2)
14 |             tempx = data['x']
15 |             tempy = data['y']
16 |
17 |             # The original function stopped here. A hedged completion, assuming x is
18 |             # the N*360 concatenation [amplitude | phase] built in SpliceData.py:
19 |             half = tempx.shape[1] // 2
20 |             scio.savemat(pathA + '/' + f1 + '/' + f2, {'x': tempx[:, :half], 'y': tempy})
21 |             scio.savemat(pathP + '/' + f1 + '/' + f2, {'x': tempx[:, half:], 'y': tempy})
--------------------------------------------------------------------------------
/DataProcessing/WeightedMovingAverage.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 |
3 | '''
4 | Denoise (smooth) the amplitude with a weighted moving average.
5 | m is set to 100.
6 |
7 | Usage: Amplitude = weightMoveAverage(Amplitude, N, m=100)
8 |
9 | '''
10 |
11 | import numpy as np
12 | import matplotlib.pyplot as plt
13 |
14 |
15 | def plotAmplitudes(data, name, xlable='', ylable=''):
16 | plt.plot(data)
17 |
18 | plt.xlabel(xlable)
19 | plt.ylabel(ylable)
20 |
21 | plt.savefig('../EResult/' + name + '.png')
22 | plt.close()
23 |
24 |
25 | def weightMoveAverage(amplitude, N, m=100):  # N is the length (packet count) of amplitude
26 |
27 | m_item_sum = mItemSum(m)
28 |
29 |     for a in range(0, 30):
30 |         for t in range(0, 6):
31 |             ass = amplitude[:, a, t]
32 |             amplitude[:, a, t] = demo(ass, m, N, m_item_sum)
33 |
34 |
35 |
36 |
37 |
38 | return amplitude
39 |
40 |
41 | def mItemSum(m):
42 | m_item_sum = 0
43 | for i in range(1, m + 1):
44 | m_item_sum = m_item_sum + i
45 | return m_item_sum
46 |
47 |
48 | def demo(ass, m, N, m_item_sum):  # weighted moving average over one subcarrier stream, in place
49 |     for n in range(m - 1, N):
50 |         acc = 0
51 |         x = ass[n - m + 1:n + 1]
52 |         y = range(1, m + 1)
53 |         for a, b in zip(x, y):
54 |             acc += a * b
55 |         ass[n] = acc / m_item_sum
56 |     return ass
57 |
58 |
59 |
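60 |
61 | # Vectorized sketch (editorial addition): the normalized weights 1..m form a linear
62 | # kernel, so one stream can also be smoothed with np.convolve. Unlike demo(), which
63 | # updates ass in place so later windows see already-smoothed values, this variant
64 | # uses only the raw samples (assumes len(ass) >= m):
65 | #
66 | #     w = np.arange(1, m + 1, dtype=float) / mItemSum(m)
67 | #     smoothed = ass.copy()
68 | #     smoothed[m - 1:] = np.convolve(ass, w[::-1], mode='valid')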
--------------------------------------------------------------------------------
/DataProcessing/WeightedMovingAverage.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DataProcessing/WeightedMovingAverage.pyc
--------------------------------------------------------------------------------
/DataProcessing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DataProcessing/__init__.py
--------------------------------------------------------------------------------
/DataProcessing/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DataProcessing/__init__.pyc
--------------------------------------------------------------------------------
/DataProcessing/__pycache__/CsiToAmplitudeAndPhase.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DataProcessing/__pycache__/CsiToAmplitudeAndPhase.cpython-36.pyc
--------------------------------------------------------------------------------
/DataProcessing/__pycache__/PhaseSanitization.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DataProcessing/__pycache__/PhaseSanitization.cpython-36.pyc
--------------------------------------------------------------------------------
/DataProcessing/__pycache__/WeightedMovingAverage.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DataProcessing/__pycache__/WeightedMovingAverage.cpython-36.pyc
--------------------------------------------------------------------------------
/DataProcessing/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DataProcessing/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/DlTrain/CNN.py:
--------------------------------------------------------------------------------
1 |
2 | #coding:utf-8
3 |
4 | import tensorflow as tf
5 |
6 | from DlTrain.Parameters import trainBatchSize, lstmTimeStep, lstmHiddenUnits, classes
7 |
8 | '''
9 | CNN
10 | '''
11 |
12 |
13 | def cnn_weight_variable(shape):
14 | initializer = tf.contrib.layers.xavier_initializer()
15 | return tf.Variable(initializer(shape))
16 |
17 |
18 | def cnn_bias_variable(shape):
19 | initial = tf.constant(0.1, shape=shape)
20 | return tf.Variable(initial)
21 |
22 |
23 | def conv2d(c_input, c_filter, c_strides):
24 | return tf.nn.conv2d(input=c_input, filter=c_filter, strides=c_strides, padding='VALID')
25 |
26 |
27 | def max_pool_2x2(p_input, p_ksize, p_strides):
28 | return tf.nn.max_pool(value=p_input, ksize=p_ksize, strides=p_strides, padding='VALID')
29 |
30 |
31 | '''
32 | CNN Net
33 | '''
34 |
35 |
36 | def CNN(in_x):
37 | in_x = tf.reshape(in_x, [trainBatchSize, lstmTimeStep, lstmHiddenUnits, 1])
38 |
39 | w_conv1 = cnn_weight_variable([5, 5, 1, 6])
40 | b_conv1 = cnn_bias_variable([6])
41 |
42 | h_conv1 = tf.nn.relu(conv2d(c_input=in_x, c_filter=w_conv1, c_strides=[1, 1, 1, 1]) + b_conv1)
43 | h_pool1 = max_pool_2x2(h_conv1, [1, 2, 2, 1], [1, 2, 2, 1])
44 |
45 | w_conv2 = cnn_weight_variable([5, 3, 6, 10])
46 | b_conv2 = cnn_bias_variable([10])
47 | h_conv2 = tf.nn.relu(conv2d(c_input=h_pool1, c_filter=w_conv2, c_strides=[1, 3, 3, 1]) + b_conv2)
48 |
49 |     h_pool3_flat = tf.reshape(h_conv2, [-1, 3200])  # flatten the 32x10x10 feature map (32 * 10 * 10 = 3200)
50 |
51 | w_fc1 = cnn_weight_variable([3200, 1000])
52 | b_fc1 = cnn_bias_variable([1000])
53 | h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, w_fc1) + b_fc1)
54 |
55 | w_fc2 = cnn_weight_variable([1000, 200])
56 | b_fc2 = cnn_bias_variable([200])
57 | h_fc2 = tf.nn.relu(tf.matmul(h_fc1, w_fc2) + b_fc2)
58 |
59 | w_fc3 = cnn_weight_variable([200, classes])
60 | b_fc3 = cnn_bias_variable([classes])
61 | h_fc3 = tf.nn.relu(tf.matmul(h_fc2, w_fc3) + b_fc3)
62 |
63 | out_y = tf.nn.softmax(h_fc3,name='cnnSoftmax')
64 |
65 | return out_y
66 |
67 |
68 |
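69 |
70 | # Shape walk-through (editorial comment), with trainBatchSize=B, lstmTimeStep=200,
71 | # lstmHiddenUnits=64:
72 | #   input            (B, 200, 64, 1)
73 | #   conv1 5x5 VALID  -> (B, 196, 60, 6); pool 2x2 -> (B, 98, 30, 6)
74 | #   conv2 5x3, stride 3, VALID -> (B, 32, 10, 10) -> flattened to (B, 3200)
75 | #   fc: 3200 -> 1000 -> 200 -> classes, then softmax.
76 | # Note: this softmax output is later fed to
77 | # tf.nn.softmax_cross_entropy_with_logits_v2 in GpusTrain.py, which expects raw
78 | # logits; returning h_fc3 (pre-softmax) would match that loss.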
--------------------------------------------------------------------------------
/DlTrain/CNN.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/CNN.pyc
--------------------------------------------------------------------------------
/DlTrain/Data.py:
--------------------------------------------------------------------------------
1 |
2 | #coding:utf-8
3 |
4 | import random
5 |
6 | from Util.loadHd import load
7 | import numpy as np
8 |
9 |
10 | def convertY(y_list):
11 | yListLength = len(y_list)
12 | yCoverted = np.zeros(shape=yListLength)
13 | for listItemIndex in range(0, yListLength):
14 | yCoverted[listItemIndex] = y_list[listItemIndex]
15 |
16 | return yCoverted
17 |
18 |
19 | class Data:
20 | x = []
21 | y = []
22 | dataLength = 0
23 |     isTest = False  # whether to run in test mode (random mock data instead of a real file)
24 | indexList = []
25 |
26 | def __init__(self, path, is_test):
27 | self.dataPath = path
28 | Data.isTest = is_test
29 | Data.loadData(self)
30 |
31 | def loadData(self):
32 | if Data.isTest:
33 | return
34 | data = load(self.dataPath)
35 | Data.x = data[0]
36 | Data.y = data[1]
37 | Data.dataLength = len(Data.y)
38 |
39 | def getNextManualShuffleBatch(self, batch_size):
40 | if self.isTest:
41 | X = np.random.random(size=(batch_size, 72000))
42 | Y = np.random.randint(0, 5, size=batch_size)
43 | return X, Y
44 | else:
45 | if len(self.indexList) < batch_size:
46 | self.indexList = list(range(0, Data.dataLength))
47 |
48 | randomIndexes = random.sample(range(0, len(self.indexList)), batch_size)
49 | X = []
50 | Y = []
51 |
52 |             for randomIndex in randomIndexes:
53 |                 X.append(self.x[self.indexList[randomIndex]])  # map the sampled position to a remaining data index
54 |                 Y.append(self.y[self.indexList[randomIndex]])
55 |
56 | sortedIndexes = sorted(randomIndexes, reverse=True)
57 |             # if an exception is still thrown here, catch it manually and skip
58 | indexlen=int(len(sortedIndexes)/2)
59 | i=0
60 | for sortedIndex in sortedIndexes:
61 | self.indexList.pop(sortedIndex)
62 | i+=1
63 | if i==indexlen:
64 | break
65 |
66 | X = np.reshape(X, newshape=(-1, 72000))
67 | Y = np.reshape(Y, newshape=(-1, 1))
68 | Y = convertY(Y)
69 | return X, Y
70 |
71 | def getNextAutoShuffleBatch(self, batch_size):
72 | if self.isTest:
73 | X = np.random.random(size=(batch_size, 72000))
74 | Y = np.random.randint(0, 5, size=batch_size)
75 | return X, Y
76 | else:
77 |
78 | if len(self.indexList) < batch_size:
79 | self.indexList = list(range(0, Data.dataLength))
80 |
81 | randomIndexes = random.sample(range(0, len(self.indexList)), batch_size)
82 | X = []
83 | Y = []
84 |
85 | for randomIndex in randomIndexes:
86 | X.append(self.x[randomIndex])
87 | Y.append(self.y[randomIndex])
88 |
89 | X = np.reshape(X, newshape=(-1, 72000))
90 | Y = np.reshape(Y, newshape=(-1, 1))
91 | Y = convertY(Y)
92 | return X, Y
93 |
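94 |
95 | # Usage sketch (editorial addition; is_test=True avoids needing a real data file):
96 | #
97 | #     d = Data('unused/path.h5', is_test=True)
98 | #     X, Y = d.getNextAutoShuffleBatch(32)   # X: (32, 72000), Y: (32,)
99 | #     assert X.shape == (32, 72000)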
--------------------------------------------------------------------------------
/DlTrain/Data.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/Data.pyc
--------------------------------------------------------------------------------
/DlTrain/GpusTrain.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 |
4 | import tensorflow as tf
5 |
6 | import numpy as np
7 |
8 |
9 | from DlTrain.CNN import CNN
10 | from DlTrain.LSTM import LSTM
11 | from DlTrain.Parameters import lstmInputDimension, tfRootPath, logRoot, pbRoot, matrixRoot, trainBatchSize, \
12 | lstmTimeStep, trainingIterations, valBatchSize, valPerTrainIterations
13 | from Util.Matrix import drawMatrix
14 | from Util.ReadAndDecodeUtil import read_and_decode
15 |
16 | os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
17 |
18 |
19 | def average_losses(loss):
20 | tf.add_to_collection('losses', loss)
21 |
22 | # Assemble all of the losses for the current tower only.
23 | losses = tf.get_collection('losses')
24 |
25 | # Calculate the total loss for the current tower.
26 | regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
27 | total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
28 |
29 | # Compute the moving average of all individual losses and the total loss.
30 | loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
31 | loss_averages_op = loss_averages.apply(losses + [total_loss])
32 |
33 | with tf.control_dependencies([loss_averages_op]):
34 | total_loss = tf.identity(total_loss)
35 | return total_loss
36 |
37 |
38 | def average_gradients(tower_grads):
39 | average_grads = []
40 |
41 |     for i in range(2, 12):  # ad-hoc trim: repeatedly delete index 2 of the second tower's gradient list (drops ten entries)
42 |         del tower_grads[1][2]
43 |
44 | for grad_and_vars in zip(*tower_grads):
45 | # Note that each grad_and_vars looks like the following:
46 | # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
47 | grads = [g for g, _ in grad_and_vars]
48 | # Average over the 'tower' dimension.
49 |
50 | grad = tf.stack(grads, 0)
51 | grad = tf.reduce_mean(grad, 0)
52 |
53 | # Keep in mind that the Variables are redundant because they are shared
54 | # across towers. So .. we will just return the first tower's pointer to
55 | # the Variable.
56 | v = grad_and_vars[0][1]
57 | grad_and_var = (grad, v)
58 | average_grads.append(grad_and_var)
59 | return average_grads
60 |
61 |
62 | def feed_all_gpu(inp_dict, models, payload_per_gpu, batch_x, batch_y):
63 | for i in range(len(models)):
64 | x, y, _, _, _ = models[i]
65 | start_pos = i * payload_per_gpu
66 | stop_pos = (i + 1) * payload_per_gpu
67 | inp_dict[x] = batch_x[start_pos:stop_pos]
68 | inp_dict[y] =np.reshape( batch_y[start_pos:stop_pos],newshape=(-1,1))
69 | return inp_dict
70 |
71 |
72 | def sort(list):
73 | max = -1
74 |
75 | for l in list:
76 | if max < int(l):
77 | max = int(l)
78 |
79 | return max
80 |
81 |
82 | def init_folder(rootType, which):
83 | folders_dict = {}
84 |
85 | # Log file init
86 | if not os.path.exists(logRoot + '/' + rootType):
87 | os.mkdir(logRoot + '/' + rootType)
88 |
89 | if not os.path.exists(logRoot + '/' + rootType + '/' + which):
90 | os.mkdir(logRoot + '/' + rootType + '/' + which)
91 |
92 | logfiles = os.listdir(logRoot + '/' + rootType + '/' + which)
93 | logsort = sort(logfiles)
94 |
95 | if logsort == -1:
96 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/0')
97 |
98 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/0/train')
99 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/0/val')
100 |
101 | trainLogPath = logRoot + '/' + rootType + '/' + which + '/0/train'
102 | valLogPath = logRoot + '/' + rootType + '/' + which + '/0/val'
103 |
104 | else:
105 |
106 | intLastIndex = logsort
107 | intLastIndex += 1
108 |
109 | lastIndex = str(intLastIndex)
110 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/' + lastIndex)
111 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/train')
112 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/val')
113 | trainLogPath = logRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/train'
114 | valLogPath = logRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/val'
115 |
116 | folders_dict['trainLogPath'] = trainLogPath
117 | folders_dict['valLogPath'] = valLogPath
118 |
119 | # Pb file init
120 | if not os.path.exists(pbRoot + '/' + rootType):
121 | os.mkdir(pbRoot + '/' + rootType)
122 |
123 | if not os.path.exists(pbRoot + '/' + rootType + '/' + which):
124 | os.mkdir(pbRoot + '/' + rootType + '/' + which)
125 |
126 | pbfiles = os.listdir(pbRoot + '/' + rootType + '/' + which)
127 | pbsort = sort(pbfiles)
128 | if pbsort == -1:
129 | os.mkdir(pbRoot + '/' + rootType + '/' + which + '/0')
130 | pbPath = pbRoot + '/' + rootType + '/' + which + '/0/model.pb'
131 |
132 |
133 | else:
134 | intLastIndex = pbsort
135 | intLastIndex += 1
136 | lastIndex = str(intLastIndex)
137 | os.mkdir(pbRoot + '/' + rootType + '/' + which + '/' + lastIndex)
138 | pbPath = pbRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/model.pb'
139 |
140 | folders_dict['pbPath'] = pbPath
141 |
142 | # matrix file init
143 | if not os.path.exists(matrixRoot + '/' + rootType):
144 | os.mkdir(matrixRoot + '/' + rootType)
145 |
146 | if not os.path.exists(matrixRoot + '/' + rootType + '/' + which):
147 | os.mkdir(matrixRoot + '/' + rootType + '/' + which)
148 |
149 | matrixfiles = os.listdir(matrixRoot + '/' + rootType + '/' + which)
150 | masort = sort(matrixfiles)
151 | trainPredictionTxtPath = '/trainPredictionLabel.txt'
152 | trainReallyTxtPath = '/trainReallyLabel.txt'
153 | matrixPicturePath = '/confusionMatrix.png'
154 |
155 | if masort == -1:
156 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/0')
157 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/0/Picture')
158 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/0/Txt')
159 |
160 | trainPredictionTxtPath = matrixRoot + '/' + rootType + '/' + which + '/0/Txt' + trainPredictionTxtPath
161 | trainReallyTxtPath = matrixRoot + '/' + rootType + '/' + which + '/0/Txt' + trainReallyTxtPath
162 | matrixPicturePath = matrixRoot + '/' + rootType + '/' + which + '/0/Picture' + matrixPicturePath
163 |
164 | else:
165 | intLastIndex = masort
166 | intLastIndex += 1
167 | lastIndex = str(intLastIndex)
168 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex)
169 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/Picture')
170 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/Txt')
171 |
172 | trainPredictionTxtPath = matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/Txt' + trainPredictionTxtPath
173 | trainReallyTxtPath = matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/Txt' + trainReallyTxtPath
174 | matrixPicturePath = matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/Picture' + matrixPicturePath
175 |
176 | folders_dict['trainPredictionTxtPath'] = trainPredictionTxtPath
177 | folders_dict['trainReallyTxtPath'] = trainReallyTxtPath
178 | folders_dict['matrixPicturePath'] = matrixPicturePath
179 |
180 | return folders_dict
181 |
182 |
183 | def openLabel(label, batch_size):
184 | l = np.zeros(shape=(batch_size, 5))
185 | for li in range(batch_size):
186 | l[li][label[li]] = 1
187 |
188 | return l
189 |
190 |
191 | def multi_gpu(baseIr, rootType, which, InputDimension=lstmInputDimension, num_gpu=2):
192 |     print('start...')
193 | folders_dict = init_folder(rootType=rootType, which=which)
194 |
195 | train_tf_path = tfRootPath + rootType + '/' + which + '/train.tfrecords'
196 | val_tf_path = tfRootPath + rootType + '/' + which + '/val.tfrecords'
197 |
198 | batch_size = trainBatchSize * num_gpu
199 |
200 | tf.reset_default_graph()
201 |
202 | with tf.Session() as sess:
203 | with tf.device('/cpu:0'):
204 |
205 | x_train, y_train = read_and_decode(train_tf_path)
206 | num_threads = 5
207 | min_after_dequeue_train = 10000
208 |
209 | train_capacity_train = min_after_dequeue_train + num_threads * trainBatchSize
210 | train_x_batch, train_y_batch = tf.train.shuffle_batch([x_train, y_train],
211 | batch_size=batch_size, capacity=train_capacity_train,
212 | min_after_dequeue=min_after_dequeue_train)
213 |
214 | x_val, y_val = read_and_decode(val_tf_path)
215 | num_threads = 5
216 | min_after_dequeue_val = 2500
217 | train_capacity_val = min_after_dequeue_val + num_threads * trainBatchSize
218 | val_x_batch, val_y_batch = tf.train.shuffle_batch([x_val, y_val],
219 | batch_size=batch_size, capacity=train_capacity_val,
220 | min_after_dequeue=min_after_dequeue_val)
221 |
222 | coord = tf.train.Coordinator()
223 | threads = tf.train.start_queue_runners(sess=sess, coord=coord)
224 |
225 | #
226 | # for thread in threads:
227 | # try:
228 | #
229 | # thread.start()
230 | # print 'start thread'
231 | # except RuntimeError:
232 | # print 'start thread exception'
233 | # break
234 |
235 | trainPredictionFile = open(folders_dict['trainPredictionTxtPath'], 'wb')
236 | trainReallyTxtFile = open(folders_dict['trainReallyTxtPath'], 'wb')
237 |
238 |
239 |
240 | learning_rate = tf.placeholder(tf.float32, shape=[])
241 | opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
242 |
243 | print('build model...')
244 | print('build model on gpu tower...')
245 | models = []
246 | for gpu_id in range(num_gpu):
247 | with tf.device('/gpu:%d' % gpu_id):
248 | print('tower:%d...' % gpu_id)
249 |
250 | with tf.name_scope('tower_%d' % gpu_id):
251 | with tf.variable_scope('cpu_variables', reuse=gpu_id > 0):
252 | x = tf.placeholder(tf.float32, shape=[None, lstmTimeStep * InputDimension],
253 | name='inputLstm')
254 | y = tf.placeholder(tf.int32, shape=[None, 1], name='Label')
255 |
256 | cnnInput = LSTM(x)
257 | pred = CNN(cnnInput)
258 | loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y)
259 | loss = tf.reduce_mean(loss)
260 |
261 | grads = []
262 | grads = opt.compute_gradients(loss)
263 |
264 | models.append((x, y, pred, loss, grads))
265 |
266 | print('build model on gpu tower done.')
267 |
268 | print('reduce model on cpu...')
269 | tower_x, tower_y, tower_preds, tower_losses, tower_grads = zip(*models)
270 |
271 | with tf.name_scope('Loss'):
272 | aver_loss_op = tf.reduce_mean(tower_losses)
273 | tf.summary.scalar('Loss', aver_loss_op)
274 |
275 | apply_gradient_op = opt.apply_gradients(average_gradients(tower_grads))
276 |
277 | all_y = tf.stack(tower_y, 0, 'all_y')
278 | all_pred = tf.stack(tower_preds, 0, 'all_pred')
279 |
280 |
281 |
282 | pr_y = tf.cast(tf.argmax(all_pred, 2), tf.int32, name='pr_y')
283 | pr_y=tf.reshape(pr_y,(2,-1,1))
284 |
285 |
286 | correctPrediction = tf.equal(pr_y, all_y)
287 |
288 |
289 |
290 | with tf.name_scope('Accuracy'):
291 | accuracy = tf.reduce_mean(tf.cast(correctPrediction, 'float'))
292 | tf.summary.scalar('Accuracy', accuracy)
293 |
294 | print('reduce model on cpu done.')
295 |
296 | print('run train op...')
297 |
298 | merged = tf.summary.merge_all()
299 |
300 | trainLogWriter = tf.summary.FileWriter(folders_dict['trainLogPath'], sess.graph)
301 | valLogWriter = tf.summary.FileWriter(folders_dict['valLogPath'], sess.graph)
302 |
303 | sess.run(tf.global_variables_initializer())
304 |
305 | start_time = time.time()
306 | payload_per_gpu = trainBatchSize
307 | total_batch = int(trainingIterations / num_gpu)
308 | avg_loss = 0.0
309 |
310 | inp_dict = {}
311 | inp_dict[learning_rate] = baseIr
312 |
313 | for batch_idx in range(total_batch):
314 | batch_x, batch_y = sess.run([train_x_batch, train_y_batch])
315 | batch_x = np.reshape(batch_x, newshape=(-1, 72000))
316 | # batch_y=np.reshape(batch_y,newshape=(-1,1))
317 | # batch_y = openLabel(batch_y, batch_size)
318 | inp_dict = feed_all_gpu(inp_dict, models, payload_per_gpu, batch_x, batch_y)
319 | _, _loss, train_merge = sess.run([apply_gradient_op, aver_loss_op, merged], inp_dict)
320 |
321 |
322 |
323 | trainLogWriter.add_summary(train_merge, batch_idx)
324 |
325 | print('step: %d ,Train loss:%.4f' % (batch_idx, _loss))
326 | avg_loss += _loss
327 |
328 | if batch_idx % valPerTrainIterations == 0:
329 | batch_x, batch_y = sess.run([val_x_batch, val_y_batch])
330 | batch_x = np.reshape(batch_x, newshape=(-1, 72000))
331 | #batch_y = openLabel(batch_y, batch_size)
332 | inp_dict_val = feed_all_gpu({}, models, payload_per_gpu, batch_x, batch_y)
333 |
334 | batch_pred, batch_y ,acc= sess.run([pr_y, all_y,accuracy], inp_dict_val)
335 | batch_pred=np.reshape(batch_pred,newshape=(-1))
336 | batch_y=np.reshape(batch_y,newshape=(-1))
337 |
338 |
339 |
340 | print('Val Accuracy: %0.4f%%' % (100.0 * acc))
341 | np.savetxt(trainReallyTxtFile, batch_y)
342 | np.savetxt(trainPredictionFile, batch_pred)
343 |
344 | #valLogWriter.add_summary(_mergedval, batch_idx)
345 |
346 | trainLogWriter.close()
347 | valLogWriter.close()
348 |
349 | trainReallyTxtFile.close()
350 | trainPredictionFile.close()
351 |
352 | constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["pr_y"])
353 | with tf.gfile.FastGFile(folders_dict['pbPath'], mode='wb') as f:
354 | f.write(constant_graph.SerializeToString())
355 |
356 | coord.request_stop()
357 | coord.join(threads, stop_grace_period_secs=5)
358 |
359 | sess.close()
360 |
361 | drawMatrix(folders_dict['trainReallyTxtPath'], folders_dict['trainPredictionTxtPath'],
362 | folders_dict['matrixPicturePath'])
363 |
364 | stop_time = time.time()
365 | elapsed_time = stop_time - start_time
366 | print('Cost time: ' + str(elapsed_time) + ' sec.')
367 | print('training done.\n')
368 |
369 |
370 | rootType = ['AmplitudeWithout_PhaseWith', 'AmplitudeWithOut_PhaseWithout', 'AmplitudeWith_PhaseWith',
371 | 'AmplitudeWith_PhaseWithout', 'OnlyAmplitude', 'OnlyPhase']
372 | for i in range(6):
373 | print(str(i) + '....')
374 | if i < 5:
375 | multi_gpu(rootType=rootType[i], which='fixed', baseIr=0.2)
376 | multi_gpu(rootType=rootType[i], which='open', baseIr=0.15)
377 | multi_gpu(rootType=rootType[i], which='semi', baseIr=0.1)
378 | else:
379 | multi_gpu(rootType=rootType[i], which='fixed', baseIr=0.2, InputDimension=180)
380 | multi_gpu(rootType=rootType[i], which='open', baseIr=0.15, InputDimension=180)
381 | multi_gpu(rootType=rootType[i], which='semi', baseIr=0.1, InputDimension=180)
382 |
--------------------------------------------------------------------------------
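The `average_gradients` call on internal line 275 above refers to a helper defined earlier in GpusTrain.py, outside this excerpt. It typically follows the standard TF1 multi-tower pattern; a minimal illustrative sketch, not necessarily the exact definition used here:

```python
import tensorflow as tf

def average_gradients(tower_grads):
    # tower_grads: one list of (gradient, variable) pairs per GPU tower
    average_grads = []
    for grad_and_vars in zip(*tower_grads):  # walk the same variable across towers
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, 0), 0)  # mean gradient over towers
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads
```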
/DlTrain/GpusTrain.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/GpusTrain.pyc
--------------------------------------------------------------------------------
/DlTrain/LSTM.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorflow.contrib import rnn
3 |
4 | from DlTrain.Parameters import lstmTimeStep, lstmHiddenUnits, lstmLayerNum, trainBatchSize
5 |
6 | initializer = tf.contrib.layers.xavier_initializer()
7 |
8 |
9 | def LSTM(x,lstmInputDimension):
10 | x = tf.reshape(x, [-1, lstmTimeStep, lstmInputDimension])
11 |
12 | lstm_cell = rnn.BasicLSTMCell(num_units=lstmHiddenUnits, forget_bias=1.0, state_is_tuple=True)
13 |
14 | lstm_cell = rnn.DropoutWrapper(cell=lstm_cell, input_keep_prob=0.1)  # note: input_keep_prob=0.1 keeps only 10% of inputs; a conventional dropout setting would be 0.9
15 |
16 | mlstm_cell = rnn.MultiRNNCell([lstm_cell] * lstmLayerNum, state_is_tuple=True)
17 |
18 | init_state = mlstm_cell.zero_state(trainBatchSize, dtype=tf.float32)  # pins the runtime batch size to trainBatchSize
19 |
20 | outputs, final_state = tf.nn.dynamic_rnn(mlstm_cell, inputs=x, initial_state=init_state, time_major=False)
21 |
22 | return outputs
23 |
--------------------------------------------------------------------------------
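A quick shape check for `LSTM` above, using the defaults from DlTrain/Parameters.py (lstmTimeStep=200, lstmHiddenUnits=64); this usage sketch is illustrative:

```python
import tensorflow as tf
from DlTrain.LSTM import LSTM

# flattened input: each row is 200 time steps x 360 features
x = tf.placeholder(tf.float32, shape=[None, 200 * 360])
outputs = LSTM(x, 360)
print(outputs.get_shape())  # (batch, 200, 64): one 64-dim hidden state per time step
```

Because the initial state is built with `trainBatchSize`, the batch actually fed at run time must match that size.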
/DlTrain/LSTM.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/LSTM.pyc
--------------------------------------------------------------------------------
/DlTrain/Parameters.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import os
3 |
4 |
5 | fragmentLength = 1000
6 |
7 | """
8 | LSTM
9 | """
10 |
11 | lstmTimeStep = 200
12 | lstmHiddenUnits = 64
13 | lstmLayerNum = 1
14 | lstmInputDimension = 360
15 |
16 | '''
17 | global parameters
18 | '''
19 | classes = 5
20 |
21 | trainBatchSize = 64
22 |
23 | valBatchSize = trainBatchSize
24 |
25 | trainingIterations = 10000  # number of training iterations
26 | valIterations = 100
27 |
28 | baseIr = 0.01
29 |
30 | valPerTrainIterations = 4
31 |
32 | '''
33 | IO
34 | '''
35 |
36 |
37 |
38 | # Log path
39 |
40 | logRoot = '/media/xue/Data Storage/CSI/Train/Log'
41 | #logRoot = '/home/xue/Log'
42 |
43 | # pb path
44 | pbRoot = '/media/xue/Data Storage/CSI/Train/Model'
45 |
46 | # matrix path
47 |
48 | matrixRoot = '/media/xue/Data Storage/CSI/Train/ConfusionMatrix'
49 |
50 | tfRootPath = '/media/xue/Data Storage/CSI/TfRecordsData/'
51 |
52 |
53 | def sort(names):
54 | """Return the largest integer-valued name in names, or -1 if the list is empty."""
55 | largest = -1
56 | for n in names:
57 | if largest < int(n):
58 | largest = int(n)
59 |
60 | return largest
61 |
62 |
63 |
64 | def init_folder(rootType, which):
65 | folders_dict = {}
66 |
67 | # Log file init
68 | if not os.path.exists(logRoot + '/' + rootType):
69 | os.mkdir(logRoot + '/' + rootType)
70 |
71 | if not os.path.exists(logRoot + '/' + rootType + '/' + which):
72 | os.mkdir(logRoot + '/' + rootType + '/' + which)
73 |
74 | logfiles = os.listdir(logRoot + '/' + rootType + '/' + which)
75 | logsort = sort(logfiles)
76 |
77 | if logsort == -1:
78 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/0')
79 |
80 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/0/train')
81 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/0/val')
82 |
83 | trainLogPath = logRoot + '/' + rootType + '/' + which + '/0/train/'
84 | valLogPath = logRoot + '/' + rootType + '/' + which + '/0/val/'
85 |
86 | else:
87 |
88 | intLastIndex = logsort
89 | intLastIndex += 1
90 |
91 | lastIndex = str(intLastIndex)
92 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/' + lastIndex)
93 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/train')
94 | os.mkdir(logRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/val')
95 | trainLogPath = logRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/train/'
96 | valLogPath = logRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/val/'
97 |
98 | folders_dict['trainLogPath'] = trainLogPath
99 | folders_dict['valLogPath'] = valLogPath
100 |
101 | # Pb file init
102 | if not os.path.exists(pbRoot + '/' + rootType):
103 | os.mkdir(pbRoot + '/' + rootType)
104 |
105 | if not os.path.exists(pbRoot + '/' + rootType + '/' + which):
106 | os.mkdir(pbRoot + '/' + rootType + '/' + which)
107 |
108 | pbfiles = os.listdir(pbRoot + '/' + rootType + '/' + which)
109 | pbsort = sort(pbfiles)
110 | if pbsort == -1:
111 | os.mkdir(pbRoot + '/' + rootType + '/' + which + '/0')
112 | pbPath = pbRoot + '/' + rootType + '/' + which + '/0/model.pb'
113 |
114 |
115 | else:
116 | intLastIndex = pbsort
117 | intLastIndex += 1
118 | lastIndex = str(intLastIndex)
119 | os.mkdir(pbRoot + '/' + rootType + '/' + which + '/' + lastIndex)
120 | pbPath = pbRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/model.pb'
121 |
122 | folders_dict['pbPath'] = pbPath
123 |
124 | # matrix file init
125 | if not os.path.exists(matrixRoot + '/' + rootType):
126 | os.mkdir(matrixRoot + '/' + rootType)
127 |
128 | if not os.path.exists(matrixRoot + '/' + rootType + '/' + which):
129 | os.mkdir(matrixRoot + '/' + rootType + '/' + which)
130 |
131 | matrixfiles = os.listdir(matrixRoot + '/' + rootType + '/' + which)
132 | masort = sort(matrixfiles)
133 | trainPredictionTxtPath = '/trainPredictionLabel.txt'
134 | trainReallyTxtPath = '/trainReallyLabel.txt'
135 | matrixPicturePath = '/confusionMatrix.png'
136 |
137 | if masort == -1:
138 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/0')
139 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/0/Picture')
140 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/0/Txt')
141 |
142 | trainPredictionTxtPath = matrixRoot + '/' + rootType + '/' + which + '/0/Txt' + trainPredictionTxtPath
143 | trainReallyTxtPath = matrixRoot + '/' + rootType + '/' + which + '/0/Txt' + trainReallyTxtPath
144 | matrixPicturePath = matrixRoot + '/' + rootType + '/' + which + '/0/Picture' + matrixPicturePath
145 |
146 | else:
147 | intLastIndex = masort
148 | intLastIndex += 1
149 | lastIndex = str(intLastIndex)
150 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex)
151 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/Picture')
152 | os.mkdir(matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/Txt')
153 |
154 | trainPredictionTxtPath = matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/Txt' + trainPredictionTxtPath
155 | trainReallyTxtPath = matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/Txt' + trainReallyTxtPath
156 | matrixPicturePath = matrixRoot + '/' + rootType + '/' + which + '/' + lastIndex + '/Picture' + matrixPicturePath
157 |
158 | folders_dict['trainPredictionTxtPath'] = trainPredictionTxtPath
159 | folders_dict['trainReallyTxtPath'] = trainReallyTxtPath
160 | folders_dict['matrixPicturePath'] = matrixPicturePath
161 |
162 | return folders_dict
163 |
--------------------------------------------------------------------------------
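The three blocks in `init_folder` (Log, Model, ConfusionMatrix) repeat the same pattern: ensure `root/rootType/which` exists, then allocate the next numeric run directory. A hypothetical helper that captures that pattern (names here are illustrative, not part of the project):

```python
import os

def next_run_dir(root, rootType, which):
    """Create root/rootType/which if needed, then make and return a fresh numbered run dir."""
    base = os.path.join(root, rootType, which)
    if not os.path.exists(base):
        os.makedirs(base)
    runs = [int(d) for d in os.listdir(base) if d.isdigit()]
    run = os.path.join(base, str(max(runs) + 1 if runs else 0))
    os.mkdir(run)
    return run
```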
/DlTrain/Parameters.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/Parameters.pyc
--------------------------------------------------------------------------------
/DlTrain/Train.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | import os
3 | import threading
4 |
5 | import tensorflow as tf
6 | import numpy as np
7 |
8 | from DlTrain.CNN import CNN
9 | from DlTrain.Data import Data
10 | from DlTrain.LSTM import LSTM
11 |
12 | from DlTrain.Parameters import lstmTimeStep, lstmInputDimension, valIterations, \
13 | trainBatchSize, trainingIterations, valBatchSize, \
14 | valPerTrainIterations, logRoot, pbRoot, matrixRoot, tfRootPath, init_folder
15 |
16 | from Util.Matrix import drawMatrix
17 | from Util.ReadAndDecodeUtil import read_and_decode
18 |
19 | os.environ['CUDA_VISIBLE_DEVICES']='0'
20 |
21 | def trainauto(baseIr, rootType, which, InputDimension=lstmInputDimension, gpu_code=0):
22 | tf.reset_default_graph()
23 | with tf.Session() as sess:
24 |
25 | train_tf_path = tfRootPath + rootType + '/' + which + '/train.tfrecords'
26 | val_tf_path = tfRootPath + rootType + '/' + which + '/val.tfrecords'
27 | print('start...')
28 | folders_dict = init_folder(rootType=rootType, which=which)
29 |
30 | lstmInput = tf.placeholder(tf.float32, shape=[None, lstmTimeStep * InputDimension], name='inputLstm')
31 | Label = tf.placeholder(tf.int32, shape=[None, ], name='Label')
32 |
33 | cnnInput = LSTM(lstmInput,InputDimension)
34 | cnnOutput = CNN(cnnInput)
35 |
36 | with tf.name_scope('baseIr'):
37 | tf.summary.scalar('baseIr', baseIr)  # written to the EVENTS file read by TensorBoard
38 |
39 | with tf.name_scope('Loss'):
40 | loss = tf.losses.sparse_softmax_cross_entropy(labels=Label, logits=cnnOutput)
41 | tf.summary.scalar('Loss', loss)
42 |
43 | trainOp = tf.train.GradientDescentOptimizer(learning_rate=baseIr).minimize(loss)
44 |
45 | predictionLabels = tf.cast(tf.argmax(cnnOutput, 1), tf.int32, name='PredictionLabels')
46 |
47 | correctPrediction = tf.equal(predictionLabels, Label)
48 |
49 | with tf.name_scope('Accuracy'):
50 | Accuracy = tf.reduce_mean(tf.cast(correctPrediction, tf.float32))
51 | tf.summary.scalar('Accuracy', Accuracy)
52 |
53 | x_train, y_train = read_and_decode(train_tf_path,InputDimension)
54 | num_threads = 3
55 | min_after_dequeue_train = 10000
56 |
57 | train_capacity_train = min_after_dequeue_train + num_threads * trainBatchSize
58 |
59 | # shuffle_batch randomly shuffles the queued inputs
60 | train_x_batch, train_y_batch = tf.train.shuffle_batch([x_train, y_train],
61 | batch_size=trainBatchSize,
62 | capacity=train_capacity_train,
63 | min_after_dequeue=min_after_dequeue_train)
64 |
65 | x_val, y_val = read_and_decode(val_tf_path,InputDimension)
66 | num_threads = 3
67 | min_after_dequeue_val = 2500
68 |
69 | train_capacity_val = min_after_dequeue_val + num_threads * trainBatchSize
70 |
71 | # shuffle_batch randomly shuffles the queued inputs
72 | val_x_batch, val_y_batch = tf.train.shuffle_batch([x_val, y_val],
73 | batch_size=valBatchSize, capacity=train_capacity_val,
74 | min_after_dequeue=min_after_dequeue_val)
75 |
76 | coord = tf.train.Coordinator()
77 |
78 | isTestMode = False  # whether this run is the evaluation phase
79 | isTestCode = False  # whether to run in code-test mode (generates random data)
80 |
81 | isWriteFlag = True  # whether to write the labels to file
82 | saver = tf.train.Saver(max_to_keep=1)
83 | merged = tf.summary.merge_all()
84 |
85 | trainLogPath = folders_dict['trainLogPath']
86 | valLogPath = folders_dict['valLogPath']
87 | trainPredictionTxtPath = folders_dict['trainPredictionTxtPath']
88 | trainReallyTxtPath = folders_dict['trainReallyTxtPath']
89 | pbPath = folders_dict['pbPath']
90 | matrixPicturePath = folders_dict['matrixPicturePath']
91 |
92 | sess.run(tf.global_variables_initializer())
93 | threads = tf.train.start_queue_runners(sess=sess, coord=coord)
94 |
95 | if not isTestMode:
96 |
97 | trainLogWriter = tf.summary.FileWriter(trainLogPath, sess.graph)
98 | valLogWriter = tf.summary.FileWriter(valLogPath, sess.graph)
99 |
100 | if isWriteFlag:
101 | valPredictionTxtFile = open(trainPredictionTxtPath, 'wb')
102 | valReallyTxtFile = open(trainReallyTxtPath, 'wb')
103 |
104 | for step in range(trainingIterations + 1):
105 |
106 | # X, Y = trainData.getNextManualShuffleBatch(trainBatchSize)
107 | X, Y = sess.run([train_x_batch, train_y_batch])
108 | X = np.reshape(X, newshape=(-1, 200*InputDimension))
109 |
110 | sess.run(trainOp, feed_dict={lstmInput: X, Label: Y})
111 | if step % valPerTrainIterations == 0:
112 | valX, valY = sess.run([val_x_batch, val_y_batch])
113 | valX = np.reshape(valX, newshape=(-1, 200*InputDimension))
114 |
115 | out_labels = sess.run(predictionLabels, feed_dict={lstmInput: valX, Label: valY})
116 |
117 | if isWriteFlag:
118 | np.savetxt(valReallyTxtFile, valY)
119 | np.savetxt(valPredictionTxtFile, out_labels)
120 |
121 | valLoss, valAccuracy = sess.run([loss, Accuracy], feed_dict={lstmInput: valX, Label: valY})
122 | print('step:%d, valLoss:%f, valAccuracy:%f' % (step, valLoss, valAccuracy))
123 | valSummary, _ = sess.run([merged, trainOp], feed_dict={lstmInput: X, Label: Y})
124 | valLogWriter.add_summary(valSummary, step)
125 |
126 |
127 |
128 | trainSummary, _ = sess.run([merged, trainOp], feed_dict={lstmInput: X, Label: Y})
129 |
130 | trainLogWriter.add_summary(trainSummary, step)
131 |
132 | constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,
133 | ["PredictionLabels"])
134 |
135 | if not isTestCode:
136 | trainLogWriter.close()
137 | valLogWriter.close()
138 |
139 | if isWriteFlag:
140 | valPredictionTxtFile.close()
141 | valReallyTxtFile.close()
142 |
143 | with tf.gfile.FastGFile(pbPath, mode='wb') as f:
144 | f.write(constant_graph.SerializeToString())
145 |
146 | drawMatrix(reallyTxtPath=trainReallyTxtPath, predictionTxtPath=trainPredictionTxtPath,
147 | matrixPath=matrixPicturePath)
148 |
149 | coord.request_stop()
150 | coord.join(threads, stop_grace_period_secs=5)
151 | sess.close()
152 |
153 |
154 | rootType = ['AmplitudeWithout_PhaseWith', 'AmplitudeWithOut_PhaseWithout', 'AmplitudeWith_PhaseWith',
155 | 'AmplitudeWith_PhaseWithout', 'OnlyAmplitude', 'OnlyPhase']
156 |
157 |
158 |
159 | trainauto(rootType=rootType[0], which='fixed', baseIr=0.2, gpu_code=0)
160 | trainauto(rootType=rootType[0], which='open', baseIr=0.15, gpu_code=0)
161 | trainauto(rootType=rootType[0], which='semi', baseIr=0.1)
162 |
163 | trainauto(rootType=rootType[1], which='fixed', baseIr=0.2, gpu_code=0)
164 | trainauto(rootType=rootType[1], which='open', baseIr=0.15, gpu_code=0)
165 | trainauto(rootType=rootType[1], which='semi', baseIr=0.1)
166 |
167 | trainauto(rootType=rootType[2], which='fixed', baseIr=0.2, gpu_code=0)
168 | trainauto(rootType=rootType[2], which='open', baseIr=0.15, gpu_code=0)
169 | trainauto(rootType=rootType[2], which='semi', baseIr=0.1)
170 |
171 |
172 |
173 |
174 | #
175 | # for i in range(6):
176 | # print str(i) + '....'
177 | # if i < 5:
178 | # trainauto(rootType=rootType[i], which='fixed', baseIr=0.2, gpu_code=0)
179 | # trainauto(rootType=rootType[i], which='open', baseIr=0.15, gpu_code=1)
180 | # trainauto(rootType=rootType[i], which='semi', baseIr=0.1)
181 | # else:
182 | # trainauto(rootType=rootType[i], which='fixed', baseIr=0.2, InputDimension=180)
183 | # trainauto(rootType=rootType[i], which='open', baseIr=0.15, InputDimension=180)
184 | # trainauto(rootType=rootType[i], which='semi', baseIr=0.1, InputDimension=180)
185 |
--------------------------------------------------------------------------------
/DlTrain/Train.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/Train.pyc
--------------------------------------------------------------------------------
/DlTrain/Train2.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | import os
3 | import threading
4 |
5 | import tensorflow as tf
6 | import numpy as np
7 |
8 | from DlTrain.CNN import CNN
9 | from DlTrain.Data import Data
10 | from DlTrain.LSTM import LSTM
11 |
12 | from DlTrain.Parameters import lstmTimeStep, lstmInputDimension, valIterations, \
13 | trainBatchSize, trainingIterations, valBatchSize, \
14 | valPerTrainIterations, logRoot, pbRoot, matrixRoot, tfRootPath, init_folder
15 |
16 | from Util.Matrix import drawMatrix
17 | from Util.ReadAndDecodeUtil import read_and_decode
18 | os.environ['CUDA_VISIBLE_DEVICES']='0,1'
19 |
20 | def trainauto(baseIr, rootType, which, InputDimension=lstmInputDimension, gpu_code=0):
21 | tf.reset_default_graph()
22 | with tf.Session() as sess:
23 |
24 | train_tf_path = tfRootPath + rootType + '/' + which + '/train.tfrecords'
25 | val_tf_path = tfRootPath + rootType + '/' + which + '/val.tfrecords'
26 | print('start...')
27 | folders_dict = init_folder(rootType=rootType, which=which)
28 |
29 | lstmInput = tf.placeholder(tf.float32, shape=[None, lstmTimeStep * InputDimension], name='inputLstm')
30 | Label = tf.placeholder(tf.int32, shape=[None, ], name='Label')
31 |
32 | cnnInput = LSTM(lstmInput,InputDimension)
33 | cnnOutput = CNN(cnnInput)
34 |
35 | with tf.name_scope('baseIr'):
36 | tf.summary.scalar('baseIr', baseIr)  # written to the EVENTS file read by TensorBoard
37 |
38 | with tf.name_scope('Loss'):
39 | loss = tf.losses.sparse_softmax_cross_entropy(labels=Label, logits=cnnOutput)
40 | tf.summary.scalar('Loss', loss)
41 |
42 | trainOp = tf.train.GradientDescentOptimizer(learning_rate=baseIr).minimize(loss)
43 |
44 | predictionLabels = tf.cast(tf.argmax(cnnOutput, 1), tf.int32, name='PredictionLabels')
45 |
46 | correctPrediction = tf.equal(predictionLabels, Label)
47 |
48 | with tf.name_scope('Accuracy'):
49 | Accuracy = tf.reduce_mean(tf.cast(correctPrediction, tf.float32))
50 | tf.summary.scalar('Accuracy', Accuracy)
51 |
52 | x_train, y_train = read_and_decode(train_tf_path,InputDimension)
53 | num_threads = 3
54 | min_after_dequeue_train = 10000
55 |
56 | train_capacity_train = min_after_dequeue_train + num_threads * trainBatchSize
57 |
58 | # shuffle_batch randomly shuffles the queued inputs
59 | train_x_batch, train_y_batch = tf.train.shuffle_batch([x_train, y_train],
60 | batch_size=trainBatchSize,
61 | capacity=train_capacity_train,
62 | min_after_dequeue=min_after_dequeue_train)
63 |
64 | x_val, y_val = read_and_decode(val_tf_path,InputDimension)
65 | num_threads = 3
66 | min_after_dequeue_val = 2500
67 |
68 | train_capacity_val = min_after_dequeue_val + num_threads * trainBatchSize
69 |
70 | # shuffle_batch randomly shuffles the queued inputs
71 | val_x_batch, val_y_batch = tf.train.shuffle_batch([x_val, y_val],
72 | batch_size=valBatchSize, capacity=train_capacity_val,
73 | min_after_dequeue=min_after_dequeue_val)
74 |
75 | coord = tf.train.Coordinator()
76 |
77 | isTestMode = False  # whether this run is the evaluation phase
78 | isTestCode = False  # whether to run in code-test mode (generates random data)
79 |
80 | isWriteFlag = True  # whether to write the labels to file
81 | saver = tf.train.Saver(max_to_keep=1)
82 | merged = tf.summary.merge_all()
83 |
84 | trainLogPath = folders_dict['trainLogPath']
85 | valLogPath = folders_dict['valLogPath']
86 | trainPredictionTxtPath = folders_dict['trainPredictionTxtPath']
87 | trainReallyTxtPath = folders_dict['trainReallyTxtPath']
88 | pbPath = folders_dict['pbPath']
89 | matrixPicturePath = folders_dict['matrixPicturePath']
90 |
91 | sess.run(tf.global_variables_initializer())
92 | threads = tf.train.start_queue_runners(sess=sess, coord=coord)
93 |
94 | if not isTestMode:
95 |
96 | trainLogWriter = tf.summary.FileWriter(trainLogPath, sess.graph)
97 | valLogWriter = tf.summary.FileWriter(valLogPath, sess.graph)
98 |
99 | if isWriteFlag:
100 | valPredictionTxtFile = open(trainPredictionTxtPath, 'wb')
101 | valReallyTxtFile = open(trainReallyTxtPath, 'wb')
102 |
103 | for step in range(trainingIterations + 1):
104 |
105 | # X, Y = trainData.getNextManualShuffleBatch(trainBatchSize)
106 | X, Y = sess.run([train_x_batch, train_y_batch])
107 | X = np.reshape(X, newshape=(-1, 200*InputDimension))
108 |
109 | sess.run(trainOp, feed_dict={lstmInput: X, Label: Y})
110 | if step % valPerTrainIterations == 0:
111 | valX, valY = sess.run([val_x_batch, val_y_batch])
112 | valX = np.reshape(valX, newshape=(-1, 200*InputDimension))
113 |
114 | out_labels = sess.run(predictionLabels, feed_dict={lstmInput: valX, Label: valY})
115 |
116 | if isWriteFlag:
117 | np.savetxt(valReallyTxtFile, valY)
118 | np.savetxt(valPredictionTxtFile, out_labels)
119 |
120 | valLoss, valAccuracy = sess.run([loss, Accuracy], feed_dict={lstmInput: valX, Label: valY})
121 | print('step:%d, valLoss:%f, valAccuracy:%f' % (step, valLoss, valAccuracy))
122 | valSummary, _ = sess.run([merged, trainOp], feed_dict={lstmInput: X, Label: Y})
123 | valLogWriter.add_summary(valSummary, step)
124 |
125 |
126 | trainSummary, _ = sess.run([merged, trainOp], feed_dict={lstmInput: X, Label: Y})
127 |
128 | trainLogWriter.add_summary(trainSummary, step)
129 |
130 | constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,
131 | ["PredictionLabels"])
132 |
133 | if not isTestCode:
134 | trainLogWriter.close()
135 | valLogWriter.close()
136 |
137 | if isWriteFlag:
138 | valPredictionTxtFile.close()
139 | valReallyTxtFile.close()
140 |
141 | with tf.gfile.FastGFile(pbPath, mode='wb') as f:
142 | f.write(constant_graph.SerializeToString())
143 |
144 | drawMatrix(reallyTxtPath=trainReallyTxtPath, predictionTxtPath=trainPredictionTxtPath,
145 | matrixPath=matrixPicturePath)
146 |
147 | coord.request_stop()
148 | coord.join(threads, stop_grace_period_secs=5)
149 | sess.close()
150 |
151 |
152 | rootType = ['AmplitudeWithout_PhaseWith', 'AmplitudeWithOut_PhaseWithout', 'AmplitudeWith_PhaseWith',
153 | 'AmplitudeWith_PhaseWithout', 'OnlyAmplitude', 'OnlyPhase']
154 |
155 |
156 | #
157 | #
158 | # trainauto(rootType=rootType[3], which='fixed', baseIr=0.2, gpu_code=0)
159 | # trainauto(rootType=rootType[3], which='open', baseIr=0.15, gpu_code=0)
160 | # trainauto(rootType=rootType[3], which='semi', baseIr=0.1, gpu_code=1)
161 |
162 | trainauto(rootType=rootType[4], which='fixed', baseIr=0.2, InputDimension=180, gpu_code=1)
163 | trainauto(rootType=rootType[4], which='open', baseIr=0.15, InputDimension=180, gpu_code=1)
164 | trainauto(rootType=rootType[4], which='semi', baseIr=0.1, InputDimension=180, gpu_code=1)
165 |
166 | trainauto(rootType=rootType[5], which='fixed', baseIr=0.2, InputDimension=180, gpu_code=1)
167 | trainauto(rootType=rootType[5], which='open', baseIr=0.15, InputDimension=180, gpu_code=1)
168 | trainauto(rootType=rootType[5], which='semi', baseIr=0.1, InputDimension=180, gpu_code=1)
169 |
170 |
171 |
172 |
173 | #
174 | # for i in range(6):
175 | # print str(i) + '....'
176 | # if i < 5:
177 | # trainauto(rootType=rootType[i], which='fixed', baseIr=0.2, gpu_code=0)
178 | # trainauto(rootType=rootType[i], which='open', baseIr=0.15, gpu_code=1)
179 | # trainauto(rootType=rootType[i], which='semi', baseIr=0.1)
180 | # else:
181 | # trainauto(rootType=rootType[i], which='fixed', baseIr=0.2, InputDimension=180)
182 | # trainauto(rootType=rootType[i], which='open', baseIr=0.15, InputDimension=180)
183 | # trainauto(rootType=rootType[i], which='semi', baseIr=0.1, InputDimension=180)
184 |
--------------------------------------------------------------------------------
/DlTrain/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/__init__.py
--------------------------------------------------------------------------------
/DlTrain/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/__init__.pyc
--------------------------------------------------------------------------------
/DlTrain/__pycache__/CNN.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/__pycache__/CNN.cpython-36.pyc
--------------------------------------------------------------------------------
/DlTrain/__pycache__/Data.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/__pycache__/Data.cpython-36.pyc
--------------------------------------------------------------------------------
/DlTrain/__pycache__/LSTM.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/__pycache__/LSTM.cpython-36.pyc
--------------------------------------------------------------------------------
/DlTrain/__pycache__/Parameters.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/__pycache__/Parameters.cpython-36.pyc
--------------------------------------------------------------------------------
/DlTrain/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/DlTrain/a.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/DlTrain/a.png
--------------------------------------------------------------------------------
/Experiment Data Location.md:
--------------------------------------------------------------------------------
1 | # Experiment Data Location
2 |
3 | ## Log
4 |
5 | root:
6 |
7 | > /media/xue/Data Storage/CSI/Train/Log
8 |
9 | Six subfolders below it, corresponding to the **rootType** parameter, created dynamically on first run:
10 |
11 | ```
12 | AmplitudeWithout_PhaseWith
13 | AmplitudeWithOut_PhaseWithout
14 | AmplitudeWith_PhaseWith
15 | AmplitudeWith_PhaseWithout
16 | OnlyAmplitude
17 | OnlyPhase
18 | ```
19 |
20 | Each **rootType** folder contains three subfolders, corresponding to the which parameter, created dynamically on first run:
21 |
22 | ```
23 | fixed
24 | open
25 | semi
26 | ```
27 |
28 | Each which folder contains numbered subfolders 0, 1, 2, ... (one per training run), each created dynamically:
29 |
30 | ```
31 | 0
32 | 1
33 | 2
34 | ...
35 | ```
36 |
37 | Each run folder in turn contains two subfolders:
38 |
39 | ```
40 | 'train'
41 | 'val'
42 | ```
43 |
44 | ## Pb
45 |
46 | pbRoot:
47 |
48 | > ```
49 | > '/media/xue/Data Storage/CSI/Train/Model'
50 | > ```
51 |
52 | Same logic and folder hierarchy as Log:
53 |
54 | Six subfolders below it, corresponding to the **rootType** parameter, created dynamically on first run:
55 |
56 | ```
57 | AmplitudeWithout_PhaseWith
58 | AmplitudeWithOut_PhaseWithout
59 | AmplitudeWith_PhaseWith
60 | AmplitudeWith_PhaseWithout
61 | OnlyAmplitude
62 | OnlyPhase
63 | ```
64 |
65 | Each **rootType** folder contains three subfolders, corresponding to the which parameter, created dynamically on first run:
66 |
67 | ```
68 | fixed
69 | open
70 | semi
71 | ```
72 |
73 | Each which folder contains numbered subfolders 0, 1, 2, ... (one per training run), each created dynamically:
74 |
75 | ```
76 | 0
77 | 1
78 | 2
79 | ...
80 | ```
81 |
82 | ## Confusion matrix
83 |
84 | matrixRoot:
85 |
86 | > ```
87 | > '/media/xue/Data Storage/CSI/Train/ConfusionMatrix'
88 | > ```
89 |
90 | Same logic and folder hierarchy as Log:
91 |
92 | Six subfolders below it, corresponding to the **rootType** parameter, created dynamically on first run:
93 |
94 | ```
95 | AmplitudeWithout_PhaseWith
96 | AmplitudeWithOut_PhaseWithout
97 | AmplitudeWith_PhaseWith
98 | AmplitudeWith_PhaseWithout
99 | OnlyAmplitude
100 | OnlyPhase
101 | ```
102 |
103 | Each **rootType** folder contains three subfolders, corresponding to the which parameter, created dynamically on first run:
104 |
105 | ```
106 | fixed
107 | open
108 | semi
109 | ```
110 |
111 | Each which folder contains numbered subfolders 0, 1, 2, ... (one per training run), each created dynamically:
112 |
113 | ```
114 | 0
115 | 1
116 | 2
117 | ...
118 | ```
119 |
120 | Each run folder contains three files (generated during training):
121 |
122 | ```
123 | trainPredictionLabel.txt
124 | trainReallyLabel.txt
125 | confusionMatrix.png
126 |
127 | ```
128 |
129 |
--------------------------------------------------------------------------------
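Putting the three sections above together, a hypothetical run with rootType=OnlyPhase, which=open, and run index 1 produces paths like:

```
/media/xue/Data Storage/CSI/Train/Log/OnlyPhase/open/1/train/
/media/xue/Data Storage/CSI/Train/Log/OnlyPhase/open/1/val/
/media/xue/Data Storage/CSI/Train/Model/OnlyPhase/open/1/model.pb
/media/xue/Data Storage/CSI/Train/ConfusionMatrix/OnlyPhase/open/1/Txt/trainPredictionLabel.txt
/media/xue/Data Storage/CSI/Train/ConfusionMatrix/OnlyPhase/open/1/Txt/trainReallyLabel.txt
/media/xue/Data Storage/CSI/Train/ConfusionMatrix/OnlyPhase/open/1/Picture/confusionMatrix.png
```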
/ExperimentalReport.md:
--------------------------------------------------------------------------------
1 | # Experimental Report
2 |
3 | The experiments cover three datasets: fixed, open, and semi.
4 |
5 | ## Conventions
6 |
7 | - In the accuracy/loss plots, the red curve is train and the blue curve is val
8 | - For each dataset, the index of the best-performing run (highest / most stable accuracy) is shown in bold
9 |
10 | ## fixed
11 |
12 | | No. | train batch | train iterations | base Ir | accuracy | loss | matrix |
13 | | :---: | ----------- | ---------------- | ------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
14 | | 0 | 64 | 1000 | 0.05 |  |  |  |
15 | | 1 | 64 | 5000 | 0.05 |  |  |  |
16 | | **2** | 64 | 3000 | 0.1 |  |  |  |
17 | | 3 | 64 | 10000 | 0.1 |  |  |  |
18 | | 8 | 64 | 10000 | 0.2 |  |  |  |
19 | | 9 | 64 | 10000 | 0.05 |  |  |  |
20 |
21 | ## open
22 |
23 | | No. | train batch | train iterations | base Ir | accuracy | loss | matrix |
24 | | ----- | ----------- | ---------------- | ------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
25 | | 0 | 64 | 1000 | 0.05 |  |  |  |
26 | | 1 | 64 | 3000 | 0.1 |  |  |  |
27 | | 2 | 64 | 3000 | 0.01 |  |  |  |
28 | | 3 | 64 | 3000 | 0.2 |  |  |  |
29 | | 4 | 64 | 3000 | 0.25 |  |  |  |
30 | | 5 | 64 | 3000 | 0.15 |  |  |  |
31 | | 6 | 64 | 5000 | 0.15 |  |  |  |
32 | | **7** | 64 | 10000 | 0.15 |  |  |  |
33 |
34 | ## semi
35 |
36 | | No. | train batch | train iterations | base Ir | accuracy | loss | matrix |
37 | | :---: | ----------- | ---------------- | ------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
38 | | 0 | 64 | 1000 | 0.1 |  |  |  |
39 | | 1 | 64 | 10000 | 0.15 |  |  |  |
40 | | **2** | 64 | 10000 | 0.1 |  |  |  |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/Net/CNN_Init.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 |
4 | def cnn_weight_variable(shape):
5 | initializer = tf.contrib.layers.xavier_initializer()
6 | return tf.Variable(initializer(shape))
7 |
8 |
9 | def cnn_bias_variable(shape):
10 | initial = tf.constant(0.1, shape=shape)
11 | return tf.Variable(initial)
12 |
13 |
14 | def conv2d(x, f, s):
15 | return tf.nn.conv2d(input=x, filter=f, strides=s, padding='SAME')
16 |
17 |
18 | def max_pool_2x2(x, k, s):
19 | return tf.nn.max_pool(value=x, ksize=k, strides=s, padding='SAME')
--------------------------------------------------------------------------------
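A usage sketch of the helpers above, mirroring the first CNN block described in Readme.md (6 filters, 5 × 5 kernel, stride 1 × 1, then 2 × 2 max pooling); the tensor names are illustrative:

```python
import tensorflow as tf
from Net.CNN_Init import cnn_weight_variable, cnn_bias_variable, conv2d, max_pool_2x2

# treat the LSTM output as a 200x64 single-channel "image"
x = tf.placeholder(tf.float32, [None, 200, 64, 1])
W1 = cnn_weight_variable([5, 5, 1, 6])   # 6 filters with a 5x5 kernel
b1 = cnn_bias_variable([6])
h1 = tf.nn.relu(conv2d(x, W1, [1, 1, 1, 1]) + b1)
p1 = max_pool_2x2(h1, [1, 2, 2, 1], [1, 2, 2, 1])  # SAME padding -> [None, 100, 32, 6]
```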
/Net/CNN_Init.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Net/CNN_Init.pyc
--------------------------------------------------------------------------------
/Net/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Net/__init__.py
--------------------------------------------------------------------------------
/Net/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Net/__init__.pyc
--------------------------------------------------------------------------------
/Net/__pycache__/CNN.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Net/__pycache__/CNN.cpython-36.pyc
--------------------------------------------------------------------------------
/Net/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Net/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/Plot/Draw.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | from matplotlib import pyplot as plt
3 | import scipy.io as scio
4 | import numpy as np
5 |
6 | def Draw(path):
7 | data = scio.loadmat(path)
8 | Amplitude = data['X_normAllAmplitude'][0]
9 | Phase = data['X_normAllPhase'][0]
10 |
11 | plt.subplot(2, 1, 1)
12 | my_x_ticks = np.arange(0, 180, 60)
13 | plt.xticks(my_x_ticks)
14 | plt.title('X_normAllAmplitude')
15 | plt.plot(Amplitude)
16 | plt.subplot(2, 1, 2)
17 | plt.xticks(my_x_ticks)
18 | plt.plot(Phase)
19 | plt.title('X_normAllPhase')
20 | plt.show()
21 |
22 |
23 | classes=5
24 | def expend_y(labels):
25 | one_hot_index = np.arange(len(labels)) * classes + labels
26 |
27 | one_hot = np.zeros((len(labels), classes))
28 | one_hot.flat[one_hot_index] = 1
29 |
30 | return one_hot
31 |
32 |
33 |
34 |
35 | if __name__ == '__main__':
36 | path = '/home/dmrfcoder/Document/CSI/DataSet/new/fixed/eating/1/eating_1.mat'
37 | #Draw(path)
38 | la = [1, 2, 0, 3, 4]
39 | la = np.reshape(la, 5)  # shape (5,); the third positional argument of reshape is 'order', not a dimension
40 |
41 | b = expend_y(la)
42 | print(b)
43 |
--------------------------------------------------------------------------------
/Plot/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Plot/__init__.py
--------------------------------------------------------------------------------
/Readme.md:
--------------------------------------------------------------------------------
1 | # CSI
2 |
3 | ## DeepCount
4 |
5 | LSTM --> CNN --> FC --> Softmax
6 | ### LSTM
7 |
8 | Input dimension: time × 360
9 |
10 | one layer
11 |
12 | Number of units: N = 64
13 |
14 | Output dimension: time × N × 1
15 | ### CNN
16 |
17 | Two CNN blocks
18 | each block contains filter and max pooling components
19 |
20 | #### first filter
21 |
22 | - cnn
23 |
24 | input dimension: 200 × 64
25 |
26 | so the time dimension is 200?
27 |
28 | 6 filters
29 |
30 | kernel size: 5 × 5
31 |
32 | stride: 1 × 1
33 | - max pool:
34 |
35 | size: 2 × 2
36 | stride: 2 × 2
37 |
38 | output: 98 × 30 × 6
39 |
40 | #### second filter
41 |
42 | - cnn
43 |
44 | 10 filters
45 |
46 | kernel size: 5 × 3
47 |
48 | stride: 3 × 3
49 | - max pool
50 |
51 | kernel size: 1
52 |
53 | stride: 1
54 |
55 | delete this max pool?
56 |
57 | output: 32 × 10 × 10
58 |
59 | ### FC
60 |
61 | Three layers
62 |
63 | Input: 32 × 10 × 10 (flattened to 3200 × 1)
64 |
65 | + 1000
66 | + 200
67 | + 5
68 |
69 |
70 | output: 5 × 1
71 | ### Softmax
72 |
73 | 5 units
74 |
75 | ## Data Processing
76 |
77 | ### Algorithm
78 |
79 | - Amplitude Noise Removal
80 |
81 | Denoise the amplitude with a weighted moving average; m is set to 100
82 |
83 | - Phase Sanitization
84 |
85 | First unwrap the raw phase data (180 → 6 × 30), then compute the per-subcarrier mean y, run a linear fit of y against x = [0 : Sub-1], and finally compute and return the calibrated phase values.
86 |
87 | ### Code
88 |
89 | - DataProcess
90 |
91 | > Process the raw data with weighted-moving-average and related algorithms to obtain clean data
92 |
93 | - Normalize (normalization was already applied in the previous step)
94 |
95 | > Normalize the clean data
96 |
97 | These two steps yield the data folders under fixed, open, and semi; the data in each folder is N × 360 and already normalized
98 |
99 |
100 |
101 | ## Comparative Experiment
102 |
103 | Two groups of comparative experiments, along two axes:
104 |
105 | - Only Amplitude(Without P)
106 | - Only Phase(Without A)
107 | - Without Amplitude noise removal but with Phase noise removal(Without A)
108 | - With Amplitude noise removal but without Phase noise removal(Without P)
109 | - Amplitude with noise removal and Phase with noise removal(With P*A)
110 | - Amplitude without noise removal and Phase without noise removal(Raw Data)
111 |
112 |
113 | 基于此,数据集应有如下几种:
114 |
115 | - Amplitude with noise removal
116 |
117 | - Phase with noise removal
118 |
119 | - Amplitude without noise removal (this is simply the raw data, but the small datasets must be spliced into one dataset)
120 |
121 | - Phase without noise removal (this is simply the raw data, but the small datasets must be spliced into one dataset)
122 |
123 |
124 | Combine the four datasets above into the following datasets for training:
125 |
126 | - Amplitude without noise removal&Phase with noise removal
127 |
128 | - Amplitude with noise removal&Phase without noise removal
129 |
130 | - Amplitude with noise removal&Phase with noise removal
131 |
132 | - Amplitude without noise removal&Phase without noise removal
133 |
134 |
135 | Additionally, train another network on the following datasets:
136 |
137 | - Amplitude with noise removal
138 |
139 | - Phase with noise removal
140 |
141 |
142 | The new network's modification is to replace the original 360 with 180 throughout; the slice length and number of units stay the same, so the LSTM output dimension is unchanged and the CNN part needs no modification.
--------------------------------------------------------------------------------
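A minimal sketch of the phase-sanitization step described above (unwrap, then subtract the line fitted against the subcarrier index); the 6 × 30 layout follows the Readme, and this function is illustrative rather than the project's actual implementation in DataProcessing/PhaseSanitization.py:

```python
import numpy as np

def sanitize_phase(raw_phase):
    """raw_phase: (6, 30) array, one row of 30 subcarrier phases per antenna pair."""
    sub = raw_phase.shape[1]
    x = np.arange(sub)                        # subcarrier indices 0 .. Sub-1
    unwrapped = np.unwrap(raw_phase, axis=1)  # remove 2*pi jumps along subcarriers
    y = unwrapped.mean(axis=0)                # mean phase of every subcarrier
    k, b = np.polyfit(x, y, 1)                # linear fit y ~ k*x + b
    return unwrapped - (k * x + b)            # calibrated phase values
```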
/Test/DataProcessingTest.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | import random
3 |
4 | import scipy.io as scio
5 |
6 | import DataProcessing
7 |
8 |
9 | def DataProcessingTest():
10 | datapath = ''
11 | result = DataProcessing.DataProcessing.DataProcessing(datapath)
12 | amplitudes = result[0]
13 | phases = result[1]
14 | l = len(amplitudes)
15 | merged = []
16 | for i in range(0, l):
17 | merged.append(amplitudes[i] + phases[i])
18 |
19 | scio.savemat('demo.mat', {'key': merged})
20 |
21 |
22 | train_batch = 64
23 | lens = 100
24 | train_indexs = list(range(0, lens))
25 | for i in range(0, 100):
26 | if len(train_indexs) < train_batch:
27 | train_indexs = list(range(0, lens))
28 |
29 | indexs = random.sample(range(0, len(train_indexs)), train_batch)
30 |
31 | sort = sorted(indexs, reverse=True)
32 |
33 | for ind in sort:
34 | print(ind, len(train_indexs))
35 |
36 | train_indexs.pop(ind)
37 |
--------------------------------------------------------------------------------
/Test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Test/__init__.py
--------------------------------------------------------------------------------
/Util/Matrix.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | from sklearn.metrics import confusion_matrix
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 |
6 | label = [0, 1, 2, 3, 4]
7 | label2 = [4, 3, 2, 1, 0]
8 |
9 |
10 | def drawMatrix(reallyTxtPath, predictionTxtPath, matrixPath):
11 | a = []
12 | b = []
13 |
14 | y_true = np.loadtxt(reallyTxtPath)
15 | y_pred = np.loadtxt(predictionTxtPath)
16 |
17 | # with open(reallyTxtPath, 'r') as f:
18 | # for line in f:
19 | # data = line.split()
20 | # a.append(int(data[0][0]))
21 | #
22 | # with open(predictionTxtPath, 'r') as f:
23 | # for line in f:
24 | # data = line.split()
25 | # b.append(int(data[0][0]))
26 |
27 | # append one sample of every class so the confusion matrix always contains all 5 classes
28 | y_true = np.append(y_true, label)
29 | y_pred = np.append(y_pred, label)
30 | cm = confusion_matrix(y_true, y_pred)
31 | np.set_printoptions(precision=2)
32 | cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
33 |
34 | plt.figure(figsize=(9, 8), dpi=120)
35 |
36 | ind_array = np.array(label)
37 | x, y = np.meshgrid(ind_array, ind_array)
38 |
39 | for x_val, y_val in zip(x.flatten(), y.flatten()):
40 | c = cm_normalized[y_val][x_val]
41 | if c > 0.5:
42 | plt.text(x_val, y_val, "%0.2f" % (c,), color='white', fontsize=35, va='center', ha='center')
43 | else:
44 | plt.text(x_val, y_val, "%0.2f" % (c,), color='black', fontsize=35, va='center', ha='center')
45 |
46 | # offset the tick
47 |
48 | tick_marks = np.array(range(5)) + 0.5
49 | plt.gca().set_xticks(tick_marks, minor=True)
50 | plt.gca().set_yticks(tick_marks, minor=True)
51 | plt.gca().xaxis.set_ticks_position('none')
52 | plt.gca().yaxis.set_ticks_position('none')
53 | plt.grid(True, which='minor', linestyle='-')
54 | plt.gcf().subplots_adjust(bottom=0.15)
55 |
56 | labels = [1, 2, 3, 4, 5]
57 | plt.xticks(tick_marks - 0.5, labels, fontsize=35)
58 | plt.yticks(tick_marks - 0.5, labels, fontsize=35)
59 |
60 | plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.binary)
61 | # show confusion matrix
62 | plt.savefig(matrixPath, format='png', dpi=120)
63 | plt.close()
64 | #plt.show()
65 |
66 |
67 |
--------------------------------------------------------------------------------
/Util/Matrix.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/Matrix.pyc
--------------------------------------------------------------------------------
/Util/MergeMat.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | import os
3 |
4 | import scipy.io as scio
5 |
6 | def merge(path, target_path):
7 | all_x, all_y = [], []
8 | for f1 in os.listdir(path):
9 | mat_data = scio.loadmat(path + '\\' + f1)
10 | all_x.extend(mat_data['x'])
11 | all_y.extend(mat_data['y'])
12 |
13 | # assumed completion: splice everything into a single .mat file
14 | scio.savemat(target_path, {'x': all_x, 'y': all_y})
--------------------------------------------------------------------------------
/Util/OpenMatUtil.py:
--------------------------------------------------------------------------------
1 | #-*-coding:utf-8-*-
2 | import scipy.io as scio
3 | import numpy as np
4 |
5 |
6 | data_path='/home/dmrfcoder/Document/CSI/DataSet/new/fixed/eating/1/eating_1_2.mat'
7 |
8 | # print type(data)
9 | # print data
10 |
11 |
12 | a=np.random.random(size=[2,3,6])
13 | b=np.random.random(size=[2,3,6])
14 | print(a)
15 | print(b)
16 |
17 | d=a.reshape(2,-1)
18 | e=b.reshape(2,-1)
19 | c=np.concatenate((a.reshape(2,-1), b.reshape(2,-1)))
20 | print(c)
21 |
22 |
23 |
24 | # note: scipy.io.savemat rewrites the whole file on each call; appendmat=True only appends the '.mat' extension to the filename
25 |
26 | for i in range(10):
27 | scio.savemat("demo.mat", {'0': i,'1':i},appendmat=True)
28 | scio.savemat("demo.mat", {'2': i,'3':i},appendmat=True)
29 |
30 |
31 | data = scio.loadmat("demo.mat")
32 | test_x=data['A']
33 | test_y=data['B']
34 | print(test_x.shape)
35 | print(test_x[0].shape)
36 | a=test_x[0].reshape([360,1])
37 | print(a.shape)
38 | #a=np.where(test_x[0]==1)
39 | #print(a[0][0])
40 |
41 |
42 |
--------------------------------------------------------------------------------
/Util/OpenTfRecordsUtil.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | # Read a TFRecords file.
3 | import tensorflow as tf
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 |
7 | reader = tf.TFRecordReader()
8 | filename_queue = tf.train.string_input_producer(
9 |
10 | ['E:\\yczhao Data\\open.tfrecords'])
11 |
12 | _, serialized_example = reader.read(filename_queue)
13 |
14 | # Parse the example that was read.
15 | features = tf.parse_single_example(
16 | serialized_example,
17 | features={
18 | 'label': tf.FixedLenFeature([], tf.int64),
19 | 'data_raw': tf.FixedLenFeature([], tf.string)
20 | })
21 |
22 | images = tf.decode_raw(features['data_raw'], tf.float64)
23 | # images = tf.reshape(images, [1, 360])
24 |
25 | labels = tf.cast(features['label'], tf.int64)
26 |
27 | sess = tf.Session()
28 |
29 | # Start multiple threads to feed the input pipeline.
30 | coord = tf.train.Coordinator()
31 | threads = tf.train.start_queue_runners(sess=sess, coord=coord)
32 |
33 | for i in range(20):
34 | image, label = sess.run([images, labels])
35 | print(label)
36 |
37 | print(image, label, i)
38 |
--------------------------------------------------------------------------------
/Util/ReadAndDecodeUtil.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | import tensorflow as tf
3 |
4 |
5 | # read tfrecords data
6 | def read_and_decode(filename, dimension):
7 | # build a queue from the file name
8 | filename_queue = tf.train.string_input_producer([filename])
9 |
10 | reader = tf.TFRecordReader()
11 | _, serialized_example = reader.read(filename_queue)  # returns the file name and the file contents
12 | features = tf.parse_single_example(serialized_example,
13 | features={
14 | 'label': tf.FixedLenFeature([], tf.int64),
15 | 'data_raw': tf.FixedLenFeature([], tf.string),
16 | })
17 |
18 | img = tf.decode_raw(features['data_raw'], tf.float64)
19 | img = tf.reshape(img, [200, dimension])
20 | label = tf.cast(features['label'], tf.int64)
21 |
22 | return img, label
23 |
24 |
25 |
26 |
--------------------------------------------------------------------------------
/Util/ReadAndDecodeUtil.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/ReadAndDecodeUtil.pyc
--------------------------------------------------------------------------------
/Util/Readtxt.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import pandas as pd
4 | import scipy.io as scio
5 | import numpy as np
6 | import h5py
7 |
8 |
9 |
10 | def read(path1,target):
11 | files=os.listdir(path1)
12 | d1=[]
13 | d2=[]
14 | #f = h5py.File('F:\\csi\\opennpy_x.txt', 'a')
15 | f = pd.HDFStore(target, 'a')
16 |
17 |
18 | for f1 in files:
19 | print(f1)
20 | data=scio.loadmat(path1+'\\'+f1)
21 | x=data['x']
22 | y=data['y'][0]
23 |
24 |
25 |
26 |
27 | l=len(x)
28 | for i in range(0,l):
29 | temp_x=x[i]
30 | a = pd.DataFrame(temp_x)
31 | f.append('x',a)
32 |
33 | b=pd.DataFrame(y)
34 | f.append('y',b)
35 |
36 |
37 |
38 | f.close()
39 |
40 |
41 | return (d1,d2)
42 |
43 | f=read('F:\\csi\\open2_val','F:\\csi\\open_val2.h5')
--------------------------------------------------------------------------------
/Util/WriteAndCodeUtil.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 | import os
3 |
4 | import scipy.io as scio
5 | import tensorflow as tf
6 | import numpy as np
7 |
8 |
9 | # create an int64 feature
10 | def _int64_feature(value):
11 | return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
12 |
13 |
14 | # create a bytes feature
15 | def _bytes_feature(value):
16 | return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
17 |
18 | list_x=[]
19 | list_y=[]
20 |
21 | list_x_val=[]
22 | list_y_val=[]
23 |
24 |
25 | def DoConvert(path_mat,new_path_mat,slice_len=200):
26 |
27 |
28 |
29 |
30 | files1=os.listdir(path_mat)
31 |
32 | for f1 in files1:
33 | mat_data = scio.loadmat(path_mat+'\\'+f1)
34 | x = mat_data['x']
35 | y =mat_data['y']
36 | label=y[0][0]
37 | item_count = y.shape[1]
38 | '''
39 | There are item_count samples in total; x is float64 and each sample is 1 × 360,
40 | already normalized.
41 | '''
42 | item_count = int(item_count / slice_len)
43 |
44 | new_x=[]
45 | new_y=[]
46 | new_x_val=[]
47 | new_y_val=[]
48 |
49 | for i in range(0, item_count):
50 | data_raw = x[i * slice_len:(i + 1) * slice_len]
51 | if float(i) / item_count >= 0.8:  # last 20% of slices become the validation split; float cast avoids Py2 integer division
52 | print('doing_val:' + f1 + ' ' + str(i) + ' label:' + str(label))
53 |
54 | new_x_val.append(data_raw)
55 | new_y_val.append(label)
56 |
57 | else:
58 | print('doing:' + f1 + ' ' + str(i) + ' label:' + str(label))
59 | new_x.append(data_raw)
60 | new_y.append(label)
61 |
62 | scio.savemat(new_path_mat+'\\'+f1,{'x':new_x, 'y': new_y})
63 | scio.savemat(new_path_mat+'\\val_'+f1,{'x':new_x_val, 'y': new_y_val})
64 |
65 |
66 | print('success')
67 |
68 |
69 |
70 | if __name__ == '__main__':
71 | path_mat='F:\\csi\\open'
72 | new_path_mat='F:\\csi\\open2'
73 |
74 |
75 | DoConvert(path_mat,new_path_mat )
--------------------------------------------------------------------------------
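The `_int64_feature` / `_bytes_feature` helpers above are the writer-side counterparts of `Util/ReadAndDecodeUtil.read_and_decode`, which expects an int64 'label' and a float64 byte string 'data_raw'. A hypothetical sketch of serializing one slice with them (the output path and label value are illustrative):

```python
import numpy as np
import tensorflow as tf

writer = tf.python_io.TFRecordWriter('train.tfrecords')  # illustrative path
data_raw = np.zeros((200, 360), dtype=np.float64)        # one 200-step slice
example = tf.train.Example(features=tf.train.Features(feature={
    'label': _int64_feature(3),                          # class label 0..4
    'data_raw': _bytes_feature(data_raw.tostring()),     # float64 bytes
}))
writer.write(example.SerializeToString())
writer.close()
```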
/Util/WriteHd5Util.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 |
4 |
5 |
6 |
7 | def writeToH5(data, path):
8 | f = pd.HDFStore(path, 'a')
9 | l = len(data)
10 |
11 | y = []
12 |
13 | for i in range(l):
14 | tempx = data[i].x
15 | pdatax = pd.DataFrame(tempx)
16 | f.append(str(i), pdatax)
17 |
18 | tempy = data[i].y
19 | y.append(tempy)
20 |
21 | pdatay = pd.DataFrame(y)
22 | f.append('y', pdatay)
23 |
24 | f.close()
25 | print('success')
26 |
--------------------------------------------------------------------------------
/Util/WriteHd5Util.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/WriteHd5Util.pyc
--------------------------------------------------------------------------------
/Util/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/__init__.py
--------------------------------------------------------------------------------
/Util/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/__init__.pyc
--------------------------------------------------------------------------------
/Util/__pycache__/ReadAndDecodeUtil.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/__pycache__/ReadAndDecodeUtil.cpython-36.pyc
--------------------------------------------------------------------------------
/Util/__pycache__/Readtxt.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/__pycache__/Readtxt.cpython-36.pyc
--------------------------------------------------------------------------------
/Util/__pycache__/WriteHd5Util.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/__pycache__/WriteHd5Util.cpython-36.pyc
--------------------------------------------------------------------------------
/Util/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/Util/__pycache__/loadHd.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/__pycache__/loadHd.cpython-36.pyc
--------------------------------------------------------------------------------
/Util/loadHd.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import numpy as np
4 | import pandas as pd
5 |
6 | from Util.WriteHd5Util import writeToH5
7 |
8 | class bean(object):  # minimal x/y holder used by load() below and by WriteHd5Util.writeToH5 (assumed definition)
9 | def __init__(self, x, y): self.x, self.y = x, y
10 | def convertY(y_list):
11 | yListLength = len(y_list)
12 | yCoverted = np.zeros(shape=yListLength)
13 | for listItemIndex in range(0, yListLength):
14 | yCoverted[listItemIndex] = y_list[listItemIndex]
15 |
16 | return yCoverted
17 |
18 |
19 |
20 |
21 |
22 | def load(path, path2):
23 | # f = pd.HDFStore(path, 'r')
24 |
25 | # x=f['x'].values
26 | # y=f['y'].values
27 | x = np.random.random(size=(2, 200, 360))
28 | x[0][0][0] = 0
29 | x[1][0][0] = 99
30 | x[0][199][359] = 88
31 | x[1][199][359] = 101
32 | y = [1, 0]
33 | # y=convertY(y)
34 | # final_x = np.reshape(x, (-1, 200, 360))
35 | final_x = x # np.reshape(x, (-1, 72000))
36 | l = len(y)
37 | data = []
38 | for i in range(l):
39 | b = bean(final_x[i], y[i])
40 | data.append(b)
41 |
42 | random.shuffle(data)
43 |
44 | writeToH5(data, path2)
45 |
46 |
47 | return (final_x, y)
48 |
49 |
50 | def open_val_sf():  # avoid shadowing the builtin open
51 | f = pd.HDFStore('open_val_sf.h5', 'r')
52 | # x = f['x'].values
53 | y = f['y'].values
54 | x = []
55 |
56 | l = len(y)
57 |
58 | for i in range(l):
59 | tx = f[str(i)].values
60 | x.append(tx)
61 | # x = np.reshape(x, (-1, 200,360))
62 | print(x[0][0][0],
63 | x[1][0][0],
64 | x[0][199][359],
65 | x[1][199][359])
66 |
67 |
68 | # load('F:\csi\openh5\\open_val.h5','open_val_sf.h5')
69 |
70 | # open_val_sf()
71 |
72 |
--------------------------------------------------------------------------------
/Util/loadHd.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/loadHd.pyc
--------------------------------------------------------------------------------
/Util/open_val_sf.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DmrfCoder/CSI/8fb93842e9fed674e251d3ba9f80056537584aea/Util/open_val_sf.h5
--------------------------------------------------------------------------------
/auto_update_data.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | git add .
3 | git commit -m "update Experimental Record"
4 | git push origin master
5 |
--------------------------------------------------------------------------------
/不同数据集对比实验记录.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Record of Comparative Experiments on Different Datasets
4 |
5 | ## AmplitudeWith_PhaseWith
6 |
7 | | Dataset | accuracy | confusion matrix | baseIr | acc |
8 | | ------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------ | ---- |
9 | | fixed |  |  | 0.2 | 0.8 |
10 | | open |  |  | 0.15 | 0.85 |
11 | | semi |  |  | 0.1 | 0.9 |
12 |
13 | ## AmplitudeWith_PhaseWithout
14 |
15 | | Dataset | accuracy | confusion matrix | baseIr | acc |
16 | | ------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------ | ----------------------- |
17 | | fixed |  |  | 0.2 | peaks at 0.9, later overfits down to 0.75 |
18 | | open |  |  | 0.15 | 0.98 |
19 | | semi |  |  | 0.1 | very poor |
20 |
21 |
22 |
23 | ## AmplitudeWithout_PhaseWith
24 |
25 | | Dataset | accuracy | confusion matrix | baseIr | acc |
26 | | ------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------ | ---------------- |
27 | | fixed |  |  | 0.2 | peaks at 0.9, ends at 0.7 |
28 | | open |  |  | 0.15 | 0.98 |
29 | | semi |  |  | 0.1 | very poor |
30 |
31 | ## AmplitudeWithOut_PhaseWithout
32 |
33 | | Dataset | accuracy | confusion matrix | baseIr | acc |
34 | | ------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------ | ---- |
35 | | fixed |  |  | 0.2 | 0.85 |
36 | | open |  |  | 0.15 | 0.98 |
37 | | semi |  |  | 0.1 | 0.98 |
38 |
39 | ## OnlyAmplitude
40 |
41 | | Dataset | accuracy | confusion matrix | baseIr | acc |
42 | | ------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------ | ---- |
43 | | fixed |  |  | 0.2 | 0.97 |
44 | | open |  |  | 0.15 | 0.98 |
45 | | semi |  |  | 0.1 | 0.98 |
46 |
47 | ## OnlyPhase
48 |
49 |
50 |
51 | | Dataset | accuracy | confusion matrix | baseIr | acc |
52 | | ------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------ | -------- |
53 | | fixed |  |  | 0.2 | 0.85 |
54 | | open |  |  | 0.15 | 0.85 |
55 | | semi |  |  | 0.1 | very poor |
56 |
57 |
--------------------------------------------------------------------------------