├── README.md
├── channel-data
│   ├── README.md
│   ├── channel_data10.mat
│   ├── channel_data10.txt
│   ├── channel_data_360_10.mat
│   ├── channel_data_360_10.txt
│   ├── channel_data_450_10.mat
│   ├── channel_data_450_10.txt
│   ├── channel_data_90_10.mat
│   └── channel_data_90_10.txt
├── multi-point prediction online
│   ├── README.md
│   ├── applyhatch.m
│   ├── applyhatch_plusC.m
│   ├── channel_180.m
│   ├── channel_180_length_5.py
│   ├── channel_360.m
│   ├── channel_360_length_5.py
│   ├── channel_90.m
│   ├── channel_90_10.m
│   ├── channel_90_16.m
│   ├── channel_90_20.m
│   ├── channel_90_length_10.py
│   ├── channel_90_length_16.py
│   ├── channel_90_length_20.py
│   ├── channel_90_length_5.py
│   ├── channel_90_pre_total.m
│   ├── channel_data10.mat
│   ├── channel_data_360_10.mat
│   ├── channel_data_450_10.mat
│   ├── channel_data_90_10.mat
│   ├── combine_180_360.m
│   ├── magnify.m
│   ├── makehatch.m
│   ├── makehatch_plus.m
│   ├── prediction_180_length_5.xlsx
│   ├── prediction_360_length_5.xlsx
│   ├── prediction_90_length_10.xlsx
│   ├── prediction_90_length_16.xlsx
│   ├── prediction_90_length_20.xlsx
│   ├── prediction_90_length_5.xlsx
│   └── total_prediction_length.m
└── single-point prediction online
    ├── README.md
    ├── applyhatch.m
    ├── applyhatch_plusC.m
    ├── channel_180.m
    ├── channel_180_4800_1600_1600.py
    ├── channel_360.m
    ├── channel_360_4800_1600_1600.py
    ├── channel_90.m
    ├── channel_90.py
    ├── channel_90_1.m
    ├── channel_90_10.m
    ├── channel_90_20.m
    ├── channel_90_4800_1600_1600.py
    ├── channel_data10.mat
    ├── channel_data_360_10.mat
    ├── channel_data_450_10.mat
    ├── channel_data_90_10.mat
    ├── combine_180_360.m
    ├── magnify.m
    ├── makehatch.m
    ├── makehatch_plus.m
    ├── prediction_180.xlsx
    ├── prediction_180_4800_1600_1600.xlsx
    ├── prediction_360.xlsx
    ├── prediction_360_4800_1600_1600.xlsx
    ├── prediction_90.xlsx
    ├── prediction_90_4800_1600_1600.xlsx
    ├── single_pre_IL.m
    └── total_time_step.m

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# TVT-data-code-channel-prediction-model

--------------------------------------------------------------------------------
/channel-data/README.md:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/channel-data/channel_data10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/channel-data/channel_data10.mat

--------------------------------------------------------------------------------
/channel-data/channel_data_360_10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/channel-data/channel_data_360_10.mat

--------------------------------------------------------------------------------
/channel-data/channel_data_450_10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/channel-data/channel_data_450_10.mat

--------------------------------------------------------------------------------
/channel-data/channel_data_90_10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/channel-data/channel_data_90_10.mat
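Each .mat file above stores the simulated channel sequence in a complex matrix named h, which is the variable the prediction scripts read; the .txt files listed in the tree are presumably plain-text exports of the same sequences. A minimal loading sketch (not part of the repository), assuming the file sits in the working directory:

import numpy as np
import scipy.io as scio

data_com = scio.loadmat('channel_data10.mat')  # the file read by channel_180_length_5.py
y = np.abs(data_com['h'])[0]                   # amplitude sequence fed to the predictors
print(y.shape)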
--------------------------------------------------------------------------------
/multi-point prediction online/README.md:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/multi-point prediction online/applyhatch.m:
--------------------------------------------------------------------------------
function applyhatch(h,patterns,colorlist)
%APPLYHATCH Apply hatched patterns to a figure
%  APPLYHATCH(H,PATTERNS) creates a new figure from the figure H by
%  replacing distinct colors in H with the black and white
%  patterns in PATTERNS. The format for PATTERNS can be
%    a string of the characters '/', '\', '|', '-', '+', 'x', '.'
%    a cell array of matrices of zeros (white) and ones (black)
%
%  APPLYHATCH(H,PATTERNS,COLORS) maps the colors in the n by 3
%  matrix COLORS to PATTERNS. Each row of COLORS specifies an RGB
%  color value.
%
%  Note this function makes a bitmap image of H and so is limited
%  to low-resolution, bitmap output.
%
%  Example 1:
%    bar(rand(3,4));
%    applyhatch(gcf,'-x.');
%
%  Example 2:
%    colormap(cool(6));
%    pie(rand(6,1));
%    legend('Jan','Feb','Mar','Apr','May','Jun');
%    applyhatch(gcf,'|-+./',cool(6));
%
%  See also: MAKEHATCH

%  By Ben Hinkle, bhinkle@mathworks.com
%  This code is in the public domain.

oldppmode = get(h,'paperpositionmode');
oldunits = get(h,'units');
set(h,'paperpositionmode','auto');
set(h,'units','pixels');
figsize = get(h,'position');
if nargin == 2
  colorlist = [];
end
bits = hardcopy(h,'-dzbuffer','-r0');
set(h,'paperpositionmode',oldppmode);

bwidth = size(bits,2);
bheight = size(bits,1);
bsize = bwidth * bheight;
if ~isempty(colorlist)
  colorlist = uint8(255*colorlist);
  [colors,colori] = nextnonbw(0,colorlist,bits);
else
  colors = (bits(:,:,1) ~= bits(:,:,2)) | ...
           (bits(:,:,1) ~= bits(:,:,3));
end
pati = 1;
colorind = find(colors);
while ~isempty(colorind)
  colorval(1) = bits(colorind(1));
  colorval(2) = bits(colorind(1)+bsize);
  colorval(3) = bits(colorind(1)+2*bsize);
  if iscell(patterns)
    pattern = patterns{pati};
  elseif isa(patterns,'char')
    pattern = makehatch(patterns(pati));
  else
    pattern = patterns;
  end
  pattern = uint8(255*(1-pattern));
  pheight = size(pattern,2);
  pwidth = size(pattern,1);
  ratioh = ceil(bheight/pheight);
  ratiow = ceil(bwidth/pwidth);
  bigpattern = repmat(pattern,[ratioh ratiow]);
  if ratioh*pheight > bheight
    bigpattern(bheight+1:end,:) = [];
  end
  if ratiow*pwidth > bwidth
    bigpattern(:,bwidth+1:end) = [];
  end
  bigpattern = repmat(bigpattern,[1 1 3]);
  color = (bits(:,:,1) == colorval(1)) & ...
          (bits(:,:,2) == colorval(2)) & ...
          (bits(:,:,3) == colorval(3));
  color = repmat(color,[1 1 3]);
  bits(color) = bigpattern(color);
  if ~isempty(colorlist)
    [colors,colori] = nextnonbw(colori,colorlist,bits);
  else
    colors = (bits(:,:,1) ~= bits(:,:,2)) | ...
             (bits(:,:,1) ~= bits(:,:,3));
  end
  colorind = find(colors);
  pati = (pati + 1);
  if pati > length(patterns)
    pati = 1;
  end
end

newfig = figure('units','pixels','visible','off');
imaxes = axes('parent',newfig,'units','pixels');
im = image(bits,'parent',imaxes);
fpos = get(newfig,'position');
set(newfig,'position',[fpos(1:2) figsize(3) figsize(4)+1]);
set(imaxes,'position',[0 0 figsize(3) figsize(4)+1],'visible','off');
set(newfig,'visible','on');

function [colors,out] = nextnonbw(ind,colorlist,bits)
out = ind+1;
colors = [];
while out <= size(colorlist,1)
  if isequal(colorlist(out,:),[255 255 255]) | ...
      isequal(colorlist(out,:),[0 0 0])
    out = out+1;
  else
    colors = (colorlist(out,1) == bits(:,:,1)) & ...
             (colorlist(out,2) == bits(:,:,2)) & ...
             (colorlist(out,3) == bits(:,:,3));
    return
  end
end

--------------------------------------------------------------------------------
/multi-point prediction online/applyhatch_plusC.m:
--------------------------------------------------------------------------------
function im_hatchC = applyhatch_plusC(h,patterns,patterncolors,colorlist,dpi,hatchsc)
%APPLYHATCH_PLUSC Apply colored hatched patterns to a figure
%  im_hatch = applyhatch_plusC(h,patterns,patterncolors,colorlist,dpi,hatchsc)
%
%  APPLYHATCH_PLUSC(H,PATTERNS) creates a new figure from the figure H by
%  replacing distinct colors in H with the black and white
%  patterns in PATTERNS. The format for PATTERNS can be
%    a string of the characters '/', '\', '|', '-', '+', 'x', '.'
%    a cell array of matrices of zeros (white) and ones (black)
%
%  By default the lines are of uniform thickness. Hatch pattern line
%  thickness can be modified using a direct call to MAKEHATCH_PLUS using
%  the following syntax: makehatch_plus('HHn',m) where;
%    HH  the hatch character written twice, '//', '\\', '||', '--', '++'
%    n   integer number for thickness
%    m   integer number for the matrix size (n<=m)
%    Ex. makehatch_plus('\\4',9)
%
%  APPLYHATCH_PLUSC(H,PATTERNS,COLORS) maps the colors in the n by 3
%  matrix COLORS to PATTERNS. Each row of COLORS specifies an RGB
%  color value. COLORS can also be a character string list.
%
%  Note this function makes a bitmap image of H and so is limited
%  to bitmap output.
%
%  Example 1: basic operation using color char string
%    bar(rand(3,6));
%    im_hatchC = applyhatch_plusC(1,'\-x.\x','rkgrgb');
%
%  Example 2: basic operation using color matrix
%    bar(rand(3,4));
%    im_hatchC = applyhatch_plusC(1,'\-x.',[1 0 0;0 1 0;0 0 1;0 1 1]);
%
%  Example 3: basic operation using resolution modification
%    pie(rand(6,1));
%    legend('Jan','Feb','Mar','Apr','May','Jun');
%    im_hatch = applyhatch_plusC(gcf,'|-+.\/','rgbcmy',[],150,0.5);
%    imwrite(im_hatch,'im_hatch.tiff','tiff')
%    Note : have not been able to understand exactly how colors are assigned
%    for some plot functions, so better to leave COLORLIST empty for
%    starters
%
%  Example 4: basic operation with user defined patterns
%    bar(rand(3,3));
%    im_hatch = applyhatch_plusC(gcf,{makehatch_plus('\',6),1-makehatch_plus('\',6),makehatch_plus('\',1)},'ggg');
%
%  Example 5: using variable thickness hatches
%    bar(rand(3,3));
%    im_hatch = applyhatch_plusC(gcf,{makehatch_plus('\',9),makehatch_plus('\\4',9),makehatch_plus('\\8',9)},'rgb');
%
%  Example 6: basic operation using IMAGE plot
%    data = reshape([randperm(8) randperm(8) randperm(8)],4,6)
%    image(data)
%    im_hatch = applyhatch_plusC(1,'|-+.\/x/','rgbcmykr',colormap);
%    Note : do not use imagesc, as you need an indexed image if you want to
%    control the hatch assignments related to data values.
%
%  Modification of APPLYHATCH_PLUS to allow colored patterns
%  Modified Brian FG Katz 25-feb-2010
%  im_hatch = applyhatch_plusC(h,patterns,patterncolors,colorlist,dpi,hatchsc)
%
%  input PATTERNCOLORS  RGB matrix of colors for patterns
%        (length(PATTERNS) X 3) or string of color char
%        'r' 'g' 'b' 'c' 'm' 'y' of length = length(PATTERNS)
%  DPI allows specification of bitmap resolution, making plot resolution
%  better for printing
%  HATCHSC multiplier for hatch scale to increase size of pattern for better
%  operation at higher resolutions (not used when PATTERNS
%  defines pattern matrix)
%  default [] uses screen resolution as in APPLYHATCH
%  output IM_HATCH RGB bitmap matrix of new figure
%  use IMWRITE to output in desired format
%
%  Modified Brian FG Katz 21-sep-11
%  Variable line thickness
%
%  See also: APPLYHATCH, APPLYHATCH_PLUS

%  By Ben Hinkle, bhinkle@mathworks.com
%  This code is in the public domain.

oldppmode = get(h,'paperpositionmode');
oldunits = get(h,'units');
oldcolor = get(h,'color');
oldpos = get(h,'position');
set(h,'paperpositionmode','auto');
set(h,'units','pixels');
set(h,'color',[1 1 1]);
figsize = get(h,'position');

if nargin < 6; hatchsc = 1 ; end
if nargin < 5; dpi = 0 ; end % defaults to screen resolution
if nargin < 4; colorlist = [] ; end

if length(patterns) ~= size(patterncolors,1)
    if length(patterns) == size(patterncolors',1)
        % no problem
    else
        error('PATTERN and PATTERNCOLORS must be the same length')
    end
end

if ischar(patterncolors),
    patterncolors = charcolor2rgb(patterncolors);
end

bits = print(h,'-RGBImage',['-r' num2str(dpi)]);
bitsC = ones(size(bits))*0;
blackpixels = intersect(find(bits(:,:,1)==255), (intersect(find(bits(:,:,1)==bits(:,:,2)),find(bits(:,:,1)==bits(:,:,3)))) ) ;

set(h,'paperpositionmode',oldppmode);
set(h,'color',oldcolor);

bwidth = size(bits,2);
bheight = size(bits,1);
bsize = bwidth * bheight;
if ~isempty(colorlist)
    colorlist = uint8(floor(255*colorlist));
    [colors,colori] = nextnonbw(0,colorlist,bits);
else
    colors = (bits(:,:,1) ~= bits(:,:,2)) | ...
        (bits(:,:,1) ~= bits(:,:,3));
end
pati = 1;
colorind = find(colors);
while ~isempty(colorind)
    colorval(1) = bits(colorind(1));
    colorval(2) = bits(colorind(1)+bsize);
    colorval(3) = bits(colorind(1)+2*bsize);
    if iscell(patterns)
        pattern = patterns{pati};
    elseif isa(patterns,'char')
        pattern = makehatch_plus(patterns(pati),6*hatchsc);
    else
        pattern = patterns;
    end
    patternC = uint8(255*pattern);
    pattern = uint8(255*(1-pattern));
    pheight = size(pattern,2);
    pwidth = size(pattern,1);
    ratioh = ceil(bheight/pheight);
    ratiow = ceil(bwidth/pwidth);
    bigpattern = repmat(pattern,[ratioh ratiow]);
    if ratioh*pheight > bheight
        bigpattern(bheight+1:end,:) = [];
    end
    if ratiow*pwidth > bwidth
        bigpattern(:,bwidth+1:end) = [];
    end
    bigpattern = repmat(bigpattern,[1 1 3]);
    % Create RGB pattern
    pat_size = size(pattern,1)*size(pattern,2) ;
    pat_id = find(patternC);
    patternCrgb = repmat(ones(size(patternC))*255,[1 1 3]) ;
    for rgbLOOP = 1:3,
        patternCrgb(pat_id+(pat_size*(rgbLOOP-1)))=patternCrgb(pat_id+(pat_size*(rgbLOOP-1)))*patterncolors(pati,rgbLOOP) ;
    end % rgbLOOP
    bigpatternC = repmat(patternCrgb,[ratioh ratiow 1]);
    bigpatternC = bigpatternC(1:size(bigpattern,1),1:size(bigpattern,2),:) ;
    % if ratioh*pheight > bheight
    %     bigpatternC(bheight+1:end,:,:) = [];
    % end
    % if ratiow*pwidth > bwidth
    %     bigpatternC(:,bwidth+1:end,:) = [];
    % end

    color = (bits(:,:,1) == colorval(1)) & ...
        (bits(:,:,2) == colorval(2)) & ...
        (bits(:,:,3) == colorval(3));
    color = repmat(color,[1 1 3]);
    bits(color) = bigpattern(color);
    bitsC(color) = bigpatternC(color);
    if ~isempty(colorlist)
        [colors,colori] = nextnonbw(colori,colorlist,bits);
    else
        colors = (bits(:,:,1) ~= bits(:,:,2)) | ...
            (bits(:,:,1) ~= bits(:,:,3));
    end
    colorind = find(colors);
    pati = (pati + 1);
    if pati > length(patterns)
        pati = 1;
    end
end

bitsC(blackpixels)= 255;
bitsC(blackpixels+(bheight*bwidth))= 255;
bitsC(blackpixels+(2*(bheight*bwidth)))= 255;


newfig = figure('units','pixels','visible','off');
imaxes = axes('parent',newfig,'units','pixels');
im = image(bitsC/255,'parent',imaxes);
%fpos = get(newfig,'position');
%set(newfig,'position',[fpos(1:2) figsize(3) figsize(4)+1]);
if ~strcmp(get(newfig,'WindowStyle'),'docked'),
    set(newfig,'position',oldpos)
    set(imaxes,'position',[0 0 figsize(3) figsize(4)+1],'visible','off');
end
set(imaxes,'visible','off');
set(newfig,'visible','on');

set(newfig,'units','normalized');
set(imaxes,'units','normalized');
set(imaxes,'DataAspectRatio',[1 1 1],'DataAspectRatioMode','manual');


if nargout == 1, im_hatchC = bitsC; end

function [colors,out] = nextnonbw(ind,colorlist,bits)
out = ind+1;
colors = [];
while out <= size(colorlist,1)
    if isequal(colorlist(out,:),[255 255 255]) | ...
            isequal(colorlist(out,:),[0 0 0])
        out = out+1;
    else
        colors = (colorlist(out,1) == bits(:,:,1)) & ...
            (colorlist(out,2) == bits(:,:,2)) & ...
            (colorlist(out,3) == bits(:,:,3));
        return
    end
end

function colors_rgb = charcolor2rgb(colors_char);
for LOOP = 1:length(colors_char),
    switch colors_char(LOOP)
        case 'r'
            colors_rgb(LOOP,:) = [1 0 0] ;
        case 'g'
            colors_rgb(LOOP,:) = [0 1 0] ;
        case 'b'
            colors_rgb(LOOP,:) = [0 0 1] ;
        case 'c'
            colors_rgb(LOOP,:) = [0 1 1] ;
        case 'm'
            colors_rgb(LOOP,:) = [1 0 1] ;
        case 'y'
            colors_rgb(LOOP,:) = [1 1 0] ;
        case 'k'
            colors_rgb(LOOP,:) = [0 0 0] ;
        otherwise
            error('Invalid color char string')
    end
end

--------------------------------------------------------------------------------
/multi-point prediction online/channel_180.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/channel_180.m

--------------------------------------------------------------------------------
/multi-point prediction online/channel_180_length_5.py:
--------------------------------------------------------------------------------
# Load the common data-analysis libraries
from openpyxl import load_workbook
from openpyxl import Workbook
import numpy as np
import tensorflow as tf
# from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib.pyplot as plt
import scipy.io as scio
import warnings

book = load_workbook(filename=r"E:\PHD\0论文\0论文\TVT_2018\code\time series prediction_CSDN\multi-point prediction online\prediction_180_length_5.xlsx")
sheetnames = book.get_sheet_names()
sheet = book.get_sheet_by_name(sheetnames[0])

wb = Workbook()
ws = wb.active

dataFile = 'channel_data10.mat'
data_com = scio.loadmat(dataFile)
data_real = np.abs(data_com['h'])
y = data_real[0][:8000]

warnings.filterwarnings('ignore')

# Key constants (the thesis defense mainly presents the influence of the two parameters below)
time_step = 5
iter_time = 200
iter_time_new = 1  # sliding-window training: one training pass per slide
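# Data layout: samples 0-4800 form the offline training set, 4800-6400 the
# validation set, and 6400-8000 the online test region. time_step is the
# input window length, iter_time the number of offline pre-training
# iterations, and iter_time_new the number of gradient steps taken each
# time the sliding training window advances during online operation.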

# More constants
rnn_unit = 5  # hidden layer units
input_size = 1
output_size = 1
train_end = 4800
data_num = len(y)
lr = 0.03  # learning rate
batch_size = None  # the data set is small, so all of it is used without mini-batching
train_begin = 0
validation_end = 6400

tf.reset_default_graph()

# Weights and biases of the input and output layers
weights = {
    'in': tf.Variable(tf.random_normal([input_size, rnn_unit])),
    'out': tf.Variable(tf.random_normal([rnn_unit, 1]))
}
biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[rnn_unit, ])),
    'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
}


def get_data(time_step, train_begin, train_end, validation_end):
    data_m, data_validation_m, data_test_m, data_total_m = [], [], [], []

    # Subtract one here: the "1" leaves room for the final prediction
    for i in range(train_end - time_step):
        data_m.append(y[i:i + time_step])

    # The dimensions here may need further thought
    data_x = np.reshape(data_m, [-1, time_step])
    data_train_x = data_x[:, :, np.newaxis]
    data_train_y = y[train_begin + time_step:train_end]
    data_train_y = np.reshape(data_train_y, [-1, 1])
    # data_train_y = data_y[:, :, np.newaxis]

    # Mini-batch processing
    # for i in range(np.shape(data_x)[0] - batch_size):
    #     if i % batch_size == 0:
    #         batch_index.append(i)
    #     data_m_x.append(data_x[i:i + batch_size, :input_size])
    #     data_m_y.append(data_y[i:i + batch_size, np.newaxis])
    #
    # data_train_x = np.reshape(data_m_x, [-1, time_step, input_size])
    # data_train_y = np.reshape(data_m_y, [-1, time_step, output_size])

    for i in range(train_end - time_step, validation_end - time_step):
        data_validation_m.append(y[i:i + time_step])
    data_validation_x = np.reshape(data_validation_m, [-1, time_step])
    data_validation_x = data_validation_x[:, :, np.newaxis]
    data_validation_y = y[train_end:validation_end]
    data_validation_y = np.reshape(data_validation_y, [-1, 1])

    for i in range(validation_end - time_step, len(y) - time_step):
        data_test_m.append(y[i:i + time_step])
    data_test_x = np.reshape(data_test_m, [-1, time_step])
    data_test_x = data_test_x[:, :, np.newaxis]
    data_test_y = y[validation_end:len(y)]
    data_test_y = np.reshape(data_test_y, [-1, 1])

    # Build the complete data set in preparation for the sliding-training updates
    for i in range(len(y) - time_step):
        data_total_m.append(y[i:i + time_step])
    data_total_x = np.reshape(data_total_m, [-1, time_step])
    data_total_x = data_total_x[:, :, np.newaxis]
    data_total_y = y[time_step:len(y)]
    data_total_y = np.reshape(data_total_y, [-1, 1])

    # data_test_m_x_m, data_test_m_y_m = [], []
    # for i in range(np.shape(data_test_m_x)[0] - time_step):
    #     data_test_m_x_m.append(data_test_m_x[i:i+time_step, :input_size])
    #     data_test_m_y_m.append(data_test_m_y[i:i+time_step, np.newaxis])
    #
    # data_test_x = np.reshape(data_test_m_x_m, [-1, time_step, input_size])
    # data_test_y = np.reshape(data_test_m_y_m, [-1, time_step, output_size])

    return data_train_x, data_train_y, data_validation_x, data_validation_y, data_test_x, data_test_y, data_total_x, data_total_y


def lstm(X):
    batch_size = tf.shape(X)[0]
    time_step = tf.shape(X)[1]
    w_in = weights['in']
    b_in = biases['in']
    input = tf.reshape(X, [-1, input_size])  # reshape the tensor to 2-D for computation; the result becomes the hidden-layer input
    input_rnn = tf.matmul(input, w_in) + b_in
    input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])  # reshape the tensor back to 3-D as the input of the LSTM cell
    cell = tf.contrib.rnn.BasicLSTMCell(rnn_unit)
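    # Architecture note: X has shape [batch, time_step, input_size]; the input
    # layer projects each scalar sample to rnn_unit features, dynamic_rnn runs
    # the LSTM over the window, and only the final hidden state h is mapped to
    # the scalar prediction by the output layer below.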
    # cell=tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_unit)
    init_state = cell.zero_state(batch_size, dtype=tf.float32)
    output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
    print(output_rnn)
    print(final_states)
    # output_rnn records the result of every LSTM output node; final_states is the result of the last cell
    output = tf.reshape(output_rnn, [-1, rnn_unit])
    final_output = final_states.h
    final_output = tf.reshape(final_output, [-1, rnn_unit])
    # input of the output layer
    w_out = weights['out']
    b_out = biases['out']
    pred = tf.matmul(final_output, w_out) + b_out
    # pred = tf.add(pred, 0, name="pred1")
    # tf.add_to_collection(name='pred', value=pred)
    return pred, final_states


def train_lstm(X, Y):
    pred, _ = lstm(X)
    # Loss function
    # this flattens the data into a single column
    loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
    # with tf.name_scope("train1"):
    train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    # tf.add_to_collection(name='train1', value=train_op)
    # train_op = tf.add(train_op, 0, name="train1")
    return pred, loss, train_op


X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
Y = tf.placeholder(tf.float32, shape=[None, output_size])

train_x, train_y, validation_x, validation_y, test_x, test_y, total_x, total_y = get_data(time_step, train_begin, train_end, validation_end)

with tf.Session() as sess:
    pred, loss, train_op = train_lstm(X, Y)
    sess.run(tf.global_variables_initializer())
    # Repeated offline training first
    train_loss_return = []
    validation_loss_return = np.zeros(iter_time)
    test_loss_return = []

    for i in range(iter_time):
        _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x, Y: train_y})
        validation_loss = sess.run(loss, feed_dict={X: validation_x, Y: validation_y})
        test_loss = sess.run(loss, feed_dict={X: test_x, Y: test_y})
        train_loss_return.append(train_loss)
        validation_loss_return[i] = validation_loss
        test_loss_return.append(0)
        print('iter:', i, 'train_loss:', train_loss, 'validation_loss', validation_loss, 'test_loss', test_loss)

    # Evaluate the performance of the pre-trained model
    train_predict_pre_training = sess.run(pred, feed_dict={X: train_x})
    train_predict_pre_training = train_predict_pre_training.reshape((-1))

    validation_predict_pre_training = sess.run(pred, feed_dict={X: validation_x})
    validation_predict_pre_training = validation_predict_pre_training.reshape((-1))


    # Save the model
    # saver = tf.train.Saver()
    # saver.save(sess, "save_net/net.ckpt")
    #
    # with tf.Session() as sess:
    #     saver = tf.train.import_meta_graph("save_net/net.ckpt.meta")
    #     saver.restore(sess, tf.train.latest_checkpoint("save_net"))
    #     graph = tf.get_default_graph()
    #     X = graph.get_tensor_by_name("X:0")
    #     Y = graph.get_tensor_by_name("Y:0")
    #     pred = graph.get_tensor_by_name("pred1:0")
    #     train_op = tf.get_collection('train1')
    #     # graph.get_operation_by_name()

    test_predict = []
    train_new_size = 100

    prediction_length1 = 5

    # First predict the value at the next time instant
    test_x_buff = total_x[validation_end - time_step]
    # test_x_buff1 = np.reshape(test_x_buff, [-1])
    # test_x_buff1 = test_x_buff1[np.newaxis, :, np.newaxis]
    test_x_buff = test_x_buff[np.newaxis, :, :]
    # print(test_x_buff == test_x_buff1)
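    # Recursive multi-point prediction: the first window comes from the true
    # data; each following window is taken from the stored series but its
    # newest slot (index time_step-1) is overwritten with the previous model
    # output, so prediction_length1 consecutive samples are predicted before
    # any new measurement is used.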
    for i in range(1, prediction_length1):
        test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
        # test_loss_return.append(test_loss)
        test_predict.append(test_predict_result_1_buff)
        test_x_buff = total_x[validation_end - time_step + i]
        test_x_buff = np.reshape(test_x_buff, [-1])
        test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
        test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
    test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    test_predict.append(test_predict_result_1_buff)
    # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    # test_predict.append(test_predict_result_1_buff)
    # test_x_buff = total_x[validation_end - time_step + 2]
    # test_x_buff = np.reshape(test_x_buff, [-1])
    # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
    # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
    # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    # test_predict.append(test_predict_result_1_buff)
    # test_x_buff = total_x[validation_end - time_step + 3]
    # test_x_buff = np.reshape(test_x_buff, [-1])
    # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
    # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
    # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    # test_predict.append(test_predict_result_1_buff)
    # test_x_buff = total_x[validation_end - time_step + 4]
    # test_x_buff = np.reshape(test_x_buff, [-1])
    # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
    # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
    # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    # test_predict.append(test_predict_result_1_buff)

    for i in range(validation_end - time_step + prediction_length1, data_num - time_step, prediction_length1):

        # Retrain on the new data (partial-sample learning: only the most recently updated samples), sliding one step forward
        train_x_new = total_x[i - train_new_size:i]
        train_y_new = total_y[i - train_new_size:i]
        for j in range(iter_time_new):
            _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x_new, Y: train_y_new})
            train_loss_return.append(train_loss)

        # Predict the value at the next time instant
        test_x_buff = total_x[i]
        test_x_buff = test_x_buff[np.newaxis, :, :]
        for j in range(1, prediction_length1):
            test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
            # test_loss_return.append(test_loss)
            test_predict.append(test_predict_result_1_buff)
            test_x_buff = total_x[i + j]
            test_x_buff = np.reshape(test_x_buff, [-1])
            test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
            test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
        test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        test_predict.append(test_predict_result_1_buff)
        # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        # test_predict.append(test_predict_result_1_buff)
        # test_x_buff = total_x[i + 2]
        # test_x_buff = np.reshape(test_x_buff, [-1])
        # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
        # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
        # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        # test_predict.append(test_predict_result_1_buff)
        # test_x_buff = total_x[i + 3]
        # test_x_buff = np.reshape(test_x_buff, [-1])
        # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
        # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
        # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        # test_predict.append(test_predict_result_1_buff)
        # test_x_buff = total_x[i + 4]
        # test_x_buff = np.reshape(test_x_buff, [-1])
        # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
        # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
        # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        # test_predict.append(test_predict_result_1_buff)

    test_predict = np.reshape(test_predict, [-1])
    # test_predict = test_predict.reshape((-1))

    train_predict = sess.run(pred, feed_dict={X: train_x})
    train_predict = train_predict.reshape((-1))

    total_predict = sess.run(pred, feed_dict={X: total_x})
    total_predict = total_predict.reshape((-1))

    validation_predict = sess.run(pred, feed_dict={X: validation_x})
    validation_predict = validation_predict.reshape((-1))

    for i in range(len(train_loss_return)):
        ws.cell(row=i + 1, column=1).value = train_loss_return[i]
    for i in range(len(validation_loss_return)):
        ws.cell(row=i + 1, column=2).value = validation_loss_return[i]
    for i in range(len(test_loss_return)):
        ws.cell(row=i + 1, column=3).value = test_loss_return[i]

    for i in range(len(train_predict)):
        ws.cell(row=i + 1, column=4).value = train_predict[i]

    for i in range(len(validation_predict)):
        ws.cell(row=i + 1, column=5).value = validation_predict[i]

    for i in range(len(total_predict)):
        ws.cell(row=i + 1, column=6).value = total_predict[i]
    # Test-set performance during incremental learning
    for i in range(len(test_predict)):
        ws.cell(row=i + 1, column=7).value = test_predict[i]
    # Performance of the pre-trained model on the training set
    for i in range(len(train_predict_pre_training)):
        ws.cell(row=i + 1, column=8).value = train_predict_pre_training[i]
    # Performance of the pre-trained model on the validation set
    for i in range(len(validation_predict_pre_training)):
        ws.cell(row=i + 1, column=9).value = validation_predict_pre_training[i]


wb.save(filename="prediction_180_length_5.xlsx")

plt.figure(figsize=(24, 8))
plt.plot(y[:-1])
plt.plot([None for _ in range(train_end)] + [None for _ in range(train_end, validation_end)] + [x for x in test_predict])
plt.plot([m for m in train_predict] + [None for _ in range(train_end, data_num)])
plt.plot([None for _ in range(train_end)] + [n for n in validation_predict] + [None for _ in range(validation_end, data_num)])
plt.plot([m for m in train_predict_pre_training] + [None for _ in range(train_end, data_num)])
plt.plot([None for _ in range(train_end)] + [n for n in validation_predict_pre_training] + [None for _ in range(validation_end, data_num)])
# plt.plot([k for k in total_predict])
plt.legend(labels=['Real_data', 'Prediction on test', 'Prediction on train', 'Prediction on validation', 'Prediction on train_pre', 'Prediction on validation_pre'])
plt.show()

plt.figure()
plt.plot(train_loss_return[:-1])
plt.plot(validation_loss_return[:-1])
plt.plot(test_loss_return[:-1])
plt.legend(labels=['Loss on train', 'Loss on validation', 'Loss on test'])
plt.show()
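The script above stores everything it produces in prediction_180_length_5.xlsx: columns 1-3 hold the training/validation/test loss traces, columns 4-6 the offline predictions, column 7 the online multi-point test predictions, and columns 8-9 the pre-training baselines. A minimal post-processing sketch (not part of the repository; it assumes the workbook and channel_data10.mat sit in the working directory) that scores the online predictions against the true test samples:

import numpy as np
import scipy.io as scio
from openpyxl import load_workbook

y = np.abs(scio.loadmat('channel_data10.mat')['h'])[0][:8000]
ws = load_workbook('prediction_180_length_5.xlsx').active

test_pred = []
row = 1
while ws.cell(row=row, column=7).value is not None:
    test_pred.append(ws.cell(row=row, column=7).value)
    row += 1
test_pred = np.asarray(test_pred, dtype=float)

y_true = y[6400:6400 + len(test_pred)]  # the test region starts at validation_end
print('online multi-point test MSE:', np.mean((test_pred - y_true) ** 2))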
--------------------------------------------------------------------------------
/multi-point prediction online/channel_360.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/channel_360.m

--------------------------------------------------------------------------------
/multi-point prediction online/channel_360_length_5.py:
--------------------------------------------------------------------------------
# Load the common data-analysis libraries
from openpyxl import load_workbook
from openpyxl import Workbook
import numpy as np
import tensorflow as tf
# from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib.pyplot as plt
import scipy.io as scio
import warnings

book = load_workbook(filename=r"E:\PHD\0论文\0论文\TVT_2018\code\time series prediction_CSDN\multi-point prediction online\prediction_360_length_5.xlsx")
sheetnames = book.get_sheet_names()
sheet = book.get_sheet_by_name(sheetnames[0])

wb = Workbook()
ws = wb.active

dataFile = 'channel_data_360_10.mat'
data_com = scio.loadmat(dataFile)
data_real = np.abs(data_com['h'])
y = data_real[0][:8000]

warnings.filterwarnings('ignore')

# Key constants (the thesis defense mainly presents the influence of the two parameters below)
time_step = 5
iter_time = 200
iter_time_new = 1  # sliding-window training: one training pass per slide

# More constants
rnn_unit = 5  # hidden layer units
input_size = 1
output_size = 1
train_end = 4800
data_num = len(y)
lr = 0.03  # learning rate
batch_size = None  # the data set is small, so all of it is used without mini-batching
train_begin = 0
validation_end = 6400

tf.reset_default_graph()

# Weights and biases of the input and output layers
weights = {
    'in': tf.Variable(tf.random_normal([input_size, rnn_unit])),
    'out': tf.Variable(tf.random_normal([rnn_unit, 1]))
}
biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[rnn_unit, ])),
    'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
}


def get_data(time_step, train_begin, train_end, validation_end):
    data_m, data_validation_m, data_test_m, data_total_m = [], [], [], []

    # Subtract one here: the "1" leaves room for the final prediction
    for i in range(train_end - time_step):
        data_m.append(y[i:i + time_step])

    # The dimensions here may need further thought
    data_x = np.reshape(data_m, [-1, time_step])
    data_train_x = data_x[:, :, np.newaxis]
    data_train_y = y[train_begin + time_step:train_end]
    data_train_y = np.reshape(data_train_y, [-1, 1])
    # data_train_y = data_y[:, :, np.newaxis]

    # Mini-batch processing
    # for i in range(np.shape(data_x)[0] - batch_size):
    #     if i % batch_size == 0:
    #         batch_index.append(i)
    #     data_m_x.append(data_x[i:i + batch_size, :input_size])
    #     data_m_y.append(data_y[i:i + batch_size, np.newaxis])
    #
    # data_train_x = np.reshape(data_m_x, [-1, time_step, input_size])
    # data_train_y = np.reshape(data_m_y, [-1, time_step, output_size])

    for i in range(train_end - time_step, validation_end - time_step):
        data_validation_m.append(y[i:i + time_step])
    data_validation_x = np.reshape(data_validation_m, [-1, time_step])
    data_validation_x = data_validation_x[:, :, np.newaxis]
    data_validation_y = y[train_end:validation_end]
    data_validation_y = np.reshape(data_validation_y, [-1, 1])

    for i in range(validation_end - time_step, len(y) - time_step):
        data_test_m.append(y[i:i + time_step])
    data_test_x = np.reshape(data_test_m, [-1, time_step])
    data_test_x = data_test_x[:, :, np.newaxis]
    data_test_y = y[validation_end:len(y)]
    data_test_y = np.reshape(data_test_y, [-1, 1])

    # Build the complete data set in preparation for the sliding-training updates
    for i in range(len(y) - time_step):
        data_total_m.append(y[i:i + time_step])
    data_total_x = np.reshape(data_total_m, [-1, time_step])
    data_total_x = data_total_x[:, :, np.newaxis]
    data_total_y = y[time_step:len(y)]
    data_total_y = np.reshape(data_total_y, [-1, 1])

    # data_test_m_x_m, data_test_m_y_m = [], []
    # for i in range(np.shape(data_test_m_x)[0] - time_step):
    #     data_test_m_x_m.append(data_test_m_x[i:i+time_step, :input_size])
    #     data_test_m_y_m.append(data_test_m_y[i:i+time_step, np.newaxis])
    #
    # data_test_x = np.reshape(data_test_m_x_m, [-1, time_step, input_size])
    # data_test_y = np.reshape(data_test_m_y_m, [-1, time_step, output_size])

    return data_train_x, data_train_y, data_validation_x, data_validation_y, data_test_x, data_test_y, data_total_x, data_total_y


def lstm(X):
    batch_size = tf.shape(X)[0]
    time_step = tf.shape(X)[1]
    w_in = weights['in']
    b_in = biases['in']
    input = tf.reshape(X, [-1, input_size])  # reshape the tensor to 2-D for computation; the result becomes the hidden-layer input
    input_rnn = tf.matmul(input, w_in) + b_in
    input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])  # reshape the tensor back to 3-D as the input of the LSTM cell
    cell = tf.contrib.rnn.BasicLSTMCell(rnn_unit)
    # cell=tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_unit)
    init_state = cell.zero_state(batch_size, dtype=tf.float32)
    output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
    print(output_rnn)
    print(final_states)
    # output_rnn records the result of every LSTM output node; final_states is the result of the last cell
    output = tf.reshape(output_rnn, [-1, rnn_unit])
    final_output = final_states.h
    final_output = tf.reshape(final_output, [-1, rnn_unit])
    # input of the output layer
    w_out = weights['out']
    b_out = biases['out']
    pred = tf.matmul(final_output, w_out) + b_out
    # pred = tf.add(pred, 0, name="pred1")
    # tf.add_to_collection(name='pred', value=pred)
    return pred, final_states


def train_lstm(X, Y):
    pred, _ = lstm(X)
    # Loss function
    # this flattens the data into a single column
    loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
    # with tf.name_scope("train1"):
    train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    # tf.add_to_collection(name='train1', value=train_op)
    # train_op = tf.add(train_op, 0, name="train1")
    return pred, loss, train_op


X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
Y = tf.placeholder(tf.float32, shape=[None, output_size])

train_x, train_y, validation_x, validation_y, test_x, test_y, total_x, total_y = get_data(time_step, train_begin, train_end, validation_end)

with tf.Session() as sess:
    pred, loss, train_op = train_lstm(X, Y)
    sess.run(tf.global_variables_initializer())
    # Repeated offline training first
    train_loss_return = []
    validation_loss_return = np.zeros(iter_time)
    test_loss_return = []

    for i in range(iter_time):
        _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x, Y: train_y})
        validation_loss = sess.run(loss, feed_dict={X: validation_x, Y: validation_y})
        test_loss = sess.run(loss, feed_dict={X: test_x, Y: test_y})
        train_loss_return.append(train_loss)
        validation_loss_return[i] = validation_loss
        test_loss_return.append(0)
        print('iter:', i, 'train_loss:', train_loss, 'validation_loss', validation_loss, 'test_loss', test_loss)

    # Evaluate the performance of the pre-trained model
    train_predict_pre_training = sess.run(pred, feed_dict={X: train_x})
    train_predict_pre_training = train_predict_pre_training.reshape((-1))

    validation_predict_pre_training = sess.run(pred, feed_dict={X: validation_x})
    validation_predict_pre_training = validation_predict_pre_training.reshape((-1))


    # Save the model
    # saver = tf.train.Saver()
    # saver.save(sess, "save_net/net.ckpt")
    #
    # with tf.Session() as sess:
    #     saver = tf.train.import_meta_graph("save_net/net.ckpt.meta")
    #     saver.restore(sess, tf.train.latest_checkpoint("save_net"))
    #     graph = tf.get_default_graph()
    #     X = graph.get_tensor_by_name("X:0")
    #     Y = graph.get_tensor_by_name("Y:0")
    #     pred = graph.get_tensor_by_name("pred1:0")
    #     train_op = tf.get_collection('train1')
    #     # graph.get_operation_by_name()

    test_predict = []
    train_new_size = 100

    prediction_length1 = 5

    # First predict the value at the next time instant
    test_x_buff = total_x[validation_end - time_step]
    # test_x_buff1 = np.reshape(test_x_buff, [-1])
    # test_x_buff1 = test_x_buff1[np.newaxis, :, np.newaxis]
    test_x_buff = test_x_buff[np.newaxis, :, :]
    # print(test_x_buff == test_x_buff1)
    for i in range(1, prediction_length1):
        test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
        # test_loss_return.append(test_loss)
        test_predict.append(test_predict_result_1_buff)
        test_x_buff = total_x[validation_end - time_step + i]
        test_x_buff = np.reshape(test_x_buff, [-1])
        test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
        test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
    test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    test_predict.append(test_predict_result_1_buff)
    # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    # test_predict.append(test_predict_result_1_buff)
    # test_x_buff = total_x[validation_end - time_step + 2]
    # test_x_buff = np.reshape(test_x_buff, [-1])
    # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
    # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
    # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    # test_predict.append(test_predict_result_1_buff)
    # test_x_buff = total_x[validation_end - time_step + 3]
    # test_x_buff = np.reshape(test_x_buff, [-1])
    # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
    # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
    # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    # test_predict.append(test_predict_result_1_buff)
    # test_x_buff = total_x[validation_end - time_step + 4]
    # test_x_buff = np.reshape(test_x_buff, [-1])
    # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
    # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
    # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    # test_predict.append(test_predict_result_1_buff)

    for i in range(validation_end - time_step + prediction_length1, data_num - time_step, prediction_length1):

        # Retrain on the new data (partial-sample learning: only the most recently updated samples), sliding one step forward
        train_x_new = total_x[i - train_new_size:i]
        train_y_new = total_y[i - train_new_size:i]
        for j in range(iter_time_new):
            _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x_new, Y: train_y_new})
            train_loss_return.append(train_loss)

        # Predict the value at the next time instant
        test_x_buff = total_x[i]
        test_x_buff = test_x_buff[np.newaxis, :, :]
        for j in range(1, prediction_length1):
            test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
            # test_loss_return.append(test_loss)
            test_predict.append(test_predict_result_1_buff)
            test_x_buff = total_x[i + j]
            test_x_buff = np.reshape(test_x_buff, [-1])
            test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
            test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
        test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        test_predict.append(test_predict_result_1_buff)
        # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        # test_predict.append(test_predict_result_1_buff)
        # test_x_buff = total_x[i + 2]
        # test_x_buff = np.reshape(test_x_buff, [-1])
        # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
        # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
        # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        # test_predict.append(test_predict_result_1_buff)
        # test_x_buff = total_x[i + 3]
        # test_x_buff = np.reshape(test_x_buff, [-1])
        # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
        # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
        # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        # test_predict.append(test_predict_result_1_buff)
        # test_x_buff = total_x[i + 4]
        # test_x_buff = np.reshape(test_x_buff, [-1])
        # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
        # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
        # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        # test_predict.append(test_predict_result_1_buff)

    test_predict = np.reshape(test_predict, [-1])
    # test_predict = test_predict.reshape((-1))

    train_predict = sess.run(pred, feed_dict={X: train_x})
    train_predict = train_predict.reshape((-1))

    total_predict = sess.run(pred, feed_dict={X: total_x})
    total_predict = total_predict.reshape((-1))

    validation_predict = sess.run(pred, feed_dict={X: validation_x})
    validation_predict = validation_predict.reshape((-1))

    for i in range(len(train_loss_return)):
        ws.cell(row=i + 1, column=1).value = train_loss_return[i]
    for i in range(len(validation_loss_return)):
        ws.cell(row=i + 1, column=2).value = validation_loss_return[i]
    for i in range(len(test_loss_return)):
        ws.cell(row=i + 1, column=3).value = test_loss_return[i]

    for i in range(len(train_predict)):
        ws.cell(row=i + 1, column=4).value = train_predict[i]

    for i in range(len(validation_predict)):
        ws.cell(row=i + 1, column=5).value = validation_predict[i]

    for i in range(len(total_predict)):
        ws.cell(row=i + 1, column=6).value = total_predict[i]
    # Test-set performance during incremental learning
    for i in range(len(test_predict)):
        ws.cell(row=i + 1, column=7).value = test_predict[i]
    # Performance of the pre-trained model on the training set
    for i in range(len(train_predict_pre_training)):
        ws.cell(row=i + 1, column=8).value = train_predict_pre_training[i]
    # Performance of the pre-trained model on the validation set
    for i in range(len(validation_predict_pre_training)):
        ws.cell(row=i + 1, column=9).value = validation_predict_pre_training[i]


wb.save(filename="prediction_360_length_5.xlsx")

plt.figure(figsize=(24, 8))
plt.plot(y[:-1])
plt.plot([None for _ in range(train_end)] + [None for _ in range(train_end, validation_end)] + [x for x in test_predict])
plt.plot([m for m in train_predict] + [None for _ in range(train_end, data_num)])
plt.plot([None for _ in range(train_end)] + [n for n in validation_predict] + [None for _ in range(validation_end, data_num)])
plt.plot([m for m in train_predict_pre_training] + [None for _ in range(train_end, data_num)])
plt.plot([None for _ in range(train_end)] + [n for n in validation_predict_pre_training] + [None for _ in range(validation_end, data_num)])
# plt.plot([k for k in total_predict])
plt.legend(labels=['Real_data', 'Prediction on test', 'Prediction on train', 'Prediction on validation', 'Prediction on train_pre', 'Prediction on validation_pre'])
plt.show()

plt.figure()
plt.plot(train_loss_return[:-1])
plt.plot(validation_loss_return[:-1])
plt.plot(test_loss_return[:-1])
plt.legend(labels=['Loss on train', 'Loss on validation', 'Loss on test'])
plt.show()

--------------------------------------------------------------------------------
/multi-point prediction online/channel_90.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/channel_90.m

--------------------------------------------------------------------------------
/multi-point prediction online/channel_90_10.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/channel_90_10.m

--------------------------------------------------------------------------------
/multi-point prediction online/channel_90_16.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/channel_90_16.m

--------------------------------------------------------------------------------
/multi-point prediction online/channel_90_20.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/channel_90_20.m

--------------------------------------------------------------------------------
/multi-point prediction online/channel_90_length_10.py:
--------------------------------------------------------------------------------
# Load the common data-analysis libraries
from openpyxl import load_workbook
from openpyxl import Workbook
import numpy as np
import tensorflow as tf
# from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib.pyplot as plt
import scipy.io as scio
import warnings

book = load_workbook(filename=r"E:\PHD\0论文\0论文\TVT_2018\code\time series prediction_CSDN\multi-point prediction online\prediction_90_length_10.xlsx")
sheetnames = book.get_sheet_names()
sheet = book.get_sheet_by_name(sheetnames[0])

wb = Workbook()
ws = wb.active

dataFile = 'channel_data_90_10.mat'
data_com = scio.loadmat(dataFile)
data_real = np.abs(data_com['h'])
y = data_real[0][:8000]

warnings.filterwarnings('ignore')

# Key constants (the thesis defense mainly presents the influence of the two parameters below)
time_step = 5
iter_time = 200
iter_time_new = 1  # sliding-window training: one training pass per slide

# More constants
rnn_unit = 5  # hidden layer units
input_size = 1
output_size = 1
train_end = 4800
data_num = len(y)
lr = 0.03  # learning rate
batch_size = None  # the data set is small, so all of it is used without mini-batching
train_begin = 0
validation_end = 6400

tf.reset_default_graph()

# Weights and biases of the input and output layers
weights = {
    'in': tf.Variable(tf.random_normal([input_size, rnn_unit])),
    'out': tf.Variable(tf.random_normal([rnn_unit, 1]))
}
biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[rnn_unit, ])),
    'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
}


def get_data(time_step, train_begin, train_end, validation_end):
    data_m, data_validation_m, data_test_m, data_total_m = [], [], [], []

    # Subtract one here: the "1" leaves room for the final prediction
    for i in range(train_end - time_step):
        data_m.append(y[i:i + time_step])

    # The dimensions here may need further thought
    data_x = np.reshape(data_m, [-1, time_step])
    data_train_x = data_x[:, :, np.newaxis]
    data_train_y = y[train_begin + time_step:train_end]
    data_train_y = np.reshape(data_train_y, [-1, 1])
    # data_train_y = data_y[:, :, np.newaxis]

    # Mini-batch processing
    # for i in range(np.shape(data_x)[0] - batch_size):
    #     if i % batch_size == 0:
    #         batch_index.append(i)
    #     data_m_x.append(data_x[i:i + batch_size, :input_size])
    #     data_m_y.append(data_y[i:i + batch_size, np.newaxis])
    #
    # data_train_x = np.reshape(data_m_x, [-1, time_step, input_size])
    # data_train_y = np.reshape(data_m_y, [-1, time_step, output_size])

    for i in range(train_end - time_step, validation_end - time_step):
        data_validation_m.append(y[i:i + time_step])
    data_validation_x = np.reshape(data_validation_m, [-1, time_step])
    data_validation_x = data_validation_x[:, :, np.newaxis]
    data_validation_y = y[train_end:validation_end]
    data_validation_y = np.reshape(data_validation_y, [-1, 1])

    for i in range(validation_end - time_step, len(y) - time_step):
        data_test_m.append(y[i:i + time_step])
    data_test_x = np.reshape(data_test_m, [-1, time_step])
    data_test_x = data_test_x[:, :, np.newaxis]
    data_test_y = y[validation_end:len(y)]
    data_test_y = np.reshape(data_test_y, [-1, 1])

    # Build the complete data set in preparation for the sliding-training updates
    for i in range(len(y) - time_step):
        data_total_m.append(y[i:i + time_step])
    data_total_x = np.reshape(data_total_m, [-1, time_step])
    data_total_x = data_total_x[:, :, np.newaxis]
    data_total_y = y[time_step:len(y)]
    data_total_y = np.reshape(data_total_y, [-1, 1])

    # data_test_m_x_m, data_test_m_y_m = [], []
    # for i in range(np.shape(data_test_m_x)[0] - time_step):
    #     data_test_m_x_m.append(data_test_m_x[i:i+time_step, :input_size])
    #     data_test_m_y_m.append(data_test_m_y[i:i+time_step, np.newaxis])
    #
    # data_test_x = np.reshape(data_test_m_x_m, [-1, time_step, input_size])
    # data_test_y = np.reshape(data_test_m_y_m, [-1, time_step, output_size])

    return data_train_x, data_train_y, data_validation_x, data_validation_y, data_test_x, data_test_y, data_total_x, data_total_y


def lstm(X):
    batch_size = tf.shape(X)[0]
    time_step = tf.shape(X)[1]
    w_in = weights['in']
    b_in = biases['in']
    input = tf.reshape(X, [-1, input_size])  # reshape the tensor to 2-D for computation; the result becomes the hidden-layer input
    input_rnn = tf.matmul(input, w_in) + b_in
    input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])  # reshape the tensor back to 3-D as the input of the LSTM cell
    cell = tf.contrib.rnn.BasicLSTMCell(rnn_unit)
    # cell=tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_unit)
    init_state = cell.zero_state(batch_size, dtype=tf.float32)
    output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
    print(output_rnn)
    print(final_states)
    # output_rnn records the result of every LSTM output node; final_states is the result of the last cell
    output = tf.reshape(output_rnn, [-1, rnn_unit])
    final_output = final_states.h
    final_output = tf.reshape(final_output, [-1, rnn_unit])
    # input of the output layer
    w_out = weights['out']
    b_out = biases['out']
    pred = tf.matmul(final_output, w_out) + b_out
    # pred = tf.add(pred, 0, name="pred1")
    # tf.add_to_collection(name='pred', value=pred)
    return pred, final_states


def train_lstm(X, Y):
    pred, _ = lstm(X)
    # Loss function
    # this flattens the data into a single column
    loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
    # with tf.name_scope("train1"):
    train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    # tf.add_to_collection(name='train1', value=train_op)
    # train_op = tf.add(train_op, 0, name="train1")
    return pred, loss, train_op


X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
Y = tf.placeholder(tf.float32, shape=[None, output_size])

train_x, train_y, validation_x, validation_y, test_x, test_y, total_x, total_y = get_data(time_step, train_begin, train_end, validation_end)

with tf.Session() as sess:
    pred, loss, train_op = train_lstm(X, Y)
    sess.run(tf.global_variables_initializer())
    # Repeated offline training first
    train_loss_return = []
    validation_loss_return = np.zeros(iter_time)
    test_loss_return = []

    for i in range(iter_time):
        _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x, Y: train_y})
        validation_loss = sess.run(loss, feed_dict={X: validation_x, Y: validation_y})
        test_loss = sess.run(loss, feed_dict={X: test_x, Y: test_y})
        train_loss_return.append(train_loss)
        validation_loss_return[i] = validation_loss
        test_loss_return.append(0)
        print('iter:', i, 'train_loss:', train_loss, 'validation_loss', validation_loss, 'test_loss', test_loss)

    # Evaluate the performance of the pre-trained model
    train_predict_pre_training = sess.run(pred, feed_dict={X: train_x})
    train_predict_pre_training = train_predict_pre_training.reshape((-1))

    validation_predict_pre_training = sess.run(pred, feed_dict={X: validation_x})
    validation_predict_pre_training = validation_predict_pre_training.reshape((-1))


    # Save the model
    # saver = tf.train.Saver()
    # saver.save(sess, "save_net/net.ckpt")
    #
    # with tf.Session() as sess:
    #     saver = tf.train.import_meta_graph("save_net/net.ckpt.meta")
    #     saver.restore(sess, tf.train.latest_checkpoint("save_net"))
    #     graph = tf.get_default_graph()
150 | X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
151 | Y = tf.placeholder(tf.float32, shape=[None, output_size])
152 | 
153 | train_x, train_y, validation_x, validation_y, test_x, test_y, total_x, total_y = get_data(time_step, train_begin, train_end, validation_end)
154 | 
155 | with tf.Session() as sess:
156 |     pred, loss, train_op = train_lstm(X, Y)
157 |     sess.run(tf.global_variables_initializer())
158 |     # repeated pre-training first
159 |     train_loss_return = []
160 |     validation_loss_return = np.zeros(iter_time)
161 |     test_loss_return = []
162 | 
163 |     for i in range(iter_time):
164 |         _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x, Y: train_y})
165 |         validation_loss = sess.run(loss, feed_dict={X: validation_x, Y: validation_y})
166 |         test_loss = sess.run(loss, feed_dict={X: test_x, Y: test_y})
167 |         train_loss_return.append(train_loss)
168 |         validation_loss_return[i] = validation_loss
169 |         test_loss_return.append(0)  # note: the pre-training test loss is printed below, but a 0 is stored here
170 |         print('iter:', i, 'train_loss:', train_loss, 'validation_loss', validation_loss, 'test_loss', test_loss)
171 | 
172 |     # record the performance of the pre-trained model
173 |     train_predict_pre_training = sess.run(pred, feed_dict={X: train_x})
174 |     train_predict_pre_training = train_predict_pre_training.reshape((-1))
175 | 
176 |     validation_predict_pre_training = sess.run(pred, feed_dict={X: validation_x})
177 |     validation_predict_pre_training = validation_predict_pre_training.reshape((-1))
178 | 
179 | 
180 |     # Save the model
181 |     # saver = tf.train.Saver()
182 |     # saver.save(sess, "save_net/net.ckpt")
183 |     #
184 |     # with tf.Session() as sess:
185 |     #     saver = tf.train.import_meta_graph("save_net/net.ckpt.meta")
186 |     #     saver.restore(sess, tf.train.latest_checkpoint("save_net"))
187 |     #     graph = tf.get_default_graph()
188 |     #     X = graph.get_tensor_by_name("X:0")
189 |     #     Y = graph.get_tensor_by_name("Y:0")
190 |     #     pred = graph.get_tensor_by_name("pred1:0")
191 |     #     train_op = tf.get_collection('train1')
192 |     #     # graph.get_operation_by_name()
193 | 
194 |     test_predict = []
195 |     train_new_size = 100
196 | 
197 |     prediction_length1 = 10
198 | 
199 |     # first predict the value at the next time instant
200 |     test_x_buff = total_x[validation_end - time_step]
201 |     # test_x_buff1 = np.reshape(test_x_buff, [-1])
202 |     # test_x_buff1 = test_x_buff1[np.newaxis, :, np.newaxis]
203 |     test_x_buff = test_x_buff[np.newaxis, :, :]
204 |     # print(test_x_buff == test_x_buff1)
205 |     for i in range(1, prediction_length1):
206 |         test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
207 |         # test_loss_return.append(test_loss)
208 |         test_predict.append(test_predict_result_1_buff)
209 |         test_x_buff = total_x[validation_end - time_step + i]
210 |         test_x_buff = np.reshape(test_x_buff, [-1])
211 |         test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
212 |         test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
213 |     test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
214 |     test_predict.append(test_predict_result_1_buff)
215 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
216 |     # test_predict.append(test_predict_result_1_buff)
217 |     # test_x_buff = total_x[validation_end - time_step + 2]
218 |     # test_x_buff = np.reshape(test_x_buff, [-1])
219 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
220 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
221 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
222 |     # test_predict.append(test_predict_result_1_buff)
223 |     # test_x_buff = total_x[validation_end - time_step + 3]
224 |     # test_x_buff = np.reshape(test_x_buff, [-1])
225 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
226 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
227 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
228 |     # test_predict.append(test_predict_result_1_buff)
229 |     # test_x_buff = total_x[validation_end - time_step + 4]
230 |     # test_x_buff = np.reshape(test_x_buff, [-1])
231 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
232 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
233 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
234 |     # test_predict.append(test_predict_result_1_buff)
235 | 
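The block above produces the first prediction_length1-point block in closed loop: the first window holds only observed samples, and each later window is taken from the recorded series with just its newest slot overwritten by the preceding model output (the test_x_buff[time_step-1] assignment); the older slots keep their recorded values. Reduced to its essentials, the recursion is (a sketch only; predict_one stands in for a sess.run(pred, ...) call):

def predict_block(total_x, start, length, predict_one):
    preds = [predict_one(total_x[start][np.newaxis, :, :])]
    for j in range(1, length):
        w = total_x[start + j].copy().reshape(-1)
        w[-1] = np.reshape(preds[-1], [-1])[0]  # newest slot <- previous prediction
        preds.append(predict_one(w[np.newaxis, :, np.newaxis]))
    return preds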
236 |     for i in range(validation_end - time_step + prediction_length1, data_num - time_step, prediction_length1):
237 | 
238 |         # retrain on the new data (partial-sample learning, using only the most recently updated samples), then slide onwards
239 |         train_x_new = total_x[i - train_new_size:i]
240 |         train_y_new = total_y[i - train_new_size:i]
241 |         for j in range(iter_time_new):
242 |             _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x_new, Y: train_y_new})
243 |             train_loss_return.append(train_loss)
244 | 
245 |         # predict the value at the next time instant
246 |         test_x_buff = total_x[i]
247 |         test_x_buff = test_x_buff[np.newaxis, :, :]
248 |         for j in range(1, prediction_length1):
249 |             test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
250 |             # test_loss_return.append(test_loss)
251 |             test_predict.append(test_predict_result_1_buff)
252 |             test_x_buff = total_x[i + j]
253 |             test_x_buff = np.reshape(test_x_buff, [-1])
254 |             test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
255 |             test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
256 |         test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
257 |         test_predict.append(test_predict_result_1_buff)
258 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
259 |         # test_predict.append(test_predict_result_1_buff)
260 |         # test_x_buff = total_x[i + 2]
261 |         # test_x_buff = np.reshape(test_x_buff, [-1])
262 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
263 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
264 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
265 |         # test_predict.append(test_predict_result_1_buff)
266 |         # test_x_buff = total_x[i + 3]
267 |         # test_x_buff = np.reshape(test_x_buff, [-1])
268 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
269 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
270 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
271 |         # test_predict.append(test_predict_result_1_buff)
272 |         # test_x_buff = total_x[i + 4]
273 |         # test_x_buff = np.reshape(test_x_buff, [-1])
274 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
275 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
276 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
277 |         # test_predict.append(test_predict_result_1_buff)
278 | 
279 |     test_predict = np.reshape(test_predict, [-1])
280 |     # test_predict = test_predict.reshape((-1))
281 | 
282 |     train_predict = sess.run(pred, feed_dict={X: train_x})
283 |     train_predict = train_predict.reshape((-1))
284 | 
285 |     total_predict = sess.run(pred, feed_dict={X: total_x})
286 |     total_predict = total_predict.reshape((-1))
287 | 
288 |     validation_predict = sess.run(pred, feed_dict={X: validation_x})
289 |     validation_predict = validation_predict.reshape((-1))
290 | 
291 |     for i in range(len(train_loss_return)):
292 |         ws.cell(row=i + 1, column=1).value = train_loss_return[i]
293 |     for i in range(len(validation_loss_return)):
294 |         ws.cell(row=i + 1, column=2).value = validation_loss_return[i]
295 |     for i in range(len(test_loss_return)):
296 |         ws.cell(row=i + 1, column=3).value = test_loss_return[i]
297 | 
298 |     for i in range(len(train_predict)):
299 |         ws.cell(row=i + 1, column=4).value = train_predict[i]
300 | 
301 |     for i in range(len(validation_predict)):
302 |         ws.cell(row=i + 1, column=5).value = validation_predict[i]
303 | 
304 |     for i in range(len(total_predict)):
305 |         ws.cell(row=i + 1, column=6).value = total_predict[i]
306 |     # test-set performance during incremental learning
307 |     for i in range(len(test_predict)):
308 |         ws.cell(row=i + 1, column=7).value = test_predict[i]
309 |     # performance on the pre-training training set
310 |     for i in range(len(train_predict_pre_training)):
311 |         ws.cell(row=i + 1, column=8).value = train_predict_pre_training[i]
312 |     # performance on the pre-training validation set
313 |     for i in range(len(validation_predict_pre_training)):
314 |         ws.cell(row=i + 1, column=9).value = validation_predict_pre_training[i]
315 | 
316 | 
317 | wb.save(filename="prediction_90_length_10.xlsx")
318 | 
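Columns 1-9 of the workbook hold, in order: the training-loss trace, validation loss, the zero-filled test-loss placeholder, and the train/validation/total/online-test/pre-train/pre-validation prediction series. The online test predictions line up with y[validation_end:], so an error summary can be computed afterwards, for example (a post-processing sketch under that alignment assumption, not code from the repository):

truth = y[validation_end:validation_end + len(test_predict)]
mse = np.mean((test_predict - truth) ** 2)
nmse = mse / np.mean(truth ** 2)  # normalized MSE of the online multi-point predictions
print('online test MSE: %.4e, NMSE: %.4e' % (mse, nmse))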
319 | plt.figure(figsize=(24, 8))
320 | plt.plot(y[:-1])
321 | plt.plot([None for _ in range(train_end)] + [None for _ in range(train_end, validation_end)] + [x for x in test_predict])
322 | plt.plot([m for m in train_predict] + [None for _ in range(train_end, data_num)])
323 | plt.plot([None for _ in range(train_end)] + [n for n in validation_predict] + [None for _ in range(validation_end, data_num)])
324 | plt.plot([m for m in train_predict_pre_training] + [None for _ in range(train_end, data_num)])
325 | plt.plot([None for _ in range(train_end)] + [n for n in validation_predict_pre_training] + [None for _ in range(validation_end, data_num)])
326 | # plt.plot([k for k in total_predict])
327 | plt.legend(labels=['Real_data', 'Prediction on test', 'Prediction on train', 'Prediction on validation', 'Prediction on train_pre', 'Prediction on validation_pre'])
328 | plt.show()
329 | 
330 | plt.figure()
331 | plt.plot(train_loss_return[:-1])
332 | plt.plot(validation_loss_return[:-1])
333 | plt.plot(test_loss_return[:-1])
334 | plt.legend(labels=['Loss on train', 'Loss on validation', 'Loss on test'])
335 | plt.show()
336 | 
337 | 
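The four channel_90_length_*.py scripts below and above are identical apart from two values: the workbook filename and prediction_length1 (5, 10, 16 or 20); channel_180_length_5.py and channel_360_length_5.py additionally swap in the other .mat files. They could equally be folded into a single parameterized script, e.g. (an illustrative argparse front end, not part of the repository):

import argparse

parser = argparse.ArgumentParser(description='online multi-point channel prediction')
parser.add_argument('--length', type=int, default=5, help='value for prediction_length1')
parser.add_argument('--data', default='channel_data_90_10.mat', help='input channel data file')
args = parser.parse_args()

prediction_length1 = args.length
dataFile = args.data
workbook_name = 'prediction_90_length_%d.xlsx' % prediction_length1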
--------------------------------------------------------------------------------
/multi-point prediction online/channel_90_length_16.py:
--------------------------------------------------------------------------------
1 | # Load the usual data-analysis libraries
2 | from openpyxl import load_workbook
3 | from openpyxl import Workbook
4 | import numpy as np
5 | import tensorflow as tf
6 | # from sklearn.metrics import mean_absolute_error, mean_squared_error
7 | import matplotlib.pyplot as plt
8 | import scipy.io as scio
9 | import warnings
10 | 
11 | book = load_workbook(filename=r"E:\PHD\0论文\0论文\TVT_2018\code\time series prediction_CSDN\multi-point prediction online\prediction_90_length_16.xlsx")
12 | sheetnames = book.get_sheet_names()
13 | sheet = book.get_sheet_by_name(sheetnames[0])
14 | 
15 | wb = Workbook()
16 | ws = wb.active
17 | 
18 | dataFile = 'channel_data_90_10.mat'
19 | data_com = scio.loadmat(dataFile)
20 | data_real = np.abs(data_com['h'])
21 | y = data_real[0][:8000]
22 | 
23 | warnings.filterwarnings('ignore')
24 | 
25 | # Define constants (the thesis defense mainly presents the influence of the two parameters below)
26 | time_step = 5
27 | iter_time = 200
28 | iter_time_new = 1 # sliding-window training: one training pass per slide
29 | 
30 | # Define constants
31 | rnn_unit = 5 # hidden layer units
32 | input_size = 1
33 | output_size = 1
34 | train_end = 4800
35 | data_num = len(y)
36 | lr = 0.03 # learning rate
37 | batch_size = None # the data set is small, so all of it is used at once, without mini-batching
38 | train_begin = 0
39 | validation_end = 6400
40 | 
41 | tf.reset_default_graph()
42 | 
43 | # weights and biases of the input and output layers
44 | weights = {
45 |     'in': tf.Variable(tf.random_normal([input_size, rnn_unit])),
46 |     'out': tf.Variable(tf.random_normal([rnn_unit, 1]))
47 | }
48 | biases = {
49 |     'in': tf.Variable(tf.constant(0.1, shape=[rnn_unit, ])),
50 |     'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
51 | }
52 | 
53 | 
54 | def get_data(time_step, train_begin, train_end, validation_end):
55 |     data_m, data_validation_m, data_test_m, data_total_m = [], [], [], []
56 | 
57 |     # one must be subtracted here: the "1" leaves a slot free for the final prediction
58 |     for i in range(train_end - time_step):
59 |         data_m.append(y[i:i + time_step])
60 | 
61 |     # the dimensions here may deserve a second look
62 |     data_x = np.reshape(data_m, [-1, time_step])
63 |     data_train_x = data_x[:, :, np.newaxis]
64 |     data_train_y = y[train_begin + time_step:train_end]
65 |     data_train_y = np.reshape(data_train_y, [-1, 1])
66 |     # data_train_y = data_y[:, :, np.newaxis]
67 | 
68 |     # mini-batch processing
69 |     # for i in range(np.shape(data_x)[0] - batch_size):
70 |     #     if i % batch_size == 0:
71 |     #         batch_index.append(i)
72 |     #     data_m_x.append(data_x[i:i + batch_size, :input_size])
73 |     #     data_m_y.append(data_y[i:i + batch_size, np.newaxis])
74 |     #
75 |     # data_train_x = np.reshape(data_m_x, [-1, time_step, input_size])
76 |     # data_train_y = np.reshape(data_m_y, [-1, time_step, output_size])
77 | 
78 |     for i in range(train_end - time_step, validation_end - time_step):
79 |         data_validation_m.append(y[i:i + time_step])
80 |     data_validation_x = np.reshape(data_validation_m, [-1, time_step])
81 |     data_validation_x = data_validation_x[:, :, np.newaxis]
82 |     data_validation_y = y[train_end:validation_end]
83 |     data_validation_y = np.reshape(data_validation_y, [-1, 1])
84 | 
85 |     for i in range(validation_end - time_step, len(y) - time_step):
86 |         data_test_m.append(y[i:i + time_step])
87 |     data_test_x = np.reshape(data_test_m, [-1, time_step])
88 |     data_test_x = data_test_x[:, :, np.newaxis]
89 |     data_test_y = y[validation_end:len(y)]
90 |     data_test_y = np.reshape(data_test_y, [-1, 1])
91 | 
92 |     # build the complete data set, in preparation for the sliding-window training updates
93 |     for i in range(len(y) - time_step):
94 |         data_total_m.append(y[i:i + time_step])
95 |     data_total_x = np.reshape(data_total_m, [-1, time_step])
96 |     data_total_x = data_total_x[:, :, np.newaxis]
97 |     data_total_y = y[time_step:len(y)]
98 |     data_total_y = np.reshape(data_total_y, [-1, 1])
99 | 
100 |     # data_test_m_x_m, data_test_m_y_m = [], []
101 |     # for i in range(np.shape(data_test_m_x)[0] - time_step):
102 |     #     data_test_m_x_m.append(data_test_m_x[i:i+time_step, :input_size])
103 |     #     data_test_m_y_m.append(data_test_m_y[i:i+time_step, np.newaxis])
104 |     #
105 |     # data_test_x = np.reshape(data_test_m_x_m, [-1, time_step, input_size])
106 |     # data_test_y = np.reshape(data_test_m_y_m, [-1, time_step, output_size])
107 | 
108 |     return data_train_x, data_train_y, data_validation_x, data_validation_y, data_test_x, data_test_y, data_total_x, data_total_y
109 | 
110 | 
111 | def lstm(X):
112 |     batch_size = tf.shape(X)[0]
113 |     time_step = tf.shape(X)[1]
114 |     w_in = weights['in']
115 |     b_in = biases['in']
116 |     input = tf.reshape(X, [-1, input_size]) # the tensor must be reshaped to 2-D for the computation; the result is the hidden layer's input
117 |     input_rnn = tf.matmul(input, w_in) + b_in
118 |     input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit]) # reshape back to 3-D, as the input of the LSTM cell
119 |     cell = tf.contrib.rnn.BasicLSTMCell(rnn_unit)
120 |     # cell=tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_unit)
121 |     init_state = cell.zero_state(batch_size, dtype=tf.float32)
122 |     output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
123 |     print(output_rnn)
124 |     print(final_states)
125 |     # output_rnn records the result at every LSTM output node; final_states is the result of the last cell
126 |     output = tf.reshape(output_rnn, [-1, rnn_unit])
127 |     final_output = final_states.h
128 |     final_output = tf.reshape(final_output, [-1, rnn_unit])
129 |     # input to the output layer
130 |     w_out = weights['out']
131 |     b_out = biases['out']
132 |     pred = tf.matmul(final_output, w_out) + b_out
133 |     # pred = tf.add(pred, 0, name="pred1")
134 |     # tf.add_to_collection(name='pred', value=pred)
135 |     return pred, final_states
136 | 
137 | 
138 | def train_lstm(X, Y):
139 |     pred, _ = lstm(X)
140 |     # loss function
141 |     # this flattens the data into a single column
142 |     loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
143 |     # with tf.name_scope("train1"):
144 |     train_op = tf.train.AdamOptimizer(lr).minimize(loss)
145 |     # tf.add_to_collection(name='train1', value=train_op)
146 |     # train_op = tf.add(train_op, 0, name="train1")
147 |     return pred, loss, train_op
148 | 
149 | 
150 | X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
151 | Y = tf.placeholder(tf.float32, shape=[None, output_size])
152 | 
153 | train_x, train_y, validation_x, validation_y, test_x, test_y, total_x, total_y = get_data(time_step, train_begin, train_end, validation_end)
154 | 
155 | with tf.Session() as sess:
156 |     pred, loss, train_op = train_lstm(X, Y)
157 |     sess.run(tf.global_variables_initializer())
158 |     # repeated pre-training first
159 |     train_loss_return = []
160 |     validation_loss_return = np.zeros(iter_time)
161 |     test_loss_return = []
162 | 
163 |     for i in range(iter_time):
164 |         _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x, Y: train_y})
165 |         validation_loss = sess.run(loss, feed_dict={X: validation_x, Y: validation_y})
166 |         test_loss = sess.run(loss, feed_dict={X: test_x, Y: test_y})
167 |         train_loss_return.append(train_loss)
168 |         validation_loss_return[i] = validation_loss
169 |         test_loss_return.append(0)  # note: the pre-training test loss is printed below, but a 0 is stored here
170 |         print('iter:', i, 'train_loss:', train_loss, 'validation_loss', validation_loss, 'test_loss', test_loss)
171 | 
172 |     # record the performance of the pre-trained model
173 |     train_predict_pre_training = sess.run(pred, feed_dict={X: train_x})
174 |     train_predict_pre_training = train_predict_pre_training.reshape((-1))
175 | 
176 |     validation_predict_pre_training = sess.run(pred, feed_dict={X: validation_x})
177 |     validation_predict_pre_training = validation_predict_pre_training.reshape((-1))
178 | 
179 | 
180 |     # Save the model
181 |     # saver = tf.train.Saver()
182 |     # saver.save(sess, "save_net/net.ckpt")
183 |     #
184 |     # with tf.Session() as sess:
185 |     #     saver = tf.train.import_meta_graph("save_net/net.ckpt.meta")
186 |     #     saver.restore(sess, tf.train.latest_checkpoint("save_net"))
187 |     #     graph = tf.get_default_graph()
188 |     #     X = graph.get_tensor_by_name("X:0")
189 |     #     Y = graph.get_tensor_by_name("Y:0")
190 |     #     pred = graph.get_tensor_by_name("pred1:0")
191 |     #     train_op = tf.get_collection('train1')
192 |     #     # graph.get_operation_by_name()
193 | 
194 |     test_predict = []
195 |     train_new_size = 100
196 | 
197 |     prediction_length1 = 16
198 | 
199 |     # first predict the value at the next time instant
200 |     test_x_buff = total_x[validation_end - time_step]
201 |     # test_x_buff1 = np.reshape(test_x_buff, [-1])
202 |     # test_x_buff1 = test_x_buff1[np.newaxis, :, np.newaxis]
203 |     test_x_buff = test_x_buff[np.newaxis, :, :]
204 |     # print(test_x_buff == test_x_buff1)
205 |     for i in range(1, prediction_length1):
206 |         test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
207 |         # test_loss_return.append(test_loss)
208 |         test_predict.append(test_predict_result_1_buff)
209 |         test_x_buff = total_x[validation_end - time_step + i]
210 |         test_x_buff = np.reshape(test_x_buff, [-1])
211 |         test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
212 |         test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
213 |     test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
214 |     test_predict.append(test_predict_result_1_buff)
215 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
216 |     # test_predict.append(test_predict_result_1_buff)
217 |     # test_x_buff = total_x[validation_end - time_step + 2]
218 |     # test_x_buff = np.reshape(test_x_buff, [-1])
219 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
220 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
221 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
222 |     # test_predict.append(test_predict_result_1_buff)
223 |     # test_x_buff = total_x[validation_end - time_step + 3]
224 |     # test_x_buff = np.reshape(test_x_buff, [-1])
225 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
226 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
227 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
228 |     # test_predict.append(test_predict_result_1_buff)
229 |     # test_x_buff = total_x[validation_end - time_step + 4]
230 |     # test_x_buff = np.reshape(test_x_buff, [-1])
231 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
232 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
233 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
234 |     # test_predict.append(test_predict_result_1_buff)
235 | 
236 |     for i in range(validation_end - time_step + prediction_length1, data_num - time_step, prediction_length1):
237 | 
238 |         # retrain on the new data (partial-sample learning, using only the most recently updated samples), then slide onwards
239 |         train_x_new = total_x[i - train_new_size:i]
240 |         train_y_new = total_y[i - train_new_size:i]
241 |         for j in range(iter_time_new):
242 |             _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x_new, Y: train_y_new})
243 |             train_loss_return.append(train_loss)
244 | 
245 |         # predict the value at the next time instant
246 |         test_x_buff = total_x[i]
247 |         test_x_buff = test_x_buff[np.newaxis, :, :]
248 |         for j in range(1, prediction_length1):
249 |             test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
250 |             # test_loss_return.append(test_loss)
251 |             test_predict.append(test_predict_result_1_buff)
252 |             test_x_buff = total_x[i + j]
253 |             test_x_buff = np.reshape(test_x_buff, [-1])
254 |             test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
255 |             test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
256 |         test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
257 |         test_predict.append(test_predict_result_1_buff)
258 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
259 |         # test_predict.append(test_predict_result_1_buff)
260 |         # test_x_buff = total_x[i + 2]
261 |         # test_x_buff = np.reshape(test_x_buff, [-1])
262 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
263 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
264 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
265 |         # test_predict.append(test_predict_result_1_buff)
266 |         # test_x_buff = total_x[i + 3]
267 |         # test_x_buff = np.reshape(test_x_buff, [-1])
268 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
269 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
270 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
271 |         # test_predict.append(test_predict_result_1_buff)
272 |         # test_x_buff = total_x[i + 4]
273 |         # test_x_buff = np.reshape(test_x_buff, [-1])
274 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
275 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
276 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
277 |         # test_predict.append(test_predict_result_1_buff)
278 | 
279 |     test_predict = np.reshape(test_predict, [-1])
280 |     # test_predict = test_predict.reshape((-1))
281 | 
282 |     train_predict = sess.run(pred, feed_dict={X: train_x})
283 |     train_predict = train_predict.reshape((-1))
284 | 
285 |     total_predict = sess.run(pred, feed_dict={X: total_x})
286 |     total_predict = total_predict.reshape((-1))
287 | 
288 |     validation_predict = sess.run(pred, feed_dict={X: validation_x})
289 |     validation_predict = validation_predict.reshape((-1))
290 | 
291 |     for i in range(len(train_loss_return)):
292 |         ws.cell(row=i + 1, column=1).value = train_loss_return[i]
293 |     for i in range(len(validation_loss_return)):
294 |         ws.cell(row=i + 1, column=2).value = validation_loss_return[i]
295 |     for i in range(len(test_loss_return)):
296 |         ws.cell(row=i + 1, column=3).value = test_loss_return[i]
297 | 
298 |     for i in range(len(train_predict)):
299 |         ws.cell(row=i + 1, column=4).value = train_predict[i]
300 | 
301 |     for i in range(len(validation_predict)):
302 |         ws.cell(row=i + 1, column=5).value = validation_predict[i]
303 | 
304 |     for i in range(len(total_predict)):
305 |         ws.cell(row=i + 1, column=6).value = total_predict[i]
306 |     # test-set performance during incremental learning
307 |     for i in range(len(test_predict)):
308 |         ws.cell(row=i + 1, column=7).value = test_predict[i]
309 |     # performance on the pre-training training set
310 |     for i in range(len(train_predict_pre_training)):
311 |         ws.cell(row=i + 1, column=8).value = train_predict_pre_training[i]
312 |     # performance on the pre-training validation set
313 |     for i in range(len(validation_predict_pre_training)):
314 |         ws.cell(row=i + 1, column=9).value = validation_predict_pre_training[i]
315 | 
316 | 
317 | wb.save(filename="prediction_90_length_16.xlsx")
318 | 
319 | plt.figure(figsize=(24, 8))
320 | plt.plot(y[:-1])
321 | plt.plot([None for _ in range(train_end)] + [None for _ in range(train_end, validation_end)] + [x for x in test_predict])
322 | plt.plot([m for m in train_predict] + [None for _ in range(train_end, data_num)])
323 | plt.plot([None for _ in range(train_end)] + [n for n in validation_predict] + [None for _ in range(validation_end, data_num)])
324 | plt.plot([m for m in train_predict_pre_training] + [None for _ in range(train_end, data_num)])
325 | plt.plot([None for _ in range(train_end)] + [n for n in validation_predict_pre_training] + [None for _ in range(validation_end, data_num)])
326 | # plt.plot([k for k in total_predict])
327 | plt.legend(labels=['Real_data', 'Prediction on test', 'Prediction on train', 'Prediction on validation', 'Prediction on train_pre', 'Prediction on validation_pre'])
328 | plt.show()
329 | 
330 | plt.figure()
331 | plt.plot(train_loss_return[:-1])
332 | plt.plot(validation_loss_return[:-1])
333 | plt.plot(test_loss_return[:-1])
334 | plt.legend(labels=['Loss on train', 'Loss on validation', 'Loss on test'])
335 | plt.show()
336 | 
337 | 
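Since the online stage spans the 1600 test samples y[6400:8000] and each round of the outer loop retrains once on the last train_new_size windows and then predicts one block, the number of retrain/predict rounds drops as the block length grows. The loop ranges check out exactly (a quick verification):

time_step, validation_end, data_num = 5, 6400, 8000
for L in (5, 10, 16, 20):
    rounds = len(range(validation_end - time_step + L, data_num - time_step, L))
    print('length %2d: %3d rounds in the loop' % (L, rounds))
# -> 319, 159, 99 and 79 rounds; counting the initial block before the loop,
#    (rounds + 1) * L = 1600 predicted samples in every case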
--------------------------------------------------------------------------------
/multi-point prediction online/channel_90_length_20.py:
--------------------------------------------------------------------------------
1 | # Load the usual data-analysis libraries
2 | from openpyxl import load_workbook
3 | from openpyxl import Workbook
4 | import numpy as np
5 | import tensorflow as tf
6 | # from sklearn.metrics import mean_absolute_error, mean_squared_error
7 | import matplotlib.pyplot as plt
8 | import scipy.io as scio
9 | import warnings
10 | 
11 | book = load_workbook(filename=r"E:\PHD\0论文\0论文\TVT_2018\code\time series prediction_CSDN\multi-point prediction online\prediction_90_length_20.xlsx")
12 | sheetnames = book.get_sheet_names()
13 | sheet = book.get_sheet_by_name(sheetnames[0])
14 | 
15 | wb = Workbook()
16 | ws = wb.active
17 | 
18 | dataFile = 'channel_data_90_10.mat'
19 | data_com = scio.loadmat(dataFile)
20 | data_real = np.abs(data_com['h'])
21 | y = data_real[0][:8000]
22 | 
23 | warnings.filterwarnings('ignore')
24 | 
25 | # Define constants (the thesis defense mainly presents the influence of the two parameters below)
26 | time_step = 5
27 | iter_time = 200
28 | iter_time_new = 1 # sliding-window training: one training pass per slide
29 | 
30 | # Define constants
31 | rnn_unit = 5 # hidden layer units
32 | input_size = 1
33 | output_size = 1
34 | train_end = 4800
35 | data_num = len(y)
36 | lr = 0.03 # learning rate
37 | batch_size = None # the data set is small, so all of it is used at once, without mini-batching
38 | train_begin = 0
39 | validation_end = 6400
40 | 
41 | tf.reset_default_graph()
42 | 
43 | # weights and biases of the input and output layers
44 | weights = {
45 |     'in': tf.Variable(tf.random_normal([input_size, rnn_unit])),
46 |     'out': tf.Variable(tf.random_normal([rnn_unit, 1]))
47 | }
48 | biases = {
49 |     'in': tf.Variable(tf.constant(0.1, shape=[rnn_unit, ])),
50 |     'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
51 | }
52 | 
53 | 
54 | def get_data(time_step, train_begin, train_end, validation_end):
55 |     data_m, data_validation_m, data_test_m, data_total_m = [], [], [], []
56 | 
57 |     # one must be subtracted here: the "1" leaves a slot free for the final prediction
58 |     for i in range(train_end - time_step):
59 |         data_m.append(y[i:i + time_step])
60 | 
61 |     # the dimensions here may deserve a second look
62 |     data_x = np.reshape(data_m, [-1, time_step])
63 |     data_train_x = data_x[:, :, np.newaxis]
64 |     data_train_y = y[train_begin + time_step:train_end]
65 |     data_train_y = np.reshape(data_train_y, [-1, 1])
66 |     # data_train_y = data_y[:, :, np.newaxis]
67 | 
68 |     # mini-batch processing
69 |     # for i in range(np.shape(data_x)[0] - batch_size):
70 |     #     if i % batch_size == 0:
71 |     #         batch_index.append(i)
72 |     #     data_m_x.append(data_x[i:i + batch_size, :input_size])
73 |     #     data_m_y.append(data_y[i:i + batch_size, np.newaxis])
74 |     #
75 |     # data_train_x = np.reshape(data_m_x, [-1, time_step, input_size])
76 |     # data_train_y = np.reshape(data_m_y, [-1, time_step, output_size])
77 | 
78 |     for i in range(train_end - time_step, validation_end - time_step):
79 |         data_validation_m.append(y[i:i + time_step])
80 |     data_validation_x = np.reshape(data_validation_m, [-1, time_step])
81 |     data_validation_x = data_validation_x[:, :, np.newaxis]
82 |     data_validation_y = y[train_end:validation_end]
83 |     data_validation_y = np.reshape(data_validation_y, [-1, 1])
84 | 
85 |     for i in range(validation_end - time_step, len(y) - time_step):
86 |         data_test_m.append(y[i:i + time_step])
87 |     data_test_x = np.reshape(data_test_m, [-1, time_step])
88 |     data_test_x = data_test_x[:, :, np.newaxis]
89 |     data_test_y = y[validation_end:len(y)]
90 |     data_test_y = np.reshape(data_test_y, [-1, 1])
91 | 
92 |     # build the complete data set, in preparation for the sliding-window training updates
93 |     for i in range(len(y) - time_step):
94 |         data_total_m.append(y[i:i + time_step])
95 |     data_total_x = np.reshape(data_total_m, [-1, time_step])
96 |     data_total_x = data_total_x[:, :, np.newaxis]
97 |     data_total_y = y[time_step:len(y)]
98 |     data_total_y = np.reshape(data_total_y, [-1, 1])
99 | 
100 |     # data_test_m_x_m, data_test_m_y_m = [], []
101 |     # for i in range(np.shape(data_test_m_x)[0] - time_step):
102 |     #     data_test_m_x_m.append(data_test_m_x[i:i+time_step, :input_size])
103 |     #     data_test_m_y_m.append(data_test_m_y[i:i+time_step, np.newaxis])
104 |     #
105 |     # data_test_x = np.reshape(data_test_m_x_m, [-1, time_step, input_size])
106 |     # data_test_y = np.reshape(data_test_m_y_m, [-1, time_step, output_size])
107 | 
108 |     return data_train_x, data_train_y, data_validation_x, data_validation_y, data_test_x, data_test_y, data_total_x, data_total_y
109 | 
110 | 
111 | def lstm(X):
112 |     batch_size = tf.shape(X)[0]
113 |     time_step = tf.shape(X)[1]
114 |     w_in = weights['in']
115 |     b_in = biases['in']
116 |     input = tf.reshape(X, [-1, input_size]) # the tensor must be reshaped to 2-D for the computation; the result is the hidden layer's input
117 |     input_rnn = tf.matmul(input, w_in) + b_in
118 |     input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit]) # reshape back to 3-D, as the input of the LSTM cell
119 |     cell = tf.contrib.rnn.BasicLSTMCell(rnn_unit)
120 |     # cell=tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_unit)
121 |     init_state = cell.zero_state(batch_size, dtype=tf.float32)
122 |     output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
123 |     print(output_rnn)
124 |     print(final_states)
125 |     # output_rnn records the result at every LSTM output node; final_states is the result of the last cell
126 |     output = tf.reshape(output_rnn, [-1, rnn_unit])
127 |     final_output = final_states.h
128 |     final_output = tf.reshape(final_output, [-1, rnn_unit])
129 |     # input to the output layer
130 |     w_out = weights['out']
131 |     b_out = biases['out']
132 |     pred = tf.matmul(final_output, w_out) + b_out
133 |     # pred = tf.add(pred, 0, name="pred1")
134 |     # tf.add_to_collection(name='pred', value=pred)
135 |     return pred, final_states
136 | 
137 | 
138 | def train_lstm(X, Y):
139 |     pred, _ = lstm(X)
140 |     # loss function
141 |     # this flattens the data into a single column
142 |     loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
143 |     # with tf.name_scope("train1"):
144 |     train_op = tf.train.AdamOptimizer(lr).minimize(loss)
145 |     # tf.add_to_collection(name='train1', value=train_op)
146 |     # train_op = tf.add(train_op, 0, name="train1")
147 |     return pred, loss, train_op
148 | 
149 | 
150 | X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
151 | Y = tf.placeholder(tf.float32, shape=[None, output_size])
152 | 
153 | train_x, train_y, validation_x, validation_y, test_x, test_y, total_x, total_y = get_data(time_step, train_begin, train_end, validation_end)
154 | 
155 | with tf.Session() as sess:
156 |     pred, loss, train_op = train_lstm(X, Y)
157 |     sess.run(tf.global_variables_initializer())
158 |     # repeated pre-training first
159 |     train_loss_return = []
160 |     validation_loss_return = np.zeros(iter_time)
161 |     test_loss_return = []
162 | 
163 |     for i in range(iter_time):
164 |         _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x, Y: train_y})
165 |         validation_loss = sess.run(loss, feed_dict={X: validation_x, Y: validation_y})
166 |         test_loss = sess.run(loss, feed_dict={X: test_x, Y: test_y})
167 |         train_loss_return.append(train_loss)
168 |         validation_loss_return[i] = validation_loss
169 |         test_loss_return.append(0)  # note: the pre-training test loss is printed below, but a 0 is stored here
170 |         print('iter:', i, 'train_loss:', train_loss, 'validation_loss', validation_loss, 'test_loss', test_loss)
171 | 
172 |     # record the performance of the pre-trained model
173 |     train_predict_pre_training = sess.run(pred, feed_dict={X: train_x})
174 |     train_predict_pre_training = train_predict_pre_training.reshape((-1))
175 | 
176 |     validation_predict_pre_training = sess.run(pred, feed_dict={X: validation_x})
177 |     validation_predict_pre_training = validation_predict_pre_training.reshape((-1))
178 | 
179 | 
180 |     # Save the model
181 |     # saver = tf.train.Saver()
182 |     # saver.save(sess, "save_net/net.ckpt")
183 |     #
184 |     # with tf.Session() as sess:
185 |     #     saver = tf.train.import_meta_graph("save_net/net.ckpt.meta")
186 |     #     saver.restore(sess, tf.train.latest_checkpoint("save_net"))
187 |     #     graph = tf.get_default_graph()
188 |     #     X = graph.get_tensor_by_name("X:0")
189 |     #     Y = graph.get_tensor_by_name("Y:0")
190 |     #     pred = graph.get_tensor_by_name("pred1:0")
191 |     #     train_op = tf.get_collection('train1')
192 |     #     # graph.get_operation_by_name()
193 | 
194 |     test_predict = []
195 |     train_new_size = 100
196 | 
197 |     prediction_length1 = 20
198 | 
199 |     # first predict the value at the next time instant
200 |     test_x_buff = total_x[validation_end - time_step]
201 |     # test_x_buff1 = np.reshape(test_x_buff, [-1])
202 |     # test_x_buff1 = test_x_buff1[np.newaxis, :, np.newaxis]
203 |     test_x_buff = test_x_buff[np.newaxis, :, :]
204 |     # print(test_x_buff == test_x_buff1)
205 |     for i in range(1, prediction_length1):
206 |         test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
207 |         # test_loss_return.append(test_loss)
208 |         test_predict.append(test_predict_result_1_buff)
209 |         test_x_buff = total_x[validation_end - time_step + i]
210 |         test_x_buff = np.reshape(test_x_buff, [-1])
211 |         test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
212 |         test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
213 |     test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
214 |     test_predict.append(test_predict_result_1_buff)
215 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
216 |     # test_predict.append(test_predict_result_1_buff)
217 |     # test_x_buff = total_x[validation_end - time_step + 2]
218 |     # test_x_buff = np.reshape(test_x_buff, [-1])
219 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
220 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
221 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
222 |     # test_predict.append(test_predict_result_1_buff)
223 |     # test_x_buff = total_x[validation_end - time_step + 3]
224 |     # test_x_buff = np.reshape(test_x_buff, [-1])
225 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
226 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
227 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
228 |     # test_predict.append(test_predict_result_1_buff)
229 |     # test_x_buff = total_x[validation_end - time_step + 4]
230 |     # test_x_buff = np.reshape(test_x_buff, [-1])
231 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
232 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
233 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
234 |     # test_predict.append(test_predict_result_1_buff)
235 | 
236 |     for i in range(validation_end - time_step + prediction_length1, data_num - time_step, prediction_length1):
237 | 
238 |         # retrain on the new data (partial-sample learning, using only the most recently updated samples), then slide onwards
239 |         train_x_new = total_x[i - train_new_size:i]
240 |         train_y_new = total_y[i - train_new_size:i]
241 |         for j in range(iter_time_new):
242 |             _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x_new, Y: train_y_new})
243 |             train_loss_return.append(train_loss)
244 | 
245 |         # predict the value at the next time instant
246 |         test_x_buff = total_x[i]
247 |         test_x_buff = test_x_buff[np.newaxis, :, :]
248 |         for j in range(1, prediction_length1):
249 |             test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
250 |             # test_loss_return.append(test_loss)
251 |             test_predict.append(test_predict_result_1_buff)
252 |             test_x_buff = total_x[i + j]
253 |             test_x_buff = np.reshape(test_x_buff, [-1])
254 |             test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
255 |             test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
256 |         test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
257 |         test_predict.append(test_predict_result_1_buff)
258 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
259 |         # test_predict.append(test_predict_result_1_buff)
260 |         # test_x_buff = total_x[i + 2]
261 |         # test_x_buff = np.reshape(test_x_buff, [-1])
262 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
263 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
264 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
265 |         # test_predict.append(test_predict_result_1_buff)
266 |         # test_x_buff = total_x[i + 3]
267 |         # test_x_buff = np.reshape(test_x_buff, [-1])
268 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
269 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
270 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
271 |         # test_predict.append(test_predict_result_1_buff)
272 |         # test_x_buff = total_x[i + 4]
273 |         # test_x_buff = np.reshape(test_x_buff, [-1])
274 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
275 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
276 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
277 |         # test_predict.append(test_predict_result_1_buff)
278 | 
279 |     test_predict = np.reshape(test_predict, [-1])
280 |     # test_predict = test_predict.reshape((-1))
281 | 
282 |     train_predict = sess.run(pred, feed_dict={X: train_x})
283 |     train_predict = train_predict.reshape((-1))
284 | 
285 |     total_predict = sess.run(pred, feed_dict={X: total_x})
286 |     total_predict = total_predict.reshape((-1))
287 | 
288 |     validation_predict = sess.run(pred, feed_dict={X: validation_x})
289 |     validation_predict = validation_predict.reshape((-1))
290 | 
291 |     for i in range(len(train_loss_return)):
292 |         ws.cell(row=i + 1, column=1).value = train_loss_return[i]
293 |     for i in range(len(validation_loss_return)):
294 |         ws.cell(row=i + 1, column=2).value = validation_loss_return[i]
295 |     for i in range(len(test_loss_return)):
296 |         ws.cell(row=i + 1, column=3).value = test_loss_return[i]
297 | 
298 |     for i in range(len(train_predict)):
299 |         ws.cell(row=i + 1, column=4).value = train_predict[i]
300 | 
301 |     for i in range(len(validation_predict)):
302 |         ws.cell(row=i + 1, column=5).value = validation_predict[i]
303 | 
304 |     for i in range(len(total_predict)):
305 |         ws.cell(row=i + 1, column=6).value = total_predict[i]
306 |     # test-set performance during incremental learning
307 |     for i in range(len(test_predict)):
308 |         ws.cell(row=i + 1, column=7).value = test_predict[i]
309 |     # performance on the pre-training training set
310 |     for i in range(len(train_predict_pre_training)):
311 |         ws.cell(row=i + 1, column=8).value = train_predict_pre_training[i]
312 |     # performance on the pre-training validation set
313 |     for i in range(len(validation_predict_pre_training)):
314 |         ws.cell(row=i + 1, column=9).value = validation_predict_pre_training[i]
315 | 
316 | 
317 | wb.save(filename="prediction_90_length_20.xlsx")
318 | 
319 | plt.figure(figsize=(24, 8))
320 | plt.plot(y[:-1])
321 | plt.plot([None for _ in range(train_end)] + [None for _ in range(train_end, validation_end)] + [x for x in test_predict])
322 | plt.plot([m for m in train_predict] + [None for _ in range(train_end, data_num)])
323 | plt.plot([None for _ in range(train_end)] + [n for n in validation_predict] + [None for _ in range(validation_end, data_num)])
324 | plt.plot([m for m in train_predict_pre_training] + [None for _ in range(train_end, data_num)])
325 | plt.plot([None for _ in range(train_end)] + [n for n in validation_predict_pre_training] + [None for _ in range(validation_end, data_num)])
326 | # plt.plot([k for k in total_predict])
327 | plt.legend(labels=['Real_data', 'Prediction on test', 'Prediction on train', 'Prediction on validation', 'Prediction on train_pre', 'Prediction on validation_pre'])
328 | plt.show()
329 | 
330 | plt.figure()
331 | plt.plot(train_loss_return[:-1])
332 | plt.plot(validation_loss_return[:-1])
333 | plt.plot(test_loss_return[:-1])
334 | plt.legend(labels=['Loss on train', 'Loss on validation', 'Loss on test'])
335 | plt.show()
336 | 
337 | 
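Every one of these scripts carries the same commented-out tf.train.Saver block. As written it would fail on restore, because X, Y and pred are never registered under the names the block looks up ('X:0', 'pred1:0'); the commented-out tf.add(pred, 0, name="pred1") line inside lstm() hints at the missing step. A minimal working version of that TF1 pattern would be (a sketch assuming the name= arguments are added at graph construction):

# at graph construction time: give the tensors stable names
X = tf.placeholder(tf.float32, shape=[None, time_step, input_size], name='X')
pred = tf.add(pred, 0, name='pred1')

saver = tf.train.Saver()
saver.save(sess, 'save_net/net.ckpt')

# in a later session: rebuild the graph from the meta file and look the tensors up
saver = tf.train.import_meta_graph('save_net/net.ckpt.meta')
saver.restore(sess, tf.train.latest_checkpoint('save_net'))
graph = tf.get_default_graph()
X = graph.get_tensor_by_name('X:0')
pred = graph.get_tensor_by_name('pred1:0')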
--------------------------------------------------------------------------------
/multi-point prediction online/channel_90_length_5.py:
--------------------------------------------------------------------------------
1 | # Load the usual data-analysis libraries
2 | from openpyxl import load_workbook
3 | from openpyxl import Workbook
4 | import numpy as np
5 | import tensorflow as tf
6 | # from sklearn.metrics import mean_absolute_error, mean_squared_error
7 | import matplotlib.pyplot as plt
8 | import scipy.io as scio
9 | import warnings
10 | 
11 | book = load_workbook(filename=r"E:\PHD\0论文\0论文\TVT_2018\code\time series prediction_CSDN\multi-point prediction online\prediction_90_length_5.xlsx")
12 | sheetnames = book.get_sheet_names()
13 | sheet = book.get_sheet_by_name(sheetnames[0])
14 | 
15 | wb = Workbook()
16 | ws = wb.active
17 | 
18 | dataFile = 'channel_data_90_10.mat'
19 | data_com = scio.loadmat(dataFile)
20 | data_real = np.abs(data_com['h'])
21 | y = data_real[0][:8000]
22 | 
23 | warnings.filterwarnings('ignore')
24 | 
25 | # Define constants (the thesis defense mainly presents the influence of the two parameters below)
26 | time_step = 5
27 | iter_time = 200
28 | iter_time_new = 1 # sliding-window training: one training pass per slide
29 | 
30 | # Define constants
31 | rnn_unit = 5 # hidden layer units
32 | input_size = 1
33 | output_size = 1
34 | train_end = 4800
35 | data_num = len(y)
36 | lr = 0.03 # learning rate
37 | batch_size = None # the data set is small, so all of it is used at once, without mini-batching
38 | train_begin = 0
39 | validation_end = 6400
40 | 
41 | tf.reset_default_graph()
42 | 
43 | # weights and biases of the input and output layers
44 | weights = {
45 |     'in': tf.Variable(tf.random_normal([input_size, rnn_unit])),
46 |     'out': tf.Variable(tf.random_normal([rnn_unit, 1]))
47 | }
48 | biases = {
49 |     'in': tf.Variable(tf.constant(0.1, shape=[rnn_unit, ])),
50 |     'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
51 | }
52 | 
53 | 
54 | def get_data(time_step, train_begin, train_end, validation_end):
55 |     data_m, data_validation_m, data_test_m, data_total_m = [], [], [], []
56 | 
57 |     # one must be subtracted here: the "1" leaves a slot free for the final prediction
58 |     for i in range(train_end - time_step):
59 |         data_m.append(y[i:i + time_step])
60 | 
61 |     # the dimensions here may deserve a second look
62 |     data_x = np.reshape(data_m, [-1, time_step])
63 |     data_train_x = data_x[:, :, np.newaxis]
64 |     data_train_y = y[train_begin + time_step:train_end]
65 |     data_train_y = np.reshape(data_train_y, [-1, 1])
66 |     # data_train_y = data_y[:, :, np.newaxis]
67 | 
68 |     # mini-batch processing
69 |     # for i in range(np.shape(data_x)[0] - batch_size):
70 |     #     if i % batch_size == 0:
71 |     #         batch_index.append(i)
72 |     #     data_m_x.append(data_x[i:i + batch_size, :input_size])
73 |     #     data_m_y.append(data_y[i:i + batch_size, np.newaxis])
74 |     #
75 |     # data_train_x = np.reshape(data_m_x, [-1, time_step, input_size])
76 |     # data_train_y = np.reshape(data_m_y, [-1, time_step, output_size])
77 | 
78 |     for i in range(train_end - time_step, validation_end - time_step):
79 |         data_validation_m.append(y[i:i + time_step])
80 |     data_validation_x = np.reshape(data_validation_m, [-1, time_step])
81 |     data_validation_x = data_validation_x[:, :, np.newaxis]
82 |     data_validation_y = y[train_end:validation_end]
83 |     data_validation_y = np.reshape(data_validation_y, [-1, 1])
84 | 
85 |     for i in range(validation_end - time_step, len(y) - time_step):
86 |         data_test_m.append(y[i:i + time_step])
87 |     data_test_x = np.reshape(data_test_m, [-1, time_step])
88 |     data_test_x = data_test_x[:, :, np.newaxis]
89 |     data_test_y = y[validation_end:len(y)]
90 |     data_test_y = np.reshape(data_test_y, [-1, 1])
91 | 
92 |     # build the complete data set, in preparation for the sliding-window training updates
93 |     for i in range(len(y) - time_step):
94 |         data_total_m.append(y[i:i + time_step])
95 |     data_total_x = np.reshape(data_total_m, [-1, time_step])
96 |     data_total_x = data_total_x[:, :, np.newaxis]
97 |     data_total_y = y[time_step:len(y)]
98 |     data_total_y = np.reshape(data_total_y, [-1, 1])
99 | 
100 |     # data_test_m_x_m, data_test_m_y_m = [], []
101 |     # for i in range(np.shape(data_test_m_x)[0] - time_step):
102 |     #     data_test_m_x_m.append(data_test_m_x[i:i+time_step, :input_size])
103 |     #     data_test_m_y_m.append(data_test_m_y[i:i+time_step, np.newaxis])
104 |     #
105 |     # data_test_x = np.reshape(data_test_m_x_m, [-1, time_step, input_size])
106 |     # data_test_y = np.reshape(data_test_m_y_m, [-1, time_step, output_size])
107 | 
108 |     return data_train_x, data_train_y, data_validation_x, data_validation_y, data_test_x, data_test_y, data_total_x, data_total_y
109 | 
110 | 
111 | def lstm(X):
112 |     batch_size = tf.shape(X)[0]
113 |     time_step = tf.shape(X)[1]
114 |     w_in = weights['in']
115 |     b_in = biases['in']
116 |     input = tf.reshape(X, [-1, input_size]) # the tensor must be reshaped to 2-D for the computation; the result is the hidden layer's input
117 |     input_rnn = tf.matmul(input, w_in) + b_in
118 |     input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit]) # reshape back to 3-D, as the input of the LSTM cell
119 |     cell = tf.contrib.rnn.BasicLSTMCell(rnn_unit)
120 |     # cell=tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_unit)
121 |     init_state = cell.zero_state(batch_size, dtype=tf.float32)
122 |     output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
123 |     print(output_rnn)
124 |     print(final_states)
125 |     # output_rnn records the result at every LSTM output node; final_states is the result of the last cell
126 |     output = tf.reshape(output_rnn, [-1, rnn_unit])
127 |     final_output = final_states.h
128 |     final_output = tf.reshape(final_output, [-1, rnn_unit])
129 |     # input to the output layer
130 |     w_out = weights['out']
131 |     b_out = biases['out']
132 |     pred = tf.matmul(final_output, w_out) + b_out
133 |     # pred = tf.add(pred, 0, name="pred1")
134 |     # tf.add_to_collection(name='pred', value=pred)
135 |     return pred, final_states
136 | 
137 | 
138 | def train_lstm(X, Y):
139 |     pred, _ = lstm(X)
140 |     # loss function
141 |     # this flattens the data into a single column
142 |     loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
143 |     # with tf.name_scope("train1"):
144 |     train_op = tf.train.AdamOptimizer(lr).minimize(loss)
145 |     # tf.add_to_collection(name='train1', value=train_op)
146 |     # train_op = tf.add(train_op, 0, name="train1")
147 |     return pred, loss, train_op
148 | 
149 | 
150 | X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
151 | Y = tf.placeholder(tf.float32, shape=[None, output_size])
152 | 
153 | train_x, train_y, validation_x, validation_y, test_x, test_y, total_x, total_y = get_data(time_step, train_begin, train_end, validation_end)
154 | 
155 | with tf.Session() as sess:
156 |     pred, loss, train_op = train_lstm(X, Y)
157 |     sess.run(tf.global_variables_initializer())
158 |     # repeated pre-training first
159 |     train_loss_return = []
160 |     validation_loss_return = np.zeros(iter_time)
161 |     test_loss_return = []
162 | 
163 |     for i in range(iter_time):
164 |         _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x, Y: train_y})
165 |         validation_loss = sess.run(loss, feed_dict={X: validation_x, Y: validation_y})
166 |         test_loss = sess.run(loss, feed_dict={X: test_x, Y: test_y})
167 |         train_loss_return.append(train_loss)
168 |         validation_loss_return[i] = validation_loss
169 |         test_loss_return.append(0)  # note: the pre-training test loss is printed below, but a 0 is stored here
170 |         print('iter:', i, 'train_loss:', train_loss, 'validation_loss', validation_loss, 'test_loss', test_loss)
171 | 
172 |     # record the performance of the pre-trained model
173 |     train_predict_pre_training = sess.run(pred, feed_dict={X: train_x})
174 |     train_predict_pre_training = train_predict_pre_training.reshape((-1))
175 | 
176 |     validation_predict_pre_training = sess.run(pred, feed_dict={X: validation_x})
177 |     validation_predict_pre_training = validation_predict_pre_training.reshape((-1))
178 | 
179 | 
180 |     # Save the model
181 |     # saver = tf.train.Saver()
182 |     # saver.save(sess, "save_net/net.ckpt")
183 |     #
184 |     # with tf.Session() as sess:
185 |     #     saver = tf.train.import_meta_graph("save_net/net.ckpt.meta")
186 |     #     saver.restore(sess, tf.train.latest_checkpoint("save_net"))
187 |     #     graph = tf.get_default_graph()
188 |     #     X = graph.get_tensor_by_name("X:0")
189 |     #     Y = graph.get_tensor_by_name("Y:0")
190 |     #     pred = graph.get_tensor_by_name("pred1:0")
191 |     #     train_op = tf.get_collection('train1')
192 |     #     # graph.get_operation_by_name()
193 | 
194 |     test_predict = []
195 |     train_new_size = 100
196 | 
197 |     prediction_length1 = 5
198 | 
199 |     # first predict the value at the next time instant
200 |     test_x_buff = total_x[validation_end - time_step]
201 |     # test_x_buff1 = np.reshape(test_x_buff, [-1])
202 |     # test_x_buff1 = test_x_buff1[np.newaxis, :, np.newaxis]
203 |     test_x_buff = test_x_buff[np.newaxis, :, :]
204 |     # print(test_x_buff == test_x_buff1)
205 |     for i in range(1, prediction_length1):
206 |         test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
207 |         # test_loss_return.append(test_loss)
208 |         test_predict.append(test_predict_result_1_buff)
209 |         test_x_buff = total_x[validation_end - time_step + i]
210 |         test_x_buff = np.reshape(test_x_buff, [-1])
211 |         test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
212 |         test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
213 |     test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
214 |     test_predict.append(test_predict_result_1_buff)
215 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
216 |     # test_predict.append(test_predict_result_1_buff)
217 |     # test_x_buff = total_x[validation_end - time_step + 2]
218 |     # test_x_buff = np.reshape(test_x_buff, [-1])
219 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
220 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
221 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
222 |     # test_predict.append(test_predict_result_1_buff)
223 |     # test_x_buff = total_x[validation_end - time_step + 3]
224 |     # test_x_buff = np.reshape(test_x_buff, [-1])
225 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
226 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
227 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
228 |     # test_predict.append(test_predict_result_1_buff)
229 |     # test_x_buff = total_x[validation_end - time_step + 4]
230 |     # test_x_buff = np.reshape(test_x_buff, [-1])
231 |     # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
232 |     # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
233 |     # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
234 |     # test_predict.append(test_predict_result_1_buff)
235 | 
236 |     for i in range(validation_end - time_step + prediction_length1, data_num - time_step, prediction_length1):
237 | 
238 |         # retrain on the new data (partial-sample learning, using only the most recently updated samples), then slide onwards
239 |         train_x_new = total_x[i - train_new_size:i]
240 |         train_y_new = total_y[i - train_new_size:i]
241 |         for j in range(iter_time_new):
242 |             _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x_new, Y: train_y_new})
243 |             train_loss_return.append(train_loss)
244 | 
245 |         # predict the value at the next time instant
246 |         test_x_buff = total_x[i]
247 |         test_x_buff = test_x_buff[np.newaxis, :, :]
248 |         for j in range(1, prediction_length1):
249 |             test_predict_result_1_buff = sess.run([pred], feed_dict={X: test_x_buff})
250 |             # test_loss_return.append(test_loss)
251 |             test_predict.append(test_predict_result_1_buff)
252 |             test_x_buff = total_x[i + j]
253 |             test_x_buff = np.reshape(test_x_buff, [-1])
254 |             test_x_buff[time_step-1] = np.reshape(test_predict_result_1_buff, [-1])[0]
255 |             test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
256 |         test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
257 |         test_predict.append(test_predict_result_1_buff)
258 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
259 |         # test_predict.append(test_predict_result_1_buff)
260 |         # test_x_buff = total_x[i + 2]
261 |         # test_x_buff = np.reshape(test_x_buff, [-1])
262 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
263 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
264 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
265 |         # test_predict.append(test_predict_result_1_buff)
266 |         # test_x_buff = total_x[i + 3]
267 |         # test_x_buff = np.reshape(test_x_buff, [-1])
268 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
269 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
270 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
271 |         # test_predict.append(test_predict_result_1_buff)
272 |         # test_x_buff = total_x[i + 4]
273 |         # test_x_buff = np.reshape(test_x_buff, [-1])
274 |         # test_x_buff[4] = np.reshape(test_predict_result_1_buff, [-1])[0]
275 |         # test_x_buff = test_x_buff[np.newaxis, :, np.newaxis]
276 |         # test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
277 |         # test_predict.append(test_predict_result_1_buff)
278 | 
279 |     test_predict = np.reshape(test_predict, [-1])
280 |     # test_predict = test_predict.reshape((-1))
281 | 
282 |     train_predict = sess.run(pred, feed_dict={X: train_x})
283 |     train_predict = train_predict.reshape((-1))
284 | 
285 |     total_predict = sess.run(pred, feed_dict={X: total_x})
286 |     total_predict = total_predict.reshape((-1))
287 | 
288 |     validation_predict = sess.run(pred, feed_dict={X: validation_x})
289 |     validation_predict = validation_predict.reshape((-1))
290 | 
291 |     for i in range(len(train_loss_return)):
292 |         ws.cell(row=i + 1, column=1).value = train_loss_return[i]
293 |     for i in range(len(validation_loss_return)):
294 |         ws.cell(row=i + 1, column=2).value = validation_loss_return[i]
295 |     for i in range(len(test_loss_return)):
296 |         ws.cell(row=i + 1, column=3).value = test_loss_return[i]
297 | 
298 |     for i in range(len(train_predict)):
299 |         ws.cell(row=i + 1, column=4).value = train_predict[i]
300 | 
301 |     for i in range(len(validation_predict)):
302 |         ws.cell(row=i + 1, column=5).value = validation_predict[i]
303 | 
304 |     for i in range(len(total_predict)):
305 |         ws.cell(row=i + 1, column=6).value = total_predict[i]
306 |     # test-set performance during incremental learning
307 |     for i in range(len(test_predict)):
308 |         ws.cell(row=i + 1, column=7).value = test_predict[i]
309 |     # performance on the pre-training training set
310 |     for i in range(len(train_predict_pre_training)):
311 |         ws.cell(row=i + 1, column=8).value = train_predict_pre_training[i]
312 |     # performance on the pre-training validation set
313 |     for i in range(len(validation_predict_pre_training)):
314 |         ws.cell(row=i + 1, column=9).value = validation_predict_pre_training[i]
315 | 
316 | 
317 | wb.save(filename="prediction_90_length_5.xlsx")
318 | 
319 | plt.figure(figsize=(24, 8))
320 | plt.plot(y[:-1])
321 | plt.plot([None for _ in range(train_end)] + [None for _ in range(train_end, validation_end)] + [x for x in test_predict])
322 | plt.plot([m for m in train_predict] + [None for _ in range(train_end, data_num)])
323 | plt.plot([None for _ in range(train_end)] + [n for n in validation_predict] + [None for _ in range(validation_end, data_num)])
324 | plt.plot([m for m in train_predict_pre_training] + [None for _ in range(train_end, data_num)])
325 | plt.plot([None for _ in range(train_end)] + [n for n in validation_predict_pre_training] + [None for _ in range(validation_end, data_num)])
326 | # plt.plot([k for k in total_predict])
327 | plt.legend(labels=['Real_data', 'Prediction on test', 'Prediction on train', 'Prediction on validation', 'Prediction on train_pre', 'Prediction on validation_pre'])
328 | plt.show()
329 | 
330 | plt.figure()
331 | plt.plot(train_loss_return[:-1])
332 | plt.plot(validation_loss_return[:-1])
333 | plt.plot(test_loss_return[:-1])
334 | plt.legend(labels=['Loss on train', 'Loss on validation', 'Loss on test'])
335 | plt.show()
336 | 
337 | 
--------------------------------------------------------------------------------
/multi-point prediction online/channel_90_pre_total.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/channel_90_pre_total.m
--------------------------------------------------------------------------------
/multi-point prediction online/channel_data10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/channel_data10.mat
--------------------------------------------------------------------------------
/multi-point prediction online/channel_data_360_10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/channel_data_360_10.mat
--------------------------------------------------------------------------------
/multi-point prediction online/channel_data_450_10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/channel_data_450_10.mat
--------------------------------------------------------------------------------
/multi-point prediction online/channel_data_90_10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/channel_data_90_10.mat
--------------------------------------------------------------------------------
/multi-point prediction online/combine_180_360.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/combine_180_360.m
--------------------------------------------------------------------------------
/multi-point prediction online/magnify.m:
--------------------------------------------------------------------------------
1 | function magnify(f1)
2 | %
3 | %magnify(f1)
4 | %
5 | %  Figure creates a magnification box when under the mouse
6 | %  position when a button is pressed.  Press '+'/'-' while
7 | %  button pressed to increase/decrease magnification. Press
8 | %  '>'/'<' while button pressed to increase/decrease box size.
9 | %  Hold 'Ctrl' while clicking to leave magnification on figure.
10 | %
11 | % Example:
12 | %    plot(1:100,randn(1,100),(1:300)/3,rand(1,300)), grid on,
13 | %    magnify;
14 | 
15 | % Rick Hindman - 7/29/04
16 | 
17 | if (nargin == 0), f1 = gcf; end;
18 | set(f1, ...
19 |    'WindowButtonDownFcn', @ButtonDownCallback, ...
20 |    'WindowButtonUpFcn', @ButtonUpCallback, ...
21 |    'WindowButtonMotionFcn', @ButtonMotionCallback, ...
22 |    'KeyPressFcn', @KeyPressCallback);
23 | return;
24 | 
25 | function ButtonDownCallback(src,eventdata)
26 |    f1 = src;
27 |    a1 = get(f1,'CurrentAxes');
28 |    a2 = copyobj(a1,f1);
29 | 
30 |    set(f1, ...
31 |       'UserData',[f1,a1,a2], ...
32 |       'Pointer','fullcrosshair', ...
33 |       'CurrentAxes',a2);
34 |    set(a2, ...
35 |       'UserData',[2,0.2], ... %magnification, frame size
36 |       'Color',get(a1,'Color'), ...
37 |       'Box','on');
38 |    xlabel(''); ylabel(''); zlabel(''); title('');
39 |    set(get(a2,'Children'), ...
40 |       'LineWidth', 2);
41 |    set(a1, ...
42 |       'Color',get(a1,'Color')*0.95);
43 |    set(f1, ...
44 |       'CurrentAxes',a1);
45 |    ButtonMotionCallback(src);
46 | return;
47 | 
48 | function ButtonUpCallback(src,eventdata)
49 |    H = get(src,'UserData');
50 |    f1 = H(1); a1 = H(2); a2 = H(3);
51 |    set(a1, ...
52 |       'Color',get(a2,'Color'));
53 |    set(f1, ...
54 |       'UserData',[], ...
55 |       'Pointer','arrow', ...
56 |       'CurrentAxes',a1);
57 |    if ~strcmp(get(f1,'SelectionType'),'alt'),
58 |       delete(a2);
59 |    end;
60 | return;
61 | 
62 | function ButtonMotionCallback(src,eventdata)
63 |    H = get(src,'UserData');
64 |    if ~isempty(H)
65 |       f1 = H(1); a1 = H(2); a2 = H(3);
66 |       a2_param = get(a2,'UserData');
67 |       f_pos = get(f1,'Position');
68 |       a1_pos = get(a1,'Position');
69 | 
70 |       [f_cp, a1_cp] = pointer2d(f1,a1);
71 | 
72 |       set(a2,'Position',[(f_cp./f_pos(3:4)) 0 0]+a2_param(2)*a1_pos(3)*[-1 -1 2 2]);
73 |       a2_pos = get(a2,'Position');
74 | 
75 |       set(a2,'XLim',a1_cp(1)+(1/a2_param(1))*(a2_pos(3)/a1_pos(3))*diff(get(a1,'XLim'))*[-0.5 0.5]);
76 |       set(a2,'YLim',a1_cp(2)+(1/a2_param(1))*(a2_pos(4)/a1_pos(4))*diff(get(a1,'YLim'))*[-0.5 0.5]);
77 |    end;
78 | return;
79 | 
80 | function KeyPressCallback(src,eventdata)
81 |    H = get(gcf,'UserData');
82 |    if ~isempty(H)
83 |       f1 = H(1); a1 = H(2); a2 = H(3);
84 |       a2_param = get(a2,'UserData');
85 |       if (strcmp(get(f1,'CurrentCharacter'),'+') | strcmp(get(f1,'CurrentCharacter'),'='))
86 |          a2_param(1) = a2_param(1)*1.2;
87 |       elseif (strcmp(get(f1,'CurrentCharacter'),'-') | strcmp(get(f1,'CurrentCharacter'),'_'))
88 |          a2_param(1) = a2_param(1)/1.2;
89 |       elseif (strcmp(get(f1,'CurrentCharacter'),'<') | strcmp(get(f1,'CurrentCharacter'),','))
90 |          a2_param(2) = a2_param(2)/1.2;
91 |       elseif (strcmp(get(f1,'CurrentCharacter'),'>') | strcmp(get(f1,'CurrentCharacter'),'.'))
92 |          a2_param(2) = a2_param(2)*1.2;
93 |       end;
94 |       set(a2,'UserData',a2_param);
95 |       ButtonMotionCallback(src);
96 |    end;
97 | return;
98 | 
99 | 
100 | 
101 | % Included for completeness (usually in own file)
102 | function [fig_pointer_pos, axes_pointer_val] = pointer2d(fig_hndl,axes_hndl)
103 | %
104 | %pointer2d(fig_hndl,axes_hndl)
105 | %
106 | %  Returns the coordinates of the pointer (in pixels)
107 | %  in the desired figure (fig_hndl) and the coordinates
108 | %  in the desired axis (axes coordinates)
109 | %
110 | % Example:
111 | %   figure(1),
112 | %   hold on,
113 | %   for i = 1:1000,
114 | %      [figp,axp]=pointer2d;
115 | %      plot(axp(1),axp(2),'.','EraseMode','none');
116 | %      drawnow;
117 | %   end;
118 | %   hold off
119 | 
120 | % Rick Hindman - 4/18/01
121 | 
122 | if (nargin == 0), fig_hndl = gcf; axes_hndl = gca; end;
123 | if (nargin == 1), axes_hndl = get(fig_hndl,'CurrentAxes'); end;
124 | 
125 | set(fig_hndl,'Units','pixels');
126 | 
127 | pointer_pos = get(0,'PointerLocation');  %pixels {0,0} lower left
128 | fig_pos = get(fig_hndl,'Position');  %pixels {l,b,w,h}
129 | 
130 | fig_pointer_pos = pointer_pos - fig_pos([1,2]);
131 | set(fig_hndl,'CurrentPoint',fig_pointer_pos);
set(fig_hndl,'CurrentPoint',fig_pointer_pos);

if (isempty(axes_hndl)),
    axes_pointer_val = [];
elseif (nargout == 2),
    axes_pointer_line = get(axes_hndl,'CurrentPoint');
    axes_pointer_val = sum(axes_pointer_line)/2;
end;
--------------------------------------------------------------------------------
/multi-point prediction online/makehatch.m:
--------------------------------------------------------------------------------
function A = makehatch(hatch)
%MAKEHATCH Predefined hatch patterns
%  MAKEHATCH(HATCH) returns a matrix with the hatch pattern for HATCH
%  according to the following table:
%      HATCH        pattern
%     -------      ---------
%        /          right-slanted lines
%        \          left-slanted lines
%        |          vertical lines
%        -          horizontal lines
%        +          crossing vertical and horizontal lines
%        x          criss-crossing lines
%        .          single dots
%
%  See also: APPLYHATCH

%  By Ben Hinkle, bhinkle@mathworks.com
%  This code is in the public domain.

n = 6;
A = zeros(n);
switch (hatch)
  case '/'
    A = fliplr(eye(n));
  case '\'
    A = eye(n);
  case '|'
    A(:,1) = 1;
  case '-'
    A(1,:) = 1;
  case '+'
    A(:,1) = 1;
    A(1,:) = 1;
  case 'x'
    A = eye(n) | fliplr(diag(ones(n-1,1),-1));
  case '.'
    A(1:2,1:2) = 1;
  otherwise
    error(['Undefined hatch pattern "' hatch '".']);
end
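(As a quick sanity check of the pattern table above, here is a small NumPy sketch, not part of the repository, that builds the same 6-by-6 binary hatch matrices; the function name make_hatch is illustrative.)

# Illustrative NumPy port of makehatch.m (not part of the repository).
import numpy as np

def make_hatch(hatch, n=6):
    """Return an n-by-n 0/1 matrix for the given hatch character."""
    A = np.zeros((n, n), dtype=int)
    if hatch == '/':
        A = np.fliplr(np.eye(n, dtype=int))
    elif hatch == '\\':
        A = np.eye(n, dtype=int)
    elif hatch == '|':
        A[:, 0] = 1
    elif hatch == '-':
        A[0, :] = 1
    elif hatch == '+':
        A[:, 0] = 1
        A[0, :] = 1
    elif hatch == 'x':
        # main diagonal OR the flipped sub-diagonal, as in the MATLAB version
        A = np.eye(n, dtype=int) | np.fliplr(np.diag(np.ones(n - 1, dtype=int), -1))
    elif hatch == '.':
        A[:2, :2] = 1
    else:
        raise ValueError('Undefined hatch pattern "%s".' % hatch)
    return A

print(make_hatch('x'))  # 6x6 criss-cross pattern of 0s and 1s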
--------------------------------------------------------------------------------
/multi-point prediction online/makehatch_plus.m:
--------------------------------------------------------------------------------
function A = makehatch_plus(hatch,n,m)
%MAKEHATCH_PLUS Predefined hatch patterns
%
% Modification of MAKEHATCH to allow for selection of matrix size. Useful when using
% APPLYHATCH_PLUS with higher resolution output.
%
%  input (optional) N    size of hatch matrix (default = 6)
%  input (optional) M    width of lines and dots in hatching (default = 1)
%
%  MAKEHATCH_PLUS(HATCH,N,M) returns a matrix with the hatch pattern for HATCH
%  according to the following table:
%      HATCH        pattern
%     -------      ---------
%        /          right-slanted lines
%        \          left-slanted lines
%        |          vertical lines
%        -          horizontal lines
%        +          crossing vertical and horizontal lines
%        x          criss-crossing lines
%        .          square dots
%        c          circular dots
%        w          just a blank white pattern
%        k          just a totally black pattern
%
%  See also: APPLYHATCH, APPLYHATCH_PLUS, APPLYHATCH_PLUSCOLOR, MAKEHATCH

%  By Ben Hinkle, bhinkle@mathworks.com
%  This code is in the public domain.

% Modified Brian FG Katz    8-aout-03
% Modified David M Kaplan   19-fevrier-08

if ~exist('n','var'), n = 6; end
if ~exist('m','var'), m = 1; end
n = round(n);

switch (hatch)
  case '\'
    [B,C] = meshgrid( 0:n-1 );
    B = B-C;
    clear C
    A = abs(B) <= m/2;
    A = A | abs(B-n) <= m/2;
    A = A | abs(B+n) <= m/2;
  case '/'
    A = fliplr(makehatch_plus('\',n,m));
  case '|'
    A = zeros(n);
    A(:,1:m) = 1;
  case '-'
    A = makehatch_plus('|',n,m);
    A = A';
  case '+'
    A = makehatch_plus('|',n,m);
    A = A | A';
  case 'x'
    A = makehatch_plus('\',n,m);
    A = A | fliplr(A);
  case '.'
    A = zeros(n);
    A(1:2*m,1:2*m) = 1;
  case 'c'
    [B,C] = meshgrid( 0:n-1 );
    A = sqrt(B.^2+C.^2) <= m;
    A = A | fliplr(A) | flipud(A) | flipud(fliplr(A));
  case 'w'
    A = zeros(n);
  case 'k'
    A = ones(n);
  otherwise
    error(['Undefined hatch pattern "' hatch '".']);
end
--------------------------------------------------------------------------------
/multi-point prediction online/prediction_180_length_5.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/prediction_180_length_5.xlsx
--------------------------------------------------------------------------------
/multi-point prediction online/prediction_360_length_5.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/prediction_360_length_5.xlsx
--------------------------------------------------------------------------------
/multi-point prediction online/prediction_90_length_10.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/prediction_90_length_10.xlsx
--------------------------------------------------------------------------------
/multi-point prediction online/prediction_90_length_16.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/prediction_90_length_16.xlsx
--------------------------------------------------------------------------------
/multi-point prediction online/prediction_90_length_20.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/prediction_90_length_20.xlsx
--------------------------------------------------------------------------------
/multi-point prediction online/prediction_90_length_5.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/prediction_90_length_5.xlsx
--------------------------------------------------------------------------------
/multi-point prediction online/total_prediction_length.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/multi-point prediction online/total_prediction_length.m
--------------------------------------------------------------------------------
/single-point prediction online/README.md:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/single-point prediction online/applyhatch.m:
--------------------------------------------------------------------------------
function applyhatch(h,patterns,colorlist)
%APPLYHATCH Apply hatched patterns to a figure
%  APPLYHATCH(H,PATTERNS) creates a new figure from the figure H by
%  replacing distinct colors in H with the black and white
%  patterns in PATTERNS. The format for PATTERNS can be
%    a string of the characters '/', '\', '|', '-', '+', 'x', '.'
%    a cell array of matrices of zeros (white) and ones (black)
%
%  APPLYHATCH(H,PATTERNS,COLORS) maps the colors in the n by 3
%  matrix COLORS to PATTERNS. Each row of COLORS specifies an RGB
%  color value.
%
%  Note this function makes a bitmap image of H and so is limited
%  to low-resolution, bitmap output.
%
%  Example 1:
%    bar(rand(3,4));
%    applyhatch(gcf,'\-x.');
%
%  Example 2:
%    colormap(cool(6));
%    pie(rand(6,1));
%    legend('Jan','Feb','Mar','Apr','May','Jun');
%    applyhatch(gcf,'|-+.\/',cool(6));
%
%  See also: MAKEHATCH

%  By Ben Hinkle, bhinkle@mathworks.com
%  This code is in the public domain.

oldppmode = get(h,'paperpositionmode');
oldunits = get(h,'units');
set(h,'paperpositionmode','auto');
set(h,'units','pixels');
figsize = get(h,'position');
if nargin == 2
  colorlist = [];
end
bits = hardcopy(h,'-dzbuffer','-r0');
set(h,'paperpositionmode',oldppmode);

bwidth = size(bits,2);
bheight = size(bits,1);
bsize = bwidth * bheight;
if ~isempty(colorlist)
  colorlist = uint8(255*colorlist);
  [colors,colori] = nextnonbw(0,colorlist,bits);
else
  colors = (bits(:,:,1) ~= bits(:,:,2)) | ...
           (bits(:,:,1) ~= bits(:,:,3));
end
pati = 1;
colorind = find(colors);
while ~isempty(colorind)
  colorval(1) = bits(colorind(1));
  colorval(2) = bits(colorind(1)+bsize);
  colorval(3) = bits(colorind(1)+2*bsize);
  if iscell(patterns)
    pattern = patterns{pati};
  elseif isa(patterns,'char')
    pattern = makehatch(patterns(pati));
  else
    pattern = patterns;
  end
  pattern = uint8(255*(1-pattern));
  pheight = size(pattern,2);
  pwidth = size(pattern,1);
  ratioh = ceil(bheight/pheight);
  ratiow = ceil(bwidth/pwidth);
  bigpattern = repmat(pattern,[ratioh ratiow]);
  if ratioh*pheight > bheight
    bigpattern(bheight+1:end,:) = [];
  end
  if ratiow*pwidth > bwidth
    bigpattern(:,bwidth+1:end) = [];
  end
  bigpattern = repmat(bigpattern,[1 1 3]);
  color = (bits(:,:,1) == colorval(1)) & ...
          (bits(:,:,2) == colorval(2)) & ...
          (bits(:,:,3) == colorval(3));
  color = repmat(color,[1 1 3]);
  bits(color) = bigpattern(color);
  if ~isempty(colorlist)
    [colors,colori] = nextnonbw(colori,colorlist,bits);
  else
    colors = (bits(:,:,1) ~= bits(:,:,2)) | ...
             (bits(:,:,1) ~= bits(:,:,3));
  end
  colorind = find(colors);
  pati = (pati + 1);
  if pati > length(patterns)
    pati = 1;
  end
end

newfig = figure('units','pixels','visible','off');
imaxes = axes('parent',newfig,'units','pixels');
im = image(bits,'parent',imaxes);
fpos = get(newfig,'position');
set(newfig,'position',[fpos(1:2) figsize(3) figsize(4)+1]);
set(imaxes,'position',[0 0 figsize(3) figsize(4)+1],'visible','off');
set(newfig,'visible','on');

function [colors,out] = nextnonbw(ind,colorlist,bits)
out = ind+1;
colors = [];
while out <= size(colorlist,1)
  if isequal(colorlist(out,:),[255 255 255]) | ...
        isequal(colorlist(out,:),[0 0 0])
    out = out+1;
  else
    colors = (colorlist(out,1) == bits(:,:,1)) & ...
             (colorlist(out,2) == bits(:,:,2)) & ...
             (colorlist(out,3) == bits(:,:,3));
    return
  end
end
--------------------------------------------------------------------------------
/single-point prediction online/applyhatch_plusC.m:
--------------------------------------------------------------------------------
function im_hatchC = applyhatch_plusC(h,patterns,patterncolors,colorlist,dpi,hatchsc)
%APPLYHATCH_PLUSC Apply colored hatched patterns to a figure
% im_hatch = applyhatch_plusC(h,patterns,patterncolors,colorlist,dpi,hatchsc)
%
%  APPLYHATCH_PLUSC(H,PATTERNS) creates a new figure from the figure H by
%  replacing distinct colors in H with the black and white
%  patterns in PATTERNS. The format for PATTERNS can be
%    a string of the characters '/', '\', '|', '-', '+', 'x', '.'
%    a cell array of matrices of zeros (white) and ones (black)
%
%  By default the lines are of uniform thickness. Hatch pattern line
%  thickness can be modified using a direct call to MAKEHATCH_PLUS using
%  the following syntax: makehatch_plus('HHn',m) where:
%       HH      the hatch character written twice, '//', '\\', '||', '--', '++'
%       n       integer number for thickness
%       m       integer number for the matrix size (n<=m)
%       Ex. makehatch_plus('\\4',9)
%
%  APPLYHATCH_PLUSC(H,PATTERNS,COLORS) maps the colors in the n by 3
%  matrix COLORS to PATTERNS. Each row of COLORS specifies an RGB
%  color value. COLORS can also be a character string list.
%
%  Note this function makes a bitmap image of H and so is limited
%  to bitmap output.
%
%  Example 1: basic operation using a color char string
%      bar(rand(3,6));
%      im_hatchC = applyhatch_plusC(1,'\-x.\x','rkgrgb');
%
%  Example 2: basic operation using a color matrix
%      bar(rand(3,4));
%      im_hatchC = applyhatch_plusC(1,'\-x.',[1 0 0;0 1 0;0 0 1;0 1 1]);
%
%  Example 3: basic operation using resolution modification
%      pie(rand(6,1));
%      legend('Jan','Feb','Mar','Apr','May','Jun');
%      im_hatch = applyhatch_plusC(gcf,'|-+.\/','rgbcmy',[],150,0.5);
%      imwrite(im_hatch,'im_hatch.tiff','tiff')
%      Note: it is not always clear exactly how colors are assigned by some
%      plot functions, so it is better to leave COLORLIST empty at first.
%
%  Example 4: basic operation with user-defined patterns
%      bar(rand(3,3));
%      im_hatch = applyhatch_plusC(gcf,{makehatch_plus('\',6),1-makehatch_plus('\',6),makehatch_plus('\',1)},'ggg');
%
%  Example 5: using variable-thickness hatches
%      bar(rand(3,3));
%      im_hatch = applyhatch_plusC(gcf,{makehatch_plus('\',9),makehatch_plus('\\4',9),makehatch_plus('\\8',9)},'rgb');
%
%  Example 6: basic operation using an IMAGE plot
%      data = reshape([randperm(8) randperm(8) randperm(8)],4,6)
%      image(data)
%      im_hatch = applyhatch_plusC(1,'|-+.\/x/','rgbcmykr',colormap);
%      Note: do not use imagesc, as you need an indexed image if you want to
%      control the hatch assignments related to data values.
%
% Modification of APPLYHATCH_PLUS to allow colored patterns
% Modified Brian FG Katz    25-feb-2010
% im_hatch = applyhatch_plusC(h,patterns,patterncolors,colorlist,dpi,hatchsc)
%
% input    patterncolors   RGB matrix of colors for patterns
%                          (length(PATTERNS) X 3) or string of color chars
%                          'r' 'g' 'b' 'c' 'm' 'y' of length = length(PATTERNS)
%          DPI             allows specification of bitmap resolution, making
%                          plot resolution better for printing
%          HATCHSC         multiplier for hatch scale to increase the size of
%                          the pattern for better operation at higher
%                          resolutions (not used when PATTERNS defines a
%                          pattern matrix); default [] uses screen resolution
%                          as in APPLYHATCH
% output   IM_HATCH        RGB bitmap matrix of the new figure;
%                          use IMWRITE to output in the desired format
%
% Modified Brian FG Katz    21-sep-11
%          Variable line thickness
%
% See also: APPLYHATCH, APPLYHATCH_PLUS

% By Ben Hinkle, bhinkle@mathworks.com
% This code is in the public domain.

oldppmode = get(h,'paperpositionmode');
oldunits = get(h,'units');
oldcolor = get(h,'color');
oldpos = get(h,'position');
set(h,'paperpositionmode','auto');
set(h,'units','pixels');
set(h,'color',[1 1 1]);
figsize = get(h,'position');

if nargin < 6; hatchsc = 1 ; end
if nargin < 5; dpi = 0 ; end         % defaults to screen resolution
if nargin < 4; colorlist = [] ; end

if length(patterns) ~= size(patterncolors,1)
    if length(patterns) == size(patterncolors',1)
        % no problem
    else
        error('PATTERN and PATTERNCOLORS must be the same length')
    end
end

if ischar(patterncolors),
    patterncolors = charcolor2rgb(patterncolors);
end

bits = print(h,'-RGBImage',['-r' num2str(dpi)]);
bitsC = ones(size(bits))*0;
blackpixels = intersect(find(bits(:,:,1)==255), (intersect(find(bits(:,:,1)==bits(:,:,2)),find(bits(:,:,1)==bits(:,:,3)))) ) ;

set(h,'paperpositionmode',oldppmode);
set(h,'color',oldcolor);

bwidth = size(bits,2);
bheight = size(bits,1);
bsize = bwidth * bheight;
if ~isempty(colorlist)
    colorlist = uint8(floor(255*colorlist));
    [colors,colori] = nextnonbw(0,colorlist,bits);
else
    colors = (bits(:,:,1) ~= bits(:,:,2)) | ...
             (bits(:,:,1) ~= bits(:,:,3));
end
pati = 1;
colorind = find(colors);
while ~isempty(colorind)
    colorval(1) = bits(colorind(1));
    colorval(2) = bits(colorind(1)+bsize);
    colorval(3) = bits(colorind(1)+2*bsize);
    if iscell(patterns)
        pattern = patterns{pati};
    elseif isa(patterns,'char')
        pattern = makehatch_plus(patterns(pati),6*hatchsc);
    else
        pattern = patterns;
    end
    patternC = uint8(255*pattern);
    pattern = uint8(255*(1-pattern));
    pheight = size(pattern,2);
    pwidth = size(pattern,1);
    ratioh = ceil(bheight/pheight);
    ratiow = ceil(bwidth/pwidth);
    bigpattern = repmat(pattern,[ratioh ratiow]);
    if ratioh*pheight > bheight
        bigpattern(bheight+1:end,:) = [];
    end
    if ratiow*pwidth > bwidth
        bigpattern(:,bwidth+1:end) = [];
    end
    bigpattern = repmat(bigpattern,[1 1 3]);
    % Create RGB pattern
    pat_size = size(pattern,1)*size(pattern,2) ;
    pat_id = find(patternC);
    patternCrgb = repmat(ones(size(patternC))*255,[1 1 3]) ;
    for rgbLOOP = 1:3,
        patternCrgb(pat_id+(pat_size*(rgbLOOP-1))) = patternCrgb(pat_id+(pat_size*(rgbLOOP-1)))*patterncolors(pati,rgbLOOP) ;
    end % rgbLOOP
    bigpatternC = repmat(patternCrgb,[ratioh ratiow 1]);
    bigpatternC = bigpatternC(1:size(bigpattern,1),1:size(bigpattern,2),:) ;
    % if ratioh*pheight > bheight
    %     bigpatternC(bheight+1:end,:,:) = [];
    % end
    % if ratiow*pwidth > bwidth
    %     bigpatternC(:,bwidth+1:end,:) = [];
    % end

    color = (bits(:,:,1) == colorval(1)) & ...
            (bits(:,:,2) == colorval(2)) & ...
            (bits(:,:,3) == colorval(3));
    color = repmat(color,[1 1 3]);
    bits(color) = bigpattern(color);
    bitsC(color) = bigpatternC(color);
    if ~isempty(colorlist)
        [colors,colori] = nextnonbw(colori,colorlist,bits);
    else
        colors = (bits(:,:,1) ~= bits(:,:,2)) | ...
                 (bits(:,:,1) ~= bits(:,:,3));
    end
    colorind = find(colors);
    pati = (pati + 1);
    if pati > length(patterns)
        pati = 1;
    end
end

bitsC(blackpixels) = 255;
bitsC(blackpixels+(bheight*bwidth)) = 255;
bitsC(blackpixels+(2*(bheight*bwidth))) = 255;

newfig = figure('units','pixels','visible','off');
imaxes = axes('parent',newfig,'units','pixels');
im = image(bitsC/255,'parent',imaxes);
%fpos = get(newfig,'position');
%set(newfig,'position',[fpos(1:2) figsize(3) figsize(4)+1]);
if get(newfig,'WindowStyle')~='docked',
    set(newfig,'position',oldpos)
    set(imaxes,'position',[0 0 figsize(3) figsize(4)+1],'visible','off');
end
set(imaxes,'visible','off');
set(newfig,'visible','on');

set(newfig,'units','normalized');
set(imaxes,'units','normalized');
set(imaxes,'DataAspectRatio',[1 1 1],'DataAspectRatioMode','manual');

if nargout == 1, im_hatchC = bitsC; end

function [colors,out] = nextnonbw(ind,colorlist,bits)
out = ind+1;
colors = [];
while out <= size(colorlist,1)
    if isequal(colorlist(out,:),[255 255 255]) | ...
            isequal(colorlist(out,:),[0 0 0])
        out = out+1;
    else
        colors = (colorlist(out,1) == bits(:,:,1)) & ...
                 (colorlist(out,2) == bits(:,:,2)) & ...
                 (colorlist(out,3) == bits(:,:,3));
        return
    end
end

function colors_rgb = charcolor2rgb(colors_char);
for LOOP = 1:length(colors_char),
    switch colors_char(LOOP)
        case 'r'
            colors_rgb(LOOP,:) = [1 0 0] ;
        case 'g'
            colors_rgb(LOOP,:) = [0 1 0] ;
        case 'b'
            colors_rgb(LOOP,:) = [0 0 1] ;
        case 'c'
            colors_rgb(LOOP,:) = [0 1 1] ;
        case 'm'
            colors_rgb(LOOP,:) = [1 0 1] ;
        case 'y'
            colors_rgb(LOOP,:) = [1 1 0] ;
        case 'k'
            colors_rgb(LOOP,:) = [0 0 0] ;
        otherwise
            error('Invalid color char string')
    end
end
--------------------------------------------------------------------------------
/single-point prediction online/channel_180.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/channel_180.m
--------------------------------------------------------------------------------
/single-point prediction online/channel_180_4800_1600_1600.py:
--------------------------------------------------------------------------------
# Load the usual data-analysis libraries
from openpyxl import load_workbook
from openpyxl import Workbook
import numpy as np
import tensorflow as tf
# from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib.pyplot as plt
import scipy.io as scio
import warnings

book = load_workbook(filename=r"E:\PHD\0论文\0论文\TVT_2018\code\time series prediction_CSDN\single-point prediction online\prediction_180_4800_1600_1600.xlsx")
sheetnames = book.get_sheet_names()
sheet = book.get_sheet_by_name(sheetnames[0])

wb = Workbook()
ws = wb.active

# 360
dataFile = 'channel_data10.mat'
data_com = scio.loadmat(dataFile)
data_real = np.abs(data_com['h'])
y = data_real[0][:8000]

warnings.filterwarnings('ignore')

# Constants (the thesis defense mainly demonstrates the influence of the two parameters below)
time_step = 5
iter_time = 200
iter_time_new = 1  # sliding-window training: one training pass per slide

# Constants
rnn_unit = 5  # hidden layer units
input_size = 1
output_size = 1
train_end = 4800
data_num = len(y)
lr = 0.006  # learning rate
batch_size = None  # the dataset is small, so use all of it rather than mini-batches
train_begin = 0
validation_end = 6400

tf.reset_default_graph()

# Weights and biases of the input and output layers
weights = {
    'in': tf.Variable(tf.random_normal([input_size, rnn_unit])),
    'out': tf.Variable(tf.random_normal([rnn_unit, 1]))
}
biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[rnn_unit, ])),
    'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
}


def get_data(time_step, train_begin, train_end, validation_end):
    data_m, data_validation_m, data_test_m, data_total_m = [], [], [], []

    # Subtract here: the offset leaves room for the label of the last window
    for i in range(train_end - time_step):
        data_m.append(y[i:i + time_step])

    # The dimensions here deserve a second look
    data_x = np.reshape(data_m, [-1, time_step])
    data_train_x = data_x[:, :, np.newaxis]
    data_train_y = y[train_begin + time_step:train_end]
    data_train_y = np.reshape(data_train_y, [-1, 1])
    # data_train_y = data_y[:, :, np.newaxis]

    # Mini-batch processing (disabled)
    # for i in range(np.shape(data_x)[0] - batch_size):
    #     if i % batch_size == 0:
    #         batch_index.append(i)
    #     data_m_x.append(data_x[i:i + batch_size, :input_size])
    #     data_m_y.append(data_y[i:i + batch_size, np.newaxis])
    #
    # data_train_x = np.reshape(data_m_x, [-1, time_step, input_size])
    # data_train_y = np.reshape(data_m_y, [-1, time_step, output_size])

    for i in range(train_end - time_step, validation_end - time_step):
        data_validation_m.append(y[i:i + time_step])
    data_validation_x = np.reshape(data_validation_m, [-1, time_step])
    data_validation_x = data_validation_x[:, :, np.newaxis]
    data_validation_y = y[train_end:validation_end]
    data_validation_y = np.reshape(data_validation_y, [-1, 1])

    for i in range(validation_end - time_step, len(y) - time_step):
        data_test_m.append(y[i:i + time_step])
    data_test_x = np.reshape(data_test_m, [-1, time_step])
    data_test_x = data_test_x[:, :, np.newaxis]
    data_test_y = y[validation_end:len(y)]
    data_test_y = np.reshape(data_test_y, [-1, 1])

    # Build the full dataset, in preparation for the sliding-window training updates
    for i in range(len(y) - time_step):
        data_total_m.append(y[i:i + time_step])
    data_total_x = np.reshape(data_total_m, [-1, time_step])
    data_total_x = data_total_x[:, :, np.newaxis]
    data_total_y = y[time_step:len(y)]
    data_total_y = np.reshape(data_total_y, [-1, 1])

    # data_test_m_x_m, data_test_m_y_m = [], []
    # for i in range(np.shape(data_test_m_x)[0] - time_step):
    #     data_test_m_x_m.append(data_test_m_x[i:i+time_step, :input_size])
    #     data_test_m_y_m.append(data_test_m_y[i:i+time_step, np.newaxis])
    #
    # data_test_x = np.reshape(data_test_m_x_m, [-1, time_step, input_size])
    # data_test_y = np.reshape(data_test_m_y_m, [-1, time_step, output_size])

    return data_train_x, data_train_y, data_validation_x, data_validation_y, data_test_x, data_test_y, data_total_x, data_total_y


def lstm(X):
    batch_size = tf.shape(X)[0]
    time_step = tf.shape(X)[1]
    w_in = weights['in']
    b_in = biases['in']
    input = tf.reshape(X, [-1, input_size])  # reshape the tensor to 2-D for the matmul; the result feeds the hidden layer
    input_rnn = tf.matmul(input, w_in) + b_in
    input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])  # reshape back to 3-D as input to the LSTM cell
    cell = tf.contrib.rnn.BasicLSTMCell(rnn_unit)
    # cell = tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_unit)
    init_state = cell.zero_state(batch_size, dtype=tf.float32)
    output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
    print(output_rnn)
    print(final_states)
    # output_rnn holds the output of every LSTM step; final_states is the state of the last cell
    output = tf.reshape(output_rnn, [-1, rnn_unit])
    final_output = final_states.h
    final_output = tf.reshape(final_output, [-1, rnn_unit])
    # Input to the output layer
    w_out = weights['out']
    b_out = biases['out']
    pred = tf.matmul(final_output, w_out) + b_out
    # pred = tf.add(pred, 0, name="pred1")
    # tf.add_to_collection(name='pred', value=pred)
    return pred, final_states


def train_lstm(X, Y):
    pred, _ = lstm(X)
    # Loss function
    # This flattens the data into a single column
    loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
    # with tf.name_scope("train1"):
    train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    # tf.add_to_collection(name='train1', value=train_op)
    # train_op = tf.add(train_op, 0, name="train1")
    return pred, loss, train_op


X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
Y = tf.placeholder(tf.float32, shape=[None, output_size])

train_x, train_y, validation_x, validation_y, test_x, test_y, total_x, total_y = get_data(time_step, train_begin, train_end, validation_end)

with tf.Session() as sess:
    pred, loss, train_op = train_lstm(X, Y)
    sess.run(tf.global_variables_initializer())
    # Repeated pre-training first
    # train_loss_return = np.zeros(iter_time)
    train_loss_return = []
    validation_loss_return = np.zeros(iter_time)
    test_loss_return = []
    # test_loss_return = np.zeros(iter_time)

    for i in range(iter_time):
        _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x, Y: train_y})
        validation_loss = sess.run(loss, feed_dict={X: validation_x, Y: validation_y})
        # test_loss = sess.run(loss, feed_dict={X: test_x, Y: test_y})
        train_loss_return.append(train_loss)
        validation_loss_return[i] = validation_loss
        test_loss_return.append(0)
        # test_loss_return[i] = test_loss
        print('iter:', i, 'train_loss:', train_loss, 'validation_loss', validation_loss)

    # Performance of the pre-trained model on the training and validation sets
    train_predict_pre_training = sess.run(pred, feed_dict={X: train_x})
    train_predict_pre_training = train_predict_pre_training.reshape((-1))

    validation_predict_pre_training = sess.run(pred, feed_dict={X: validation_x})
    validation_predict_pre_training = validation_predict_pre_training.reshape((-1))

    # This variable is N_{IL} in the paper
    train_new_size = 80

    prediction_length1 = 1

    test_predict = []
    # First predict the value at the next time step
    test_x_buff = total_x[validation_end-time_step]
    test_x_buff = test_x_buff[np.newaxis, :, :]
    test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    test_predict.append(test_predict_result_1_buff)

    # test_loss_return = []
    for i in range(validation_end-time_step+1, data_num-time_step, prediction_length1):

        # Retrain on the new data (partial-sample learning: only the most recently updated samples), sliding forward by one
        train_x_new = total_x[i-train_new_size:i]
        train_y_new = total_y[i-train_new_size:i]
        for j in range(iter_time_new):
            _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x_new, Y: train_y_new})

        train_loss_return.append(train_loss)

        # Predict the value at the next time step
        test_x_buff = total_x[i]
        test_x_buff = test_x_buff[np.newaxis, :, :]
        test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        test_predict.append(test_predict_result_1_buff)

    test_predict = np.reshape(test_predict, [-1])
    # test_predict = test_predict.reshape((-1))

    # Final error performance, i.e., after incremental learning
    train_predict = sess.run(pred, feed_dict={X: train_x})
    train_predict = train_predict.reshape((-1))

    # Performance on the full dataset
    total_predict = sess.run(pred, feed_dict={X: total_x})
    total_predict = total_predict.reshape((-1))

    # Performance after incremental learning, for comparison with the pre-trained model
    validation_predict = sess.run(pred, feed_dict={X: validation_x})
    validation_predict = validation_predict.reshape((-1))

    # Loss on the training set (covering both pre-training and incremental training)
    for i in range(len(train_loss_return)):
        ws.cell(row=i + 1, column=1).value = train_loss_return[i]
    # Loss on the validation set
    for i in range(len(validation_loss_return)):
        ws.cell(row=i + 1, column=2).value = validation_loss_return[i]
    # Loss on the test set
    for i in range(len(test_loss_return)):
        ws.cell(row=i + 1, column=3).value = test_loss_return[i]
    # Performance on the training set after incremental learning
    for i in range(len(train_predict)):
        ws.cell(row=i + 1, column=4).value = train_predict[i]
    # Performance on the validation set after incremental learning
    for i in range(len(validation_predict)):
        ws.cell(row=i + 1, column=5).value = validation_predict[i]
    # Performance on the full dataset after incremental learning
    for i in range(len(total_predict)):
        ws.cell(row=i + 1, column=6).value = total_predict[i]
    # Test-set performance during incremental learning
    for i in range(len(test_predict)):
        ws.cell(row=i + 1, column=7).value = test_predict[i]
    # Performance of the pre-trained model on the training set
    for i in range(len(train_predict_pre_training)):
        ws.cell(row=i + 1, column=8).value = train_predict_pre_training[i]
    # Performance of the pre-trained model on the validation set
    for i in range(len(validation_predict_pre_training)):
        ws.cell(row=i + 1, column=9).value = validation_predict_pre_training[i]

wb.save(filename="prediction_180_4800_1600_1600.xlsx")

plt.figure(figsize=(24, 8))
plt.plot(y[:-1])
plt.plot([None for _ in range(train_end)] + [None for _ in range(train_end, validation_end)] + [x for x in test_predict])
plt.plot([m for m in train_predict] + [None for _ in range(train_end, data_num)])
plt.plot([None for _ in range(train_end)] + [n for n in validation_predict] + [None for _ in range(validation_end, data_num)])
plt.plot([m for m in train_predict_pre_training] + [None for _ in range(train_end, data_num)])
plt.plot([None for _ in range(train_end)] + [n for n in validation_predict_pre_training] + [None for _ in range(validation_end, data_num)])
# plt.plot([k for k in total_predict])
plt.legend(labels=['Real_data', 'Prediction on test', 'Prediction on train', 'Prediction on validation', 'Prediction on train_pre', 'Prediction on validation_pre'])
plt.show()

plt.figure()
plt.plot(train_loss_return[:-1])
plt.plot(validation_loss_return[:-1])
plt.plot(test_loss_return[:-1])
plt.legend(labels=['Loss on train', 'Loss on validation', 'Loss on test'])
plt.show()
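(The window construction in get_data above is easy to get off by one, so here is a minimal, self-contained sanity check of the shapes it should produce for the 4800/1600/1600 split. All names suffixed _demo are illustrative and not part of the repository.)

# Minimal sanity check of the sliding-window shapes built by get_data
# (illustrative only, not part of the repository).
import numpy as np

time_step, train_end, validation_end, data_num = 5, 4800, 6400, 8000
y_demo = np.arange(data_num, dtype=float)  # stand-in for the channel amplitudes

windows = np.array([y_demo[i:i + time_step] for i in range(train_end - time_step)])
train_x_demo = windows[:, :, np.newaxis]                   # (4795, 5, 1)
train_y_demo = y_demo[time_step:train_end].reshape(-1, 1)  # (4795, 1)

assert train_x_demo.shape == (train_end - time_step, time_step, 1)
assert train_y_demo.shape == (train_end - time_step, 1)
# Sample k pairs the window y_demo[k:k+5] with the label y_demo[k+5],
# i.e. each label is predicted from the previous time_step values.
assert float(train_x_demo[0, -1, 0]) == y_demo[time_step - 1]
assert float(train_y_demo[0, 0]) == y_demo[time_step]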
--------------------------------------------------------------------------------
/single-point prediction online/channel_360.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/channel_360.m
--------------------------------------------------------------------------------
/single-point prediction online/channel_360_4800_1600_1600.py:
--------------------------------------------------------------------------------
# Load the usual data-analysis libraries
from openpyxl import load_workbook
from openpyxl import Workbook
import numpy as np
import tensorflow as tf
# from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib.pyplot as plt
import scipy.io as scio
import warnings

book = load_workbook(filename=r"E:\PHD\0论文\0论文\TVT_2018\code\time series prediction_CSDN\single-point prediction online\prediction_360_4800_1600_1600.xlsx")
sheetnames = book.get_sheet_names()
sheet = book.get_sheet_by_name(sheetnames[0])

wb = Workbook()
ws = wb.active

dataFile = 'channel_data_360_10.mat'
data_com = scio.loadmat(dataFile)
data_real = np.abs(data_com['h'])
y = data_real[0][:8000]

warnings.filterwarnings('ignore')

# Constants (the thesis defense mainly demonstrates the influence of the two parameters below)
time_step = 5
iter_time = 200
iter_time_new = 1  # sliding-window training: one training pass per slide

# Constants
rnn_unit = 5  # hidden layer units
input_size = 1
output_size = 1
train_end = 4800
data_num = len(y)
lr = 0.04  # learning rate
batch_size = None  # the dataset is small, so use all of it rather than mini-batches
train_begin = 0
validation_end = 6400

tf.reset_default_graph()

# Weights and biases of the input and output layers
weights = {
    'in': tf.Variable(tf.random_normal([input_size, rnn_unit])),
    'out': tf.Variable(tf.random_normal([rnn_unit, 1]))
}
biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[rnn_unit, ])),
    'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
}


def get_data(time_step, train_begin, train_end, validation_end):
    data_m, data_validation_m, data_test_m, data_total_m = [], [], [], []

    # Subtract here: the offset leaves room for the label of the last window
    for i in range(train_end - time_step):
        data_m.append(y[i:i + time_step])

    # The dimensions here deserve a second look
    data_x = np.reshape(data_m, [-1, time_step])
    data_train_x = data_x[:, :, np.newaxis]
    data_train_y = y[train_begin + time_step:train_end]
    data_train_y = np.reshape(data_train_y, [-1, 1])
    # data_train_y = data_y[:, :, np.newaxis]

    # Mini-batch processing (disabled)
    # for i in range(np.shape(data_x)[0] - batch_size):
    #     if i % batch_size == 0:
    #         batch_index.append(i)
    #     data_m_x.append(data_x[i:i + batch_size, :input_size])
    #     data_m_y.append(data_y[i:i + batch_size, np.newaxis])
    #
    # data_train_x = np.reshape(data_m_x, [-1, time_step, input_size])
    # data_train_y = np.reshape(data_m_y, [-1, time_step, output_size])

    for i in range(train_end - time_step, validation_end - time_step):
        data_validation_m.append(y[i:i + time_step])
    data_validation_x = np.reshape(data_validation_m, [-1, time_step])
    data_validation_x = data_validation_x[:, :, np.newaxis]
    data_validation_y = y[train_end:validation_end]
    data_validation_y = np.reshape(data_validation_y, [-1, 1])

    for i in range(validation_end - time_step, len(y) - time_step):
        data_test_m.append(y[i:i + time_step])
    data_test_x = np.reshape(data_test_m, [-1, time_step])
    data_test_x = data_test_x[:, :, np.newaxis]
    data_test_y = y[validation_end:len(y)]
    data_test_y = np.reshape(data_test_y, [-1, 1])

    # Build the full dataset, in preparation for the sliding-window training updates
    for i in range(len(y) - time_step):
        data_total_m.append(y[i:i + time_step])
    data_total_x = np.reshape(data_total_m, [-1, time_step])
    data_total_x = data_total_x[:, :, np.newaxis]
    data_total_y = y[time_step:len(y)]
    data_total_y = np.reshape(data_total_y, [-1, 1])

    # data_test_m_x_m, data_test_m_y_m = [], []
    # for i in range(np.shape(data_test_m_x)[0] - time_step):
    #     data_test_m_x_m.append(data_test_m_x[i:i+time_step, :input_size])
    #     data_test_m_y_m.append(data_test_m_y[i:i+time_step, np.newaxis])
    #
    # data_test_x = np.reshape(data_test_m_x_m, [-1, time_step, input_size])
    # data_test_y = np.reshape(data_test_m_y_m, [-1, time_step, output_size])

    return data_train_x, data_train_y, data_validation_x, data_validation_y, data_test_x, data_test_y, data_total_x, data_total_y


def lstm(X):
    batch_size = tf.shape(X)[0]
    time_step = tf.shape(X)[1]
    w_in = weights['in']
    b_in = biases['in']
    input = tf.reshape(X, [-1, input_size])  # reshape the tensor to 2-D for the matmul; the result feeds the hidden layer
    input_rnn = tf.matmul(input, w_in) + b_in
    input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])  # reshape back to 3-D as input to the LSTM cell
    cell = tf.contrib.rnn.BasicLSTMCell(rnn_unit)
    # cell = tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_unit)
    init_state = cell.zero_state(batch_size, dtype=tf.float32)
    output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
    print(output_rnn)
    print(final_states)
    # output_rnn holds the output of every LSTM step; final_states is the state of the last cell
    output = tf.reshape(output_rnn, [-1, rnn_unit])
    final_output = final_states.h
    final_output = tf.reshape(final_output, [-1, rnn_unit])
    # Input to the output layer
    w_out = weights['out']
    b_out = biases['out']
    pred = tf.matmul(final_output, w_out) + b_out
    # pred = tf.add(pred, 0, name="pred1")
    # tf.add_to_collection(name='pred', value=pred)
    return pred, final_states


def train_lstm(X, Y):
    pred, _ = lstm(X)
    # Loss function
    # This flattens the data into a single column
    loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
    # with tf.name_scope("train1"):
    train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    # tf.add_to_collection(name='train1', value=train_op)
    # train_op = tf.add(train_op, 0, name="train1")
    return pred, loss, train_op


X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
Y = tf.placeholder(tf.float32, shape=[None, output_size])

train_x, train_y, validation_x, validation_y, test_x, test_y, total_x, total_y = get_data(time_step, train_begin, train_end, validation_end)

with tf.Session() as sess:
    pred, loss, train_op = train_lstm(X, Y)
    sess.run(tf.global_variables_initializer())
    # Repeated pre-training first
    # train_loss_return = np.zeros(iter_time)
    train_loss_return = []
    validation_loss_return = np.zeros(iter_time)
    test_loss_return = []
    # test_loss_return = np.zeros(iter_time)

    for i in range(iter_time):
        _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x, Y: train_y})
        validation_loss = sess.run(loss, feed_dict={X: validation_x, Y: validation_y})
        # test_loss = sess.run(loss, feed_dict={X: test_x, Y: test_y})
        train_loss_return.append(train_loss)
        validation_loss_return[i] = validation_loss
        test_loss_return.append(0)
        # test_loss_return[i] = test_loss
        print('iter:', i, 'train_loss:', train_loss, 'validation_loss', validation_loss)

    # Performance of the pre-trained model on the training and validation sets
    train_predict_pre_training = sess.run(pred, feed_dict={X: train_x})
    train_predict_pre_training = train_predict_pre_training.reshape((-1))

    validation_predict_pre_training = sess.run(pred, feed_dict={X: validation_x})
    validation_predict_pre_training = validation_predict_pre_training.reshape((-1))

    # This variable is N_{IL} in the paper
    train_new_size = 30

    prediction_length1 = 1

    test_predict = []
    # First predict the value at the next time step
    test_x_buff = total_x[validation_end-time_step]
    test_x_buff = test_x_buff[np.newaxis, :, :]
    test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    test_predict.append(test_predict_result_1_buff)

    # test_loss_return = []
    for i in range(validation_end-time_step+1, data_num-time_step, prediction_length1):

        # Retrain on the new data (partial-sample learning: only the most recently updated samples), sliding forward by one
        train_x_new = total_x[i-train_new_size:i]
        train_y_new = total_y[i-train_new_size:i]
        for j in range(iter_time_new):
            _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x_new, Y: train_y_new})

        train_loss_return.append(train_loss)

        # Predict the value at the next time step
        test_x_buff = total_x[i]
        test_x_buff = test_x_buff[np.newaxis, :, :]
        test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        test_predict.append(test_predict_result_1_buff)

    test_predict = np.reshape(test_predict, [-1])
    # test_predict = test_predict.reshape((-1))

    # Final error performance, i.e., after incremental learning
    train_predict = sess.run(pred, feed_dict={X: train_x})
    train_predict = train_predict.reshape((-1))

    # Performance on the full dataset
    total_predict = sess.run(pred, feed_dict={X: total_x})
    total_predict = total_predict.reshape((-1))

    # Performance after incremental learning, for comparison with the pre-trained model
    validation_predict = sess.run(pred, feed_dict={X: validation_x})
    validation_predict = validation_predict.reshape((-1))

    # Loss on the training set (covering both pre-training and incremental training)
    for i in range(len(train_loss_return)):
        ws.cell(row=i + 1, column=1).value = train_loss_return[i]
    # Loss on the validation set
    for i in range(len(validation_loss_return)):
        ws.cell(row=i + 1, column=2).value = validation_loss_return[i]
    # Loss on the test set
    for i in range(len(test_loss_return)):
        ws.cell(row=i + 1, column=3).value = test_loss_return[i]
    # Performance on the training set after incremental learning
    for i in range(len(train_predict)):
        ws.cell(row=i + 1, column=4).value = train_predict[i]
    # Performance on the validation set after incremental learning
    for i in range(len(validation_predict)):
        ws.cell(row=i + 1, column=5).value = validation_predict[i]
    # Performance on the full dataset after incremental learning
    for i in range(len(total_predict)):
        ws.cell(row=i + 1, column=6).value = total_predict[i]
    # Test-set performance during incremental learning
    for i in range(len(test_predict)):
        ws.cell(row=i + 1, column=7).value = test_predict[i]
    # Performance of the pre-trained model on the training set
    for i in range(len(train_predict_pre_training)):
        ws.cell(row=i + 1, column=8).value = train_predict_pre_training[i]
    # Performance of the pre-trained model on the validation set
    for i in range(len(validation_predict_pre_training)):
        ws.cell(row=i + 1, column=9).value = validation_predict_pre_training[i]

wb.save(filename="prediction_360_4800_1600_1600.xlsx")

plt.figure(figsize=(24, 8))
plt.plot(y[:-1])
plt.plot([None for _ in range(train_end)] + [None for _ in range(train_end, validation_end)] + [x for x in test_predict])
plt.plot([m for m in train_predict] + [None for _ in range(train_end, data_num)])
plt.plot([None for _ in range(train_end)] + [n for n in validation_predict] + [None for _ in range(validation_end, data_num)])
plt.plot([m for m in train_predict_pre_training] + [None for _ in range(train_end, data_num)])
plt.plot([None for _ in range(train_end)] + [n for n in validation_predict_pre_training] + [None for _ in range(validation_end, data_num)])
# plt.plot([k for k in total_predict])
plt.legend(labels=['Real_data', 'Prediction on test', 'Prediction on train', 'Prediction on validation', 'Prediction on train_pre', 'Prediction on validation_pre'])
plt.show()

plt.figure()
plt.plot(train_loss_return[:-1])
plt.plot(validation_loss_return[:-1])
plt.plot(test_loss_return[:-1])
plt.legend(labels=['Loss on train', 'Loss on validation', 'Loss on test'])
plt.show()
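(The online phase above interleaves one Adam step on the most recent N_IL = train_new_size windows with a one-step-ahead prediction. Here is a framework-free sketch of that control flow with a stub model; the names online_predict, update, and predict are illustrative and not the repository's API.)

# Skeleton of the single-point online prediction loop (illustrative stub,
# not part of the repository): slide by one sample, retrain on the most
# recent n_il windows, then predict the next point.
def online_predict(total_x, total_y, start, end, n_il, update, predict):
    """update(xs, ys) takes one training step; predict(x) returns one value."""
    preds = [predict(total_x[start])]                     # first one-step-ahead prediction
    for i in range(start + 1, end):
        update(total_x[i - n_il:i], total_y[i - n_il:i])  # one pass over the recent window
        preds.append(predict(total_x[i]))                 # then predict the next point
    return preds

# Trivial usage with a "persistence" stub that ignores training and
# predicts the last observed value of each window:
xs = [[1, 2], [2, 3], [3, 4], [4, 5]]
ys = [[3], [4], [5], [6]]
print(online_predict(xs, ys, 2, 4, 2, lambda a, b: None, lambda w: w[-1]))  # [4, 5]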
--------------------------------------------------------------------------------
/single-point prediction online/channel_90.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/channel_90.m
--------------------------------------------------------------------------------
/single-point prediction online/channel_90.py:
--------------------------------------------------------------------------------
# Load the usual data-analysis libraries
from openpyxl import load_workbook
from openpyxl import Workbook
import numpy as np
import tensorflow as tf
# from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib.pyplot as plt
import scipy.io as scio
import warnings

book = load_workbook(filename=r"E:\PHD\0论文\0论文\TVT_2018\code\time series prediction_CSDN\single-point prediction online\prediction_90.xlsx")
sheetnames = book.get_sheet_names()
sheet = book.get_sheet_by_name(sheetnames[0])

wb = Workbook()
ws = wb.active

dataFile = 'channel_data_90_10.mat'
data_com = scio.loadmat(dataFile)
data_real = np.abs(data_com['h'])
y = data_real[0][:8000]

warnings.filterwarnings('ignore')

# Constants (the thesis defense mainly demonstrates the influence of the two parameters below)
time_step = 5
iter_time = 200
iter_time_new = 1  # sliding-window training: one training pass per slide

# Constants
rnn_unit = 5  # hidden layer units
input_size = 1
output_size = 1
train_end = 1800
data_num = len(y)
lr = 0.06  # learning rate
batch_size = None  # the dataset is small, so use all of it rather than mini-batches
train_begin = 0
validation_end = 2400

tf.reset_default_graph()

# Weights and biases of the input and output layers
weights = {
    'in': tf.Variable(tf.random_normal([input_size, rnn_unit])),
    'out': tf.Variable(tf.random_normal([rnn_unit, 1]))
}
biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[rnn_unit, ])),
    'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
}


def get_data(time_step, train_begin, train_end, validation_end):
    data_m, data_validation_m, data_test_m, data_total_m = [], [], [], []

    # Subtract here: the offset leaves room for the label of the last window
    for i in range(train_end - time_step):
        data_m.append(y[i:i + time_step])

    # The dimensions here deserve a second look
    data_x = np.reshape(data_m, [-1, time_step])
    data_train_x = data_x[:, :, np.newaxis]
    data_train_y = y[train_begin + time_step:train_end]
    data_train_y = np.reshape(data_train_y, [-1, 1])
    # data_train_y = data_y[:, :, np.newaxis]

    # Mini-batch processing (disabled)
    # for i in range(np.shape(data_x)[0] - batch_size):
    #     if i % batch_size == 0:
    #         batch_index.append(i)
    #     data_m_x.append(data_x[i:i + batch_size, :input_size])
    #     data_m_y.append(data_y[i:i + batch_size, np.newaxis])
    #
    # data_train_x = np.reshape(data_m_x, [-1, time_step, input_size])
    # data_train_y = np.reshape(data_m_y, [-1, time_step, output_size])

    for i in range(train_end - time_step, validation_end - time_step):
        data_validation_m.append(y[i:i + time_step])
    data_validation_x = np.reshape(data_validation_m, [-1, time_step])
    data_validation_x = data_validation_x[:, :, np.newaxis]
    data_validation_y = y[train_end:validation_end]
    data_validation_y = np.reshape(data_validation_y, [-1, 1])

    for i in range(validation_end - time_step, len(y) - time_step):
        data_test_m.append(y[i:i + time_step])
    data_test_x = np.reshape(data_test_m, [-1, time_step])
    data_test_x = data_test_x[:, :, np.newaxis]
    data_test_y = y[validation_end:len(y)]
    data_test_y = np.reshape(data_test_y, [-1, 1])

    # Build the full dataset, in preparation for the sliding-window training updates
    for i in range(len(y) - time_step):
        data_total_m.append(y[i:i + time_step])
    data_total_x = np.reshape(data_total_m, [-1, time_step])
    data_total_x = data_total_x[:, :, np.newaxis]
    data_total_y = y[time_step:len(y)]
    data_total_y = np.reshape(data_total_y, [-1, 1])

    # data_test_m_x_m, data_test_m_y_m = [], []
    # for i in range(np.shape(data_test_m_x)[0] - time_step):
    #     data_test_m_x_m.append(data_test_m_x[i:i+time_step, :input_size])
    #     data_test_m_y_m.append(data_test_m_y[i:i+time_step, np.newaxis])
    #
    # data_test_x = np.reshape(data_test_m_x_m, [-1, time_step, input_size])
    # data_test_y = np.reshape(data_test_m_y_m, [-1, time_step, output_size])

    return data_train_x, data_train_y, data_validation_x, data_validation_y, data_test_x, data_test_y, data_total_x, data_total_y


def lstm(X):
    batch_size = tf.shape(X)[0]
    time_step = tf.shape(X)[1]
    w_in = weights['in']
    b_in = biases['in']
    input = tf.reshape(X, [-1, input_size])  # reshape the tensor to 2-D for the matmul; the result feeds the hidden layer
    input_rnn = tf.matmul(input, w_in) + b_in
    input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])  # reshape back to 3-D as input to the LSTM cell
    cell = tf.contrib.rnn.BasicLSTMCell(rnn_unit)
    # cell = tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_unit)
    init_state = cell.zero_state(batch_size, dtype=tf.float32)
    output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
    print(output_rnn)
    print(final_states)
    # output_rnn holds the output of every LSTM step; final_states is the state of the last cell
    output = tf.reshape(output_rnn, [-1, rnn_unit])
    final_output = final_states.h
    final_output = tf.reshape(final_output, [-1, rnn_unit])
    # Input to the output layer
    w_out = weights['out']
    b_out = biases['out']
    pred = tf.matmul(final_output, w_out) + b_out
    # pred = tf.add(pred, 0, name="pred1")
    # tf.add_to_collection(name='pred', value=pred)
    return pred, final_states


def train_lstm(X, Y):
    pred, _ = lstm(X)
    # Loss function
    # This flattens the data into a single column
    loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
    # with tf.name_scope("train1"):
    train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    # tf.add_to_collection(name='train1', value=train_op)
    # train_op = tf.add(train_op, 0, name="train1")
    return pred, loss, train_op


X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
Y = tf.placeholder(tf.float32, shape=[None, output_size])

train_x, train_y, validation_x, validation_y, test_x, test_y, total_x, total_y = get_data(time_step, train_begin, train_end, validation_end)

with tf.Session() as sess:
    pred, loss, train_op = train_lstm(X, Y)
    sess.run(tf.global_variables_initializer())
    # Repeated pre-training first
    # train_loss_return = np.zeros(iter_time)
    train_loss_return = []
    validation_loss_return = np.zeros(iter_time)
    test_loss_return = []
    # test_loss_return = np.zeros(iter_time)

    for i in range(iter_time):
        _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x, Y: train_y})
        validation_loss = sess.run(loss, feed_dict={X: validation_x, Y: validation_y})
        # test_loss = sess.run(loss, feed_dict={X: test_x, Y: test_y})
        train_loss_return.append(train_loss)
        validation_loss_return[i] = validation_loss
        test_loss_return.append(0)
        # test_loss_return[i] = test_loss
        print('iter:', i, 'train_loss:', train_loss, 'validation_loss', validation_loss)

    # Performance of the pre-trained model on the training and validation sets
    train_predict_pre_training = sess.run(pred, feed_dict={X: train_x})
    train_predict_pre_training = train_predict_pre_training.reshape((-1))

    validation_predict_pre_training = sess.run(pred, feed_dict={X: validation_x})
    validation_predict_pre_training = validation_predict_pre_training.reshape((-1))

    # This variable is N_{IL} in the paper
    train_new_size = 200

    prediction_length1 = 1

    test_predict = []
    # First predict the value at the next time step
    test_x_buff = total_x[validation_end-time_step]
    test_x_buff = test_x_buff[np.newaxis, :, :]
    test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    test_predict.append(test_predict_result_1_buff)

    # test_loss_return = []
    for i in range(validation_end-time_step+1, data_num-time_step, prediction_length1):

        # Retrain on the new data (partial-sample learning: only the most recently updated samples), sliding forward by one
        train_x_new = total_x[i-train_new_size:i]
        train_y_new = total_y[i-train_new_size:i]
        for j in range(iter_time_new):
            _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x_new, Y: train_y_new})

        train_loss_return.append(train_loss)

        # Predict the value at the next time step
        test_x_buff = total_x[i]
        test_x_buff = test_x_buff[np.newaxis, :, :]
        test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        test_predict.append(test_predict_result_1_buff)

    test_predict = np.reshape(test_predict, [-1])
    # test_predict = test_predict.reshape((-1))

    # Final error performance, i.e., after incremental learning
    train_predict = sess.run(pred, feed_dict={X: train_x})
    train_predict = train_predict.reshape((-1))

    # Performance on the full dataset
    total_predict = sess.run(pred, feed_dict={X: total_x})
    total_predict = total_predict.reshape((-1))

    # Performance after incremental learning, for comparison with the pre-trained model
    validation_predict = sess.run(pred, feed_dict={X: validation_x})
    validation_predict = validation_predict.reshape((-1))

    # Loss on the training set (covering both pre-training and incremental training)
    for i in range(len(train_loss_return)):
        ws.cell(row=i + 1, column=1).value = train_loss_return[i]
    # Loss on the validation set
    for i in range(len(validation_loss_return)):
        ws.cell(row=i + 1, column=2).value = validation_loss_return[i]
    # Loss on the test set
    for i in range(len(test_loss_return)):
        ws.cell(row=i + 1, column=3).value = test_loss_return[i]
    # Performance on the training set after incremental learning
    for i in range(len(train_predict)):
        ws.cell(row=i + 1, column=4).value = train_predict[i]
    # Performance on the validation set after incremental learning
    for i in range(len(validation_predict)):
        ws.cell(row=i + 1, column=5).value = validation_predict[i]
    # Performance on the full dataset after incremental learning
    for i in range(len(total_predict)):
        ws.cell(row=i + 1, column=6).value = total_predict[i]
    # Test-set performance during incremental learning
    for i in range(len(test_predict)):
        ws.cell(row=i + 1, column=7).value = test_predict[i]
    # Performance of the pre-trained model on the training set
    for i in range(len(train_predict_pre_training)):
        ws.cell(row=i + 1, column=8).value = train_predict_pre_training[i]
    # Performance of the pre-trained model on the validation set
    for i in range(len(validation_predict_pre_training)):
        ws.cell(row=i + 1, column=9).value = validation_predict_pre_training[i]

wb.save(filename="prediction_90.xlsx")

plt.figure(figsize=(24, 8))
plt.plot(y[:-1])
plt.plot([None for _ in range(train_end)] + [None for _ in range(train_end, validation_end)] + [x for x in test_predict])
plt.plot([m for m in train_predict] + [None for _ in range(train_end, data_num)])
plt.plot([None for _ in range(train_end)] + [n for n in validation_predict] + [None for _ in range(validation_end, data_num)])
plt.plot([m for m in train_predict_pre_training] + [None for _ in range(train_end, data_num)])
plt.plot([None for _ in range(train_end)] + [n for n in validation_predict_pre_training] + [None for _ in range(validation_end, data_num)])
# plt.plot([k for k in total_predict])
plt.legend(labels=['Real_data', 'Prediction on test', 'Prediction on train', 'Prediction on validation', 'Prediction on train_pre', 'Prediction on validation_pre'])
plt.show()

plt.figure()
plt.plot(train_loss_return[:-1])
plt.plot(validation_loss_return[:-1])
plt.plot(test_loss_return[:-1])
plt.legend(labels=['Loss on train', 'Loss on validation', 'Loss on test'])
plt.show()
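(The scripts persist everything needed for the error figures in fixed worksheet columns: 1 training loss, 2 validation loss, 3 test loss, 4-6 post-incremental predictions, 7 online test predictions, 8-9 pre-trained-model predictions. A hedged post-processing sketch follows that reads column 7 back and scores it against the ground truth; the file names are assumed to exist, and the NMSE normalization is an assumption, not something the repository computes.)

# Illustrative post-processing (not part of the repository): read the online
# test predictions (column 7) back from the saved workbook and score them.
import numpy as np
import scipy.io as scio
from openpyxl import load_workbook

time_step, validation_end = 5, 2400  # as in channel_90.py above

h = np.abs(scio.loadmat('channel_data_90_10.mat')['h'])[0][:8000]
truth = h[validation_end:]  # targets of the online prediction phase

wb = load_workbook('prediction_90.xlsx')  # assumed to have been produced already
ws = wb.active
pred = np.array([ws.cell(row=r + 1, column=7).value for r in range(len(truth))],
                dtype=float)

mse = np.mean((pred - truth) ** 2)
nmse = mse / np.mean(truth ** 2)  # assumed normalization by signal power
print('MSE = %.3e, NMSE = %.3e' % (mse, nmse))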
/single-point prediction online/channel_90_1.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/channel_90_1.m
--------------------------------------------------------------------------------
/single-point prediction online/channel_90_10.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/channel_90_10.m
--------------------------------------------------------------------------------
/single-point prediction online/channel_90_20.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/channel_90_20.m
--------------------------------------------------------------------------------
/single-point prediction online/channel_90_4800_1600_1600.py:
--------------------------------------------------------------------------------
# load the usual data-analysis libraries
from openpyxl import load_workbook
from openpyxl import Workbook
import numpy as np
import tensorflow as tf
# from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib.pyplot as plt
import scipy.io as scio
import warnings

book = load_workbook(filename=r"E:\PHD\0论文\0论文\TVT_2018\code\time series prediction_CSDN\single-point prediction online\prediction_90_4800_1600_1600.xlsx")
sheetnames = book.get_sheet_names()
sheet = book.get_sheet_by_name(sheetnames[0])

wb = Workbook()
ws = wb.active

dataFile = 'channel_data_90_10.mat'
data_com = scio.loadmat(dataFile)
data_real = np.abs(data_com['h'])
y = data_real[0][:8000]

warnings.filterwarnings('ignore')

# define constants (the thesis defense mainly demonstrates the influence of the two parameters below)
time_step = 5
iter_time = 200
iter_time_new = 1  # sliding-window training: one update per slide

# define constants
rnn_unit = 5  # hidden layer units
input_size = 1
output_size = 1
train_end = 4800
data_num = len(y)
lr = 0.06  # learning rate
batch_size = None  # the dataset is small, so use all of it at once instead of mini-batches
train_begin = 0
validation_end = 6400

tf.reset_default_graph()

# weights and biases of the input and output layers
weights = {
    'in': tf.Variable(tf.random_normal([input_size, rnn_unit])),
    'out': tf.Variable(tf.random_normal([rnn_unit, 1]))
}
biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[rnn_unit, ])),
    'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
}


def get_data(time_step, train_begin, train_end, validation_end):
    data_m, data_validation_m, data_test_m, data_total_m = [], [], [], []

    # we need to subtract one here; the "1" makes room for the final prediction
    for i in range(train_end - time_step):
        data_m.append(y[i:i + time_step])

    # the dimensions here deserve a second look
    data_x = np.reshape(data_m, [-1, time_step])
    data_train_x = data_x[:, :, np.newaxis]
    data_train_y = y[train_begin + time_step:train_end]
    data_train_y = np.reshape(data_train_y, [-1, 1])
    # data_train_y = data_y[:, :, np.newaxis]

    # mini-batch processing (kept for reference, unused)
    # for i in range(np.shape(data_x)[0] - batch_size):
    #     if i % batch_size == 0:
    #         batch_index.append(i)
    #     data_m_x.append(data_x[i:i + batch_size, :input_size])
    #     data_m_y.append(data_y[i:i + batch_size, np.newaxis])
    #
    # data_train_x = np.reshape(data_m_x, [-1, time_step, input_size])
    # data_train_y = np.reshape(data_m_y, [-1, time_step, output_size])

    for i in range(train_end - time_step, validation_end - time_step):
        data_validation_m.append(y[i:i + time_step])
    data_validation_x = np.reshape(data_validation_m, [-1, time_step])
    data_validation_x = data_validation_x[:, :, np.newaxis]
    data_validation_y = y[train_end:validation_end]
    data_validation_y = np.reshape(data_validation_y, [-1, 1])

    for i in range(validation_end - time_step, len(y) - time_step):
        data_test_m.append(y[i:i + time_step])
    data_test_x = np.reshape(data_test_m, [-1, time_step])
    data_test_x = data_test_x[:, :, np.newaxis]
    data_test_y = y[validation_end:len(y)]
    data_test_y = np.reshape(data_test_y, [-1, 1])

    # build the full dataset in preparation for the sliding-window training updates
    for i in range(len(y) - time_step):
        data_total_m.append(y[i:i + time_step])
    data_total_x = np.reshape(data_total_m, [-1, time_step])
    data_total_x = data_total_x[:, :, np.newaxis]
    data_total_y = y[time_step:len(y)]
    data_total_y = np.reshape(data_total_y, [-1, 1])

    # data_test_m_x_m, data_test_m_y_m = [], []
    # for i in range(np.shape(data_test_m_x)[0] - time_step):
    #     data_test_m_x_m.append(data_test_m_x[i:i+time_step, :input_size])
    #     data_test_m_y_m.append(data_test_m_y[i:i+time_step, np.newaxis])
    #
    # data_test_x = np.reshape(data_test_m_x_m, [-1, time_step, input_size])
    # data_test_y = np.reshape(data_test_m_y_m, [-1, time_step, output_size])

    return data_train_x, data_train_y, data_validation_x, data_validation_y, data_test_x, data_test_y, data_total_x, data_total_y


def lstm(X):
    batch_size = tf.shape(X)[0]
    time_step = tf.shape(X)[1]
    w_in = weights['in']
    b_in = biases['in']
    input = tf.reshape(X, [-1, input_size])  # reshape the tensor to 2-D for the computation; the result feeds the hidden layer
    input_rnn = tf.matmul(input, w_in) + b_in
    input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])  # reshape back to 3-D as the input to the LSTM cell
    cell = tf.contrib.rnn.BasicLSTMCell(rnn_unit)
    # cell=tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_unit)
    init_state = cell.zero_state(batch_size, dtype=tf.float32)
    output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
    print(output_rnn)
    print(final_states)
    # output_rnn records the LSTM output at every step; final_states is the result of the last cell
    output = tf.reshape(output_rnn, [-1, rnn_unit])
    final_output = final_states.h
    final_output = tf.reshape(final_output, [-1, rnn_unit])
    # used as the input to the output layer
    w_out = weights['out']
    b_out = biases['out']
    pred = tf.matmul(final_output, w_out) + b_out
    # pred = tf.add(pred, 0, name="pred1")
    # tf.add_to_collection(name='pred', value=pred)
    return pred, final_states


def train_lstm(X, Y):
    pred, _ = lstm(X)
    # loss function
    # this flattens the data into a single column
    loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
    # with tf.name_scope("train1"):
    train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    # tf.add_to_collection(name='train1', value=train_op)
    # train_op = tf.add(train_op, 0, name="train1")
    return pred, loss, train_op


X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
Y = tf.placeholder(tf.float32, shape=[None, output_size])

train_x, train_y, validation_x, validation_y, test_x, test_y, total_x, total_y = get_data(time_step, train_begin, train_end, validation_end)

with tf.Session() as sess:
    pred, loss, train_op = train_lstm(X, Y)
    sess.run(tf.global_variables_initializer())
    # offline pre-training first
    # train_loss_return = np.zeros(iter_time)
    train_loss_return = []
    validation_loss_return = np.zeros(iter_time)
    test_loss_return = []
    # test_loss_return = np.zeros(iter_time)

    for i in range(iter_time):
        _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x, Y: train_y})
        validation_loss = sess.run(loss, feed_dict={X: validation_x, Y: validation_y})
        # test_loss = sess.run(loss, feed_dict={X: test_x, Y: test_y})
        train_loss_return.append(train_loss)
        validation_loss_return[i] = validation_loss
        test_loss_return.append(0)
        # test_loss_return[i] = test_loss
        print('iter:', i, 'train_loss:', train_loss, 'validation_loss:', validation_loss)

    # first record the pre-trained model's performance on the training and validation sets
    train_predict_pre_training = sess.run(pred, feed_dict={X: train_x})
    train_predict_pre_training = train_predict_pre_training.reshape((-1))

    validation_predict_pre_training = sess.run(pred, feed_dict={X: validation_x})
    validation_predict_pre_training = validation_predict_pre_training.reshape((-1))

    # this variable is N_{IL} in the paper
    train_new_size = 200

    prediction_length1 = 1

    test_predict = []
    # first predict the value at the next time step
    test_x_buff = total_x[validation_end-time_step]
    test_x_buff = test_x_buff[np.newaxis, :, :]
    test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
    test_predict.append(test_predict_result_1_buff)

    # test_loss_return = []
    for i in range(validation_end-time_step+1, data_num-time_step, prediction_length1):

        # retrain on the new data (partial-sample learning: only the most recently updated samples), sliding forward by one
        train_x_new = total_x[i-train_new_size:i]
        train_y_new = total_y[i-train_new_size:i]
        for j in range(iter_time_new):
            _, train_loss = sess.run([train_op, loss], feed_dict={X: train_x_new, Y: train_y_new})

        train_loss_return.append(train_loss)

        # predict the value at the next time step
        test_x_buff = total_x[i]
        test_x_buff = test_x_buff[np.newaxis, :, :]
        test_predict_result_1_buff = sess.run(pred, feed_dict={X: test_x_buff})
        test_predict.append(test_predict_result_1_buff)

    test_predict = np.reshape(test_predict, [-1])
    # test_predict = test_predict.reshape((-1))

    # final error performance, i.e. after incremental learning
    train_predict = sess.run(pred, feed_dict={X: train_x})
    train_predict = train_predict.reshape((-1))

    # performance on the full dataset
    total_predict = sess.run(pred, feed_dict={X: total_x})
    total_predict = total_predict.reshape((-1))

    # error performance after incremental learning, for comparison with the pre-trained model
    validation_predict = sess.run(pred, feed_dict={X: validation_x})
    validation_predict = validation_predict.reshape((-1))

    # loss on the training set (pre-training set plus incremental-training set)
    for i in range(len(train_loss_return)):
        ws.cell(row=i + 1, column=1).value = train_loss_return[i]
    # loss on the validation set
    for i in range(len(validation_loss_return)):
        ws.cell(row=i + 1, column=2).value = validation_loss_return[i]
    # loss on the test set
    for i in range(len(test_loss_return)):
        ws.cell(row=i + 1, column=3).value = test_loss_return[i]
    # training-set predictions after incremental learning
    for i in range(len(train_predict)):
        ws.cell(row=i + 1, column=4).value = train_predict[i]
    # validation-set predictions after incremental learning
    for i in range(len(validation_predict)):
        ws.cell(row=i + 1, column=5).value = validation_predict[i]
    # full-dataset predictions after incremental learning
    for i in range(len(total_predict)):
        ws.cell(row=i + 1, column=6).value = total_predict[i]
    # test-set predictions produced during incremental learning
    for i in range(len(test_predict)):
        ws.cell(row=i + 1, column=7).value = test_predict[i]
    # pre-trained model's predictions on the training set
    for i in range(len(train_predict_pre_training)):
        ws.cell(row=i + 1, column=8).value = train_predict_pre_training[i]
    # pre-trained model's predictions on the validation set
    for i in range(len(validation_predict_pre_training)):
        ws.cell(row=i + 1, column=9).value = validation_predict_pre_training[i]

    wb.save(filename="prediction_90_4800_1600_1600.xlsx")

    plt.figure(figsize=(24, 8))
    plt.plot(y[:-1])
    plt.plot([None for _ in range(train_end)] + [None for _ in range(train_end, validation_end)] + [x for x in test_predict])
    plt.plot([m for m in train_predict] + [None for _ in range(train_end, data_num)])
    plt.plot([None for _ in range(train_end)] + [n for n in validation_predict] + [None for _ in range(validation_end, data_num)])
    plt.plot([m for m in train_predict_pre_training] + [None for _ in range(train_end, data_num)])
    plt.plot([None for _ in range(train_end)] + [n for n in validation_predict_pre_training] + [None for _ in range(validation_end, data_num)])
    # plt.plot([k for k in total_predict])
    plt.legend(labels=['Real_data', 'Prediction on test', 'Prediction on train', 'Prediction on validation', 'Prediction on train_pre', 'Prediction on validation_pre'])
    plt.show()

    plt.figure()
    plt.plot(train_loss_return[:-1])
    plt.plot(validation_loss_return[:-1])
    plt.plot(test_loss_return[:-1])
    plt.legend(labels=['Loss on train', 'Loss on validation', 'Loss on test'])
    plt.show()

--------------------------------------------------------------------------------
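These scripts target TensorFlow 1.x (tf.placeholder, tf.Session, tf.contrib.rnn.BasicLSTMCell), APIs that TensorFlow 2.x no longer ships. For readers on a current release, a rough Keras equivalent of the network above (a per-step input projection, one LSTM layer of rnn_unit units, and a linear output head) might look like the sketch below; the hyperparameter values are copied from the script, but the port itself is an assumption, not part of this repository:

import tensorflow as tf  # TensorFlow 2.x

time_step, rnn_unit, lr = 5, 5, 0.06  # values taken from the script above

model = tf.keras.Sequential([
    # per-step input projection, mirroring weights['in'] / biases['in']
    tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(rnn_unit),
                                    input_shape=(time_step, 1)),
    tf.keras.layers.LSTM(rnn_unit),  # replaces tf.contrib.rnn.BasicLSTMCell
    tf.keras.layers.Dense(1),        # mirrors weights['out'] / biases['out']
])
model.compile(optimizer=tf.keras.optimizers.Adam(lr), loss='mse')

# offline pre-training, then sliding-window updates, e.g.:
# model.fit(train_x, train_y, epochs=iter_time, batch_size=len(train_x))
# model.train_on_batch(train_x_new, train_y_new)  # one incremental update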
/single-point prediction online/channel_data10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/channel_data10.mat
--------------------------------------------------------------------------------
/single-point prediction online/channel_data_360_10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/channel_data_360_10.mat
--------------------------------------------------------------------------------
/single-point prediction online/channel_data_450_10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/channel_data_450_10.mat
--------------------------------------------------------------------------------
/single-point prediction online/channel_data_90_10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/channel_data_90_10.mat
--------------------------------------------------------------------------------
/single-point prediction online/combine_180_360.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/combine_180_360.m
--------------------------------------------------------------------------------
/single-point prediction online/magnify.m:
--------------------------------------------------------------------------------
function magnify(f1)
%
%magnify(f1)
%
%  Creates a magnification box under the mouse pointer when a
%  button is pressed. Press '+'/'-' while the button is pressed
%  to increase/decrease the magnification. Press '>'/'<' while
%  the button is pressed to increase/decrease the box size.
%  Hold 'Ctrl' while clicking to leave the magnification on the figure.
%
% Example:
%    plot(1:100,randn(1,100),(1:300)/3,rand(1,300)), grid on,
%    magnify;

% Rick Hindman - 7/29/04

if (nargin == 0), f1 = gcf; end;
set(f1, ...
   'WindowButtonDownFcn', @ButtonDownCallback, ...
   'WindowButtonUpFcn', @ButtonUpCallback, ...
   'WindowButtonMotionFcn', @ButtonMotionCallback, ...
   'KeyPressFcn', @KeyPressCallback);
return;

function ButtonDownCallback(src,eventdata)
   f1 = src;
   a1 = get(f1,'CurrentAxes');
   a2 = copyobj(a1,f1);

   set(f1, ...
      'UserData',[f1,a1,a2], ...
      'Pointer','fullcrosshair', ...
      'CurrentAxes',a2);
   set(a2, ...
      'UserData',[2,0.2], ...  %magnification, frame size
      'Color',get(a1,'Color'), ...
      'Box','on');
   xlabel(''); ylabel(''); zlabel(''); title('');
   set(get(a2,'Children'), ...
      'LineWidth', 2);
   set(a1, ...
      'Color',get(a1,'Color')*0.95);
   set(f1, ...
      'CurrentAxes',a1);
   ButtonMotionCallback(src);
return;

function ButtonUpCallback(src,eventdata)
   H = get(src,'UserData');
   f1 = H(1); a1 = H(2); a2 = H(3);
   set(a1, ...
      'Color',get(a2,'Color'));
   set(f1, ...
      'UserData',[], ...
      'Pointer','arrow', ...
      'CurrentAxes',a1);
   if ~strcmp(get(f1,'SelectionType'),'alt'),
      delete(a2);
   end;
return;

function ButtonMotionCallback(src,eventdata)
   H = get(src,'UserData');
   if ~isempty(H)
      f1 = H(1); a1 = H(2); a2 = H(3);
      a2_param = get(a2,'UserData');
      f_pos = get(f1,'Position');
      a1_pos = get(a1,'Position');

      [f_cp, a1_cp] = pointer2d(f1,a1);

      set(a2,'Position',[(f_cp./f_pos(3:4)) 0 0]+a2_param(2)*a1_pos(3)*[-1 -1 2 2]);
      a2_pos = get(a2,'Position');

      set(a2,'XLim',a1_cp(1)+(1/a2_param(1))*(a2_pos(3)/a1_pos(3))*diff(get(a1,'XLim'))*[-0.5 0.5]);
      set(a2,'YLim',a1_cp(2)+(1/a2_param(1))*(a2_pos(4)/a1_pos(4))*diff(get(a1,'YLim'))*[-0.5 0.5]);
   end;
return;

function KeyPressCallback(src,eventdata)
   H = get(gcf,'UserData');
   if ~isempty(H)
      f1 = H(1); a1 = H(2); a2 = H(3);
      a2_param = get(a2,'UserData');
      if (strcmp(get(f1,'CurrentCharacter'),'+') | strcmp(get(f1,'CurrentCharacter'),'='))
         a2_param(1) = a2_param(1)*1.2;
      elseif (strcmp(get(f1,'CurrentCharacter'),'-') | strcmp(get(f1,'CurrentCharacter'),'_'))
         a2_param(1) = a2_param(1)/1.2;
      elseif (strcmp(get(f1,'CurrentCharacter'),'<') | strcmp(get(f1,'CurrentCharacter'),','))
         a2_param(2) = a2_param(2)/1.2;
      elseif (strcmp(get(f1,'CurrentCharacter'),'>') | strcmp(get(f1,'CurrentCharacter'),'.'))
         a2_param(2) = a2_param(2)*1.2;
      end;
      set(a2,'UserData',a2_param);
      ButtonMotionCallback(src);
   end;
return;



% Included for completeness (usually in own file)
function [fig_pointer_pos, axes_pointer_val] = pointer2d(fig_hndl,axes_hndl)
%
%pointer2d(fig_hndl,axes_hndl)
%
%  Returns the coordinates of the pointer (in pixels)
%  in the desired figure (fig_hndl) and the coordinates
%  in the desired axis (axes coordinates)
%
% Example:
%   figure(1),
%   hold on,
%   for i = 1:1000,
%      [figp,axp]=pointer2d;
%      plot(axp(1),axp(2),'.','EraseMode','none');
%      drawnow;
%   end;
%   hold off

% Rick Hindman - 4/18/01

if (nargin == 0), fig_hndl = gcf; axes_hndl = gca; end;
if (nargin == 1), axes_hndl = get(fig_hndl,'CurrentAxes'); end;

set(fig_hndl,'Units','pixels');

pointer_pos = get(0,'PointerLocation');  %pixels {0,0} lower left
fig_pos = get(fig_hndl,'Position');      %pixels {l,b,w,h}

fig_pointer_pos = pointer_pos - fig_pos([1,2]);
set(fig_hndl,'CurrentPoint',fig_pointer_pos);

if (isempty(axes_hndl)),
   axes_pointer_val = [];
elseif (nargout == 2),
   axes_pointer_line = get(axes_hndl,'CurrentPoint');
   axes_pointer_val = sum(axes_pointer_line)/2;
end;

--------------------------------------------------------------------------------
/single-point prediction online/makehatch.m:
--------------------------------------------------------------------------------
function A = makehatch(hatch)
%MAKEHATCH Predefined hatch patterns
%  MAKEHATCH(HATCH) returns a matrix with the hatch pattern for HATCH
%  according to the following table:
%      HATCH        pattern
%     -------      ---------
%        /          right-slanted lines
%        \          left-slanted lines
%        |          vertical lines
%        -          horizontal lines
%        +          crossing vertical and horizontal lines
%        x          criss-crossing lines
%        .          single dots
%
%  See also: APPLYHATCH

%  By Ben Hinkle, bhinkle@mathworks.com
%  This code is in the public domain.

n = 6;
A=zeros(n);
switch (hatch)
  case '/'
    A = fliplr(eye(n));
  case '\'
    A = eye(n);
  case '|'
    A(:,1) = 1;
  case '-'
    A(1,:) = 1;
  case '+'
    A(:,1) = 1;
    A(1,:) = 1;
  case 'x'
    A = eye(n) | fliplr(diag(ones(n-1,1),-1));
  case '.'
    A(1:2,1:2)=1;
  otherwise
    error(['Undefined hatch pattern "' hatch '".']);
end
--------------------------------------------------------------------------------
/single-point prediction online/makehatch_plus.m:
--------------------------------------------------------------------------------
function A = makehatch_plus(hatch,n,m)
%MAKEHATCH_PLUS Predefined hatch patterns
%
% Modification of MAKEHATCH to allow for selection of matrix size. Useful when using
% APPLYHATCH_PLUS with higher resolution output.
%
% input (optional) N    size of hatch matrix (default = 6)
% input (optional) M    width of lines and dots in hatching (default = 1)
%
% MAKEHATCH_PLUS(HATCH,N,M) returns a matrix with the hatch pattern for HATCH
%  according to the following table:
%      HATCH        pattern
%     -------      ---------
%        /          right-slanted lines
%        \          left-slanted lines
%        |          vertical lines
%        -          horizontal lines
%        +          crossing vertical and horizontal lines
%        x          criss-crossing lines
%        .          square dots
%        c          circular dots
%        w          Just a blank white pattern
%        k          Just a totally black pattern
%
% See also: APPLYHATCH, APPLYHATCH_PLUS, APPLYHATCH_PLUSCOLOR, MAKEHATCH

% By Ben Hinkle, bhinkle@mathworks.com
% This code is in the public domain.

% Modified Brian FG Katz 8-aout-03
% Modified David M Kaplan 19-fevrier-08

if ~exist('n','var'), n = 6; end
if ~exist('m','var'), m = 1; end
n=round(n);

switch (hatch)
  case '\'
    [B,C] = meshgrid( 0:n-1 );
    B = B-C;
    clear C
    A = abs(B) <= m/2;
    A = A | abs(B-n) <= m/2;
    A = A | abs(B+n) <= m/2;
  case '/'
    A = fliplr(makehatch_plus('\',n,m));
  case '|'
    A=zeros(n);
    A(:,1:m) = 1;
  case '-'
    A = makehatch_plus('|',n,m);
    A = A';
  case '+'
    A = makehatch_plus('|',n,m);
    A = A | A';
  case 'x'
    A = makehatch_plus('\',n,m);
    A = A | fliplr(A);
  case '.'
    A=zeros(n);
    A(1:2*m,1:2*m)=1;
  case 'c'
    [B,C] = meshgrid( 0:n-1 );
    A = sqrt(B.^2+C.^2) <= m;
    A = A | fliplr(A) | flipud(A) | flipud(fliplr(A));
  case 'w'
    A = zeros(n);
  case 'k'
    A = ones(n);
  otherwise
    error(['Undefined hatch pattern "' hatch '".']);
end
--------------------------------------------------------------------------------
/single-point prediction online/prediction_180.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/prediction_180.xlsx
--------------------------------------------------------------------------------
/single-point prediction online/prediction_180_4800_1600_1600.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/prediction_180_4800_1600_1600.xlsx
--------------------------------------------------------------------------------
/single-point prediction online/prediction_360.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/prediction_360.xlsx
--------------------------------------------------------------------------------
/single-point prediction online/prediction_360_4800_1600_1600.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/prediction_360_4800_1600_1600.xlsx
--------------------------------------------------------------------------------
/single-point prediction online/prediction_90.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/prediction_90.xlsx
--------------------------------------------------------------------------------
/single-point prediction online/prediction_90_4800_1600_1600.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/prediction_90_4800_1600_1600.xlsx
--------------------------------------------------------------------------------
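Each prediction_*.xlsx workbook above is written by the matching Python script with nine columns, in the order of the ws.cell loops: training loss, validation loss, test loss, post-incremental-learning predictions on the training set, validation set, and full dataset, the online test predictions, and the pre-trained model's predictions on the training and validation sets. A small openpyxl sketch for reading one column back out (the file name is taken from the workbooks above and is assumed to sit in the working directory):

from openpyxl import load_workbook

wb = load_workbook(filename="prediction_90_4800_1600_1600.xlsx")
ws = wb.active

# column 7 holds the online (incremental-learning) test predictions
test_predict = [row[0]
                for row in ws.iter_rows(min_col=7, max_col=7, values_only=True)
                if row[0] is not None]
print(len(test_predict), test_predict[:5])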
/single-point prediction online/single_pre_IL.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/single_pre_IL.m
--------------------------------------------------------------------------------
/single-point prediction online/total_time_step.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shaoyangwangchn/TVT-data-code-channel-prediction-model/091f6438a8452a8c6985ee74847894822f4c319f/single-point prediction online/total_time_step.m
--------------------------------------------------------------------------------