├── .DS_Store
├── data
│   ├── .DS_Store
│   └── train_around_sample.csv
├── board
│   └── .DS_Store
├── weight
│   ├── checkpoint
│   ├── model.ckpt.meta
│   ├── model.ckpt.index
│   └── model.ckpt.data-00000-of-00001
├── model
│   ├── __pycache__
│   │   ├── resnet.cpython-36.pyc
│   │   ├── conv_lstm.cpython-36.pyc
│   │   ├── convlstm.cpython-36.pyc
│   │   ├── res_net.cpython-36.pyc
│   │   ├── test_data.cpython-36.pyc
│   │   ├── hyparameter.cpython-36.pyc
│   │   ├── train_data.cpython-36.pyc
│   │   ├── data_process.cpython-36.pyc
│   │   └── multi_convlstm.cpython-36.pyc
│   ├── test_data.py
│   ├── train_data.py
│   ├── hyparameter.py
│   ├── data_pre_process.py
│   ├── convlstm.py
│   ├── multi_convlstm.py
│   ├── res_net.py
│   ├── data_process.py
│   ├── conv_lstm.py
│   ├── resnet.py
│   └── data_show.py
├── .idea
│   ├── vcs.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── RCL-Learning.iml
│   └── workspace.xml
├── LICENSE
├── README.md
└── run_model.py
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/.DS_Store
--------------------------------------------------------------------------------
/data/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/data/.DS_Store
--------------------------------------------------------------------------------
/board/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/board/.DS_Store
--------------------------------------------------------------------------------
/weight/checkpoint:
--------------------------------------------------------------------------------
1 | model_checkpoint_path: "model.ckpt"
2 | all_model_checkpoint_paths: "model.ckpt"
3 |
--------------------------------------------------------------------------------
/weight/model.ckpt.meta:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/weight/model.ckpt.meta
--------------------------------------------------------------------------------
/weight/model.ckpt.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/weight/model.ckpt.index
--------------------------------------------------------------------------------
/weight/model.ckpt.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/weight/model.ckpt.data-00000-of-00001
--------------------------------------------------------------------------------
/model/__pycache__/resnet.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/model/__pycache__/resnet.cpython-36.pyc
--------------------------------------------------------------------------------
/model/__pycache__/conv_lstm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/model/__pycache__/conv_lstm.cpython-36.pyc
--------------------------------------------------------------------------------
/model/__pycache__/convlstm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/model/__pycache__/convlstm.cpython-36.pyc
--------------------------------------------------------------------------------
/model/__pycache__/res_net.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/model/__pycache__/res_net.cpython-36.pyc
--------------------------------------------------------------------------------
/model/__pycache__/test_data.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/model/__pycache__/test_data.cpython-36.pyc
--------------------------------------------------------------------------------
/model/__pycache__/hyparameter.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/model/__pycache__/hyparameter.cpython-36.pyc
--------------------------------------------------------------------------------
/model/__pycache__/train_data.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/model/__pycache__/train_data.cpython-36.pyc
--------------------------------------------------------------------------------
/model/__pycache__/data_process.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/model/__pycache__/data_process.cpython-36.pyc
--------------------------------------------------------------------------------
/model/__pycache__/multi_convlstm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zouguojian/RCL-Learning/HEAD/model/__pycache__/multi_convlstm.cpython-36.pyc
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="VcsDirectoryMappings">
4 |     <mapping directory="$PROJECT_DIR$" vcs="Git" />
5 |   </component>
6 | </project>
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6" project-jdk-type="Python SDK" />
4 | </project>
--------------------------------------------------------------------------------
/model/test_data.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import pandas as pd
4 |
5 | def get_data(file_path):
6 |     '''
7 |     :param file_path: path to the CSV sample file
8 |     :return: numpy array of the feature columns (the first two columns are dropped)
9 |     '''
10 |     data = pd.read_csv(file_path, encoding='utf-8')
11 |     return data.values[:, 2:]
--------------------------------------------------------------------------------
/model/train_data.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import pandas as pd
4 |
5 | def get_data(file_path):
6 |     '''
7 |     :param file_path: path to the CSV sample file
8 |     :return: numpy array of the feature columns (the first two columns are dropped)
9 |     '''
10 |     data = pd.read_csv(file_path, encoding='utf-8')
11 |     return data.values[:, 2:]
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="ProjectModuleManager">
4 |     <modules>
5 |       <module fileurl="file://$PROJECT_DIR$/.idea/RCL-Learning.iml" filepath="$PROJECT_DIR$/.idea/RCL-Learning.iml" />
6 |     </modules>
7 |   </component>
8 | </project>
--------------------------------------------------------------------------------
/.idea/RCL-Learning.iml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <module type="PYTHON_MODULE" version="4">
3 |   <component name="NewModuleRootManager">
4 |     <content url="file://$MODULE_DIR$" />
5 |     <orderEntry type="jdk" jdkName="Python 3.6" jdkType="Python SDK" />
6 |     <orderEntry type="sourceFolder" forTests="false" />
7 |   </component>
8 |   <component name="TestRunnerService">
9 |     <option name="PROJECT_TEST_RUNNER" value="Unittests" />
10 |   </component>
11 | </module>
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 GuojianZou
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RCL-Learning
2 | In this study, we propose RCL-Learning, an end-to-end deep learning model that integrates ResNet and ConvLSTM.
3 |
4 | Title: RCL-Learning: ResNet and Convolutional Long Short-Term Memory-based Spatiotemporal Air Pollutant Concentration Prediction Model
5 |
6 | Abstract: Predicting the concentration of air pollutants is an effective way to prevent pollution incidents by giving early warning of harmful substances in the air; accurate prediction allows air pollution to be controlled and prevented more effectively. In this study, big-data correlation analysis and deep learning are combined in a proposed model for predicting PM2.5 concentration. The model is a deep learning network built from a residual neural network (ResNet) and a convolutional long short-term memory (LSTM) network (ConvLSTM). ResNet deeply extracts the spatial-distribution features of pollutant concentration and meteorological data from multiple cities. Its output is fed to ConvLSTM, which further refines these preliminary spatial features while also extracting the spatiotemporal features of the pollutant concentration and meteorological data. The model combines the two kinds of features into a spatiotemporally correlated feature sequence and thereby accurately predicts the future PM2.5 concentration of the target city over a period of time. Compared with other neural network models and traditional models, the proposed model improves the accuracy of pollutant concentration prediction. For 1- to 3-hour prediction tasks it performs well, with a root mean square error (RMSE) between 5.478 and 13.622. In addition, we conducted multiscale prediction in the target city and achieved satisfactory performance, with an average RMSE of 22.927 even for 1- to 15-hour prediction tasks.
7 |
--------------------------------------------------------------------------------
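A minimal sketch of the forward pass that run_model.py assembles; this block is an illustration rather than a file in the repository, and the shapes assume the defaults from model/hyparameter.py (batch size 32, input_length 3, 14 cities, 7 features):

    import tensorflow as tf
    from model.res_net import resnet
    from model.multi_convlstm import mul_convlstm

    x = tf.placeholder(tf.float32, [32 * 3, 14, 7])               # [batch*time, cities, features]
    cnn_out = resnet(para=None).cnn(tf.expand_dims(x, axis=3))    # ResNet spatial feature maps
    s = cnn_out.shape
    cnn_out = tf.reshape(cnn_out, [-1, 3, s[1], s[2], s[3]])      # restore the time axis
    convl = mul_convlstm(batch=32, predict_time=1,
                         shape=[s[1], s[2]], filters=6, kernel=[3, 3])
    pred = convl.decoding(convl.encoding(cnn_out))                # [batch, predict_time]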
/model/hyparameter.py:
--------------------------------------------------------------------------------
1 | # -- coding: utf-8 --
2 | import argparse
3 | class parameter(object):
4 | def __init__(self,parser):
5 | self.parser=parser
6 |
7 | self.parser.add_argument('--save_path', type=str, default='weight/', help='save path')
8 |
9 | self.parser.add_argument('--target_site_id', type=int, default=0, help='city ID')
10 |         self.parser.add_argument('--is_training', type=bool, default=True, help='is training (note: argparse parses any non-empty string as True)')
11 | self.parser.add_argument('--epochs', type=int, default=100, help='epoch')
12 | self.parser.add_argument('--step', type=int, default=1, help='step')
13 | self.parser.add_argument('--batch_size', type=int, default=64, help='batch size')
14 | self.parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate')
15 | self.parser.add_argument('--dropout', type=float, default=0.5, help='drop out')
16 |         self.parser.add_argument('--site_num', type=int, default=14, help='total number of cities')
17 |         self.parser.add_argument('--features', type=int, default=7, help='number of features')
18 | self.parser.add_argument('--height', type=int, default=3, help='height')
19 | self.parser.add_argument('--width', type=int, default=3, help='width')
20 |
21 | self.parser.add_argument('--normalize', type=bool, default=True, help='normalize')
22 | self.parser.add_argument('--input_length', type=int, default=3, help='input length')
23 | self.parser.add_argument('--output_length', type=int, default=1, help='output length')
24 |
25 | self.parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight for L2 loss on embedding matrix')
26 | self.parser.add_argument('--hidden_size', type=int, default=128, help='hidden size')
27 | self.parser.add_argument('--hidden_layer', type=int, default=1, help='hidden layer')
28 |
29 | self.parser.add_argument('--training_set_rate', type=float, default=1.0, help='training set rate')
30 | self.parser.add_argument('--validate_set_rate', type=float, default=0.0, help='validate set rate')
31 | self.parser.add_argument('--test_set_rate', type=float, default=1.0, help='test set rate')
32 |
33 | self.parser.add_argument('--train_path', type=str,
34 | default='data/train_around_sample.csv',
35 | help='training set file address')
36 | self.parser.add_argument('--val_path', type=str,
37 | default='data/val_around_weather.csv',
38 | help='validate set file address')
39 | self.parser.add_argument('--test_path', type=str,
40 | default='data/test_around_sample.csv',
41 | help='test set file address')
42 |
43 | self.parser.add_argument('--file_out', type=str, default='weights/ckpt', help='file out')
44 |
45 | def get_para(self):
46 | return self.parser.parse_args()
47 |
48 | if __name__=='__main__':
49 | para=parameter(argparse.ArgumentParser())
50 |
51 | print(para.get_para().batch_size)
--------------------------------------------------------------------------------
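All scripts share these flags through parameter(argparse.ArgumentParser()); a short example of reading them programmatically (the printed values are the defaults defined above):

    import argparse
    from model.hyparameter import parameter

    hp = parameter(argparse.ArgumentParser()).get_para()   # parses sys.argv
    print(hp.site_num, hp.features, hp.input_length)       # 14 7 3 with the defaults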
/model/data_pre_process.py:
--------------------------------------------------------------------------------
1 | # -- coding: utf-8 --
2 |
3 | import xlrd
4 | from datetime import date, datetime
5 |
6 | file='/Users/guojianzou/Downloads/weather'
7 |
8 | col_name = ['time', 'temperature', 'humidity', 'pressure', 'wind_direction', 'wind_speed',
9 | 'clouds', 'maximum_temperature', 'minimum_temperature', 'conditions']
10 |
11 |
12 | def read_excel(file_name,condition_type,conditions_exit):
13 | pre_values = {'time': 0, 'temperature': 0, 'humidity': 0, 'pressure': 0, 'wind_direction': 0, 'wind_speed': 0,
14 | 'clouds': 0, 'maximum_temperature': 0, 'minimum_temperature': 0, 'conditions': 0}
15 |     # open the workbook
16 | workbook = xlrd.open_workbook(file_name)
17 |     # get all sheet names
18 | sheet_name = workbook.sheet_names()[0]
19 |
20 |     # get sheet content by index or by name
21 |     sheet = workbook.sheet_by_index(0)  # sheet indices start at 0
22 | # sheet = workbook.sheet_by_name('Sheet1')
23 |
24 |     # print(workbook.sheets()[0])
25 |     # sheet name, number of rows, number of columns
26 | print(sheet.name, sheet.nrows, sheet.ncols)
27 |
28 |     # get the values of whole rows and columns (as arrays)
29 |     rows = sheet.row_values(0)  # content of the first row
30 |     # cols = sheet.col_values(2)  # content of the third column
31 | # print(rows)
32 | array=[]
33 | for rown in range(sheet.nrows-1, 0, -1):
34 | line=sheet.row_values(rown)
35 | # print(line)
36 | time = line[0].split('/')
37 | time.reverse()
38 | time='-'.join(time)+'-'+line[1][:2]
39 |
40 | temperature=float(line[2])
41 |
42 | humidity=float(pre_values['humidity'] if line[3]=='' else line[3].split('%')[0])
43 | pre_values['humidity']=humidity
44 |
45 | pressure=float(pre_values['pressure'] if line[4]=='-' else line[4].split('Hpa')[0])
46 | pre_values['pressure']=pressure
47 |
48 | wind_direction=float( -1 if line[5]=='calm.' else line[5].split('º')[0])
49 |
50 | wind_speed=float(pre_values['wind_speed'] if line[6]=='' else line[6])
51 | pre_values['wind_speed']=wind_speed
52 |
53 | print(line[7])
54 | clouds=float( -1 if line[7][0]=='N' or line[7]=='-' else line[7].split('/')[0])
55 |
56 | maximum_temperature=float(temperature if line[12]=='-' else line[12])
57 |
58 | minimum_temperature=float(temperature if line[13]=='-' else line[13])
59 |
60 |         if line[14] not in conditions_exit:
61 |             # use the dict size as the id: condition_type is passed by value and would reset on every call
62 |             conditions_exit[line[14]] = len(conditions_exit)
63 |         conditions = conditions_exit[line[14]]
64 |
65 | print(time,temperature,humidity,pressure,wind_direction,wind_speed,clouds,maximum_temperature,minimum_temperature,conditions)
66 | array.append([time,temperature,humidity,pressure,wind_direction,wind_speed,clouds,maximum_temperature,minimum_temperature,conditions])
67 | # array = rows
68 | # array['L1'] = sheet.cell_value(rown, 0)
69 | # array['L2'] = sheet.cell_value(rown, 1)
70 | # array['L3'] = sheet.cell_value(rown, 2)
71 | # array['L4'] = sheet.cell_value(rown, 3)
72 | # array['Question'] = sheet.cell_value(rown, 4)
73 | # array['Answer'] = sheet.cell_value(rown, 5)
74 | # tables.append(array)
75 | return array
76 |
77 | def write(csv_writer,data):
78 | for line in data:
79 | csv_writer.writerow(line)
80 |
81 | import csv
82 | if __name__ == '__main__':
83 |     # read the Excel workbooks, month by month
84 |     condition_type = 0
85 |     conditions_exit = {'-': condition_type}
86 | 
87 |     # 1. create the output file object
88 |     f = open(file+'/weather.csv', 'w', encoding='utf-8')
89 | 
90 |     # 2. build a csv writer on top of the file object
91 |     csv_writer = csv.writer(f)
92 | 
93 |     # 3. write the header row
94 |     csv_writer.writerow(col_name)
95 | 
96 | 
97 |     for i in range(1,13):
98 |         data=read_excel(file+'/'+str(i)+'.xls',condition_type,conditions_exit)
99 |         # 4. write the rows into the csv file
100 |         write(csv_writer,data)
101 | 
102 |     # 5. close the file
103 |     f.close()
104 | 
105 |     print('Reading and writing finished successfully!')
106 | 
--------------------------------------------------------------------------------
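As a hedged alternative sketch (not part of the repository): pandas can perform the raw .xls-to-csv concatenation in a few lines, reading the same monthly workbooks through xlrd; the cleaning done in read_excel above (forward-filling missing values, encoding the 'conditions' strings) would still have to be applied separately:

    import pandas as pd

    base = '/Users/guojianzou/Downloads/weather'   # same directory as the `file` variable above
    frames = [pd.read_excel(base + '/%d.xls' % i) for i in range(1, 13)]
    pd.concat(frames, ignore_index=True).to_csv(base + '/weather.csv', index=False)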
/model/convlstm.py:
--------------------------------------------------------------------------------
1 | # -- coding: utf-8 --
2 | import tensorflow as tf
3 |
4 | class ConvLSTMCell(tf.nn.rnn_cell.RNNCell):
5 |     """An LSTM cell with convolutions instead of multiplications.
6 | Reference:
7 | Xingjian, S. H. I., et al. "Convolutional LSTM network: A machine learning approach for precipitation nowcasting." Advances in Neural Information Processing Systems. 2015.
8 | """
9 |
10 | def __init__(self, shape, filters, kernel, forget_bias=1.0, activation=tf.tanh, normalize=False, peephole=True, data_format='channels_last', reuse=None):
11 | super(ConvLSTMCell, self).__init__(_reuse=reuse)
12 | self._kernel = kernel
13 | self._filters = filters
14 | self._forget_bias = forget_bias
15 | self._activation = activation
16 | self._normalize = normalize
17 | self._peephole = peephole
18 | if data_format == 'channels_last':
19 | self._size = tf.TensorShape(shape + [self._filters])
20 | self._feature_axis = self._size.ndims
21 | self._data_format = None
22 | elif data_format == 'channels_first':
23 | self._size = tf.TensorShape([self._filters] + shape)
24 | self._feature_axis = 0
25 | self._data_format = 'NC'
26 | else:
27 | raise ValueError('Unknown data_format')
28 |
29 | @property
30 | def state_size(self):
31 | return tf.nn.rnn_cell.LSTMStateTuple(self._size, self._size)
32 |
33 | @property
34 | def output_size(self):
35 | return self._size
36 |
37 | def call(self, x, state):
38 | c, h = state
39 |
40 | x = tf.concat([x, h], axis=self._feature_axis)
41 | n = x.shape[-1].value
42 | m = 4 * self._filters if self._filters > 1 else 4
43 | # w = tf.Variable(initial_value=tf.random_normal(shape=self._kernel + [n, m]),name='kernel',dtype=tf.float32)
44 | w = tf.get_variable('kernel', self._kernel + [n, m])
45 | y = tf.nn.convolution(x, w, 'SAME', data_format=self._data_format)
46 | if not self._normalize:
47 | # y += tf.Variable(initial_value=tf.random_normal(shape=[m]), name='bias')
48 | y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
49 | j, i, f, o = tf.split(y, 4, axis=self._feature_axis)
50 |
51 | if self._peephole:
52 | # i += tf.Variable(initial_value=tf.random_normal(shape=c.shape[1:]), name='W_ci') * c
53 | # f += tf.Variable(initial_value=tf.random_normal(shape=c.shape[1:]), name='W_cf') * c
54 | i += tf.get_variable('W_ci', c.shape[1:]) * c
55 | f += tf.get_variable('W_cf', c.shape[1:]) * c
56 |
57 | if self._normalize:
58 | j = tf.contrib.layers.layer_norm(j)
59 | i = tf.contrib.layers.layer_norm(i)
60 | f = tf.contrib.layers.layer_norm(f)
61 |
62 | f = tf.sigmoid(f + self._forget_bias)
63 | i = tf.sigmoid(i)
64 | c = c * f + i * self._activation(j)
65 |
66 | if self._peephole:
67 | # o += tf.Variable(initial_value=tf.random_normal(shape=c.shape[1:]), name='W_co') * c
68 | o += tf.get_variable('W_co', c.shape[1:]) * c
69 |
70 | if self._normalize:
71 | o = tf.contrib.layers.layer_norm(o)
72 | c = tf.contrib.layers.layer_norm(c)
73 |
74 | o = tf.sigmoid(o)
75 | h = o * self._activation(c)
76 |
77 | state = tf.nn.rnn_cell.LSTMStateTuple(c, h)
78 |
79 | return h, state
80 |
81 |
82 | if __name__ == '__main__':
83 | batch_size = 32
84 | timesteps = 3
85 | shape = [162, 5]
86 | kernel = [162, 2]
87 | channels = 1
88 |     filters = 12  # number of output channels
89 |
90 | # Create a placeholder for videos.
91 | inputs = tf.placeholder(tf.float32, [batch_size, timesteps] + shape + [channels])
92 | #
93 | # # Add the ConvLSTM step.
94 | # cell = ConvLSTMCell(shape, filters, kernel)
95 | #
96 | # '''
97 | # inputs shape is : [batch size, time size, site number, features, input channel]
98 | # outputs is : [batch size, time size, site number, features, output channel]
99 | # state: LSTMStateTuple(c=,
100 | # h=)
101 | # '''
102 | # outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=inputs.dtype)
103 | #
104 | # print(outputs.shape)
105 | # print(state)
106 |
--------------------------------------------------------------------------------
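A usage sketch for ConvLSTMCell, essentially the demo that is commented out in the __main__ block above:

    import tensorflow as tf
    from model.convlstm import ConvLSTMCell

    batch_size, timesteps, channels, filters = 32, 3, 1, 12
    shape, kernel = [162, 5], [162, 2]
    # inputs:  [batch size, time size, site number, features, input channel]
    inputs = tf.placeholder(tf.float32, [batch_size, timesteps] + shape + [channels])
    cell = ConvLSTMCell(shape, filters, kernel)
    # outputs: [batch size, time size, site number, features, output channel]
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=inputs.dtype)
    print(outputs.shape)   # (32, 3, 162, 5, 12)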
/model/multi_convlstm.py:
--------------------------------------------------------------------------------
1 | # -- coding: utf-8 --
2 | from model.convlstm import ConvLSTMCell
3 | import tensorflow as tf
4 |
5 | class mul_convlstm(object):
6 | def __init__(self, batch, predict_time, shape=[162, 5], filters= 32 , kernel=[162, 2], layer_num=1, activation=tf.tanh, normalize=True, reuse=None):
7 | self.batch=batch
8 | self.predict_time=predict_time
9 | self.layers=layer_num
10 | self.activation=activation
11 | self.normalize=normalize
12 | self.reuse=reuse
13 |
14 |
15 | self.shape = shape
16 | self.kernel = kernel
17 |         self.filters = filters  # number of output channels
18 |
19 | def encoding(self, inputs):
20 | '''
21 |         :return: shape is [batch size, time size, site num, features, out channel]
22 | '''
23 |
24 | # inputs=tf.expand_dims(inputs,axis=4)
25 |
26 | with tf.variable_scope(name_or_scope='encoder_convlstm',reuse=tf.AUTO_REUSE):
27 | # Add the ConvLSTM step.
28 | cell = ConvLSTMCell(self.shape, self.filters, self.kernel)
29 |
30 | '''
31 | inputs shape is : [batch size, time size, site number, features, input channel]
32 | outputs is : [batch size, time size, site number, features, output channel]
33 | state: LSTMStateTuple(c=,
34 | h=)
35 | '''
36 | init_state=cell.zero_state(self.batch,tf.float32)
37 | outputs, state = tf.nn.dynamic_rnn(cell, inputs, initial_state=init_state, dtype=inputs.dtype)
38 |
39 | print(outputs.shape)
40 | print(state)
41 | return outputs
42 |
43 | def decoding(self, encoder_hs):
44 | '''
45 | :param encoder_hs:
46 | :return: shape is [batch size, site number, prediction size]
47 | '''
48 |
49 | h = []
50 | h_state = encoder_hs[:, -1, :, :, :]
51 | h_state = tf.expand_dims(input=h_state, axis=1)
52 | with tf.variable_scope(name_or_scope='decoder_convlstm',reuse=tf.AUTO_REUSE):
53 | '''
54 |             inputs shape is [batch size, 1, site num, features, out channel]
55 | '''
56 | # Add the ConvLSTM step.
57 | cell = ConvLSTMCell(self.shape, self.filters, self.kernel)
58 | init_state = cell.zero_state(self.batch, tf.float32)
59 |
60 | '''
61 | inputs shape is : [batch size, 1, site number, features, input channel]
62 | outputs is : [batch size, 1, site number, features, output channel]
63 | state: LSTMStateTuple(c=,
64 | h=)
65 | '''
66 | for i in range(self.predict_time):
67 |                 ''' return shape is [batch size, time size, height, site num, out channel] '''
68 | with tf.variable_scope('decoder_lstm', reuse=tf.AUTO_REUSE):
69 | h_state, state = tf.nn.dynamic_rnn(cell=cell, inputs=h_state,
70 | initial_state=init_state, dtype=tf.float32)
71 | init_state = state
72 |
73 |                 avg_pool = tf.nn.avg_pool(tf.squeeze(h_state, axis=1), ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
74 |                 cnn_shape = avg_pool.get_shape().as_list()
75 |                 nodes = cnn_shape[1] * cnn_shape[2] * cnn_shape[3]
76 |                 results = tf.layers.dense(inputs=tf.reshape(avg_pool, [cnn_shape[0], nodes]), units=256, name='layer_1', reuse=tf.AUTO_REUSE, activation=tf.nn.relu)
77 | results = tf.layers.dense(inputs=results, units=1, name='layer_2', reuse=tf.AUTO_REUSE)
78 | h.append(results)
79 | pre=tf.concat(h,axis=1)
80 | return pre
81 |
82 | if __name__ == '__main__':
83 |
84 | batch_size = 32
85 | timesteps = 3
86 | shape = [162, 5]
87 | kernel = [162, 2]
88 | channels = 1
89 |     filters = 12  # number of output channels
90 |
91 | # Create a placeholder for videos.
92 |     inputs = tf.placeholder(tf.float32, [batch_size, timesteps] + shape + [channels])
93 |
94 | multi=mul_convlstm(batch=32, predict_time=2)
95 |
96 | hs=multi.encoding(inputs)
97 | print(hs.shape)
98 | pre =multi.decoding(hs)
99 | print(pre.shape)
100 |
--------------------------------------------------------------------------------
/model/res_net.py:
--------------------------------------------------------------------------------
1 | # -- coding: utf-8 --
2 | import tensorflow as tf
3 |
4 |
5 | class resnet(object):
6 | def __init__(self, para=None):
7 |         '''
8 |         :param para: the hyperparameter object (this class takes no
9 |                      batch_size, unlike the Resnet variant in resnet.py)
10 |         '''
11 | self.para = para
12 | self.h = [3, 3, 3]
13 | self.w = [3, 3, 3]
14 | self.out_channel = [3, 3, 3]
15 | self.in_channel = [1] + self.out_channel
16 | self.features = 5
17 |
18 | def conv_2d(self, x, h, w, in_channel, out_channel, layer_name):
19 | '''
20 | :param x:
21 | :param h:
22 | :param w:
23 | :param in_channel:
24 | :param out_channel:
25 | :return:
26 | '''
27 | filter = tf.Variable(initial_value=tf.truncated_normal(shape=[h, w, in_channel, out_channel], stddev=0.1),
28 | name=layer_name)
29 | # filter = tf.get_variable(name=layer_name, shape=[h, w, in_channel, out_channel],initializer=tf.truncated_normal_initializer(stddev=0.1))
30 | bias = tf.get_variable(layer_name + "/bias", [out_channel], initializer=tf.constant_initializer(0))
31 | layer = tf.nn.conv2d(input=x, filter=filter, strides=[1, 1, 1, 1], padding='SAME')
32 | return tf.nn.bias_add(layer, bias)
33 |
34 | def relu(self, inputs):
35 | '''
36 | :param inputs:
37 | :return:
38 | '''
39 | relu = tf.nn.relu(inputs)
40 | return relu
41 |
42 | def block(self, x, in_channel, out_channel, block_name):
43 | '''
44 | :param x:
45 | :param in_channel:
46 | :param out_channel:
47 | :param block_name:
48 | :return:
49 | '''
50 | x1 = self.conv_2d(x, self.h[0], self.w[0], in_channel[0], out_channel[0], block_name + '/1')
51 | x1 = self.relu(x1)
52 | x2 = self.conv_2d(x1, self.h[0], self.w[0], in_channel[1], out_channel[1], block_name + '/2')
53 | x2 = self.relu(x2)
54 | x3 = self.conv_2d(x2, self.h[0], self.w[0], in_channel[2], out_channel[2], block_name + '/3')
55 | x3 = self.relu(x3)
56 | return x3
57 |
58 | def residual_connected(self, x1, x2, h, w, in_channel, out_channel, residual_name):
59 | '''
60 | :param x1:
61 | :param x2:
62 | :param h:
63 | :param w:
64 | :param in_channel:
65 | :param out_channel:
66 | :param residual_name:
67 | :return:
68 | '''
69 | filter = tf.Variable(initial_value=tf.random_normal(shape=[h, w, in_channel, out_channel]), name=residual_name)
70 | bias = tf.get_variable(residual_name + "/bias", [out_channel], initializer=tf.constant_initializer(0))
71 | conv = tf.nn.conv2d(x1, filter, strides=[1, 1, 1, 1], padding='SAME')
72 | conv = tf.nn.bias_add(conv, bias)
73 | layer_add = conv + x2
74 | return self.relu(layer_add)
75 |
76 | def cnn(self, x):
77 | '''
78 | :param x: [batch size, site num, features, channel]
79 | :return: [batch size, height, channel]
80 | '''
81 |
82 | with tf.variable_scope(name_or_scope='resnet', reuse=tf.AUTO_REUSE):
83 | block1 = self.block(x, [1, 3, 3], [3, 3, 3], block_name='block1')
84 | residual1 = self.residual_connected(x, block1, 1, 1, 1, 3, residual_name='residual1')
85 | print('residual 1 shape is : ', residual1.shape)
86 |
87 | block2 = self.block(residual1, [3, 6, 6], [6, 6, 6], block_name='block2')
88 | residual2 = self.residual_connected(residual1, block2, 1, 1, 3, 6, residual_name='residual2')
89 | print('residual 2 shape is : ', residual2.shape)
90 |
91 | block3 = self.block(residual2, [6, 6, 6], [6, 6, 6], block_name='block3')
92 | residual3 = self.residual_connected(residual2, block3, 1, 1, 6, 6, residual_name='residual3')
93 | print('residual 3 shape is : ', residual3.shape)
94 |
95 | block4 = self.block(residual3, [6, 6, 6], [6, 6, 6], block_name='block4')
96 | residual4 = self.residual_connected(residual3, block4, 1, 1, 6, 6, residual_name='residual4')
97 | print('residual 4 shape is : ', residual4.shape)
98 |
99 | # max_pool=tf.nn.avg_pool(residual4, ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1], padding='SAME')
100 |
101 | max_pool = residual4
102 | print('max_pool output shape is : ', max_pool.shape)
103 |
104 | # cnn_shape = max_pool3.get_shape().as_list()
105 | # nodes = cnn_shape[1] * cnn_shape[2] * cnn_shape[3]
106 | # reshaped = tf.reshape(max_pool3, [cnn_shape[0], nodes])
107 |
108 | return max_pool
109 |
110 |
111 | if __name__ == '__main__':
112 | batch_size = 32
113 | timesteps = 3
114 | shape = [162, 5]
115 | kernel = [162, 2]
116 | channels = 1
117 |     filters = 12  # number of output channels
118 |
119 | # Create a placeholder for videos.
120 | inputs = tf.placeholder(tf.float32, [batch_size, 1, 7, 1])
121 |
122 |     multi = resnet(para=None)  # resnet.__init__ only takes para
123 | multi.cnn(inputs)
--------------------------------------------------------------------------------
/model/data_process.py:
--------------------------------------------------------------------------------
1 | # -- coding: utf-8 --
2 |
3 | import tensorflow as tf
4 | import numpy as np
5 | import argparse
6 | import pandas as pd
7 | from model.hyparameter import parameter
8 |
9 | class dataIterator():    # note: keep using the same object for the training and test phases, otherwise the data has to be loaded again
10 | def __init__(self, hp=None):
11 | '''
12 | :param is_training: while is_training is True,the model is training state
13 | :param field_len:
14 | :param time_size:
15 | :param prediction_size:
16 | :param target_site:
17 | '''
18 | self.para=hp
19 | self.site_id=self.para.target_site_id # ozone ID
20 | self.time_size=self.para.input_length # time series length of input
21 | self.prediction_size=self.para.output_length # the length of prediction
22 | self.is_training=self.para.is_training # true or false
23 | self.window_step=self.para.step # windows step
24 | self.train_data= self.sudden_changed(np.array(self.get_source_data(self.para.train_path).values[:,2:],dtype=np.float32))
25 | self.test_data = self.sudden_changed(self.get_source_data(self.para.test_path).values[:,2:])
26 |
27 | # self.data=self.source_data.loc[self.source_data['ZoneID']==self.site_id]
28 | # self.data=self.source_data
29 | self.train_length=self.train_data.shape[0] # train data length
30 | self.test_length = self.test_data.shape[0] # test data length
31 | self.max,self.min=self.get_max_min(self.train_data,self.test_data) # max and min are list type, used for the later normalization
32 |
33 | self.normalize=self.para.normalize
34 | if self.normalize:
35 | self.train_data=self.normalization(self.train_data) #normalization
36 | self.test_data = self.normalization(self.test_data) # normalization
37 |
38 | def sudden_changed(self, data):
39 | '''
40 | :param data:
41 | :return:
42 | '''
43 |         sudden = [300, 300, 300, 300, 300, 300, 40]  # largest plausible jump for each feature
44 | shape = data.shape
45 | for j in range(shape[1]):
46 | for i in range(shape[0]):
47 | if i != 0:
48 |                     if float(data[i][j]) - float(data[i - 1][j]) > sudden[j]:
49 | data[i][j] = data[i - 1][j]
50 | return data
51 |
52 | def get_source_data(self,file_path):
53 | '''
54 | :return:
55 | '''
56 | data = pd.read_csv(file_path, encoding='utf-8')
57 | # print(data.values)
58 | return data
59 |
60 | def get_max_min(self, train_data=None,test_data=None):
61 | '''
62 | :return: the max and min value of input features
63 | '''
64 | self.min_list=[]
65 | self.max_list=[]
66 |
67 | for i in range(train_data.shape[1]):
68 | self.min_list.append(min([min(train_data[:,i]),min(test_data[:,i])]))
69 | self.max_list.append(max([max(train_data[:,i]),max(test_data[:,i])]))
70 | print('the max feature list is :',self.max_list)
71 | print('the min feature list is :', self.min_list)
72 | return self.max_list,self.min_list
73 |
74 | def normalization(self, data):
75 | for i in range(data.shape[1]):
76 | data[:,i]=(data[:,i] - np.array(self.min[i])) / (np.array(self.max[i]) - np.array(self.min[i]))
77 | return data
78 |
79 | def generator(self):
80 | '''
81 | :return: yield the data of every time,
82 | shape:input_series:[time_size,field_size]
83 | label:[predict_size]
84 | '''
85 | # print('is_training is : ', self.is_training)
86 | if self.is_training:
87 | low,high=0,int(self.train_data.shape[0]//self.para.site_num)*self.para.site_num
88 | data=self.train_data
89 | else:
90 | low,high=0,int(self.test_data.shape[0]//self.para.site_num) * self.para.site_num
91 | data=self.test_data
92 |
93 | while low+self.para.site_num*(self.para.input_length + self.para.output_length)<= high:
94 | label=data[low + self.time_size * self.para.site_num: low + self.time_size * self.para.site_num + self.prediction_size * self.para.site_num,1: 2]
95 | label=np.concatenate([label[i * self.para.site_num:(i + 1) * self.para.site_num, :] for i in range(self.prediction_size)], axis=1)
96 |
97 | time=data[low + self.time_size * self.para.site_num: low + self.time_size * self.para.site_num + self.prediction_size * self.para.site_num,1:4]
98 | time=np.concatenate([time[i * self.para.site_num:(i + 1) * self.para.site_num, :] for i in range(self.prediction_size)], axis=1)
99 |
100 | x_input=np.array(data[low:low+self.time_size*self.para.site_num])
101 |
102 | # a=np.concatenate([x_input[i*self.para.site_num] for i in range(self.para.input_length)],axis=0)
103 | #
104 | # x_input=a
105 |
106 | yield (x_input,
107 | label[0])
108 | if self.is_training:
109 | low += self.window_step*self.para.site_num
110 | else:
111 | low+=self.prediction_size*self.para.site_num
112 | return
113 |
114 | def next_batch(self,batch_size, epochs, is_training=True):
115 | '''
116 |         :param batch_size:
117 |         :param epochs:
118 |         :param is_training: selects the training or the test generator
119 |         :return: a (features, label) batch iterator
120 | '''
121 | self.is_training=is_training
122 | dataset=tf.data.Dataset.from_generator(self.generator,output_types=(tf.float32,tf.float32))
123 |
124 | if self.is_training:
125 | dataset=dataset.shuffle(buffer_size=int(self.train_data.shape[0]//self.para.site_num -self.time_size-self.prediction_size)//self.window_step)
126 | dataset=dataset.repeat(count=epochs)
127 | dataset=dataset.batch(batch_size=batch_size)
128 | iterator=dataset.make_one_shot_iterator()
129 |
130 | return iterator.get_next()
131 |
132 |
133 | def re_current(line, max, min):
134 | return [[line_sub[i]*(max[i]-min[i])+min[i]+0.1 for i in range(len(line_sub))] for line_sub in line]
135 | #
136 | if __name__=='__main__':
137 | para = parameter(argparse.ArgumentParser())
138 | para = para.get_para()
139 |
140 |     data_iter = dataIterator(hp=para)
141 | 
142 |     next_elem = data_iter.next_batch(32, 1, is_training=False)
143 |     with tf.Session() as sess:
144 |         for _ in range(3):
145 |             x, y = sess.run(next_elem)
146 |             print(x.shape)
147 |             print(y.shape)
148 |             # map the normalized PM2.5 labels back to the original scale
149 |             pm_max, pm_min = data_iter.max[1], data_iter.min[1]
150 |             rows = np.array(re_current(y, [pm_max] * y.shape[1], [pm_min] * y.shape[1]), dtype=int)
151 |             print(rows)
--------------------------------------------------------------------------------
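The generator above relies on the row layout of the CSVs: records are grouped hour by hour, with site_num consecutive rows per hour. A small worked example of its window arithmetic, assuming the defaults site_num=14, input_length=3 and output_length=1:

    site_num, input_length, output_length = 14, 3, 1
    low = 0
    x_rows = (low, low + input_length * site_num)        # rows 0..41: 3 hours x 14 cities of input
    label_rows = (low + input_length * site_num,
                  low + (input_length + output_length) * site_num)   # rows 42..55: the next hour
    print(x_rows, label_rows)                            # (0, 42) (42, 56)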
/model/conv_lstm.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Jan 15 16:47:10 2018
4 |
5 | @author: Administrator
6 | """
7 | import tensorflow as tf
8 | from tensorflow.python.ops.rnn_cell import LSTMStateTuple
9 | class BasicConvLSTMCell(object):
10 | """Basic Conv LSTM recurrent network cell.
11 | """
12 |
13 | def __init__(self, shape, filter_size, num_features, forget_bias=1.0, input_size=None,
14 | state_is_tuple=False, activation=tf.nn.tanh,time_size=3):
15 | """Initialize the basic Conv LSTM cell.
16 | Args:
17 |       shape: int tuple that is the height and width of the cell
18 |       filter_size: int tuple that is the height and width of the filter
19 |       num_features: int that is the depth of the cell
20 | forget_bias: float, The bias added to forget gates (see above).
21 | input_size: Deprecated and unused.
22 | state_is_tuple: If True, accepted and returned states are 2-tuples of
23 | the `c_state` and `m_state`. If False, they are concatenated
24 | along the column axis. The latter behavior will soon be deprecated.
25 | activation: Activation function of the inner states.
26 | """
27 | #if not state_is_tuple:
28 | #logging.warn("%s: Using a concatenated state is slower and will soon be "
29 | # "deprecated. Use state_is_tuple=True.", self)
30 | self.shape = shape
31 | self.filter_size = filter_size
32 | self.num_features = num_features
33 | self._forget_bias = forget_bias
34 | self._state_is_tuple = state_is_tuple
35 | self._activation = activation
36 | self.time_size=time_size
37 | self.ALL_SIZE_ONE=256
38 | self.ALL_SIZE_TWO=1
39 | # @property
40 | # def state_size(self):
41 | # return (LSTMStateTuple(self._num_units, self._num_units)
42 | # if self._state_is_tuple else 2 * self._num_units)
43 |
44 | # @property
45 | # def output_size(self):
46 | # return self._num_units
47 | # def zero_state(self, batch_size, dtype):
48 | def zero_state(self, batch_size):
49 | """Return zero-filled state tensor(s).
50 | Args:
51 | batch_size: int, float, or unit Tensor representing the batch size.
52 |       (the dtype argument of the standard RNNCell API was dropped; float32 is used)
53 |     Returns:
54 |       tensor of shape [batch_size x shape[0] x shape[1] x num_features * 2]
55 |       filled with zeros
56 | """
57 |
58 | shape = self.shape
59 | num_features = self.num_features
60 | zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2])
61 | return zeros
62 |
63 | def C_LSTM_cell(self, inputs, state, scope=None):
64 | """Long short-term memory cell (LSTM)."""
65 | with tf.variable_scope(scope or type(self).__name__,reuse=tf.AUTO_REUSE): # "BasicLSTMCell"
66 | # Parameters of gates are concatenated into one multiply for efficiency.
67 | if self._state_is_tuple:
68 | c, h = state
69 | else:
70 | c, h = tf.split(axis=3, num_or_size_splits=2, value=state)
71 | concat = _conv_linear([inputs, h], self.filter_size, self.num_features * 4, True)
72 |
73 | # i = input_gate, j = new_input, f = forget_gate, o = output_gate
74 | i, j, f, o = tf.split(axis=3, num_or_size_splits=4, value=concat)
75 |
76 | new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) *
77 | self._activation(j))
78 | new_h = self._activation(new_c) * tf.nn.sigmoid(o)
79 |
80 | if self._state_is_tuple:
81 | new_state = LSTMStateTuple(new_c, new_h)
82 | else:
83 | new_state = tf.concat(axis=3, values=[new_c, new_h])
84 | return new_h, new_state
85 |
86 | def State_Result(self, X, state):
87 | outputs = []
88 | with tf.variable_scope('CV_LSTM'):
89 | for timestep in range(self.time_size):
90 | if timestep > 0:
91 | tf.get_variable_scope().reuse_variables()
92 | (cell_output, state) = self.C_LSTM_cell(X[:, timestep,: ,: , :],state)
93 | outputs.append(cell_output)
94 | h_state = outputs[-1]
95 | return h_state
96 |
97 | def Full_connect(self, X, state):
98 | state_result=self.State_Result(X, state)
99 | shape=state_result.get_shape().as_list()
100 | print(shape)
101 | nodes=shape[1]*shape[2]*shape[3]
102 | reshaped=tf.reshape(state_result,[shape[0],nodes])
103 |         # first fully connected layer
104 | with tf.variable_scope('F_Layer_one',reuse=tf.AUTO_REUSE):
105 | weight_three=tf.get_variable("weight",
106 | [nodes,self.ALL_SIZE_ONE],
107 | initializer=tf.truncated_normal_initializer(stddev=0.1))
108 | bias_three=tf.get_variable("bias",[self.ALL_SIZE_ONE],
109 | initializer=tf.constant_initializer(0.1))
110 | layer1=tf.nn.relu(tf.matmul(reshaped,weight_three)+bias_three)
111 |         # if KeepProb is not 1, apply dropout (KeepProb is the keep probability of each unit)
112 | # if(KeepProb!=1):
113 | # layer1 = tf.nn.dropout(layer1, keep_prob=KeepProb)
114 |         # second fully connected layer
115 | with tf.variable_scope('F_Layer_two',reuse=tf.AUTO_REUSE):
116 | weight_four=tf.get_variable("weight",
117 | [self.ALL_SIZE_ONE,self.ALL_SIZE_TWO],
118 | initializer=tf.truncated_normal_initializer(stddev=0.1))
119 | bias_four=tf.get_variable("bias",[self.ALL_SIZE_TWO],
120 | initializer=tf.constant_initializer(0.1))
121 | layer2=tf.nn.relu(tf.matmul(layer1,weight_four)+bias_four)
122 |         # if KeepProb is not 1, apply dropout (KeepProb is the keep probability of each unit)
123 | # if(KeepProb!=1):
124 | # layer2 = tf.nn.dropout(layer6, keep_prob=KeepProb)
125 | return layer2
126 |
127 | def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None):
128 | """convolution:
129 | Args:
130 | args: a 4D Tensor or a list of 4D, batch x n, Tensors.
131 | filter_size: int tuple of filter height and width.
132 | num_features: int, number of features.
133 | bias_start: starting value to initialize the bias; 0 by default.
134 | scope: VariableScope for the created subgraph; defaults to "Linear".
135 | Returns:
136 | A 4D Tensor with shape [batch h w num_features]
137 | Raises:
138 | ValueError: if some of the arguments has unspecified or wrong shape.
139 | """
140 |
141 | # Calculate the total size of arguments on dimension 1.
142 | total_arg_size_depth = 0
143 | shapes = [a.get_shape().as_list() for a in args]
144 |     # e.g. [[32, 14, 7, 3], [32, 14, 7, 3]]
145 | for shape in shapes:
146 | if len(shape) != 4:
147 | raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes))
148 | if not shape[3]:
149 |             raise ValueError("Linear expects shape[3] of arguments: %s" % str(shapes))
150 | else:
151 | total_arg_size_depth += shape[3]
152 |     # e.g. 6
153 |     dtype = [a.dtype for a in args][0]
154 |     # take the dtype of the first argument
155 | # Now the computation.
156 | with tf.variable_scope(scope or "Conv"):
157 | matrix = tf.get_variable(
158 | "Matrix", [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype)
159 | # bias_one=tf.get_variable("bias",[num_features],
160 | # initializer=tf.constant_initializer(0))
161 | # matrix1 = tf.get_variable(
162 | # "Matrix1", [filter_size[0]+1, filter_size[1]+1, num_features, num_features], dtype=dtype)
163 | if len(args) == 1:
164 | res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME')
165 | else:
166 | args=tf.concat(axis=3, values=args)
167 |             # the concatenated input has shape e.g. (32, 14, 7, 6)
168 | res = tf.nn.conv2d(args, matrix, strides=[1, 1, 1, 1], padding='SAME')
169 | # res=tf.nn.relu(tf.nn.bias_add(res, bias_one))
170 | # res = tf.nn.conv2d(res, matrix1, strides=[1, 1, 1, 1], padding='SAME')
171 | if not bias:
172 | return res
173 | bias_term = tf.get_variable(
174 | "Bias", [num_features],
175 | dtype=dtype,
176 | initializer=tf.constant_initializer(
177 | bias_start, dtype=dtype))
178 | return res + bias_term
--------------------------------------------------------------------------------
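A hedged usage sketch for BasicConvLSTMCell; the shapes follow the example shapes in the _conv_linear comments, and run_model.py contains the same wiring in a commented-out block:

    import tensorflow as tf
    from model.conv_lstm import BasicConvLSTMCell

    x = tf.placeholder(tf.float32, [32, 3, 14, 7, 3])    # [batch, time, height, width, channels]
    cell = BasicConvLSTMCell(shape=[14, 7], filter_size=[3, 3], num_features=3, time_size=3)
    state = cell.zero_state(batch_size=32)               # zeros of [32, 14, 7, 6] (c and h stacked)
    pred = cell.Full_connect(x, state)                   # [32, 1] after the two dense layers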
/model/resnet.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Jan 1 11:07:40 2018
4 | The convolution kernels used in this model have sizes 1, 2 and 3.
5 | @author: Administrator
6 | """
7 | import tensorflow as tf
8 | #tf.reset_default_graph()
9 | class Resnet(object):
10 | def __init__(self,inputs,batch_size):
11 | self.inputs=inputs
12 | self.batch_size=batch_size
13 |         # parameters for the first convolution layer
14 | self.CONV1=1
15 | self.NUM_CHANNELS=1
16 | self.CONV1_DEEP=3
17 |
18 | self.CONV2=2
19 | self.CONV2_DEEP=3
20 |
21 | self.CONV3=3
22 | self.CONV3_DEEP=3
23 | def CNN_layer(self):
24 | with tf.variable_scope('layer_one_1',reuse=tf.AUTO_REUSE):
25 | weight_one=tf.get_variable("weight",
26 | [self.CONV1,self.CONV1,self.NUM_CHANNELS,self.CONV1_DEEP],
27 | initializer=tf.truncated_normal_initializer(stddev=0.1))
28 | bias_one=tf.get_variable("bias",[self.CONV1_DEEP],
29 | initializer=tf.constant_initializer(0))
30 | conv_one=tf.nn.conv2d(self.inputs, weight_one, strides=[1, 1, 1, 1],
31 | padding='SAME')
32 | layer1=tf.nn.relu(tf.nn.bias_add(conv_one, bias_one))
33 |
34 | with tf.variable_scope('layer_one_2',reuse=tf.AUTO_REUSE):
35 | weight_one=tf.get_variable("weight",
36 | [self.CONV2,self.CONV2,self.CONV1_DEEP,self.CONV2_DEEP],
37 | initializer=tf.truncated_normal_initializer(stddev=0.1))
38 | bias_one=tf.get_variable("bias",[self.CONV2_DEEP],
39 | initializer=tf.constant_initializer(0))
40 | conv_one=tf.nn.conv2d(layer1, weight_one, strides=[1, 1, 1, 1],
41 | padding='SAME')
42 | layer2=tf.nn.relu(tf.nn.bias_add(conv_one, bias_one))
43 | with tf.variable_scope('layer_one_3',reuse=tf.AUTO_REUSE):
44 | weight_one=tf.get_variable("weight",
45 | [self.CONV3,self.CONV3,self.CONV2_DEEP,self.CONV3_DEEP],
46 | initializer=tf.truncated_normal_initializer(stddev=0.1))
47 | bias_one=tf.get_variable("bias",[self.CONV3_DEEP],
48 | initializer=tf.constant_initializer(0))
49 | conv_one=tf.nn.conv2d(layer2, weight_one, strides=[1, 1, 1, 1],
50 | padding='SAME')
51 | layer3=tf.nn.bias_add(conv_one, bias_one)
52 | with tf.variable_scope('layer_add',reuse=tf.AUTO_REUSE):
53 | weight=tf.get_variable("weight",
54 | [self.CONV1,self.CONV1,self.NUM_CHANNELS,3],
55 | initializer=tf.truncated_normal_initializer(stddev=0.1))
56 | bias=tf.get_variable("bias",[3],
57 | initializer=tf.constant_initializer(0))
58 | conv=tf.nn.conv2d(self.inputs, weight, strides=[1, 1, 1, 1],
59 | padding='SAME')
60 | # return tf.nn.relu(tf.nn.bias_add(conv, bias)+layer3)
61 | layer_add=tf.nn.bias_add(conv, bias)+layer3
62 | # return layer_add
63 |
64 | with tf.variable_scope('layer_one_4',reuse=tf.AUTO_REUSE):
65 | weight_one=tf.get_variable("weight",
66 | [3,3,3,6],
67 | initializer=tf.truncated_normal_initializer(stddev=0.1))
68 | bias_one=tf.get_variable("bias",[6],
69 | initializer=tf.constant_initializer(0))
70 | conv_one=tf.nn.conv2d(layer_add, weight_one, strides=[1, 1, 1, 1],
71 | padding='SAME')
72 | layer4=tf.nn.relu(tf.nn.bias_add(conv_one, bias_one))
73 | with tf.variable_scope('layer_one_5',reuse=tf.AUTO_REUSE):
74 | weight_one=tf.get_variable("weight",
75 | [3,3,6,6],
76 | initializer=tf.truncated_normal_initializer(stddev=0.1))
77 | bias_one=tf.get_variable("bias",[6],
78 | initializer=tf.constant_initializer(0))
79 | conv_one=tf.nn.conv2d(layer4, weight_one, strides=[1, 1, 1, 1],
80 | padding='SAME')
81 | layer5=tf.nn.relu(tf.nn.bias_add(conv_one, bias_one))
82 | with tf.variable_scope('layer_one_6',reuse=tf.AUTO_REUSE):
83 | weight_one=tf.get_variable("weight",
84 | [3,3,6,6],
85 | initializer=tf.truncated_normal_initializer(stddev=0.1))
86 | bias_one=tf.get_variable("bias",[6],
87 | initializer=tf.constant_initializer(0))
88 | conv_one=tf.nn.conv2d(layer5, weight_one, strides=[1, 1, 1, 1],
89 | padding='SAME')
90 | layer6=tf.nn.bias_add(conv_one, bias_one)
91 | with tf.variable_scope('layer_add1',reuse=tf.AUTO_REUSE):
92 | weight=tf.get_variable("weight",
93 | [1,1,3,6],
94 | initializer=tf.truncated_normal_initializer(stddev=0.1))
95 | bias=tf.get_variable("bias",[6],
96 | initializer=tf.constant_initializer(0))
97 | conv=tf.nn.conv2d(layer_add, weight, strides=[1, 1, 1, 1],
98 | padding='SAME')
99 | layer_add=tf.nn.relu(tf.nn.bias_add(conv, bias)+layer6)
100 | # return layer_add
101 |
102 |
103 | with tf.variable_scope('layer_one_7',reuse=tf.AUTO_REUSE):
104 | weight_one=tf.get_variable("weight",
105 | [3,3,6,6],
106 | initializer=tf.truncated_normal_initializer(stddev=0.1))
107 | bias_one=tf.get_variable("bias",[6],
108 | initializer=tf.constant_initializer(0))
109 | conv_one=tf.nn.conv2d(layer_add, weight_one, strides=[1, 1, 1, 1],
110 | padding='SAME')
111 | layer7=tf.nn.relu(tf.nn.bias_add(conv_one, bias_one))
112 | with tf.variable_scope('layer_one_8',reuse=tf.AUTO_REUSE):
113 | weight_one=tf.get_variable("weight",
114 | [3,3,6,6],
115 | initializer=tf.truncated_normal_initializer(stddev=0.1))
116 | bias_one=tf.get_variable("bias",[6],
117 | initializer=tf.constant_initializer(0))
118 | conv_one=tf.nn.conv2d(layer7, weight_one, strides=[1, 1, 1, 1],
119 | padding='SAME')
120 | layer8=tf.nn.relu(tf.nn.bias_add(conv_one, bias_one))
121 | with tf.variable_scope('layer_one_9',reuse=tf.AUTO_REUSE):
122 | weight_one=tf.get_variable("weight",
123 | [3,3,6,6],
124 | initializer=tf.truncated_normal_initializer(stddev=0.1))
125 | bias_one=tf.get_variable("bias",[6],
126 | initializer=tf.constant_initializer(0))
127 | conv_one=tf.nn.conv2d(layer8, weight_one, strides=[1, 1, 1, 1],
128 | padding='SAME')
129 | layer9=tf.nn.bias_add(conv_one, bias_one)
130 | with tf.variable_scope('layer_add2',reuse=tf.AUTO_REUSE):
131 | weight=tf.get_variable("weight",
132 | [1,1,6,6],
133 | initializer=tf.truncated_normal_initializer(stddev=0.1))
134 | bias=tf.get_variable("bias",[6],
135 | initializer=tf.constant_initializer(0))
136 | conv=tf.nn.conv2d(layer_add, weight, strides=[1, 1, 1, 1],
137 | padding='SAME')
138 | layer_add=tf.nn.relu(tf.nn.bias_add(conv, bias)+layer9)
139 | # return layer_add
140 |
141 |
142 | with tf.variable_scope('layer_one_10',reuse=tf.AUTO_REUSE):
143 | weight_one=tf.get_variable("weight",
144 | [3,3,6,6],
145 | initializer=tf.truncated_normal_initializer(stddev=0.1))
146 | bias_one=tf.get_variable("bias",[6],
147 | initializer=tf.constant_initializer(0))
148 | conv_one=tf.nn.conv2d(layer_add, weight_one, strides=[1, 1, 1, 1],
149 | padding='SAME')
150 | layer10=tf.nn.relu(tf.nn.bias_add(conv_one, bias_one))
151 | with tf.variable_scope('layer_one_11',reuse=tf.AUTO_REUSE):
152 | weight_one=tf.get_variable("weight",
153 | [3,3,6,6],
154 | initializer=tf.truncated_normal_initializer(stddev=0.1))
155 | bias_one=tf.get_variable("bias",[6],
156 | initializer=tf.constant_initializer(0))
157 | conv_one=tf.nn.conv2d(layer10, weight_one, strides=[1, 1, 1, 1],
158 | padding='SAME')
159 | layer11=tf.nn.relu(tf.nn.bias_add(conv_one, bias_one))
160 | with tf.variable_scope('layer_one_12',reuse=tf.AUTO_REUSE):
161 | weight_one=tf.get_variable("weight",
162 | [3,3,6,6],
163 | initializer=tf.truncated_normal_initializer(stddev=0.1))
164 | bias_one=tf.get_variable("bias",[6],
165 | initializer=tf.constant_initializer(0))
166 | conv_one=tf.nn.conv2d(layer11, weight_one, strides=[1, 1, 1, 1],
167 | padding='SAME')
168 | layer12=tf.nn.bias_add(conv_one, bias_one)
169 | with tf.variable_scope('layer_add3',reuse=tf.AUTO_REUSE):
170 | weight=tf.get_variable("weight",
171 | [1,1,6,6],
172 | initializer=tf.truncated_normal_initializer(stddev=0.1))
173 | bias=tf.get_variable("bias",[6],
174 | initializer=tf.constant_initializer(0))
175 | conv=tf.nn.conv2d(layer_add, weight, strides=[1, 1, 1, 1],
176 | padding='SAME')
177 | layer_add=tf.nn.relu(tf.nn.bias_add(conv, bias)+layer12)
178 | return layer_add
--------------------------------------------------------------------------------
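A hedged usage sketch for this hard-coded Resnet variant (kernel sizes 1, 2 and 3, channel widths 3 and then 6, as defined above):

    import tensorflow as tf
    from model.resnet import Resnet

    x = tf.placeholder(tf.float32, [32, 14, 7, 1])   # [batch, cities, features, channel]
    net = Resnet(inputs=x, batch_size=32)
    features = net.CNN_layer()                       # four residual stages -> [32, 14, 7, 6]
    print(features.shape)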
/run_model.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Tue Jan 23 23:35:58 2018
4 |
5 | @author: Administrator
6 | """
7 | from model.res_net import resnet
8 | from model.multi_convlstm import mul_convlstm
9 |
10 | from model.resnet import Resnet
11 | from model.conv_lstm import BasicConvLSTMCell
12 | from model.hyparameter import parameter
13 | from model.data_process import dataIterator
14 | import tensorflow as tf
15 | import argparse
16 | import numpy as np
17 | import matplotlib.pyplot as plt
18 | import os
19 | from matplotlib.pyplot import MultipleLocator
20 |
21 |
22 | # tf.config.optimizer.set_experimental_options({'layout_optimizer': False})
23 | tf.reset_default_graph()
24 | os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
25 | logs_path="board"
26 | para=parameter(argparse.ArgumentParser()).get_para()
27 |
28 | class Model(object):
29 | def __init__(self,para):
30 | self.para=para
31 | # define placeholders
32 | self.placeholders = {
33 | 'features': tf.placeholder(tf.float32, shape=[self.para.batch_size*self.para.input_length, self.para.site_num, self.para.features]),
34 | 'labels': tf.placeholder(tf.float32, shape=[self.para.batch_size, self.para.output_length]),
35 | 'dropout': tf.placeholder_with_default(0., shape=()),
36 | 'is_training':tf.placeholder_with_default(input=False,shape=())
37 | }
38 | self.model()
39 |
40 | def model(self):
41 | '''
42 | :param batch_size: 64
43 | :param encoder_layer:
44 | :param decoder_layer:
45 | :param encoder_nodes:
46 | :param prediction_size:
47 | :param is_training: True
48 | :return:
49 | '''
50 |
51 | # create model
52 | l = resnet(para=para)
53 | x_input = self.placeholders['features']
54 | inputs = tf.reshape(x_input, shape=[-1, self.para.site_num, self.para.features])
55 | '''
56 | cnn output shape is : [batch size, height, site num, output channel]
57 | '''
58 | cnn_out = l.cnn(tf.expand_dims(inputs[:, :, :], axis=3))
59 | shape = cnn_out.shape
60 |
61 | # rescnn = Resnet(tf.expand_dims(inputs[:, :, :], axis=3))
62 | # cnn_out = rescnn.CNN_layer()
63 | # shape = cnn_out.shape
64 |
65 | cnn_out = tf.reshape(cnn_out, shape=[-1, self.para.input_length, shape[1], shape[2], shape[3]])
66 | print('resnet output shape is : ',cnn_out.shape)
67 | '''
68 | resnet output shape is : (32, 3, 14, 4, 32)
69 | '''
70 | mul_convl=mul_convlstm(batch=self.para.batch_size,
71 | predict_time=self.para.output_length,
72 | shape=[cnn_out.shape[2],cnn_out.shape[3]],
73 | filters=6,
74 | kernel=[3, 3],
75 | layer_num=self.para.hidden_layer,
76 | normalize=self.para.is_training)
77 |
78 | h_states=mul_convl.encoding(cnn_out)
79 | self.pres=mul_convl.decoding(h_states)
80 |
81 | # clstm = BasicConvLSTMCell([cnn_out.shape[2],cnn_out.shape[3]], [3, 3], cnn_out.shape[3],time_size= self.para.input_length)
82 | # state = clstm.zero_state(self.para.batch_size)
83 | # self.pres = clstm.Full_connect(cnn_out, state)
84 |
85 | self.cross_entropy = tf.reduce_mean(
86 | tf.sqrt(tf.reduce_mean(tf.square(self.placeholders['labels'] - self.pres), axis=0)))
87 |
88 | print('self.pres shape is : ', self.pres.shape)
89 | print('labels shape is : ', self.placeholders['labels'].shape)
90 |
91 | print(self.cross_entropy)
92 | print('cross shape is : ',self.cross_entropy.shape)
93 |
94 | # tf.summary.scalar('cross_entropy',self.cross_entropy)
95 | # backprocess and update the parameters
96 | self.train_op = tf.train.AdamOptimizer(self.para.learning_rate).minimize(self.cross_entropy)
97 |
98 | def test(self):
99 | '''
100 | :param batch_size: usually use 1
101 | :param encoder_layer:
102 | :param decoder_layer:
103 | :param encoder_nodes:
104 | :param prediction_size:
105 | :param is_training: False
106 | :return:
107 | '''
108 |         model_file = tf.train.latest_checkpoint(self.para.save_path)
109 | self.saver.restore(self.sess, model_file)
110 |
111 |
112 | def accuracy(self,label,predict):
113 | '''
114 | :param Label: represents the observed value
115 | :param Predict: represents the predicted value
116 | :param epoch:
117 | :param steps:
118 | :return:
119 | '''
120 | error = label - predict
121 | average_error = np.mean(np.fabs(error.astype(float)))
122 | # print("mae is : %.6f" % (average_error))
123 |
124 | rmse_error = np.sqrt(np.mean(np.square(label - predict)))
125 | # print("rmse is : %.6f" % (rmse_error))
126 |
127 | cor = np.mean(np.multiply((label - np.mean(label)),
128 | (predict - np.mean(predict)))) / (np.std(predict) * np.std(label))
129 | # print('correlation coefficient is: %.6f' % (cor))
130 |
131 | # mask = label != 0
132 | # mape =np.mean(np.fabs((label[mask] - predict[mask]) / label[mask]))*100.0
133 | # mape=np.mean(np.fabs((label - predict) / label)) * 100.0
134 | # print('mape is: %.6f %' % (mape))
135 | sse = np.sum((label - predict) ** 2)
136 | sst = np.sum((label - np.mean(label)) ** 2)
137 | R2 = 1 - sse / sst # r2_score(y_actual, y_predicted, multioutput='raw_values')
138 | # print('r^2 is: %.6f' % (R2))
139 |
140 | return average_error,rmse_error,cor,R2
141 |
142 | def describe(self,label,predict,prediction_size):
143 | '''
144 | :param label:
145 | :param predict:
146 | :param prediction_size:
147 | :return:
148 | '''
149 | plt.figure()
150 | # Label is observed value,Blue
151 | plt.plot(label[0:prediction_size], 'b*:', label=u'actual value')
152 | # Predict is predicted value,Red
153 | plt.plot(predict[0:prediction_size], 'r*:', label=u'predicted value')
154 | # use the legend
155 | # plt.legend()
156 | plt.xlabel("time(hours)", fontsize=17)
157 | plt.ylabel("pm$_{2.5}$ (ug/m$^3$)", fontsize=17)
158 |         plt.title("the prediction of pm$_{2.5}$", fontsize=17)
159 | plt.show()
160 |
161 | def initialize_session(self):
162 | self.sess=tf.Session()
163 | self.saver=tf.train.Saver()
164 |
165 | def re_current(self, a, max, min):
166 | return [num*(max-min)+min for num in a]
167 |
168 | def construct_feed_dict(self, features, labels, placeholders):
169 | """Construct feed dictionary."""
170 | feed_dict = dict()
171 | feed_dict.update({placeholders['labels']: labels})
172 | feed_dict.update({placeholders['features']: features})
173 | return feed_dict
174 |
175 | def run_epoch(self):
176 | '''
177 | Train the model until the configured number of epochs (self.para.epochs) is reached.
178 | '''
179 |
180 | max_mae = 100  # best (lowest) validation MAE so far, initialized to a large value
181 | self.sess.run(tf.global_variables_initializer())
182 | # merged = tf.summary.merge_all()
183 | # writer = tf.summary.FileWriter(logs_path,graph=tf.get_default_graph())
184 | #
185 | # for (x, y) in zip(tf.global_variables(), self.sess.run(tf.global_variables())):
186 | # print('\n', x, y)
187 |
188 | iterate = dataIterator(hp=self.para)
189 | train_next = iterate.next_batch(batch_size=self.para.batch_size, epochs=self.para.epochs, is_training=True)
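   | # Step budget: (number of sliding windows over the training series) *
   | # epochs / batch_size, which is what next_batch() is set up to deliver.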
190 |
191 | # '''
192 | for i in range(int((iterate.train_length //self.para.site_num-(iterate.time_size + iterate.prediction_size))//iterate.window_step)
193 | * self.para.epochs // self.para.batch_size):
194 | x, label =self.sess.run(train_next)
195 | features=np.reshape(np.array(x), [-1, self.para.site_num, self.para.features])
196 | feed_dict = self.construct_feed_dict(features, label, self.placeholders)
197 | feed_dict.update({self.placeholders['dropout']: self.para.dropout})
198 |
199 | loss, _ = self.sess.run((self.cross_entropy,self.train_op), feed_dict=feed_dict)
200 | # writer.add_summary(summary, loss)
201 | print("after %d steps,the training average loss value is : %.6f" % (i, loss))
202 |
203 | # validate processing
204 | if i % 10 == 0:
205 | mae, rmse, R, R2=self.evaluate()
206 | if max_mae>mae:
207 | max_mae=mae
208 | print("#--------At %d -th steps, we need to update the parameters of network!----------#"%(i))
209 | print("mae is : %.6f" % (mae))
210 | print("rmse is : %.6f" % (rmse))
211 | print('R is: %.6f' % (R))
212 | print('R^2 is: %.6f' % (R2))
213 | self.saver.save(self.sess,save_path=self.para.save_path+'model.ckpt')
214 |
215 | def evaluate(self):
216 | '''
217 | :param para:
218 | :param pre_model:
219 | :return:
220 | '''
221 | label_list = list()
222 | predict_list = list()
223 |
224 | model_file = tf.train.latest_checkpoint(self.para.save_path)
225 | if not self.para.is_training:
226 | print('the model weights have been loaded:')
227 | self.saver.restore(self.sess, model_file)
228 |
229 | iterate_test = dataIterator(hp=self.para)
230 | test_next = iterate_test.next_batch(batch_size=self.para.batch_size, epochs=1, is_training=False)
231 | max,min=iterate_test.max_list[1],iterate_test.min_list[1]
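   | # max_list[1]/min_list[1] hold the normalization range of the target
   | # feature (presumably PM2.5); they are used below to map the normalized
   | # predictions back to the original scale.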
232 | # '''
233 | for i in range(int((iterate_test.test_length // self.para.site_num
234 | -(iterate_test.time_size + iterate_test.prediction_size))//iterate_test.prediction_size)// self.para.batch_size):
235 | x, label =self.sess.run(test_next)
236 |
237 | features=np.reshape(np.array(x), [-1, self.para.site_num, self.para.features])
238 | feed_dict = self.construct_feed_dict(features, label, self.placeholders)
239 | feed_dict.update({self.placeholders['dropout']: 0.0})  # must be 0.0 (not 1.0): the keep probability is formed as 1 - dropout, so 0.0 disables dropout at test time
240 | # feed_dict.update({self.placeholders['is_training']:self.para.is_training})
241 |
242 | pre = self.sess.run((self.pres), feed_dict=feed_dict)
243 | label_list.append(label)
244 | predict_list.append(pre)
245 |
246 | label_list=np.reshape(np.array(label_list,dtype=np.float32),[-1, self.para.output_length])
247 | predict_list=np.reshape(np.array(predict_list,dtype=np.float32),[-1, self.para.output_length])
248 |
249 |
250 | if self.para.normalize:
251 | label_list = np.array([self.re_current(np.reshape(site_label, [-1]),max,min) for site_label in label_list],dtype=np.float32)
252 | predict_list = np.array([self.re_current(np.reshape(site_label, [-1]),max,min) for site_label in predict_list],dtype=np.float32)
253 | else:
254 | label_list = np.array([np.reshape(site_label, [-1]) for site_label in label_list],dtype=np.float32)
255 | predict_list = np.array([np.reshape(site_label, [-1]) for site_label in predict_list],dtype=np.float32)
256 |
257 | label_list=np.reshape(label_list,[-1])
258 | predict_list=np.reshape(predict_list,[-1])
259 | mae, rmse, R, R2 = self.accuracy(label_list, predict_list)  # compute the prediction metrics
260 | if not self.para.is_training:
261 | print("mae is : %.6f" % (mae))
262 | print("rmse is : %.6f" % (rmse))
263 | print('R is: %.6f' % (R))
264 | print('R^2 is: %.6f' % (R2))
265 | # pre_model.describe(label_list, predict_list, pre_model.para.prediction_size)  # visualize the predictions
266 | return mae, rmse, R, R2
267 |
268 | def main(argv=None):
269 | '''
270 | :param argv:
271 | :return:
272 | '''
273 | print('beginning____________________________beginning_____________________________beginning!!!')
274 | para = parameter(argparse.ArgumentParser())
275 | para = para.get_para()
276 |
277 | print('Please input a number: 1 or 0 (1 and 0 represent training and testing, respectively).')
278 | val = input('please input the number : ')
279 |
280 | if int(val) == 1:para.is_training = True
281 | else:
282 | para.batch_size=1
283 | para.is_training = False
284 |
285 | pre_model = Model(para)
286 | pre_model.initialize_session()
287 |
288 | if int(val) == 1:pre_model.run_epoch()
289 | else:
290 | pre_model.evaluate()
291 |
292 | print('finished____________________________finished_____________________________finished!!!')
293 |
294 | def re_current(a, max, min):
295 | print(a.shape)
296 | return [float(num*(max-min)+min) for num in a]
297 |
298 | if __name__ == '__main__':
299 | main()
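   |
   | # Usage: run `python run_model.py`, then enter 1 to train, or 0 to restore
   | # the checkpoint from para.save_path and report MAE/RMSE/R/R^2 on the test
   | # split with batch_size forced to 1.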
--------------------------------------------------------------------------------
/model/data_show.py:
--------------------------------------------------------------------------------
1 | # -- coding: utf-8 --
2 | import pandas as pd
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | file='/Users/guojianzou/Documents/program/shanghai_weather/'  # absolute data directory; adjust for your environment
6 |
7 |
8 | def correlation(observed_v, predicted_v):
9 |
10 | cor = np.mean(np.multiply((observed_v - np.mean(observed_v)),
11 | (predicted_v - np.mean(predicted_v)))) / (
12 | np.std(predicted_v) * np.std(observed_v))
13 | # print(observed_v.shape, predicted_v.shape)
14 | # cor = np.corrcoef(observed_v, predicted_v)
15 | print('the correlation is : ', cor)
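   | # This is Pearson's r in its population form (np.mean/np.std); the
   | # commented np.corrcoef call would yield the same value as
   | # np.corrcoef(observed_v, predicted_v)[0, 1].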
16 |
17 | cities=['NanJing','SuZhou','NanTong','WuXi','ChangZhou','ZhenJiang',
18 | 'HangZhou','NingBo','ShaoXing','HuZhou','JiaXing','TaiZhou','ZhouShan']
19 |
20 | pollution=pd.read_csv(file+'train_around_weather.csv')
21 |
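   | # Columns 3-8 hold the pollutant readings (PM2.5 through CO in the sample
   | # CSV); each city's series is correlated against Shanghai's, column by column.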
22 | for i in range(3,9):
23 | for city in cities:
24 | data1=pollution.loc[pollution['location']=='ShangHai'].values[:,i]
25 | data2=pollution.loc[pollution['location']==city].values[:,i]
26 | correlation(data1,data2)
27 |
28 | print('finish')
29 |
30 |
31 | pollution=pd.read_csv(file+'train_weather_day.csv').values[:,2:]
32 |
33 | # print(pollution.values.shape)
34 | #
35 | # data=pollution.loc[pollution['PM2.5']>75]
36 | #
37 | # print(data.values.shape)
38 | #
39 | # print(1088.0/7936.0)
40 |
41 | sudden=[300,300,300,300,300,300,40]  # per-column jump thresholds (presumably AQI through CO)
42 |
43 | def sudden_changed(data):
44 | '''
45 | Filter abrupt jumps: when a value exceeds the previous step by more than the per-column threshold, carry the previous value forward.
46 | Args:
47 | data: 2-D array of readings (time steps x features)
48 | Returns: the filtered array
49 | '''
50 | shape=data.shape
51 | print(shape)
52 | for i in range(shape[0]):
53 | for j in range(shape[1]):
54 | if i!=0:
55 | if data[i][j]-data[i-1][j]>sudden[j]:
56 | data[i][j] = data[i - 1][j]
57 | return data
58 |
59 | pollution=sudden_changed(pollution)
60 |
61 |
62 | weather=pd.read_csv(file+'weather.csv').values[:,1:]
63 |
64 | weathers=[]
65 |
66 | for line in weather:
67 | for i in range(3):
68 | weathers.append(line)
69 |
70 | weather=np.array(weathers)[:7937, :]
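   | # Each weather record is repeated three times to align the coarser weather
   | # series with the denser pollution series, then truncated to the first 7937
   | # rows (presumably the pollution series length).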
71 |
72 | # plt.figure()
73 | # # Label is observed value,Blue
74 | # font = {'family': 'Times New Roman',
75 | # 'weight': 'normal',
76 | # 'size': 10,
77 | # }
78 | # font1 = {'family': 'Times New Roman',
79 | # 'weight': 'normal',
80 | # 'size': 8,
81 | # }
82 | #
83 | # plt.subplot(2, 4, 1)
84 | # plt.plot(pollution[:,0],color='orange', label=u'AQI')
85 | # # use the legend
86 | # plt.legend(loc='upper right',prop=font1)
87 | # plt.grid(axis='y', linestyle='--')
88 | # plt.xlabel("Time (hours)", fontdict=font)
89 | # # plt.xlabel("time(hours)", fontsize=17,fontdict=font)
90 | # plt.ylabel("AQI", fontdict=font)
91 | # # plt.title('sample 1 (48 - 40 h)', fontdict=font)
92 | # # plt.title("the prediction of PM2.5", fontsize=17,fontdict=font)
93 | #
94 | # plt.subplot(2, 4, 2)
95 | # plt.plot(pollution[:,1], color='#0cdc73', label=u'PM$_{2.5}$')
96 | # plt.legend(loc='upper right',prop=font1)
97 | # plt.grid(axis='y', linestyle='--')
98 | # plt.xlabel("Time (hours)", fontdict=font)
99 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
100 | # # plt.title('sample 2 (48 - 40 h)', fontdict=font)
101 | #
102 | # plt.subplot(2, 4, 3)
103 | # plt.plot(pollution[:,2], 'b', label=u'PM$_{10}$')
104 | # plt.legend(loc='upper right',prop=font1)
105 | # plt.grid(axis='y', linestyle='--')
106 | # plt.xlabel("Time (hours)", fontdict=font)
107 | # plt.ylabel("PM$_{10}$(ug/m$^3$)", fontdict=font)
108 | # # plt.title('sample 3 (48 - 40 h)', fontdict=font)
109 | #
110 | # plt.subplot(2, 4, 4)
111 | # plt.plot(pollution[:,3], color='#f504c9', label=u'SO$_2$')
112 | # plt.legend(loc='upper right',prop=font1)
113 | # plt.grid(axis='y', linestyle='--')
114 | # plt.xlabel("Time (hours)", fontdict=font)
115 | # plt.ylabel("SO$_2$(ug/m$^3$)", fontdict=font)
116 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
117 | #
118 | # plt.subplot(2, 4, 5)
119 | # plt.plot(pollution[:,4], color='#d0c101', label=u'NO$_2$')
120 | # plt.legend(loc='upper right',prop=font1)
121 | # plt.grid(axis='y', linestyle='--')
122 | # plt.xlabel("Time (hours)", fontdict=font)
123 | # plt.ylabel("NO$_2$(ug/m$^3$)", fontdict=font)
124 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
125 | #
126 | # plt.subplot(2, 4, 6)
127 | # plt.plot(pollution[:,5], color='#ff5b00', label=u'O$_3$')
128 | # plt.legend(loc='upper right',prop=font1)
129 | # plt.grid(axis='y', linestyle='--')
130 | # plt.xlabel("Time (hours)", fontdict=font)
131 | # plt.ylabel("O$_3$(ug/m$^3$)", fontdict=font)
132 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
133 | #
134 | # plt.subplot(2, 4, 7)
135 | # plt.plot(pollution[:,6], color='#a8a495', label=u'CO')
136 | # plt.legend(loc='upper right',prop=font1)
137 | # plt.grid(axis='y', linestyle='--')
138 | # plt.xlabel("Time (hours)", fontdict=font)
139 | # plt.ylabel("CO(ug/m$^3$)", fontdict=font)
140 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
141 | #
142 | # plt.show()
143 |
144 |
145 |
146 | #
147 | # plt.figure()
148 | # # Label is observed value,Blue
149 | # font = {'family': 'Times New Roman',
150 | # 'weight': 'normal',
151 | # 'size': 10,
152 | # }
153 | # font1 = {'family': 'Times New Roman',
154 | # 'weight': 'normal',
155 | # 'size': 8,
156 | # }
157 | #
158 | # plt.subplot(3, 4, 1)
159 | # plt.plot(weather[:,0],color='#7FFFD4', label=u'Temperature')
160 | # # use the legend
161 | # plt.legend(loc='upper right',prop=font1)
162 | # plt.grid(axis='y', linestyle='--')
163 | # plt.xlabel("Time (hours)", fontdict=font)
164 | # # plt.xlabel("time(hours)", fontsize=17,fontdict=font)
165 | # plt.ylabel("Temperature($°$C)", fontdict=font)
166 | # # plt.title('sample 1 (48 - 40 h)', fontdict=font)
167 | # # plt.title("the prediction of PM2.5", fontsize=17,fontdict=font)
168 | #
169 | # plt.subplot(3, 4, 2)
170 | # plt.plot(weather[:,1], color='#DEB887', label=u'Humidity')
171 | # plt.legend(loc='upper right',prop=font1)
172 | # plt.grid(axis='y', linestyle='--')
173 | # plt.xlabel("Time (hours)", fontdict=font)
174 | # plt.ylabel("Humidity(%)", fontdict=font)
175 | # # plt.title('sample 2 (48 - 40 h)', fontdict=font)
176 | #
177 | # plt.subplot(3, 4, 3)
178 | # plt.plot(weather[:,2], color='#7FFF00', label=u'Air pressure')
179 | # plt.legend(loc='upper right',prop=font1)
180 | # plt.grid(axis='y', linestyle='--')
181 | # plt.xlabel("Time (hours)", fontdict=font)
182 | # plt.ylabel("Air pressure(Hpa)", fontdict=font)
183 | # # plt.title('sample 3 (48 - 40 h)', fontdict=font)
184 | #
185 | # plt.subplot(3, 4, 4)
186 | # plt.plot(weather[:,3], color='#6495ED', label=u'Wind direction')
187 | # plt.legend(loc='upper right',prop=font1)
188 | # plt.grid(axis='y', linestyle='--')
189 | # plt.xlabel("Time (hours)", fontdict=font)
190 | # plt.ylabel("Wind direction($°$)", fontdict=font)
191 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
192 | #
193 | # plt.subplot(3, 4, 5)
194 | # plt.plot(weather[:,4], color='#DC143C', label=u'Wind speed')
195 | # plt.legend(loc='upper right',prop=font1)
196 | # plt.grid(axis='y', linestyle='--')
197 | # plt.xlabel("Time (hours)", fontdict=font)
198 | # plt.ylabel("Wind speed(km/h)", fontdict=font)
199 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
200 | #
201 | # plt.subplot(3, 4, 6)
202 | # plt.plot(weather[:,5], color='#A9A9A9', label=u'Clouds')
203 | # plt.legend(loc='upper right',prop=font1)
204 | # plt.grid(axis='y', linestyle='--')
205 | # plt.xlabel("Time (hours)", fontdict=font)
206 | # plt.ylabel("Clouds", fontdict=font)
207 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
208 | #
209 | # plt.subplot(3, 4, 7)
210 | # plt.plot(weather[:,6], color='#a55af4', label=u'Maximum temperature')
211 | # plt.legend(loc='upper right',prop=font1)
212 | # plt.grid(axis='y', linestyle='--')
213 | # plt.xlabel("Time (hours)", fontdict=font)
214 | # plt.ylabel("Maximum temperature($°$C)", fontdict=font)
215 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
216 | #
217 | # plt.subplot(3, 4, 8)
218 | # plt.plot(weather[:,7], color='#82cafc', label=u'Minimum temperature')
219 | # plt.legend(loc='upper right',prop=font1)
220 | # plt.grid(axis='y', linestyle='--')
221 | # plt.xlabel("Time (hours)", fontdict=font)
222 | # plt.ylabel("Minimum temperature($°$C)", fontdict=font)
223 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
224 | #
225 | # plt.subplot(3, 4, 9)
226 | # plt.plot(weather[:,8], color='#ffdf22', label=u'Conditions')
227 | # plt.legend(loc='upper right',prop=font1)
228 | # plt.grid(axis='y', linestyle='--')
229 | # plt.xlabel("Time (hours)", fontdict=font)
230 | # plt.ylabel("Conditions", fontdict=font)
231 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
232 | #
233 | # plt.show()
234 |
235 | def sudden_changed(data):  # 1-D variant; redefines the 2-D version above
236 | '''
237 | Filter abrupt jumps in a 1-D series: when a value exceeds the previous one by more than 100, carry the previous value forward.
238 | Args:
239 | data: 1-D array of readings
240 | Returns: the filtered array
241 | '''
242 | shape=data.shape
243 | print(shape)
244 | for i in range(shape[0]):
245 | if i!=0:
246 | if data[i]-data[i-1]>100:
247 | data[i] = data[i - 1]
248 | return data
249 |
250 | pollution=pd.read_csv(file+'train_around_weather.csv',usecols=['location','PM2.5'])
251 |
252 | # plt.figure()
253 | # # Label is observed value,Blue
254 | # font = {'family': 'Times New Roman',
255 | # 'weight': 'normal',
256 | # 'size': 9,
257 | # }
258 | # font1 = {'family': 'Times New Roman',
259 | # 'weight': 'normal',
260 | # 'size': 8,
261 | # }
262 | #
263 | # plt.subplot(4, 4, 1)
264 | # plt.plot(pollution.loc[pollution['location']=='ShangHai'].values[:,1],color='#0cdc73', label=u'Shanghai')
265 | # # use the legend
266 | # plt.legend(loc='upper right',prop=font1)
267 | # plt.grid(axis='y', linestyle='--')
268 | # plt.xlabel("Time (hours)", fontdict=font)
269 | # # plt.xlabel("time(hours)", fontsize=17,fontdict=font)
270 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
271 | # # plt.title('sample 1 (48 - 40 h)', fontdict=font)
272 | # # plt.title("the prediction of PM2.5", fontsize=17,fontdict=font)
273 | #
274 | # plt.subplot(4, 4, 2)
275 | # plt.plot(pollution.loc[pollution['location']=='NanJing'].values[:,1], color='#696969', label=u'Nanjing')
276 | # plt.legend(loc='upper right',prop=font1)
277 | # plt.grid(axis='y', linestyle='--')
278 | # plt.xlabel("Time (hours)", fontdict=font)
279 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
280 | # # plt.title('sample 2 (48 - 40 h)', fontdict=font)
281 | #
282 | # plt.subplot(4, 4, 3)
283 | # plt.plot(pollution.loc[pollution['location']=='SuZhou'].values[:,1], color='#1E90FF', label=u'Suzhou')
284 | # plt.legend(loc='upper right',prop=font1)
285 | # plt.grid(axis='y', linestyle='--')
286 | # plt.xlabel("Time (hours)", fontdict=font)
287 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
288 | # # plt.title('sample 3 (48 - 40 h)', fontdict=font)
289 | #
290 | #
291 | # a=sudden_changed(pollution.loc[pollution['location']=='NanTong'].values[:,1])
292 | # plt.subplot(4, 4, 4)
293 | # plt.plot(a, color='#228B22', label=u'Nantong')
294 | # plt.legend(loc='upper right',prop=font1)
295 | # plt.grid(axis='y', linestyle='--')
296 | # plt.xlabel("Time (hours)", fontdict=font)
297 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
298 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
299 | #
300 | # plt.subplot(4, 4, 5)
301 | # plt.plot(pollution.loc[pollution['location']=='WuXi'].values[:,1], color='#FF00FF', label=u'Wuxi')
302 | # plt.legend(loc='upper right',prop=font1)
303 | # plt.grid(axis='y', linestyle='--')
304 | # plt.xlabel("Time (hours)", fontdict=font)
305 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
306 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
307 | #
308 | # plt.subplot(4, 4, 6)
309 | # plt.plot(pollution.loc[pollution['location']=='ChangZhou'].values[:,1], color='#FFD700', label=u'Changzhou')
310 | # plt.legend(loc='upper right',prop=font1)
311 | # plt.grid(axis='y', linestyle='--')
312 | # plt.xlabel("Time (hours)", fontdict=font)
313 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
314 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
315 | #
316 | # plt.subplot(4, 4, 7)
317 | # plt.plot(pollution.loc[pollution['location']=='ZhenJiang'].values[:,1], color='#FF69B4', label=u'Zhenjiang')
318 | # plt.legend(loc='upper right',prop=font1)
319 | # plt.grid(axis='y', linestyle='--')
320 | # plt.xlabel("Time (hours)", fontdict=font)
321 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
322 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
323 | #
324 | # plt.subplot(4, 4, 8)
325 | # plt.plot(pollution.loc[pollution['location']=='HangZhou'].values[:,1], color='#CD5C5C', label=u'Hangzhou')
326 | # plt.legend(loc='upper right',prop=font1)
327 | # plt.grid(axis='y', linestyle='--')
328 | # plt.xlabel("Time (hours)", fontdict=font)
329 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
330 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
331 | #
332 | # plt.subplot(4, 4, 9)
333 | # plt.plot(pollution.loc[pollution['location']=='NingBo'].values[:,1], color='#9370DB', label=u'Ningbo')
334 | # plt.legend(loc='upper right',prop=font1)
335 | # plt.grid(axis='y', linestyle='--')
336 | # plt.xlabel("Time (hours)", fontdict=font)
337 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
338 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
339 | #
340 | # plt.subplot(4, 4, 10)
341 | # plt.plot(pollution.loc[pollution['location']=='ShaoXing'].values[:,1], color='#0000CD', label=u'Shaoxing')
342 | # plt.legend(loc='upper right',prop=font1)
343 | # plt.grid(axis='y', linestyle='--')
344 | # plt.xlabel("Time (hours)", fontdict=font)
345 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
346 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
347 | #
348 | # plt.subplot(4, 4, 11)
349 | # plt.plot(pollution.loc[pollution['location']=='HuZhou'].values[:,1], color='#ADD8E6', label=u'Huzhou')
350 | # plt.legend(loc='upper right',prop=font1)
351 | # plt.grid(axis='y', linestyle='--')
352 | # plt.xlabel("Time (hours)", fontdict=font)
353 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
354 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
355 | #
356 | # plt.subplot(4, 4, 12)
357 | # plt.plot(pollution.loc[pollution['location']=='JiaXing'].values[:,1], color='#FFB6C1', label=u'Jiaxing')
358 | # plt.legend(loc='upper right',prop=font1)
359 | # plt.grid(axis='y', linestyle='--')
360 | # plt.xlabel("Time (hours)", fontdict=font)
361 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
362 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
363 | #
364 | # plt.subplot(4, 4, 13)
365 | # plt.plot(pollution.loc[pollution['location']=='TaiZhou'].values[:,1], color='#FFA07A', label=u'Taizhou')
366 | # plt.legend(loc='upper right',prop=font1)
367 | # plt.grid(axis='y', linestyle='--')
368 | # plt.xlabel("Time (hours)", fontdict=font)
369 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
370 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
371 | #
372 | # plt.subplot(4, 4, 14)
373 | # plt.plot(pollution.loc[pollution['location']=='ZhouShan'].values[:,1], color='#20B2AA', label=u'Zhoushan')
374 | # plt.legend(loc='upper right',prop=font1)
375 | # plt.grid(axis='y', linestyle='--')
376 | # plt.xlabel("Time (hours)", fontdict=font)
377 | # plt.ylabel("PM$_{2.5}$(ug/m$^3$)", fontdict=font)
378 | # # plt.title('sample 4 (48 - 40 h)', fontdict=font)
379 | #
380 | # plt.show()
--------------------------------------------------------------------------------
/.idea/workspace.xml:
--------------------------------------------------------------------------------
(XML markup lost in extraction; the only surviving fragments are a few strings, likely IDE search history: "lstm-layer", "lstm", "bias", "AQI", "features", and the timestamp 1655974255643.)
--------------------------------------------------------------------------------
/data/train_around_sample.csv:
--------------------------------------------------------------------------------
1 | time,location,AQI,PM2.5,PM10,SO2,NO2,O3,CO
2 | 2016/1/1 0:00,ShangHai,108,80,99,17,68,41,1.101
3 | 2016/1/1 0:00,NanJing,192,144,218,35,105,20,1.669
4 | 2016/1/1 0:00,SuZhou,127,96,109,18,89,12,1.313
5 | 2016/1/1 0:00,NanTong,142,108,135,42,64,22,1.132
6 | 2016/1/1 0:00,WuXi,170,128,173,28,88,16,1.857
7 | 2016/1/1 0:00,ChangZhou,184,138,180,46,84,10,1.663
8 | 2016/1/1 0:00,ZhenJiang,196,147,193,38,82,14,1.287
9 | 2016/1/1 0:00,HangZhou,224,173,240,28,102,14,1.607
10 | 2016/1/1 0:00,NingBo,214,163,245,20,99,18,1.609
11 | 2016/1/1 0:00,ShaoXing,291,240,361,19,79,15,1.908
12 | 2016/1/1 0:00,HuZhou,126,95,166,19,83,6,1.466
13 | 2016/1/1 0:00,JiaXing,193,145,203,39,106,6,1.878
14 | 2016/1/1 0:00,TaiZhou,179,135,198,15,37,60,1.362
15 | 2016/1/1 0:00,ZhouShan,59,41,58,4,20,67,0.671
16 | 2016/1/1 1:00,ShangHai,96,71,93,18,66,38,1.058
17 | 2016/1/1 1:00,NanJing,186,140,207,35,99,19,1.614
18 | 2016/1/1 1:00,SuZhou,122,92,98,16,85,12,1.286
19 | 2016/1/1 1:00,NanTong,119,89,116,48,69,16,1.096
20 | 2016/1/1 1:00,WuXi,150,115,152,26,76,23,1.687
21 | 2016/1/1 1:00,ChangZhou,192,144,191,49,78,12,1.742
22 | 2016/1/1 1:00,ZhenJiang,181,136,182,31,67,19,1.228
23 | 2016/1/1 1:00,HangZhou,211,160,255,26,95,12,1.656
24 | 2016/1/1 1:00,NingBo,207,156,238,18,88,18,1.572
25 | 2016/1/1 1:00,ShaoXing,256,205,295,18,72,12,1.718
26 | 2016/1/1 1:00,HuZhou,124,93,176,29,80,6,1.599
27 | 2016/1/1 1:00,JiaXing,146,111,168,25,92,14,1.349
28 | 2016/1/1 1:00,TaiZhou,181,136,201,12,41,50,1.615
29 | 2016/1/1 1:00,ZhouShan,59,41,56,4,18,67,0.756
30 | 2016/1/1 2:00,ShangHai,93,69,87,16,66,43,1.091
31 | 2016/1/1 2:00,NanJing,186,140,205,34,94,19,1.5
32 | 2016/1/1 2:00,SuZhou,119,89,93,16,82,12,1.277
33 | 2016/1/1 2:00,NanTong,130,98,121,50,72,13,1.117
34 | 2016/1/1 2:00,WuXi,137,104,139,32,78,15,1.473
35 | 2016/1/1 2:00,ChangZhou,181,136,201,55,78,10,1.903
36 | 2016/1/1 2:00,ZhenJiang,177,133,177,34,68,17,1.34
37 | 2016/1/1 2:00,HangZhou,200,150,244,21,88,13,1.639
38 | 2016/1/1 2:00,NingBo,182,137,210,17,81,16,1.403
39 | 2016/1/1 2:00,ShaoXing,202,151,227,17,66,11,1.554
40 | 2016/1/1 2:00,HuZhou,172,130,193,37,83,6,1.589
41 | 2016/1/1 2:00,JiaXing,125,94,135,21,62,37,1.198
42 | 2016/1/1 2:00,TaiZhou,185,139,201,13,52,29,1.421
43 | 2016/1/1 2:00,ZhouShan,59,41,55,6,16,69,0.747
44 | 2016/1/1 3:00,ShangHai,88,65,82,16,58,43,0.989
45 | 2016/1/1 3:00,NanJing,178,134,194,34,92,15,1.476
46 | 2016/1/1 3:00,SuZhou,122,92,100,15,79,12,1.295
47 | 2016/1/1 3:00,NanTong,126,95,119,56,64,17,1.07
48 | 2016/1/1 3:00,WuXi,133,101,131,32,78,14,1.378
49 | 2016/1/1 3:00,ChangZhou,172,130,198,56,74,12,1.815
50 | 2016/1/1 3:00,ZhenJiang,172,130,167,33,68,15,1.344
51 | 2016/1/1 3:00,HangZhou,204,153,218,19,83,11,1.59
52 | 2016/1/1 3:00,NingBo,188,141,218,16,82,8,1.388
53 | 2016/1/1 3:00,ShaoXing,202,151,216,16,59,13,1.473
54 | 2016/1/1 3:00,HuZhou,188,141,187,43,86,6,1.687
55 | 2016/1/1 3:00,JiaXing,116,87,116,19,50,46,1.114
56 | 2016/1/1 3:00,TaiZhou,195,146,207,14,55,24,1.298
57 | 2016/1/1 3:00,ZhouShan,58,40,55,6,16,66,0.772
58 | 2016/1/1 4:00,ShangHai,86,63,89,15,53,46,0.982
59 | 2016/1/1 4:00,NanJing,182,137,190,36,94,12,1.563
60 | 2016/1/1 4:00,SuZhou,126,95,100,13,84,7,1.349
61 | 2016/1/1 4:00,NanTong,116,87,109,58,64,15,0.992
62 | 2016/1/1 4:00,WuXi,127,96,124,34,80,12,1.36
63 | 2016/1/1 4:00,ChangZhou,168,127,176,52,73,11,1.686
64 | 2016/1/1 4:00,ZhenJiang,150,115,158,34,56,27,1.325
65 | 2016/1/1 4:00,HangZhou,186,140,228,17,79,11,1.529
66 | 2016/1/1 4:00,NingBo,184,138,213,14,72,13,1.349
67 | 2016/1/1 4:00,ShaoXing,188,141,212,21,54,15,1.366
68 | 2016/1/1 4:00,HuZhou,165,125,183,37,85,5,1.552
69 | 2016/1/1 4:00,JiaXing,115,86,111,16,49,42,1.13
70 | 2016/1/1 4:00,TaiZhou,193,145,204,16,43,28,1.4
71 | 2016/1/1 4:00,ZhouShan,54,37,52,5,13,70,0.77
72 | 2016/1/1 5:00,ShangHai,80,58,81,15,54,44,1.024
73 | 2016/1/1 5:00,NanJing,189,142,197,36,94,11,1.695
74 | 2016/1/1 5:00,SuZhou,125,94,100,12,87,5,1.407
75 | 2016/1/1 5:00,NanTong,110,82,103,58,65,14,1.015
76 | 2016/1/1 5:00,WuXi,122,92,124,34,84,7,1.393
77 | 2016/1/1 5:00,ChangZhou,171,129,175,49,71,11,1.608
78 | 2016/1/1 5:00,ZhenJiang,143,109,152,32,60,22,1.357
79 | 2016/1/1 5:00,HangZhou,184,138,225,17,75,10,1.497
80 | 2016/1/1 5:00,NingBo,181,136,210,17,68,11,1.385
81 | 2016/1/1 5:00,ShaoXing,177,133,200,17,55,12,1.367
82 | 2016/1/1 5:00,HuZhou,147,112,147,29,79,5,1.429
83 | 2016/1/1 5:00,JiaXing,113,84,108,15,53,35,1.144
84 | 2016/1/1 5:00,TaiZhou,188,141,197,16,36,40,1.365
85 | 2016/1/1 5:00,ZhouShan,50,35,49,5,13,66,0.754
86 | 2016/1/1 6:00,ShangHai,76,55,70,14,62,31,1.034
87 | 2016/1/1 6:00,NanJing,195,146,203,34,91,12,1.833
88 | 2016/1/1 6:00,SuZhou,119,89,99,12,88,6,1.397
89 | 2016/1/1 6:00,NanTong,116,87,107,58,66,12,1.02
90 | 2016/1/1 6:00,WuXi,127,96,126,33,84,6,1.454
91 | 2016/1/1 6:00,ChangZhou,167,126,171,47,71,12,1.653
92 | 2016/1/1 6:00,ZhenJiang,141,107,148,53,52,29,1.332
93 | 2016/1/1 6:00,HangZhou,177,133,203,15,72,9,1.523
94 | 2016/1/1 6:00,NingBo,174,131,203,15,63,12,1.346
95 | 2016/1/1 6:00,ShaoXing,171,129,196,18,51,15,1.391
96 | 2016/1/1 6:00,HuZhou,160,121,146,35,78,5,1.435
97 | 2016/1/1 6:00,JiaXing,110,82,110,17,54,31,1.191
98 | 2016/1/1 6:00,TaiZhou,192,144,184,14,33,37,1.487
99 | 2016/1/1 6:00,ZhouShan,50,35,48,6,15,56,0.751
100 | 2016/1/1 7:00,ShangHai,81,59,79,16,74,22,1.21
101 | 2016/1/1 7:00,NanJing,192,144,202,30,88,11,1.862
102 | 2016/1/1 7:00,SuZhou,115,86,100,13,89,6,1.401
103 | 2016/1/1 7:00,NanTong,122,92,124,77,70,9,1.146
104 | 2016/1/1 7:00,WuXi,131,99,134,33,85,5,1.564
105 | 2016/1/1 7:00,ChangZhou,174,131,180,50,73,10,1.831
106 | 2016/1/1 7:00,ZhenJiang,138,105,134,56,47,32,1.137
107 | 2016/1/1 7:00,HangZhou,174,131,213,14,70,9,1.562
108 | 2016/1/1 7:00,NingBo,161,122,192,15,63,10,1.326
109 | 2016/1/1 7:00,ShaoXing,168,127,189,16,51,13,1.471
110 | 2016/1/1 7:00,HuZhou,170,128,155,46,75,5,1.495
111 | 2016/1/1 7:00,JiaXing,114,85,113,18,68,15,1.368
112 | 2016/1/1 7:00,TaiZhou,168,127,178,12,33,38,1.917
113 | 2016/1/1 7:00,ZhouShan,50,35,49,6,25,48,0.802
114 | 2016/1/1 8:00,ShangHai,78,57,79,17,82,15,1.412
115 | 2016/1/1 8:00,NanJing,177,133,188,27,81,13,1.861
116 | 2016/1/1 8:00,SuZhou,110,82,101,13,88,7,1.326
117 | 2016/1/1 8:00,NanTong,179,135,175,91,70,10,1.248
118 | 2016/1/1 8:00,WuXi,132,100,138,28,85,8,1.602
119 | 2016/1/1 8:00,ChangZhou,182,137,189,51,71,12,1.885
120 | 2016/1/1 8:00,ZhenJiang,144,110,142,33,47,30,1.411
121 | 2016/1/1 8:00,HangZhou,202,151,226,14,67,9,1.634
122 | 2016/1/1 8:00,NingBo,160,121,190,15,62,11,1.367
123 | 2016/1/1 8:00,ShaoXing,168,127,192,17,51,15,1.493
124 | 2016/1/1 8:00,HuZhou,167,126,169,55,72,8,1.583
125 | 2016/1/1 8:00,JiaXing,120,90,115,17,75,9,1.52
126 | 2016/1/1 8:00,TaiZhou,164,124,173,12,34,38,1.563
127 | 2016/1/1 8:00,ZhouShan,53,36,54,8,41,36,0.907
128 | 2016/1/1 9:00,ShangHai,93,69,82,20,80,23,1.205
129 | 2016/1/1 9:00,NanJing,160,121,179,28,75,17,1.799
130 | 2016/1/1 9:00,SuZhou,108,80,104,16,83,16,1.236
131 | 2016/1/1 9:00,NanTong,170,128,178,77,71,19,1.205
132 | 2016/1/1 9:00,WuXi,132,100,145,28,83,17,1.461
133 | 2016/1/1 9:00,ChangZhou,168,127,194,52,68,18,1.786
134 | 2016/1/1 9:00,ZhenJiang,161,122,160,35,56,26,2.138
135 | 2016/1/1 9:00,HangZhou,209,158,241,18,71,12,1.715
136 | 2016/1/1 9:00,NingBo,158,120,179,16,61,17,1.36
137 | 2016/1/1 9:00,ShaoXing,186,140,208,27,57,19,1.622
138 | 2016/1/1 9:00,HuZhou,189,142,194,54,74,12,1.736
139 | 2016/1/1 9:00,JiaXing,121,91,119,21,73,17,1.495
140 | 2016/1/1 9:00,TaiZhou,163,123,169,16,36,42,1.549
141 | 2016/1/1 9:00,ZhouShan,55,36,57,9,46,43,0.907
142 | 2016/1/1 10:00,ShangHai,91,67,83,19,66,43,1.023
143 | 2016/1/1 10:00,NanJing,164,124,182,31,70,29,1.672
144 | 2016/1/1 10:00,SuZhou,98,73,91,20,54,47,1.002
145 | 2016/1/1 10:00,NanTong,127,96,143,53,62,35,1.001
146 | 2016/1/1 10:00,WuXi,125,94,136,26,66,38,1.327
147 | 2016/1/1 10:00,ChangZhou,144,110,182,45,58,33,1.58
148 | 2016/1/1 10:00,ZhenJiang,154,117,148,34,53,37,1.639
149 | 2016/1/1 10:00,HangZhou,244,193,272,28,79,18,1.72
150 | 2016/1/1 10:00,NingBo,150,115,168,17,65,27,1.323
151 | 2016/1/1 10:00,ShaoXing,186,140,218,39,64,29,1.606
152 | 2016/1/1 10:00,HuZhou,196,147,244,67,72,23,1.633
153 | 2016/1/1 10:00,JiaXing,108,80,114,26,61,45,1.19
154 | 2016/1/1 10:00,TaiZhou,150,115,161,19,38,51,1.473
155 | 2016/1/1 10:00,ZhouShan,56,35,60,10,34,60,0.781
156 | 2016/1/1 11:00,ShangHai,81,59,69,17,53,60,0.848
157 | 2016/1/1 11:00,NanJing,163,123,174,34,65,46,1.538
158 | 2016/1/1 11:00,SuZhou,78,57,69,18,43,64,0.767
159 | 2016/1/1 11:00,NanTong,99,74,111,35,49,57,0.84
160 | 2016/1/1 11:00,WuXi,119,89,130,23,56,56,1.184
161 | 2016/1/1 11:00,ChangZhou,130,98,155,41,38,62,1.557
162 | 2016/1/1 11:00,ZhenJiang,144,110,141,30,40,68,1.258
163 | 2016/1/1 11:00,HangZhou,248,197,272,32,77,41,1.55
164 | 2016/1/1 11:00,NingBo,125,94,139,20,63,47,1.138
165 | 2016/1/1 11:00,ShaoXing,184,138,218,37,67,46,1.482
166 | 2016/1/1 11:00,HuZhou,211,160,226,60,67,46,1.51
167 | 2016/1/1 11:00,JiaXing,94,70,95,26,39,75,0.95
168 | 2016/1/1 11:00,TaiZhou,131,99,129,16,35,62,1.371
169 | 2016/1/1 11:00,ZhouShan,55,33,58,10,29,69,0.735
170 | 2016/1/1 12:00,ShangHai,72,52,64,17,41,74,0.759
171 | 2016/1/1 12:00,NanJing,163,123,171,32,64,60,1.514
172 | 2016/1/1 12:00,SuZhou,67,48,59,17,37,74,0.675
173 | 2016/1/1 12:00,NanTong,83,61,87,28,41,70,0.759
174 | 2016/1/1 12:00,WuXi,105,78,118,21,43,76,1.058
175 | 2016/1/1 12:00,ChangZhou,110,82,139,34,28,80,1.263
176 | 2016/1/1 12:00,ZhenJiang,146,111,138,27,38,80,1.068
177 | 2016/1/1 12:00,HangZhou,217,166,240,29,64,67,1.375
178 | 2016/1/1 12:00,NingBo,110,82,123,25,61,62,1.057
179 | 2016/1/1 12:00,ShaoXing,182,137,190,31,70,56,1.344
180 | 2016/1/1 12:00,HuZhou,223,172,225,60,72,63,1.439
181 | 2016/1/1 12:00,JiaXing,78,57,77,17,28,93,0.801
182 | 2016/1/1 12:00,TaiZhou,89,66,105,13,28,87,1.054
183 | 2016/1/1 12:00,ZhouShan,59,38,66,10,29,76,0.733
184 | 2016/1/1 13:00,ShangHai,70,50,58,16,31,90,0.707
185 | 2016/1/1 13:00,NanJing,161,122,170,29,63,68,1.47
186 | 2016/1/1 13:00,SuZhou,77,56,64,18,43,80,0.746
187 | 2016/1/1 13:00,NanTong,70,50,79,24,36,82,0.736
188 | 2016/1/1 13:00,WuXi,93,69,108,20,32,90,0.945
189 | 2016/1/1 13:00,ChangZhou,96,71,116,32,27,87,1.237
190 | 2016/1/1 13:00,ZhenJiang,139,106,131,24,31,99,1.111
191 | 2016/1/1 13:00,HangZhou,188,141,202,24,64,86,1.221
192 | 2016/1/1 13:00,NingBo,103,76,115,24,57,74,1.048
193 | 2016/1/1 13:00,ShaoXing,149,114,169,32,59,71,1.211
194 | 2016/1/1 13:00,HuZhou,221,170,217,50,90,61,1.456
195 | 2016/1/1 13:00,JiaXing,75,54,75,17,27,101,0.77
196 | 2016/1/1 13:00,TaiZhou,78,57,95,12,23,97,0.979
197 | 2016/1/1 13:00,ZhouShan,67,47,81,11,36,80,0.787
198 | 2016/1/1 14:00,ShangHai,72,52,60,14,25,100,0.669
199 | 2016/1/1 14:00,NanJing,158,120,167,27,61,70,1.445
200 | 2016/1/1 14:00,SuZhou,81,59,75,18,45,84,0.752
201 | 2016/1/1 14:00,NanTong,74,53,82,23,40,83,0.722
202 | 2016/1/1 14:00,WuXi,80,58,91,21,35,92,0.914
203 | 2016/1/1 14:00,ChangZhou,96,71,103,37,30,91,1.368
204 | 2016/1/1 14:00,ZhenJiang,132,100,123,23,36,99,1.104
205 | 2016/1/1 14:00,HangZhou,178,134,181,21,56,98,1.102
206 | 2016/1/1 14:00,NingBo,87,64,99,21,49,85,0.882
207 | 2016/1/1 14:00,ShaoXing,146,111,153,39,56,75,1.14
208 | 2016/1/1 14:00,HuZhou,202,151,205,42,83,71,1.316
209 | 2016/1/1 14:00,JiaXing,71,51,68,15,28,101,0.75
210 | 2016/1/1 14:00,TaiZhou,86,63,94,12,23,104,1.012
211 | 2016/1/1 14:00,ZhouShan,67,48,72,11,34,91,0.756
212 | 2016/1/1 15:00,ShangHai,66,47,57,14,27,99,0.657
213 | 2016/1/1 15:00,NanJing,154,117,163,26,60,74,1.354
214 | 2016/1/1 15:00,SuZhou,69,49,56,17,41,85,0.665
215 | 2016/1/1 15:00,NanTong,77,56,84,20,39,84,0.682
216 | 2016/1/1 15:00,WuXi,82,60,91,21,43,86,0.924
217 | 2016/1/1 15:00,ChangZhou,94,70,110,39,34,90,1.39
218 | 2016/1/1 15:00,ZhenJiang,115,86,115,22,34,100,1.056
219 | 2016/1/1 15:00,HangZhou,164,124,161,19,56,98,1.052
220 | 2016/1/1 15:00,NingBo,78,57,90,17,41,94,0.791
221 | 2016/1/1 15:00,ShaoXing,125,94,133,30,61,75,1.033
222 | 2016/1/1 15:00,HuZhou,174,131,193,40,65,90,1.183
223 | 2016/1/1 15:00,JiaXing,76,55,76,16,31,104,0.792
224 | 2016/1/1 15:00,TaiZhou,75,54,90,10,21,109,0.947
225 | 2016/1/1 15:00,ZhouShan,53,36,50,10,23,96,0.67
226 | 2016/1/1 16:00,ShangHai,64,45,56,14,32,92,0.663
227 | 2016/1/1 16:00,NanJing,148,113,160,24,69,66,1.304
228 | 2016/1/1 16:00,SuZhou,60,42,55,15,44,78,0.623
229 | 2016/1/1 16:00,NanTong,70,50,85,22,49,71,0.658
230 | 2016/1/1 16:00,WuXi,82,60,98,20,53,77,0.943
231 | 2016/1/1 16:00,ChangZhou,94,70,114,38,41,81,1.249
232 | 2016/1/1 16:00,ZhenJiang,110,82,114,18,35,99,0.928
233 | 2016/1/1 16:00,HangZhou,144,110,152,19,63,89,1.042
234 | 2016/1/1 16:00,NingBo,75,54,87,16,48,88,0.8
235 | 2016/1/1 16:00,ShaoXing,98,73,115,23,48,81,0.872
236 | 2016/1/1 16:00,HuZhou,168,127,175,38,70,89,1.185
237 | 2016/1/1 16:00,JiaXing,80,58,75,17,35,108,0.831
238 | 2016/1/1 16:00,TaiZhou,74,53,89,8,22,107,0.992
239 | 2016/1/1 16:00,ZhouShan,49,34,46,7,25,91,0.661
240 | 2016/1/1 17:00,ShangHai,66,47,58,13,44,79,0.702
241 | 2016/1/1 17:00,NanJing,143,109,158,23,69,68,1.281
242 | 2016/1/1 17:00,SuZhou,60,42,67,15,55,67,0.693
243 | 2016/1/1 17:00,NanTong,69,46,85,26,69,46,0.723
244 | 2016/1/1 17:00,WuXi,87,64,107,20,69,62,0.988
245 | 2016/1/1 17:00,ChangZhou,96,71,116,34,52,68,1.083
246 | 2016/1/1 17:00,ZhenJiang,103,76,109,20,47,77,0.96
247 | 2016/1/1 17:00,HangZhou,153,116,165,20,79,69,1.066
248 | 2016/1/1 17:00,NingBo,82,60,97,17,68,65,0.872
249 | 2016/1/1 17:00,ShaoXing,94,70,118,30,60,74,0.963
250 | 2016/1/1 17:00,HuZhou,165,125,191,38,80,74,1.233
251 | 2016/1/1 17:00,JiaXing,81,59,85,20,47,93,0.893
252 | 2016/1/1 17:00,TaiZhou,75,54,93,7,28,92,1.038
253 | 2016/1/1 17:00,ZhouShan,55,38,54,8,37,77,0.72
254 | 2016/1/1 18:00,ShangHai,72,52,72,13,67,50,0.804
255 | 2016/1/1 18:00,NanJing,139,106,156,23,72,64,1.276
256 | 2016/1/1 18:00,SuZhou,67,47,81,13,64,56,0.775
257 | 2016/1/1 18:00,NanTong,76,48,99,30,83,27,0.812
258 | 2016/1/1 18:00,WuXi,91,67,111,20,85,42,1.076
259 | 2016/1/1 18:00,ChangZhou,108,80,116,34,62,52,1.225
260 | 2016/1/1 18:00,ZhenJiang,126,95,118,20,46,77,0.863
261 | 2016/1/1 18:00,HangZhou,139,106,155,20,89,45,1.118
262 | 2016/1/1 18:00,NingBo,77,56,93,14,73,52,0.888
263 | 2016/1/1 18:00,ShaoXing,83,61,109,19,64,59,0.938
264 | 2016/1/1 18:00,HuZhou,161,122,189,34,89,53,1.325
265 | 2016/1/1 18:00,JiaXing,80,58,89,18,55,73,0.929
266 | 2016/1/1 18:00,TaiZhou,83,61,103,10,38,76,1.139
267 | 2016/1/1 18:00,ZhouShan,67,48,76,7,57,48,0.89
268 | 2016/1/1 19:00,ShangHai,81,59,82,14,78,34,0.871
269 | 2016/1/1 19:00,NanJing,133,101,150,24,78,53,1.591
270 | 2016/1/1 19:00,SuZhou,80,58,87,14,64,54,0.834
271 | 2016/1/1 19:00,NanTong,75,53,98,31,82,24,0.829
272 | 2016/1/1 19:00,WuXi,97,72,126,20,94,31,1.08
273 | 2016/1/1 19:00,ChangZhou,114,85,137,39,71,37,1.258
274 | 2016/1/1 19:00,ZhenJiang,135,102,112,23,53,64,0.886
275 | 2016/1/1 19:00,HangZhou,131,99,147,18,89,33,1.101
276 | 2016/1/1 19:00,NingBo,76,55,92,13,71,48,0.881
277 | 2016/1/1 19:00,ShaoXing,93,69,119,29,84,37,1.034
278 | 2016/1/1 19:00,HuZhou,147,112,180,39,81,45,1.253
279 | 2016/1/1 19:00,JiaXing,89,66,98,17,57,68,0.955
280 | 2016/1/1 19:00,TaiZhou,92,68,115,14,54,48,1.285
281 | 2016/1/1 19:00,ZhouShan,77,56,88,7,63,36,1.065
282 | 2016/1/1 20:00,ShangHai,86,63,79,16,82,28,0.85
283 | 2016/1/1 20:00,NanJing,138,105,154,26,86,50,1.71
284 | 2016/1/1 20:00,SuZhou,91,67,88,15,66,52,0.877
285 | 2016/1/1 20:00,NanTong,77,56,101,29,70,28,0.785
286 | 2016/1/1 20:00,WuXi,104,77,127,20,92,29,1.027
287 | 2016/1/1 20:00,ChangZhou,110,82,146,42,69,35,1.429
288 | 2016/1/1 20:00,ZhenJiang,156,118,138,27,49,67,1.119
289 | 2016/1/1 20:00,HangZhou,135,102,142,20,88,28,1.132
290 | 2016/1/1 20:00,NingBo,71,51,84,11,63,50,0.828
291 | 2016/1/1 20:00,ShaoXing,104,77,125,23,90,26,1.185
292 | 2016/1/1 20:00,HuZhou,130,98,138,32,61,51,1.125
293 | 2016/1/1 20:00,JiaXing,80,58,86,15,52,53,0.878
294 | 2016/1/1 20:00,TaiZhou,92,68,114,9,57,35,1.358
295 | 2016/1/1 20:00,ZhouShan,76,55,81,6,59,27,1.051
296 | 2016/1/1 21:00,ShangHai,87,64,81,16,80,29,0.839
297 | 2016/1/1 21:00,NanJing,163,123,178,27,88,44,1.509
298 | 2016/1/1 21:00,SuZhou,96,71,84,17,73,41,0.922
299 | 2016/1/1 21:00,NanTong,83,61,96,28,64,26,0.787
300 | 2016/1/1 21:00,WuXi,96,71,118,18,71,45,1.046
301 | 2016/1/1 21:00,ChangZhou,105,78,140,42,55,44,1.537
302 | 2016/1/1 21:00,ZhenJiang,163,123,153,26,48,64,1.081
303 | 2016/1/1 21:00,HangZhou,125,94,146,20,90,20,1.133
304 | 2016/1/1 21:00,NingBo,69,49,81,11,64,45,0.858
305 | 2016/1/1 21:00,ShaoXing,116,87,141,21,91,16,1.361
306 | 2016/1/1 21:00,HuZhou,120,90,107,25,53,47,1.051
307 | 2016/1/1 21:00,JiaXing,77,56,78,15,57,36,0.872
308 | 2016/1/1 21:00,TaiZhou,92,68,114,8,67,19,1.492
309 | 2016/1/1 21:00,ZhouShan,66,47,72,6,43,35,1.216
310 | 2016/1/1 22:00,ShangHai,88,65,80,16,79,26,0.839
311 | 2016/1/1 22:00,NanJing,199,149,204,29,101,28,1.511
312 | 2016/1/1 22:00,SuZhou,97,72,86,17,78,27,0.902
313 | 2016/1/1 22:00,NanTong,86,63,97,33,79,13,0.808
314 | 2016/1/1 22:00,WuXi,94,70,112,18,65,47,1.053
315 | 2016/1/1 22:00,ChangZhou,104,77,130,42,45,49,1.425
316 | 2016/1/1 22:00,ZhenJiang,153,116,149,25,53,53,0.977
317 | 2016/1/1 22:00,HangZhou,138,105,152,20,91,15,1.191
318 | 2016/1/1 22:00,NingBo,69,49,81,11,66,40,0.836
319 | 2016/1/1 22:00,ShaoXing,127,96,163,23,95,9,1.476
320 | 2016/1/1 22:00,HuZhou,138,105,128,24,54,39,1.088
321 | 2016/1/1 22:00,JiaXing,77,56,75,15,61,27,0.87
322 | 2016/1/1 22:00,TaiZhou,94,70,119,9,76,9,1.588
323 | 2016/1/1 22:00,ZhouShan,64,45,63,6,44,31,0.939
324 | 2016/1/1 23:00,ShangHai,92,68,81,16,77,25,0.834
325 | 2016/1/1 23:00,NanJing,213,162,223,32,108,18,1.54
326 | 2016/1/1 23:00,SuZhou,94,70,83,16,79,17,0.887
327 | 2016/1/1 23:00,NanTong,88,65,100,36,88,10,0.86
328 | 2016/1/1 23:00,WuXi,99,74,112,20,72,39,1.105
329 | 2016/1/1 23:00,ChangZhou,108,80,123,38,47,45,1.273
330 | 2016/1/1 23:00,ZhenJiang,161,122,152,25,50,49,0.979
331 | 2016/1/1 23:00,HangZhou,138,105,169,19,92,12,1.294
332 | 2016/1/1 23:00,NingBo,67,48,78,10,48,56,0.771
333 | 2016/1/1 23:00,ShaoXing,135,102,172,27,87,10,1.498
334 | 2016/1/1 23:00,HuZhou,139,106,137,28,53,37,1.065
335 | 2016/1/1 23:00,JiaXing,77,56,73,15,60,24,0.862
336 | 2016/1/1 23:00,TaiZhou,98,73,122,11,72,7,1.609
337 | 2016/1/1 23:00,ZhouShan,64,45,63,7,36,40,0.803
338 | 2016/1/2 0:00,ShangHai,92,68,78,16,67,28,0.787
339 | 2016/1/2 0:00,NanJing,223,172,237,31,106,16,1.54
340 | 2016/1/2 0:00,SuZhou,94,70,79,16,76,13,0.886
341 | 2016/1/2 0:00,NanTong,98,73,117,38,93,8,0.937
342 | 2016/1/2 0:00,WuXi,110,82,120,21,67,36,1.137
343 | 2016/1/2 0:00,ChangZhou,116,87,124,39,48,41,1.561
344 | 2016/1/2 0:00,ZhenJiang,185,139,172,29,49,46,0.99
--------------------------------------------------------------------------------