├── .gitignore ├── LICENSE ├── README.md ├── data ├── dataset_t_2.5_mdof10.mat ├── dataset_t_2.5_mdof6.mat ├── dataset_t_4_fem.mat ├── dataset_t_4_mdof10.mat ├── dataset_t_4_mdof6.mat ├── dataset_v_2.5_mdof10.mat ├── dataset_v_2.5_mdof6.mat ├── dataset_v_4_fem.mat ├── dataset_v_4_mdof10.mat └── dataset_v_4_mdof6.mat ├── get_model.py ├── test.py ├── train.py ├── train_MRF.py └── utils_data.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 xzk8559 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RecursiveLSTM 2 | **Recursive long short-term memory network for predicting nonlinear structural seismic response** 3 | 4 | *Zekun Xu, Jun Chen (corresponding author), Jiaxu Shen, Mengjie Xiang* 5 | 6 | Artificial neural networks have been used to predict nonlinear structural time histories under seismic excitation because they have a significantly lower computational cost than the traditional time-step integration method. However, most existing techniques require simplification procedures such as downsampling so that all records share the same length and sampling rate, and they lack sufficient accuracy, generality, or interpretability. In this paper, a recursive long short-term memory (LSTM) network is proposed for predicting nonlinear structural seismic responses for records of arbitrary length and sampling rate. By analogy with the traditional step-by-step integration method, the proposed LSTM model predicts the response recursively and is therefore applicable to structures and earthquakes with different spectral characteristics and amplitudes. Measured ground motions and multistory frame structures were used for model training and validation. Rules for hyperparameter selection in practical applications are also discussed.
The results showed that the proposed recursive LSTM model can adequately reproduce the global and local characteristics of the time history responses on four different structural response datasets, exhibiting good accuracy and generalization capability. 7 | 8 | ## Dependencies 9 | - Python 3 10 | - Tensorflow 2.0+ 11 | 12 | ## Use 13 | - If you want to experiment with datasets in paper: 14 | 1. run `train.py` to train LSTM models. 15 | 2. run `test.py` to evaluate on test sets. 16 | - If you want to use your own datasets, refer to `train_MRF.py`, modify the data paths and reading process. 17 | 18 | ## Citation 19 | If you use this code for your research, please cite our paper: 20 | 21 | ``` 22 | Zekun Xu, Jun Chen, Jiaxu Shen, Mengjie Xiang, 23 | Recursive long short-term memory network for predicting nonlinear structural seismic response, 24 | Engineering Structures, 25 | Volume 250, 26 | 2022, 27 | 113406, 28 | ISSN 0141-0296, 29 | https://doi.org/10.1016/j.engstruct.2021.113406. 30 | (https://www.sciencedirect.com/science/article/pii/S0141029621015133) 31 | ``` 32 | -------------------------------------------------------------------------------- /data/dataset_t_2.5_mdof10.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xzk8559/RecursiveLSTM/e0c728490962627483c5d5f266a969f92abbedc3/data/dataset_t_2.5_mdof10.mat -------------------------------------------------------------------------------- /data/dataset_t_2.5_mdof6.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xzk8559/RecursiveLSTM/e0c728490962627483c5d5f266a969f92abbedc3/data/dataset_t_2.5_mdof6.mat -------------------------------------------------------------------------------- /data/dataset_t_4_fem.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xzk8559/RecursiveLSTM/e0c728490962627483c5d5f266a969f92abbedc3/data/dataset_t_4_fem.mat -------------------------------------------------------------------------------- /data/dataset_t_4_mdof10.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xzk8559/RecursiveLSTM/e0c728490962627483c5d5f266a969f92abbedc3/data/dataset_t_4_mdof10.mat -------------------------------------------------------------------------------- /data/dataset_t_4_mdof6.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xzk8559/RecursiveLSTM/e0c728490962627483c5d5f266a969f92abbedc3/data/dataset_t_4_mdof6.mat -------------------------------------------------------------------------------- /data/dataset_v_2.5_mdof10.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xzk8559/RecursiveLSTM/e0c728490962627483c5d5f266a969f92abbedc3/data/dataset_v_2.5_mdof10.mat -------------------------------------------------------------------------------- /data/dataset_v_2.5_mdof6.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xzk8559/RecursiveLSTM/e0c728490962627483c5d5f266a969f92abbedc3/data/dataset_v_2.5_mdof6.mat -------------------------------------------------------------------------------- /data/dataset_v_4_fem.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/xzk8559/RecursiveLSTM/e0c728490962627483c5d5f266a969f92abbedc3/data/dataset_v_4_fem.mat -------------------------------------------------------------------------------- /data/dataset_v_4_mdof10.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xzk8559/RecursiveLSTM/e0c728490962627483c5d5f266a969f92abbedc3/data/dataset_v_4_mdof10.mat -------------------------------------------------------------------------------- /data/dataset_v_4_mdof6.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xzk8559/RecursiveLSTM/e0c728490962627483c5d5f266a969f92abbedc3/data/dataset_v_4_mdof6.mat -------------------------------------------------------------------------------- /get_model.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | def get_model(args): 4 | # Stacked LSTMs 5 | seq = [] 6 | seq.append( 7 | tf.keras.layers.LSTM( 8 | 128, 9 | recurrent_regularizer=tf.keras.regularizers.l1_l2(l1=0, l2=0.01), 10 | return_sequences=True, 11 | activation='tanh', 12 | input_shape=(args.past_history,4)) 13 | ) 14 | for i in range(args.layers-2): 15 | seq.append( 16 | tf.keras.layers.LSTM( 17 | 128, 18 | recurrent_regularizer=tf.keras.regularizers.l1_l2(l1=0, l2=0.01), 19 | return_sequences=True, 20 | activation='tanh') 21 | ) 22 | 23 | seq.append( 24 | tf.keras.layers.LSTM( 25 | 128, 26 | recurrent_regularizer=tf.keras.regularizers.l1_l2(l1=0, l2=0.01), 27 | return_sequences=False, 28 | activation='tanh') 29 | ) 30 | 31 | seq.append(tf.keras.layers.Dense(3)) 32 | 33 | model = tf.keras.models.Sequential(seq) 34 | model.compile(optimizer=tf.keras.optimizers.Adam( 35 | learning_rate=args.initial_lr, beta_1=0.9, beta_2=0.999, 36 | epsilon=1e-07, amsgrad=False, name='Adam' 37 | ), 38 | loss = "mse" 39 | ) 40 | return model -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import scipy.io 3 | import numpy as np 4 | import pandas as pd 5 | import matplotlib.pyplot as plt 6 | import tensorflow as tf 7 | import joblib 8 | print(tf.__version__) 9 | 10 | from utils_data import * 11 | from get_model import * 12 | 13 | # 14 | args = get_args(parse_args().task) 15 | 16 | # Load data 17 | eq_valid, d_valid, lens_v = load_data(args, train=0) 18 | print('Data loaded.') 19 | 20 | # Scale 21 | scaler = joblib.load('./save/scaler.save') 22 | print('Scaler loaded.') 23 | 24 | # Create model 25 | model = get_model(args) 26 | model.load_weights('./save/checkpoint.ckpt') 27 | print(model.summary()) 28 | 29 | # Convert Keras to onnx 30 | import keras2onnx 31 | import onnx 32 | onnx_model = keras2onnx.convert_keras(model, model.name) 33 | onnx.save_model(onnx_model, './save/model-best.onnx') 34 | 35 | # Load onnx model 36 | import onnx 37 | import onnxruntime as rt 38 | onnx_model = onnx.load('./save/model-best.onnx') 39 | onnx.checker.check_model(onnx_model) 40 | sess = rt.InferenceSession('./save/model-best.onnx') 41 | 42 | #%% 43 | import time 44 | # from utils import lowpass 45 | 46 | is_revised = 1 47 | is_filted = 1 48 | 49 | inp = args.past_history 50 | step = args.local_sample_step 51 | history_size = inp * step 52 | 53 | nval = eq_valid.shape[0] 54 | iteration = eq_valid.shape[1] 55 | 56 | origin, body = scale_valset(eq_valid, d_valid, scaler) 57 | 58 | head 
= np.zeros((nval, history_size, 4)) 59 | result = np.concatenate((head, body), axis=1) 60 | 61 | tick1 = time.time() 62 | 63 | for i in range(history_size, history_size+iteration): 64 | 65 | indices = range(i-history_size, i, step) 66 | seq = result[:, indices, :] 67 | 68 | if is_revised == 0: 69 | outputs = sess.run(None, {sess.get_inputs()[0].name: seq.astype(np.float32)})[0] # (batch_size, 3) 70 | elif is_revised == 1: 71 | seq = np.concatenate((seq, -seq), axis=0) 72 | outputs = sess.run(None, {sess.get_inputs()[0].name: seq.astype(np.float32)})[0] # (batch_size, 3) 73 | outputs = (outputs[:nval] - outputs[nval:])/2 74 | 75 | result[:, i:i+1, 1:4] = np.reshape(outputs, (-1, 1, 3)) 76 | print ("\r processing: {} / {} iterations ({}%)".format(i-history_size+1, iteration, (i-history_size+1)*100//iteration), end="") 77 | 78 | tick2 = time.time() 79 | 80 | origin = origin.astype(np.float64) 81 | result = result[:, history_size:, :] 82 | print("\n", tick2 - tick1) 83 | 84 | if is_filted == 1: 85 | for index in range(nval): 86 | for floor in range(3): 87 | result[index,:,floor+1] = lowpass(result[index,:,floor+1], 8, 100) 88 | 89 | 90 | IND = 0 91 | floor = 1 # 1, 2, 3 92 | window = range(500, 1000) 93 | window = range(0, lens_v[IND, 0]) 94 | 95 | l1 = origin[IND, window, floor] 96 | l2 = result[IND, window, floor] 97 | 98 | plt.figure(figsize=(20,12)) 99 | 100 | line_0 = plt.plot(l1, alpha=0.5, label = 'original disp')[0] 101 | line_0.set_color('red') 102 | line_0.set_linewidth(2.0) 103 | line_4 = plt.plot(l2, alpha=0.5, label = 'predicted disp{}-{}'.format(IND, floor))[0] 104 | line_4.set_color('green') 105 | line_4.set_linewidth(2.0) 106 | plt.legend() 107 | plt.show() 108 | 109 | print(np.corrcoef(l1, l2)[1][0]) 110 | 111 | #%% evaluate 112 | 113 | result_inv = result.copy() 114 | origin_inv = origin.copy() 115 | nval = origin.shape[0] 116 | 117 | for i in range(nval): 118 | result_inv[i, :, :] = scaler.inverse_transform(result_inv[i, :, :]).reshape(1, -1, 4) 119 | origin_inv[i, :, :] = scaler.inverse_transform(origin_inv[i, :, :]).reshape(1, -1, 4) 120 | 121 | # origin = origin/1e3 122 | # result = result/1e3 123 | 124 | nfloor = 3 # 1 5 9 / 1 3 6 125 | evaluate_results = np.zeros((nval, 7, nfloor)) 126 | 127 | for index in range(nval): 128 | 129 | # eq = origin[index, :lens_v[index,0], 0] 130 | for floor in range(nfloor): 131 | 132 | y_predict = result_inv[index, :lens_v[index,0],floor+1] 133 | y_test = origin_inv[index, :lens_v[index,0], floor+1] 134 | 135 | evaluate_results[index, :, floor] = evaluate(y_test, y_predict) 136 | 137 | evaluate_results_mean0 = np.mean(evaluate_results, axis = 0) 138 | evaluate_results_mean2 = np.mean(evaluate_results, axis = 2) 139 | evaluate_results_mean02 = np.mean(evaluate_results_mean2, axis = 0) 140 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import tensorflow as tf 4 | print(tf.__version__) 5 | 6 | from utils_data import * 7 | from get_model import * 8 | 9 | import argparse 10 | 11 | def parse_args(): 12 | parser = argparse.ArgumentParser(description="Run LSTM") 13 | parser.add_argument('--task', type=int, default=0) 14 | return parser.parse_args() 15 | 16 | 17 | ''' 18 | Load data 19 | eq shape: (num_eq, length_eq) 20 | d shape: (num_eq, length_eq, 3) 21 | ''' 22 | args = get_args(parse_args().task) 23 | eq_train, d_train, lens_t = load_data(args, train=1) 24 | eq_valid, d_valid, 
lens_v = load_data(args, train=0) 25 | print('Data loaded.') 26 | 27 | # Scale 28 | scaler, scaler_max = get_scaler(eq_train, d_train) 29 | print('Data scaled.') 30 | 31 | # Generate XY dataset 32 | XY_train = get_dataset_xy(eq_train, d_train, lens_t, scaler, args) 33 | XY_valid = get_dataset_xy(eq_valid, d_valid, lens_v, scaler, args) 34 | print('XY Data generated.') 35 | 36 | # Create model 37 | model = get_model(args) 38 | 39 | for x, y in XY_valid.take(1): 40 | print (x.shape) 41 | print (model(x).shape) 42 | print(model.summary()) 43 | 44 | initial_lr = args.initial_lr 45 | decay = args.decay 46 | initial_epoch = args.initial_epoch 47 | def step_decay(epoch): 48 | lr = initial_lr / (1 + decay * (initial_epoch + epoch)) 49 | if lr<5e-5: 50 | lr = 5e-5 51 | return lr 52 | 53 | lrate = tf.keras.callbacks.LearningRateScheduler(step_decay) 54 | filepath = os.path.join('./save', "checkpoint.ckpt") 55 | checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True, verbose=0, save_weights_only=True, mode='auto', period=1) 56 | history = model.fit(XY_train, 57 | epochs=args.EPOCHS, 58 | steps_per_epoch=100, 59 | validation_data=XY_valid, 60 | validation_steps=50, 61 | callbacks=[lrate, checkpoint]) 62 | 63 | min_vloss = min(history.history['val_loss']) 64 | print('Reached minimum val loss %e at epoch %d.' 65 | %(min_vloss, history.history['val_loss'].index(min_vloss))) 66 | np.savetxt('./save/loss.txt', history.history['loss']) 67 | np.savetxt('./save/vloss.txt', history.history['val_loss']) 68 | 69 | -------------------------------------------------------------------------------- /train_MRF.py: -------------------------------------------------------------------------------- 1 | import os 2 | import scipy.io 3 | import numpy as np 4 | import tensorflow as tf 5 | print(tf.__version__) 6 | 7 | from utils_data import * 8 | from get_model import * 9 | 10 | 11 | args = get_args(0) 12 | args.dt = 0.005 13 | args.layers = 5 14 | args.past_history = 100 # (75, 125)/global_sample_step 15 | args.global_sample_step = 1 16 | args.local_sample_step = 6 17 | args.window_sliding_step = 1 18 | 19 | mat = scipy.io.loadmat('./data/data_MRFDBF.mat') 20 | train_indices = mat['trainInd'] - 1 21 | valid_indices = mat['valInd'] - 1 22 | test_indices = mat['testInd'] - 1 23 | eq_train = mat['input_tf'][train_indices[0], ::args.global_sample_step].astype(np.float32) 24 | eq_valid = mat['input_tf'][valid_indices[0], ::args.global_sample_step].astype(np.float32) 25 | d_train = mat['target_tf'][train_indices[0], ::args.global_sample_step].astype(np.float32) 26 | d_valid = mat['target_tf'][valid_indices[0], ::args.global_sample_step].astype(np.float32) 27 | lens_t = np.array(eq_train.shape[0]*[5001], dtype=np.uint16).reshape((-1,1)) 28 | lens_v = np.array(eq_valid.shape[0]*[5001], dtype=np.uint16).reshape((-1,1)) 29 | 30 | print('Data loaded.') 31 | 32 | # Scale 33 | scaler, scaler_max = get_scaler(eq_train, d_train) 34 | print('Data scaled.') 35 | 36 | # Generate XY dataset 37 | XY_train = get_dataset_xy(eq_train, d_train, lens_t, scaler, args) 38 | XY_valid = get_dataset_xy(eq_valid, d_valid, lens_v, scaler, args) 39 | print('XY Data generated.') 40 | 41 | # Create model 42 | model = get_model(args) 43 | 44 | for x, y in XY_valid.take(1): 45 | print (x.shape) 46 | print (model(x).shape) 47 | print(model.summary()) 48 | 49 | initial_lr = args.initial_lr 50 | decay = args.decay 51 | initial_epoch = args.initial_epoch 52 | def step_decay(epoch): 53 | lr = initial_lr / (1 + decay *
(initial_epoch + epoch)) 54 | if lr<5e-5: 55 | lr = 5e-5 56 | return lr 57 | 58 | lrate = tf.keras.callbacks.LearningRateScheduler(step_decay) 59 | filepath = os.path.join('./save', "checkpoint.ckpt") 60 | checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True, verbose=0, save_weights_only=True, mode='auto', period=1) 61 | history = model.fit(XY_train, 62 | epochs=args.EPOCHS, 63 | steps_per_epoch=100, 64 | validation_data=XY_valid, 65 | validation_steps=50, 66 | # initial_epoch = 3000, 67 | callbacks=[lrate, checkpoint]) # WandbCallback() 68 | 69 | min_vloss = min(history.history['val_loss']) 70 | print('Reached minimum val loss %e at epoch %d.' 71 | %(min_vloss, history.history['val_loss'].index(min_vloss))) 72 | np.savetxt('./save/loss.txt', history.history['loss']) 73 | np.savetxt('./save/vloss.txt', history.history['val_loss']) 74 | 75 | -------------------------------------------------------------------------------- /utils_data.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import os 3 | import numpy as np 4 | import scipy.io 5 | from scipy import signal 6 | import joblib 7 | from sklearn.preprocessing import MaxAbsScaler 8 | from sklearn.metrics import mean_squared_error 9 | from sklearn.metrics import mean_absolute_error 10 | from sklearn.metrics import r2_score 11 | 12 | def get_args(index): 13 | class Args: 14 | def __init__(self, arglist): 15 | self.dataDir = arglist[0] 16 | self.trainset = arglist[1] 17 | self.validset = arglist[2] 18 | self.dt = arglist[3] 19 | self.layers = arglist[4] 20 | self.past_history = arglist[5] 21 | self.global_sample_step = arglist[6] 22 | self.local_sample_step = arglist[7] 23 | self.window_sliding_step = arglist[8] 24 | self.EPOCHS = 3500 25 | self.BATCH_SIZE = 256 26 | self.initial_lr = 0.001 27 | self.decay = 0.005 28 | self.initial_epoch = 0 29 | self.target_size = 1 30 | 31 | if index == 0: 32 | return Args([ 33 | './data/', 34 | 'dataset_t_2.5_mdof10.mat', 35 | 'dataset_v_2.5_mdof10.mat', 36 | 0.01, 6, 100, 1, 4, 2]) 37 | 38 | elif index == 1: 39 | return Args([ 40 | './data/', 41 | 'dataset_t_4_mdof10.mat', 42 | 'dataset_v_4_mdof10.mat', 43 | 0.01, 5, 90, 1, 4, 2]) 44 | 45 | elif index == 2: 46 | return Args([ 47 | './data/', 48 | 'dataset_t_2.5_mdof6.mat', 49 | 'dataset_v_2.5_mdof6.mat', 50 | 0.01, 7, 50, 1, 4, 2]) 51 | 52 | elif index == 3: 53 | return Args([ 54 | './data/', 55 | 'dataset_t_4_mdof6.mat', 56 | 'dataset_v_4_mdof6.mat', 57 | 0.01, 6, 60, 1, 4, 2]) 58 | 59 | elif index == 4: 60 | return Args([ 61 | './data/', 62 | 'dataset_t_4_fem.mat', 63 | 'dataset_v_4_fem.mat', 64 | 0.02, 7, 75, 1, 1, 1]) 65 | 66 | 67 | def load_data(args, train=1): 68 | 69 | if train: 70 | file = scipy.io.loadmat(args.dataDir + args.trainset) 71 | else: 72 | file = scipy.io.loadmat(args.dataDir + args.validset) 73 | 74 | lens = file['lens'] # shape (n, 1) 75 | eq = file['input_tf'] # shape (n, length) 76 | d = file['target_tf'] # shape (n, length, 3) 77 | 78 | lens = lens // args.global_sample_step 79 | eq = eq[:, ::args.global_sample_step] 80 | d = d[:, ::args.global_sample_step] 81 | 82 | return eq, d, lens 83 | 84 | def get_scaler(eq, d): 85 | 86 | eq = eq.reshape((-1, 1)) 87 | d = d.reshape((-1, d.shape[2])) 88 | temp = np.concatenate((eq, d), axis=1) 89 | 90 | scaler = MaxAbsScaler() 91 | scaler.fit(temp) 92 | data_max = scaler.max_abs_ 93 | joblib.dump(scaler, os.path.join('./save', "scaler.save")) 94 | 95 | return scaler, data_max 96 | 97 
| 98 | def get_dataset_xy(eq, d, lens, scaler, args, inverse=1): 99 | 100 | x = [] 101 | y = [] 102 | for i in range(len(eq)): 103 | 104 | eq0 = eq[i, :lens[i, 0]-1] 105 | d0 = d[i, :lens[i, 0]-1] 106 | 107 | # shape of eq0 (length,) 108 | # shape of d0 (length, 3) 109 | eq0 = eq0.reshape((-1, 1)) 110 | dataset = np.concatenate((eq0, d0), axis=1) 111 | dataset = scaler.transform(dataset) 112 | 113 | x_single, y_single = get_dataset_single(dataset, dataset[:, 1:4], 0, None, args) 114 | x.append(x_single) 115 | y.append(y_single) 116 | 117 | x = np.concatenate(x) 118 | y = np.concatenate(y) 119 | x = x.astype(np.float32) 120 | y = y.astype(np.float32) 121 | 122 | if inverse: 123 | x, y = add_inverse(x, y) 124 | 125 | XY = tf.data.Dataset.from_tensor_slices((x, y)) 126 | XY = XY.cache().shuffle(len(x)).batch(args.BATCH_SIZE).repeat() 127 | 128 | print ('Single window of past history : {}'.format(x[0].shape)) 129 | print ('Target response to predict : {}'.format(y[0].shape)) 130 | print('Shape of dataset X : {}'.format(x.shape)) 131 | print('Shape of dataset Y : {}'.format(y.shape)) 132 | 133 | return XY 134 | 135 | 136 | def get_dataset_single(dataset, target, start_index, end_index, args): 137 | data = [] 138 | labels = [] 139 | step = args.local_sample_step 140 | history_size = args.past_history * step 141 | 142 | start_index = start_index + history_size 143 | if end_index is None: 144 | end_index = len(dataset) - args.target_size 145 | 146 | for i in range(start_index, end_index, args.window_sliding_step): 147 | 148 | indices = range(i-history_size, i, step) 149 | 150 | data.append(dataset[indices]) 151 | 152 | label = target[i:i+args.target_size, :].reshape(args.target_size*target.shape[1]) 153 | labels.append(label) 154 | 155 | return np.array(data), np.array(labels) 156 | 157 | def add_inverse(a, b): 158 | a = np.concatenate((a, -a), axis=0) 159 | b = np.concatenate((b, -b), axis=0) 160 | return a, b 161 | 162 | def lowpass(data, f, fs): 163 | wn = 2*f/fs 164 | b, a = signal.butter(8, wn, 'lowpass') 165 | filtedData = signal.filtfilt(b, a, data) 166 | return filtedData 167 | 168 | def scale_valset(eq_val, d_val, scaler): 169 | 170 | eq_val_flat = eq_val.reshape(eq_val.shape[0]*eq_val.shape[1], -1) # (n*l, 1) 171 | d_val_flat = d_val.reshape(d_val.shape[0]*d_val.shape[1], -1) # (n*l, 3) 172 | origin = np.concatenate((eq_val_flat, d_val_flat), axis=1) # (n*l, 4) 173 | origin = scaler.transform(origin) 174 | origin = origin.reshape(eq_val.shape[0], eq_val.shape[1], -1) # (n, l, 4) 175 | body = np.concatenate((origin[:,:,0:1], np.zeros(origin[:,:,1:].shape)), axis=2) 176 | return origin, body 177 | 178 | def evaluate(y_true, y_predict): 179 | 180 | # common 181 | max_true = np.max(np.abs(y_true)) 182 | max_predict = np.max(np.abs(y_predict)) 183 | 184 | re = np.abs(max_predict/max_true - 1) 185 | mae = mean_absolute_error(y_true, y_predict) 186 | rmse = np.sqrt(mean_squared_error(y_true, y_predict)) 187 | r = np.corrcoef(y_true, y_predict)[1][0] 188 | r2 = r2_score(y_true, y_predict) 189 | 190 | # weighted 191 | mean_true = np.mean(y_true) 192 | abs_true = np.abs(y_true) 193 | w_true = abs_true/np.max(abs_true) 194 | 195 | wvar = np.average(np.square(y_true - mean_true), weights = w_true) 196 | wmse = np.average(np.square(y_true - y_predict), weights = w_true) 197 | 198 | wrmse = np.sqrt(wmse) 199 | rw2 = 1 - wmse/wvar 200 | 201 | return np.array([re, mae, rmse, r, r2, wrmse, rw2]) --------------------------------------------------------------------------------
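Two short illustrative sketches follow. They are not part of the repository code; the helper name, output file name, and array sizes in them are assumptions made only for illustration.

First, a minimal sketch of the closed-loop (recursive) prediction that `test.py` performs through ONNX Runtime, written here directly against the Keras model returned by `get_model(args)`. The window layout follows `test.py` and `get_dataset_single` in `utils_data.py`: channel 0 holds the scaled ground motion, channels 1-3 hold the predicted responses, and the window is sub-sampled every `local_sample_step` steps. The symmetric (`is_revised`) averaging and the low-pass filtering steps of `test.py` are omitted here.

```python
import numpy as np

def predict_recursive(model, eq_scaled, past_history, local_sample_step):
    """Closed-loop prediction sketch (hypothetical helper): feed each
    3-channel output back into the input window, one time step at a time."""
    n, n_steps = eq_scaled.shape
    history_size = past_history * local_sample_step

    # Channel 0: excitation; channels 1-3: responses, initially zero.
    body = np.concatenate([eq_scaled[:, :, None],
                           np.zeros((n, n_steps, 3))], axis=2)
    # Zero-padded head so the first step already has a full history window.
    result = np.concatenate([np.zeros((n, history_size, 4)),
                             body], axis=1).astype(np.float32)

    for i in range(history_size, history_size + n_steps):
        # Sub-sampled window over the last `history_size` steps, as in test.py.
        window = result[:, i - history_size:i:local_sample_step, :]
        outputs = model.predict(window, verbose=0)   # shape (n, 3)
        result[:, i, 1:4] = outputs                  # feed back for the next step

    return result[:, history_size:, :]               # drop the padded head
```

Second, for the "use your own datasets" route: `load_data()` in `utils_data.py` reads a .mat file with the keys `lens` (n, 1), `input_tf` (n, length) and `target_tf` (n, length, 3). A hypothetical example of packing placeholder arrays into that layout (the output file name and the sizes are made up):

```python
import numpy as np
import scipy.io

n_records, n_steps = 40, 3000
eq = np.random.randn(n_records, n_steps).astype(np.float32)       # ground motion records
resp = np.random.randn(n_records, n_steps, 3).astype(np.float32)  # 3 response channels
lens = np.full((n_records, 1), n_steps, dtype=np.uint16)          # valid length per record

scipy.io.savemat('./data/dataset_t_custom.mat',
                 {'input_tf': eq, 'target_tf': resp, 'lens': lens})
```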