├── .gitignore ├── LICENSE ├── abfo_mlnn_script.py ├── cro_mlnn_script.py ├── data ├── formatted │ ├── draw_data.py │ ├── handle_data.py │ └── results │ │ ├── cpu_ram.pdf │ │ ├── google_cpu_5m.pdf │ │ ├── google_ram_5m.pdf │ │ ├── internet_traffic_eu_5m.pdf │ │ └── internet_traffic_uk_5m.pdf └── note.md ├── envs ├── env.yml └── readme_first.md ├── ga_mlnn_script.py ├── history ├── images │ └── code │ │ ├── BFO.png │ │ ├── CRO.png │ │ ├── DE.png │ │ ├── GA.png │ │ ├── PSO.png │ │ ├── all_code_fit.pdf │ │ ├── all_code_fit.svg │ │ ├── all_code_wrapper.pdf │ │ ├── all_code_wrapper.png │ │ ├── all_code_wrapper.svg │ │ ├── hybrid_mlnn.png │ │ ├── neural_network.png │ │ ├── root_ann.png │ │ ├── root_hybrid_mlnn.png │ │ └── root_rnn.png └── in_paper │ ├── k=2 │ ├── cpu_k2_abfo_mlnn.pdf │ ├── cpu_k2_cro_mlnn.pdf │ ├── cpu_k2_ga_mlnn.pdf │ ├── cpu_k2_lstm.pdf │ ├── cpu_k2_mlnn.pdf │ ├── cpu_k2_ocro_mlnn.pdf │ ├── cpu_k2_pso_mlnn.pdf │ ├── cpu_k2_rnn.pdf │ ├── eu_k2_abfo_mlnn.pdf │ ├── eu_k2_cro_mlnn.pdf │ ├── eu_k2_ga_mlnn.pdf │ ├── eu_k2_lstm.pdf │ ├── eu_k2_mlnn.pdf │ ├── eu_k2_ocro_mlnn.pdf │ ├── eu_k2_pso_mlnn.pdf │ ├── eu_k2_rnn.pdf │ ├── ram_k2_abfo_mlnn.pdf │ ├── ram_k2_cro_mlnn.pdf │ ├── ram_k2_ga_mlnn.pdf │ ├── ram_k2_lstm.pdf │ ├── ram_k2_mlnn.pdf │ ├── ram_k2_ocro_mlnn.pdf │ ├── ram_k2_pso_mlnn.pdf │ └── ram_k2_rnn.pdf │ └── k=5 │ ├── cpu_k5_abfo_mlnn.pdf │ ├── cpu_k5_cro_mlnn.pdf │ ├── cpu_k5_ga_mlnn.pdf │ ├── cpu_k5_lstm.pdf │ ├── cpu_k5_mlnn.pdf │ ├── cpu_k5_ocro_mlnn.pdf │ ├── cpu_k5_pso_mlnn.pdf │ ├── cpu_k5_rnn.pdf │ ├── eu_k5_abfo_mlnn.pdf │ ├── eu_k5_cro_mlnn.pdf │ ├── eu_k5_ga_mlnn.pdf │ ├── eu_k5_lstm.pdf │ ├── eu_k5_mlnn.pdf │ ├── eu_k5_ocro_mlnn.pdf │ ├── eu_k5_pso_mlnn.pdf │ ├── eu_k5_rnn.pdf │ ├── ram_k5_abfo_mlnn.pdf │ ├── ram_k5_cro_mlnn.pdf │ ├── ram_k5_ga_mlnn.pdf │ ├── ram_k5_lstm.pdf │ ├── ram_k5_mlnn.pdf │ ├── ram_k5_ocro_mlnn.pdf │ ├── ram_k5_pso_mlnn.pdf │ ├── ram_k5_rnn.pdf │ ├── wc_k5_abfo_mlnn.pdf │ ├── wc_k5_cro_mlnn.pdf │ ├── 
wc_k5_ga_mlnn.pdf │ ├── wc_k5_lstm.pdf │ ├── wc_k5_mlnn.pdf │ ├── wc_k5_ocro_mlnn.pdf │ ├── wc_k5_pso_mlnn.pdf │ └── wc_k5_rnn.pdf ├── lstm1hl_script.py ├── mlnn1hl_script.py ├── model ├── main │ ├── hybrid_mlnn.py │ ├── traditional_ffnn.py │ └── traditional_rnn.py └── root │ ├── hybrid │ └── root_hybrid_mlnn.py │ ├── root_base.py │ └── traditional │ ├── root_mlnn.py │ └── root_rnn.py ├── ocro_mlnn_script.py ├── project_note.md ├── pso_mlnn_script.py ├── readme.md ├── rnn1hl_script.py └── utils ├── GraphUtil.py ├── IOUtil.py ├── MathUtil.py ├── MeasureUtil.py ├── PreprocessingUtil.py └── SettingPaper.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .idea/ 3 | drafts/ 4 | data/raw/ 5 | 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2019, Thieu Nguyen, Binh Minh Nguyen, Giang Nguyen 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /abfo_mlnn_script.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ------------------------------------------------------------------------------------------------------% 3 | # Created by "Thieu Nguyen" at 00:51, 29/03/2020 % 4 | # % 5 | # Email: nguyenthieu2102@gmail.com % 6 | # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % 7 | # Github: https://github.com/thieunguyen5991 % 8 | # -------------------------------------------------------------------------------------------------------% 9 | 10 | from os.path import splitext, basename, realpath 11 | from sklearn.model_selection import ParameterGrid 12 | from model.main.hybrid_mlnn import ABfoLSMlnn 13 | from utils.SettingPaper import abfols_mlnn_paras_final as param_grid 14 | from utils.SettingPaper import * 15 | from utils.IOUtil import load_dataset 16 | 17 | if 
SP_RUN_TIMES == 1: 18 | all_model_file_name = SP_LOG_FILENAME 19 | else: # If runs with more than 1, like stability test --> name of the models ==> such as: rnn1hl.csv 20 | all_model_file_name = str(splitext(basename(realpath(__file__)))[0]) 21 | 22 | 23 | def train_model(item): 24 | root_base_paras = { 25 | "dataset": dataset, 26 | "feature_size": feature_size, 27 | "data_idx": SP_DATA_SPLIT_INDEX_2, 28 | "sliding": item["sliding"], 29 | "multi_output": multi_output, 30 | "output_idx": output_index, 31 | "method_statistic": SP_PREPROCESSING_METHOD, 32 | "log_filename": all_model_file_name, 33 | "n_runs": SP_RUN_TIMES, # 1 or others 34 | "path_save_result": SP_PATH_SAVE_BASE + SP_DATA_FILENAME[loop] + "/", 35 | "draw": SP_DRAW, 36 | "print_train": SP_PRINT_TRAIN, # 0: nothing, 1 : full detail, 2: short version 37 | } 38 | paras_name = "hs_{}-ep_{}-act_{}-ps_{}-Ci_{}-Ped_{}-Ns_{}-N_minmax_{}".format(item["hidden_size"], item["epoch"], item["activation"], item["pop_size"], 39 | item["Ci"], item["Ped"], item["Ns"], item["N_minmax"]) 40 | root_hybrid_paras = { 41 | "hidden_size": item["hidden_size"], "activations": item["activations"], "epoch": item["epoch"], "domain_range": item["domain_range"], 42 | "paras_name": paras_name 43 | } 44 | abfols_paras = { 45 | "epoch": item["epoch"], "pop_size": item["pop_size"], "Ci": item["Ci"], "Ped": item["Ped"], "Ns": item["Ns"], "N_minmax": item["N_minmax"] 46 | } 47 | md = ABfoLSMlnn(root_base_paras=root_base_paras, root_hybrid_paras=root_hybrid_paras, abfols_paras=abfols_paras) 48 | md._running__() 49 | 50 | 51 | for _ in range(SP_RUN_TIMES): 52 | for loop in range(len(SP_DATA_FILENAME)): 53 | filename = SP_LOAD_DATA_FROM + SP_DATA_FILENAME[loop] 54 | dataset = load_dataset(filename, cols=SP_DATA_COLS[loop]) 55 | feature_size = len(SP_DATA_COLS[loop]) 56 | multi_output = SP_DATA_MULTI_OUTPUT[loop] 57 | output_index = SP_OUTPUT_INDEX[loop] 58 | # Create combination of params. 
59 | for item in list(ParameterGrid(param_grid)): 60 | train_model(item) 61 | -------------------------------------------------------------------------------- /cro_mlnn_script.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ------------------------------------------------------------------------------------------------------% 3 | # Created by "Thieu Nguyen" at 00:51, 29/03/2020 % 4 | # % 5 | # Email: nguyenthieu2102@gmail.com % 6 | # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % 7 | # Github: https://github.com/thieunguyen5991 % 8 | # -------------------------------------------------------------------------------------------------------% 9 | 10 | from os.path import splitext, basename, realpath 11 | from sklearn.model_selection import ParameterGrid 12 | from model.main.hybrid_mlnn import CroMlnn 13 | from utils.SettingPaper import cro_mlnn_paras_final as param_grid 14 | from utils.SettingPaper import * 15 | from utils.IOUtil import load_dataset 16 | 17 | if SP_RUN_TIMES == 1: 18 | all_model_file_name = SP_LOG_FILENAME 19 | else: # If runs with more than 1, like stability test --> name of the models ==> such as: rnn1hl.csv 20 | all_model_file_name = str(splitext(basename(realpath(__file__)))[0]) 21 | 22 | 23 | def train_model(item): 24 | root_base_paras = { 25 | "dataset": dataset, 26 | "feature_size": feature_size, 27 | "data_idx": SP_DATA_SPLIT_INDEX_2, 28 | "sliding": item["sliding"], 29 | "multi_output": multi_output, 30 | "output_idx": output_index, 31 | "method_statistic": SP_PREPROCESSING_METHOD, 32 | "log_filename": all_model_file_name, 33 | "n_runs": SP_RUN_TIMES, # 1 or others 34 | "path_save_result": SP_PATH_SAVE_BASE + SP_DATA_FILENAME[loop] + "/", 35 | "draw": SP_DRAW, 36 | "print_train": SP_PRINT_TRAIN, # 0: nothing, 1 : full detail, 2: short version 37 | } 38 | paras_name = 
"hs_{}-ep_{}-act_{}-ps_{}-po_{}-Fb_{}-Fa_{}-Fd_{}-Pd_{}-G_{}-GCR_{}-k_{}".format(item["hidden_size"], item["epoch"], item["activations"], 39 | item["pop_size"], item["po"], item["Fb"], item["Fa"], item["Fd"], item["Pd"], item["G"], item["GCR"], item["k"]) 40 | root_hybrid_paras = { 41 | "hidden_size": item["hidden_size"], "activations": item["activations"], "epoch": item["epoch"], "domain_range": item["domain_range"], 42 | "paras_name": paras_name 43 | } 44 | cro_paras = { 45 | "epoch": item["epoch"], "pop_size": item["pop_size"], "po": item["po"], "Fb": item["Fb"], "Fa": item["Fa"], 46 | "Fd": item["Fd"], "Pd": item["Pd"], "G": item["G"], "GCR": item["GCR"], "k": item["k"] 47 | } 48 | md = CroMlnn(root_base_paras=root_base_paras, root_hybrid_paras=root_hybrid_paras, cro_paras=cro_paras) 49 | md._running__() 50 | 51 | 52 | for _ in range(SP_RUN_TIMES): 53 | for loop in range(len(SP_DATA_FILENAME)): 54 | filename = SP_LOAD_DATA_FROM + SP_DATA_FILENAME[loop] 55 | dataset = load_dataset(filename, cols=SP_DATA_COLS[loop]) 56 | feature_size = len(SP_DATA_COLS[loop]) 57 | multi_output = SP_DATA_MULTI_OUTPUT[loop] 58 | output_index = SP_OUTPUT_INDEX[loop] 59 | # Create combination of params. 
60 | for item in list(ParameterGrid(param_grid)): 61 | train_model(item) 62 | -------------------------------------------------------------------------------- /data/formatted/draw_data.py: -------------------------------------------------------------------------------- 1 | from utils.IOUtil import read_dataset_file 2 | from matplotlib.ticker import FuncFormatter 3 | import matplotlib.pyplot as plt 4 | 5 | def plot_all_files(filenames, col_indexs, xlabels, ylabels, titles, colours, pathsaves): 6 | for i in range(0, len(filenames)): 7 | filename = filenames[i] + ".csv" 8 | pathsave = pathsaves[i] + ".pdf" 9 | col_index = col_indexs[i] 10 | color = colours[i] 11 | xlabel = xlabels[i] 12 | ylabel = ylabels[i] 13 | title = titles[i] 14 | 15 | dataset = read_dataset_file(filename, usecols=col_index, header=0) 16 | ax = plt.subplot() 17 | plt.plot(dataset, color) 18 | plt.xlabel(xlabel) 19 | plt.ylabel(ylabel) 20 | ax.set_title(title) 21 | 22 | plt.savefig(pathsave, bbox_inches = "tight") 23 | plt.show() 24 | 25 | 26 | def plot_file_with_scale_label(filename, col_index, xlabel, ylabel, title, color, pathsave, scaler): 27 | """ 28 | :param scaler: [ text_scale, math_scale, coordinate_text ] 29 | Eg: [ "%1.2fK", 1e-3, "x" ], [ "%1.2fM", 1e-6, "y" ], [ "%1.2fB", 1e-9, "both" ] 30 | :return: 31 | """ 32 | def scaler_function(x, pos): 33 | 'The two args are the value and tick position' # millions, thousands, billions, ... 
34 | return scaler[0] % (x * scaler[1]) 35 | formatter = FuncFormatter(scaler_function) 36 | 37 | dataset = read_dataset_file(filename, usecols=col_index, header=0) 38 | ax = plt.subplot() 39 | if scaler[2] == "x": 40 | ax.xaxis.set_major_formatter(formatter) 41 | elif scaler[2] == "y": 42 | ax.yaxis.set_major_formatter(formatter) 43 | elif scaler[2] == "both": 44 | ax.xaxis.set_major_formatter(formatter) 45 | ax.yaxis.set_major_formatter(formatter) 46 | else: 47 | "====== Don't wanna scale anything =====" 48 | plt.plot(dataset, color) 49 | plt.xlabel(xlabel) 50 | plt.ylabel(ylabel) 51 | ax.set_title(title) 52 | 53 | plt.savefig(pathsave, bbox_inches="tight") 54 | plt.show() 55 | 56 | 57 | # filename = "worldcup98_5m.csv" 58 | # pathsave = "results/worldcup98_5m_test.pdf" 59 | # col_index = [1] 60 | # color = '#ff7f0e' 61 | # xlabel = "Time (5 minutes)" 62 | # ylabel = "Number of requests" 63 | # title = "Request to server in worldcup season in 1998" 64 | # scaler = ["%1.1fK", 1e-3, "both"] 65 | # 66 | # plot_file_with_scale_label(filename, col_index, xlabel, ylabel, title, col_index, pathsave, scaler) 67 | 68 | 69 | 70 | 71 | ### Default Color plotly 72 | # D3: #1f77b4 73 | # Plotly: #1f77b4 ; rgb(31, 119, 180) 74 | # D3: #ff7f0e 75 | # Plotly: #ff7f0e ; rgb(255, 127, 14) 76 | # D3: #2ca02c 77 | # Plotly: #2ca02c ; rgb(44, 160, 44) 78 | # D3: #d62728 79 | # Plotly: #d62728 ; rgb(214, 39, 40) 80 | # D3: #9467bd 81 | # Plotly: #9467bd ; rgb(148, 103, 189) 82 | # D3: #8c564b 83 | # Plotly: #8c564b ; rgb(140, 86, 75) 84 | # D3: #e377c2 85 | # Plotly: #e377c2 ; rgb(227, 119, 194) 86 | # D3: #7f7f7f 87 | # Plotly: #7f7f7f ; rgb(127, 127, 127) 88 | # D3: #bcbd22 89 | # Plotly: #bcbd22 ; rgb(188, 189, 34) 90 | # D3: #17becf 91 | # Plotly: #17becf ; rgb(23, 190, 207) 92 | 93 | 94 | # filenames = ["internet_traffic_eu_5m", "internet_traffic_uk_5m","worldcup98_5m", "google_5m", "google_5m"] 95 | # pathsaves = ["internet_traffic_eu_5m", 
"internet_traffic_uk_5m","worldcup98_5m", "google_cpu_5m", "google_ram_5m"] 96 | # col_indexs = [ [4], [1], [1], [1], [2] ] 97 | # xlabels = ["Time (5 minutes)", "Time (5 minutes)", "Time (5 minutes)", "Time (5 minutes)", "Time (5 minutes)"] 98 | # ylabels = ["Megabyte", "Bit", "Request", "CPU usage", "Memory usage"] 99 | # titles = ["Internet traffic data from EU cities", "Internet traffic data from UK cities", 100 | # "Request to server in worldcup season in 1998", 101 | # "CPU usage from Google trace in 2011", "Memory usage from Google trace in 2011"] 102 | # colours = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd'] 103 | # 104 | # plot_all_files(filenames, col_indexs, xlabels, ylabels, titles, colours, pathsaves) 105 | 106 | 107 | 108 | 109 | def draw_2d(data=None, labels=None, title=None, pathsave=None): 110 | ax = plt.subplot() 111 | plt.figure(1) 112 | plt.plot(data[:, 0:1], data[:, 1:2], 'co', markersize=1.0) 113 | plt.ylabel(labels[1]) 114 | plt.xlabel(labels[0]) 115 | ax.set_title(title) 116 | plt.savefig(pathsave, bbox_inches="tight") 117 | plt.close() 118 | return None 119 | 120 | filename = "google_5m.csv" 121 | pathsave = "results/cpu_ram.pdf" 122 | col_idx = [1, 2] 123 | labels = ["CPU usage", "Memory usage"] 124 | title = "CPU usage and Memory usage from Google trace in 2011" 125 | dataset = read_dataset_file(filename, usecols=col_idx, header=0) 126 | draw_2d(dataset, labels, title, pathsave) 127 | 128 | 129 | -------------------------------------------------------------------------------- /data/formatted/handle_data.py: -------------------------------------------------------------------------------- 1 | from utils.IOUtil import read_dataset_file, save_formatted_data_csv 2 | import numpy as np 3 | 4 | filename = "internet_traffic_eu_5m.csv" 5 | pathsave = "test.csv" 6 | 7 | dataset = read_dataset_file(filename, usecols=[1], header=0) 8 | 9 | t1 = dataset[:, 0:1] / 8 10 | t2 = dataset[:, 0:1] / (8*1024) 11 | t3 = dataset[:, 0:1] / (8 * 1024 * 
1024) 12 | 13 | done = np.concatenate((dataset, t1, t2, t3), axis=1) 14 | 15 | save_formatted_data_csv(done, pathsave, "") 16 | 17 | 18 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /data/formatted/results/cpu_ram.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/data/formatted/results/cpu_ram.pdf -------------------------------------------------------------------------------- /data/formatted/results/google_cpu_5m.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/data/formatted/results/google_cpu_5m.pdf -------------------------------------------------------------------------------- /data/formatted/results/google_ram_5m.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/data/formatted/results/google_ram_5m.pdf -------------------------------------------------------------------------------- /data/formatted/results/internet_traffic_eu_5m.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/data/formatted/results/internet_traffic_eu_5m.pdf -------------------------------------------------------------------------------- /data/formatted/results/internet_traffic_uk_5m.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/data/formatted/results/internet_traffic_uk_5m.pdf -------------------------------------------------------------------------------- /data/note.md: 
-------------------------------------------------------------------------------- 1 | # Dataset 2 | 3 | ### Computing 4 | 5 | 1. Internet traffic data (in bits) from a private ISP with centres 6 | 7 | https://datamarket.com/data/list/?q=cat:ecd%20provider:tsdl 8 | 9 | 10 | 1. Internet traffic data (in bits) from a private ISP with centres in 11 European cities. 11 | The data corresponds to a transatlantic link and was collected from 06:57 hours on 7 June to 11:17 hours on 31 July 2005. Hourly data., 12 | 13 | 2. Internet traffic data (in bits) from a private ISP with centres in 11 European cities. 14 | The data corresponds to a transatlantic link and was collected from 06:57 hours on 7 June to 11:17 hours on 31 July 2005. Data collected at five minute intervals., 15 | 16 | 3. Internet traffic data (in bits) from a private ISP with centres in 11 European cities. 17 | The data corresponds to a transatlantic link and was collected from 06:57 hours on 7 June to 11:17 hours on 31 July 2005. Daily data., 18 | 19 | 4. Internet traffic data (in bits) from an ISP. Aggregated traffic in the United Kingdom academic network backbone. It was collected between 19 November 2004, at 09:30 hours and 27 January 2005, at 11:11 hours. Data collected at five minute intervals. 20 | 21 | 5. Internet traffic data (in bits) from an ISP. Aggregated traffic in the United Kingdom academic network backbone. It was collected between 19 November 2004, at 09:30 hours and 27 January 2005, at 11:11 hours. Daily data 22 | 23 | 6. Internet traffic data (in bits) from an ISP. Aggregated traffic in the United Kingdom academic network backbone. It was collected between 19 November 2004, at 09:30 hours and 27 January 2005, at 11:11 hours. Hourly data 24 | 25 | 26 | 27 | ### Traffic 5m 28 | 1. 
EU 29 | https://datamarket.com/data/set/232n/internet-traffic-data-in-bits-from-a-private-isp-with-centres-in-11-european-cities-the-data-corresponds-to-a-transatlantic-link-and-was-collected-from-0657-hours-on-7-june-to-1117-hours-on-31-july-2005-data-collected-at-five-minute-intervals#!ds=232n&display=line 30 | 2. UK 31 | https://datamarket.com/data/set/232g/internet-traffic-data-in-bits-from-an-isp-aggregated-traffic-in-the-united-kingdom-academic-network-backbone-it-was-collected-between-19-november-2004-at-0930-hours-and-27-january-2005-at-1111-hours-data-collected-at-five-minute-intervals#!ds=232g 32 | 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /envs/env.yml: -------------------------------------------------------------------------------- 1 | name: ai 2 | channels: 3 | - conda-forge 4 | - anaconda 5 | - defaults 6 | dependencies: 7 | - _tflow_select=2.2.0=eigen 8 | - absl-py=0.9.0=py37_0 9 | - asn1crypto=1.3.0=py37_0 10 | - astor=0.8.0=py37_0 11 | - atomicwrites=1.3.0=py37_1 12 | - attrs=19.3.0=py_0 13 | - automat=0.8.0=py_0 14 | - backcall=0.1.0=py37_0 15 | - bcrypt=3.1.7=py37he774522_0 16 | - blas=1.0=mkl 17 | - bleach=3.1.0=py37_0 18 | - blinker=1.4=py37_0 19 | - ca-certificates=2020.1.1=0 20 | - cachetools=3.1.1=py_0 21 | - certifi=2019.11.28=py37_1 22 | - cffi=1.14.0=py37h7a1dbc1_0 23 | - chardet=3.0.4=py37_1003 24 | - click=7.0=py37_0 25 | - colorama=0.4.3=py_0 26 | - constantly=15.1.0=py37h28b3542_0 27 | - cryptography=2.8=py37h7a1dbc1_0 28 | - cssselect=1.1.0=py_0 29 | - cycler=0.10.0=py37_0 30 | - decorator=4.4.1=py_0 31 | - defusedxml=0.6.0=py_0 32 | - entrypoints=0.3=py37_0 33 | - freetype=2.9.1=ha9979f8_1 34 | - gast=0.2.2=py37_0 35 | - google-auth=1.11.2=py_0 36 | - google-auth-oauthlib=0.4.1=py_2 37 | - google-pasta=0.1.8=py_0 38 | - grpcio=1.27.2=py37h351948d_0 39 | - h5py=2.10.0=py37h5e291fa_0 40 | - hdf5=1.10.4=h7ebc959_0 41 | - hyperlink=19.0.0=py_0 42 | - 
icc_rt=2019.0.0=h0cc432a_1 43 | - icu=58.2=ha66f8fd_1 44 | - idna=2.8=py37_0 45 | - importlib_metadata=1.5.0=py37_0 46 | - incremental=17.5.0=py37_0 47 | - intel-openmp=2020.0=166 48 | - ipykernel=5.1.4=py37h39e3cac_0 49 | - ipython=7.12.0=py37h5ca1d4c_0 50 | - ipython_genutils=0.2.0=py37_0 51 | - ipywidgets=7.5.1=py_0 52 | - jedi=0.16.0=py37_0 53 | - jinja2=2.11.1=py_0 54 | - joblib=0.14.1=py_0 55 | - jpeg=9b=hb83a4c4_2 56 | - jsonschema=3.2.0=py37_0 57 | - jupyter=1.0.0=py37_7 58 | - jupyter_client=5.3.4=py37_0 59 | - jupyter_console=6.1.0=py_0 60 | - jupyter_core=4.6.1=py37_0 61 | - keras=2.3.1=0 62 | - keras-applications=1.0.8=py_0 63 | - keras-base=2.3.1=py37_0 64 | - keras-preprocessing=1.1.0=py_1 65 | - kiwisolver=1.1.0=py37ha925a31_0 66 | - libiconv=1.15=h1df5818_7 67 | - libpng=1.6.37=h2a8f88b_0 68 | - libprotobuf=3.11.4=h7bd577a_0 69 | - libsodium=1.0.16=h9d3ae62_0 70 | - libxml2=2.9.9=h464c3ec_0 71 | - libxslt=1.1.33=h579f668_0 72 | - lxml=4.5.0=py37h1350720_0 73 | - m2w64-gcc-libgfortran=5.3.0=6 74 | - m2w64-gcc-libs=5.3.0=7 75 | - m2w64-gcc-libs-core=5.3.0=7 76 | - m2w64-gmp=6.1.0=2 77 | - m2w64-libwinpthread-git=5.0.0.4634.697f757=2 78 | - markdown=3.1.1=py37_0 79 | - markupsafe=1.1.1=py37he774522_0 80 | - matplotlib=3.1.3=py37_0 81 | - matplotlib-base=3.1.3=py37h64f37c6_0 82 | - mistune=0.8.4=py37he774522_0 83 | - mkl=2020.0=166 84 | - mkl-service=2.3.0=py37hb782905_0 85 | - mkl_fft=1.0.15=py37h14836fe_0 86 | - mkl_random=1.1.0=py37h675688f_0 87 | - more-itertools=8.2.0=py_0 88 | - msys2-conda-epoch=20160418=1 89 | - nbconvert=5.6.1=py37_0 90 | - nbformat=5.0.4=py_0 91 | - notebook=6.0.3=py37_0 92 | - numpy=1.18.1=py37h93ca92e_0 93 | - numpy-base=1.18.1=py37hc3f5095_1 94 | - oauthlib=3.1.0=py_0 95 | - openssl=1.1.1e=he774522_0 96 | - opt_einsum=3.1.0=py_0 97 | - packaging=20.1=py_0 98 | - pandas=1.0.1=py37h47e9c7a_0 99 | - pandoc=2.2.3.2=0 100 | - pandocfilters=1.4.2=py37_1 101 | - parsel=1.5.2=py37_0 102 | - parso=0.6.1=py_0 103 | - patsy=0.5.1=py_0 
104 | - pickleshare=0.7.5=py37_0 105 | - pip=20.0.2=py37_1 106 | - pluggy=0.13.1=py37_0 107 | - prometheus_client=0.7.1=py_0 108 | - prompt_toolkit=3.0.3=py_0 109 | - protobuf=3.11.4=py37h33f27b4_0 110 | - py=1.8.1=py_0 111 | - pyasn1=0.4.8=py_0 112 | - pyasn1-modules=0.2.7=py_0 113 | - pycparser=2.19=py37_0 114 | - pydispatcher=2.0.5=py37_1 115 | - pygments=2.5.2=py_0 116 | - pyhamcrest=1.9.0=py37_2 117 | - pyjwt=1.7.1=py37_0 118 | - pyopenssl=19.1.0=py37_0 119 | - pyparsing=2.4.6=py_0 120 | - pyqt=5.9.2=py37h6538335_2 121 | - pyreadline=2.1=py37_1 122 | - pyrsistent=0.15.7=py37he774522_0 123 | - pysocks=1.7.1=py37_0 124 | - pytest=5.3.5=py37_0 125 | - pytest-runner=5.2=py_0 126 | - python=3.7.6=h60c2a47_2 127 | - python-dateutil=2.8.1=py_0 128 | - pytz=2019.3=py_0 129 | - pywin32=227=py37he774522_1 130 | - pywinpty=0.5.7=py37_0 131 | - pyyaml=5.3.1=py37he774522_0 132 | - pyzmq=18.1.1=py37ha925a31_0 133 | - qt=5.9.7=vc14h73c81de_0 134 | - qtconsole=4.6.0=py37_1 135 | - queuelib=1.5.0=py37_0 136 | - requests=2.22.0=py37_1 137 | - requests-oauthlib=1.3.0=py_0 138 | - rsa=4.0=py_0 139 | - scikit-learn=0.22.1=py37h6288b17_0 140 | - scipy=1.4.1=py37h9439919_0 141 | - scrapy=1.6.0=py37_0 142 | - seaborn=0.10.0=py_0 143 | - send2trash=1.5.0=py37_0 144 | - service_identity=18.1.0=py37h28b3542_0 145 | - setuptools=45.2.0=py37_0 146 | - sip=4.19.8=py37h6538335_0 147 | - six=1.14.0=py37_0 148 | - sqlite=3.31.1=he774522_0 149 | - statsmodels=0.11.1=py37hfa6e2cd_0 150 | - tensorboard=2.1.0=py3_0 151 | - tensorflow=2.1.0=eigen_py37hd727fc0_0 152 | - tensorflow-base=2.1.0=eigen_py37h49b2757_0 153 | - tensorflow-estimator=2.1.0=pyhd54b08b_0 154 | - termcolor=1.1.0=py37_1 155 | - terminado=0.8.3=py37_0 156 | - testpath=0.4.4=py_0 157 | - tornado=6.0.3=py37he774522_3 158 | - traitlets=4.3.3=py37_0 159 | - twisted=19.10.0=py37he774522_0 160 | - urllib3=1.25.8=py37_0 161 | - vc=14.1=h0510ff6_4 162 | - vs2015_runtime=14.16.27012=hf0eaf9b_1 163 | - w3lib=1.21.0=py_0 164 | - 
wcwidth=0.1.8=py_0 165 | - webencodings=0.5.1=py37_1 166 | - werkzeug=0.14.1=py37_0 167 | - wheel=0.34.2=py37_0 168 | - widgetsnbextension=3.5.1=py37_0 169 | - win_inet_pton=1.1.0=py37_0 170 | - wincertstore=0.2=py37_0 171 | - winpty=0.4.3=4 172 | - wrapt=1.11.2=py37he774522_0 173 | - yaml=0.1.7=hc54c509_2 174 | - zeromq=4.3.1=h33f27b4_3 175 | - zipp=2.2.0=py_0 176 | - zlib=1.2.11=h62dcd97_3 177 | - zope=1.0=py37_1 178 | - zope.interface=4.7.1=py37he774522_0 179 | - pip: 180 | - mealpy==0.7.1 181 | - opfunu==0.4.3 182 | prefix: C:\Users\nguye\Miniconda3\envs\ai 183 | 184 | -------------------------------------------------------------------------------- /envs/readme_first.md: -------------------------------------------------------------------------------- 1 | ## Install Environments 2 | 3 | 1. Do it on your terminal (ubuntu) or bash shell (windows). Make sure you already have miniconda 4 | ```code 5 | Check this link for pre-installed package. Due to new version of tensorflow. Both windows and linux 6 | https://www.tensorflow.org/install/pip?lang=python3 7 | 8 | After that: 9 | 10 | First way: (Do it by yourself) 11 | conda --version 12 | conda update conda 13 | conda info --envs 14 | 15 | conda create -n ai python==3.7.6 16 | conda activate ai 17 | 18 | conda install pandas matplotlib scikit-learn scrapy seaborn 19 | conda install -c anaconda tensorflow 20 | conda install -c anaconda ipython-notebook 21 | conda install -c conda-forge statsmodels 22 | conda install keras 23 | pip install mealpy 24 | 25 | 26 | Second way: (create env by my env.yml file) 27 | 28 | conda ai create -f env.yml (the first line in that file is the name of the environment) 29 | conda activate ai 30 | pip install mealpy 31 | 32 | ``` 33 | 34 | 2. 
Useful command 35 | ```code 36 | 37 | 1) Activate, check and deactivate environment 38 | conda activate ai 39 | 40 | conda list (or) 41 | conda env list 42 | conda info --envs 43 | 44 | source deactivate 45 | 46 | 2) Check package inside the environment 47 | conda list -n ai (if ai hasn't activated) 48 | conda list (if ai already activated) 49 | 50 | 3) Export to .yml for other usage. 51 | source activate ai (access to environment) 52 | conda env export > env.yml 53 | 54 | 4) Delete environment 55 | conda remove --name ai --all (or) 56 | conda env remove --name ai 57 | 58 | conda info --envs 59 | ``` 60 | 61 | ## Link libraries needed 62 | https://bigdata-madesimple.com/top-20-python-libraries-for-data-science/ 63 | -------------------------------------------------------------------------------- /ga_mlnn_script.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ------------------------------------------------------------------------------------------------------% 3 | # Created by "Thieu Nguyen" at 00:51, 29/03/2020 % 4 | # % 5 | # Email: nguyenthieu2102@gmail.com % 6 | # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % 7 | # Github: https://github.com/thieunguyen5991 % 8 | # -------------------------------------------------------------------------------------------------------% 9 | 10 | from os.path import splitext, basename, realpath 11 | from sklearn.model_selection import ParameterGrid 12 | from model.main.hybrid_mlnn import GaMlnn 13 | from utils.SettingPaper import ga_mlnn_paras_final as param_grid 14 | from utils.SettingPaper import * 15 | from utils.IOUtil import load_dataset 16 | 17 | if SP_RUN_TIMES == 1: 18 | all_model_file_name = SP_LOG_FILENAME 19 | else: # If runs with more than 1, like stability test --> name of the models ==> such as: rnn1hl.csv 20 | all_model_file_name = str(splitext(basename(realpath(__file__)))[0]) 21 | 22 | 23 | def train_model(item): 24 | 
root_base_paras = { 25 | "dataset": dataset, 26 | "feature_size": feature_size, 27 | "data_idx": SP_DATA_SPLIT_INDEX, 28 | "sliding": item["sliding"], 29 | "multi_output": multi_output, 30 | "output_idx": output_index, 31 | "method_statistic": SP_PREPROCESSING_METHOD, 32 | "log_filename": all_model_file_name, 33 | "n_runs": SP_RUN_TIMES, # 1 or others 34 | "path_save_result": SP_PATH_SAVE_BASE + SP_DATA_FILENAME[loop] + "/", 35 | "draw": SP_DRAW, 36 | "print_train": SP_PRINT_TRAIN, # 0: nothing, 1 : full detail, 2: short version 37 | } 38 | paras_name = "hs_{}-ep_{}-act_{}-ps_{}-pc_{}-pm_{}".format(item["hidden_size"], item["epoch"], item["activations"], item["pop_size"], item["pc"], 39 | item["pm"]) 40 | root_hybrid_paras = { 41 | "hidden_size": item["hidden_size"], "activations": item["activations"], "epoch": item["epoch"], "domain_range": item["domain_range"], 42 | "paras_name": paras_name 43 | } 44 | ga_paras = { 45 | "epoch": item["epoch"], "pop_size": item["pop_size"], "pc": item["pc"], "pm": item["pm"] 46 | } 47 | md = GaMlnn(root_base_paras=root_base_paras, root_hybrid_paras=root_hybrid_paras, ga_paras=ga_paras) 48 | md._running__() 49 | 50 | 51 | for _ in range(SP_RUN_TIMES): 52 | for loop in range(len(SP_DATA_FILENAME)): 53 | filename = SP_LOAD_DATA_FROM + SP_DATA_FILENAME[loop] 54 | dataset = load_dataset(filename, cols=SP_DATA_COLS[loop]) 55 | feature_size = len(SP_DATA_COLS[loop]) 56 | multi_output = SP_DATA_MULTI_OUTPUT[loop] 57 | output_index = SP_OUTPUT_INDEX[loop] 58 | # Create combination of params. 
59 | for item in list(ParameterGrid(param_grid)): 60 | train_model(item) 61 | -------------------------------------------------------------------------------- /history/images/code/BFO.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/BFO.png -------------------------------------------------------------------------------- /history/images/code/CRO.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/CRO.png -------------------------------------------------------------------------------- /history/images/code/DE.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/DE.png -------------------------------------------------------------------------------- /history/images/code/GA.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/GA.png -------------------------------------------------------------------------------- /history/images/code/PSO.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/PSO.png -------------------------------------------------------------------------------- /history/images/code/all_code_fit.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/all_code_fit.pdf 
-------------------------------------------------------------------------------- /history/images/code/all_code_fit.svg: -------------------------------------------------------------------------------- 1 | 2 |
root_ann
root_ann
root_hybrid_mlnn
root_hybrid_mlnn<br>
mlnn
<b>mlnn</b>
rnn
[Not supported by viewer]
root_rnn
root_rnn
root_base
root_base
root_algo
root_algo
root
[Not supported by viewer]
optimizer
<b>optimizer</b>
DE
DE
CRO
CRO<br>
evolutionary
<b>evolutionary</b>
swarm
<b>swarm</b>
PSO
PSO
GA
GA
BFO
BFO
BaseGa
BaseGa
BaseDe
BaseDe
BaseCro
BaseCro
OCro
OCro
BasePso
BasePso
BaseBfo
BaseBfo
ABfoLS
ABfoLS
main
<b>main</b>
keras
<b>keras</b>
numpy
<b>numpy</b>
neural_network
neural_network<br>
hybrid_mlnn
hybrid_mlnn<br>
hybrid_elm
hybrid_elm<br>
Rnn1HL
Rnn1HL
Lstm1HL
Lstm1HL
Rnn2HL
Rnn2HL
Lstm2HL
Lstm2HL
Mlnn1HL
Mlnn1HL
Mlnn2HL
Mlnn2HL
hybrid_dbn
hybrid_dbn<br>
PsoElm
PsoElm
BfoElm
BfoElm
GaMlnn
GaMlnn
DeMlnn
DeMlnn
......
[Not supported by viewer]
Implement
Implement
Used
Used
-------------------------------------------------------------------------------- /history/images/code/all_code_wrapper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/all_code_wrapper.pdf -------------------------------------------------------------------------------- /history/images/code/all_code_wrapper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/all_code_wrapper.png -------------------------------------------------------------------------------- /history/images/code/hybrid_mlnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/hybrid_mlnn.png -------------------------------------------------------------------------------- /history/images/code/neural_network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/neural_network.png -------------------------------------------------------------------------------- /history/images/code/root_ann.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/root_ann.png -------------------------------------------------------------------------------- /history/images/code/root_hybrid_mlnn.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/root_hybrid_mlnn.png -------------------------------------------------------------------------------- /history/images/code/root_rnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/images/code/root_rnn.png -------------------------------------------------------------------------------- /history/in_paper/k=2/cpu_k2_abfo_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/cpu_k2_abfo_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/cpu_k2_cro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/cpu_k2_cro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/cpu_k2_ga_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/cpu_k2_ga_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/cpu_k2_lstm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/cpu_k2_lstm.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/cpu_k2_mlnn.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/cpu_k2_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/cpu_k2_ocro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/cpu_k2_ocro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/cpu_k2_pso_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/cpu_k2_pso_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/cpu_k2_rnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/cpu_k2_rnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/eu_k2_abfo_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/eu_k2_abfo_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/eu_k2_cro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/eu_k2_cro_mlnn.pdf 
-------------------------------------------------------------------------------- /history/in_paper/k=2/eu_k2_ga_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/eu_k2_ga_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/eu_k2_lstm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/eu_k2_lstm.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/eu_k2_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/eu_k2_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/eu_k2_ocro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/eu_k2_ocro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/eu_k2_pso_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/eu_k2_pso_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/eu_k2_rnn.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/eu_k2_rnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/ram_k2_abfo_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/ram_k2_abfo_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/ram_k2_cro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/ram_k2_cro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/ram_k2_ga_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/ram_k2_ga_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/ram_k2_lstm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/ram_k2_lstm.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/ram_k2_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/ram_k2_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/ram_k2_ocro_mlnn.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/ram_k2_ocro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/ram_k2_pso_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/ram_k2_pso_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=2/ram_k2_rnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=2/ram_k2_rnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/cpu_k5_abfo_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/cpu_k5_abfo_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/cpu_k5_cro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/cpu_k5_cro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/cpu_k5_ga_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/cpu_k5_ga_mlnn.pdf 
-------------------------------------------------------------------------------- /history/in_paper/k=5/cpu_k5_lstm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/cpu_k5_lstm.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/cpu_k5_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/cpu_k5_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/cpu_k5_ocro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/cpu_k5_ocro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/cpu_k5_pso_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/cpu_k5_pso_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/cpu_k5_rnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/cpu_k5_rnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/eu_k5_abfo_mlnn.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/eu_k5_abfo_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/eu_k5_cro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/eu_k5_cro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/eu_k5_ga_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/eu_k5_ga_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/eu_k5_lstm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/eu_k5_lstm.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/eu_k5_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/eu_k5_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/eu_k5_ocro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/eu_k5_ocro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/eu_k5_pso_mlnn.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/eu_k5_pso_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/eu_k5_rnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/eu_k5_rnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/ram_k5_abfo_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/ram_k5_abfo_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/ram_k5_cro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/ram_k5_cro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/ram_k5_ga_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/ram_k5_ga_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/ram_k5_lstm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/ram_k5_lstm.pdf -------------------------------------------------------------------------------- 
/history/in_paper/k=5/ram_k5_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/ram_k5_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/ram_k5_ocro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/ram_k5_ocro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/ram_k5_pso_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/ram_k5_pso_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/ram_k5_rnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/ram_k5_rnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/wc_k5_abfo_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/wc_k5_abfo_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/wc_k5_cro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/wc_k5_cro_mlnn.pdf 
-------------------------------------------------------------------------------- /history/in_paper/k=5/wc_k5_ga_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/wc_k5_ga_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/wc_k5_lstm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/wc_k5_lstm.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/wc_k5_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/wc_k5_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/wc_k5_ocro_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/wc_k5_ocro_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/wc_k5_pso_mlnn.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chasebk/code_OCRO_MLNN/f8010ad0303f69d806f1605892bdec58005612f9/history/in_paper/k=5/wc_k5_pso_mlnn.pdf -------------------------------------------------------------------------------- /history/in_paper/k=5/wc_k5_rnn.pdf: -------------------------------------------------------------------------------- 
# !/usr/bin/env python
# Script runner: trains the single-hidden-layer LSTM model over every dataset
# and every hyper-parameter combination declared in utils.SettingPaper.

from os.path import splitext, basename, realpath
from sklearn.model_selection import ParameterGrid
from model.main.traditional_rnn import Lstm1HL
from utils.SettingPaper import lstm1hl_paras_final as param_grid
from utils.SettingPaper import *
from utils.IOUtil import load_dataset

# Single run -> log under the configured name; stability test (multiple runs)
# -> log under this script's own file name (e.g. "lstm1hl_script").
if SP_RUN_TIMES == 1:
    all_model_file_name = SP_LOG_FILENAME
else:
    all_model_file_name = str(splitext(basename(realpath(__file__)))[0])


def train_model(item):
    """Build one Lstm1HL model from a single hyper-parameter combination and run it."""
    root_base_paras = {
        "dataset": dataset,
        "feature_size": feature_size,
        "data_idx": SP_DATA_SPLIT_INDEX,
        "sliding": item["sliding"],
        "multi_output": multi_output,
        "output_idx": output_index,
        "method_statistic": SP_PREPROCESSING_METHOD,
        "log_filename": all_model_file_name,
        "n_runs": SP_RUN_TIMES,  # 1, or more for a stability test
        "path_save_result": SP_PATH_SAVE_BASE + SP_DATA_FILENAME[loop] + "/",
        "draw": SP_DRAW,
        "print_train": SP_PRINT_TRAIN,  # 0: silent, 1: full detail, 2: short version
    }
    paras_name = "hs_{}-ep_{}-bs_{}-lr_{}-ac_{}-op_{}-lo_{}".format(
        item["hidden_sizes"], item["epoch"], item["batch_size"], item["learning_rate"],
        item["activations"], item["optimizer"], item["loss"])
    root_rnn_paras = {
        "hidden_sizes": item["hidden_sizes"],
        "epoch": item["epoch"],
        "batch_size": item["batch_size"],
        "learning_rate": item["learning_rate"],
        "activations": item["activations"],
        "optimizer": item["optimizer"],
        "loss": item["loss"],
        "dropouts": item["dropouts"],
        "paras_name": paras_name,
    }
    Lstm1HL(root_base_paras=root_base_paras, root_rnn_paras=root_rnn_paras)._running__()


for _ in range(SP_RUN_TIMES):
    for loop in range(len(SP_DATA_FILENAME)):
        dataset = load_dataset(SP_LOAD_DATA_FROM + SP_DATA_FILENAME[loop], cols=SP_DATA_COLS[loop])
        feature_size = len(SP_DATA_COLS[loop])
        multi_output = SP_DATA_MULTI_OUTPUT[loop]
        output_index = SP_OUTPUT_INDEX[loop]
        for item in ParameterGrid(param_grid):  # every hyper-parameter combination
            train_model(item)
# Single run -> log under the configured name; stability test (multiple runs)
# -> log under this script's own file name (e.g. "mlnn1hl_script").
if SP_RUN_TIMES == 1:
    all_model_file_name = SP_LOG_FILENAME
else:
    all_model_file_name = str(splitext(basename(realpath(__file__)))[0])


def train_model(item):
    """Build one Mlnn1HL model from a single hyper-parameter combination and run it."""
    root_base_paras = {
        "dataset": dataset,
        "feature_size": feature_size,
        "data_idx": SP_DATA_SPLIT_INDEX,
        "sliding": item["sliding"],
        "multi_output": multi_output,
        "output_idx": output_index,
        "method_statistic": SP_PREPROCESSING_METHOD,
        "log_filename": all_model_file_name,
        "n_runs": SP_RUN_TIMES,  # 1, or more for a stability test
        "path_save_result": SP_PATH_SAVE_BASE + SP_DATA_FILENAME[loop] + "/",
        "draw": SP_DRAW,
        "print_train": SP_PRINT_TRAIN,  # 0: silent, 1: full detail, 2: short version
    }
    paras_name = "hs_{}-ep_{}-bs_{}-lr_{}-ac_{}-op_{}-lo_{}".format(
        item["hidden_sizes"], item["epoch"], item["batch_size"], item["learning_rate"],
        item["activations"], item["optimizer"], item["loss"])
    # Forward the network hyper-parameters plus the derived run name.
    root_mlnn_paras = {key: item[key] for key in
                      ("hidden_sizes", "epoch", "batch_size", "learning_rate",
                       "activations", "optimizer", "loss")}
    root_mlnn_paras["paras_name"] = paras_name
    Mlnn1HL(root_base_paras=root_base_paras, root_mlnn_paras=root_mlnn_paras)._running__()


for _ in range(SP_RUN_TIMES):
    for loop in range(len(SP_DATA_FILENAME)):
        dataset = load_dataset(SP_LOAD_DATA_FROM + SP_DATA_FILENAME[loop], cols=SP_DATA_COLS[loop])
        feature_size = len(SP_DATA_COLS[loop])
        multi_output = SP_DATA_MULTI_OUTPUT[loop]
        output_index = SP_OUTPUT_INDEX[loop]
        for item in ParameterGrid(param_grid):  # every hyper-parameter combination
            train_model(item)
"DE_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"]) 37 | 38 | def _training__(self): 39 | md = DE.BaseDE(self._objective_function__, self.problem_size, self.domain_range, self.print_train, self.epoch, self.pop_size, self.wf, self.cr) 40 | self.solution, self.best_fit, self.loss_train = md._train__() 41 | 42 | 43 | class PsoMlnn(RootHybridMlnn): 44 | def __init__(self, root_base_paras=None, root_hybrid_paras=None, pso_paras=None): 45 | RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras) 46 | self.epoch = pso_paras["epoch"] 47 | self.pop_size = pso_paras["pop_size"] 48 | self.c1 = pso_paras["c_minmax"][0] 49 | self.c2 = pso_paras["c_minmax"][1] 50 | self.w_min = pso_paras["w_minmax"][0] 51 | self.w_max = pso_paras["w_minmax"][1] 52 | self.filename = "PSO_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"]) 53 | 54 | def _training__(self): 55 | md = PSO.BasePSO(self._objective_function__, self.problem_size, self.domain_range, self.print_train, 56 | self.epoch, self.pop_size, self.c1, self.c2, self.w_min, self.w_max) 57 | self.solution, self.best_fit, self.loss_train = md._train__() 58 | 59 | 60 | class BfoMlnn(RootHybridMlnn): 61 | def __init__(self, root_base_paras=None, root_hybrid_paras=None, bfo_paras=None): 62 | RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras) 63 | self.pop_size = bfo_paras["pop_size"] 64 | self.Ci = bfo_paras["Ci"] 65 | self.Ped = bfo_paras["Ped"] 66 | self.Ns = bfo_paras["Ns"] 67 | self.Ned = bfo_paras["Ned"] 68 | self.Nre = bfo_paras["Nre"] 69 | self.Nc = bfo_paras["Nc"] 70 | self.attract_repels = bfo_paras["attract_repels"] 71 | self.filename = "BFO_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"]) 72 | 73 | def _training__(self): 74 | md = BFO.BaseBFO(self._objective_function__, self.problem_size, self.domain_range, self.print_train, 75 | self.pop_size, self.Ci, self.Ped, self.Ns, self.Ned, self.Nre, 
self.Nc, self.attract_repels) 76 | self.solution, self.best_fit, self.loss_train = md._train__() 77 | 78 | 79 | class ABfoLSMlnn(RootHybridMlnn): 80 | def __init__(self, root_base_paras=None, root_hybrid_paras=None, abfols_paras=None): 81 | RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras) 82 | self.epoch = abfols_paras["epoch"] 83 | self.pop_size = abfols_paras["pop_size"] 84 | self.Ci = abfols_paras["Ci"] 85 | self.Ped = abfols_paras["Ped"] 86 | self.Ns = abfols_paras["Ns"] 87 | self.N_minmax = abfols_paras["N_minmax"] 88 | self.filename = "ABFOLS_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"]) 89 | 90 | def _training__(self): 91 | md = BFO.ABFOLS(self._objective_function__, self.problem_size, self.domain_range, self.print_train, 92 | self.epoch, self.pop_size, self.Ci, self.Ped, self.Ns, self.N_minmax) 93 | self.solution, self.best_fit, self.loss_train = md._train__() 94 | 95 | 96 | class CroMlnn(RootHybridMlnn): 97 | def __init__(self, root_base_paras=None, root_hybrid_paras=None, cro_paras=None): 98 | RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras) 99 | self.epoch = cro_paras["epoch"] 100 | self.pop_size = cro_paras["pop_size"] 101 | self.po = cro_paras["po"] 102 | self.Fb = cro_paras["Fb"] 103 | self.Fa = cro_paras["Fa"] 104 | self.Fd = cro_paras["Fd"] 105 | self.Pd = cro_paras["Pd"] 106 | self.G = cro_paras["G"] 107 | self.GCR = cro_paras["GCR"] 108 | self.k = cro_paras["k"] 109 | self.filename = "CRO_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"]) 110 | 111 | def _training__(self): 112 | md = CRO.BaseCRO(self._objective_function__, self.problem_size, self.domain_range, self.print_train, 113 | self.epoch, self.pop_size, self.po, self.Fb, self.Fa, self.Fd, self.Pd, self.G, self.GCR, self.k) 114 | self.solution, self.best_fit, self.loss_train = md._train__() 115 | 116 | 117 | class OCroMlnn(RootHybridMlnn): 118 | def __init__(self, 
root_base_paras=None, root_hybrid_paras=None, ocro_paras=None): 119 | RootHybridMlnn.__init__(self, root_base_paras, root_hybrid_paras) 120 | self.epoch = ocro_paras["epoch"] 121 | self.pop_size = ocro_paras["pop_size"] 122 | self.po = ocro_paras["po"] 123 | self.Fb = ocro_paras["Fb"] 124 | self.Fa = ocro_paras["Fa"] 125 | self.Fd = ocro_paras["Fd"] 126 | self.Pd = ocro_paras["Pd"] 127 | self.G = ocro_paras["G"] 128 | self.GCR = ocro_paras["GCR"] 129 | self.k = ocro_paras["k"] 130 | self.restart_count = ocro_paras["restart_count"] 131 | self.filename = "OCRO_MLNN-sliding_{}-{}".format(root_base_paras["sliding"], root_hybrid_paras["paras_name"]) 132 | 133 | def _training__(self): 134 | md = CRO.OCRO(self._objective_function__, self.problem_size, self.domain_range, self.print_train, 135 | self.epoch, self.pop_size, self.po, self.Fb, self.Fa, self.Fd, self.Pd, self.G, self.GCR, self.k, self.restart_count) 136 | self.solution, self.best_fit, self.loss_train = md._train__() 137 | 138 | 139 | -------------------------------------------------------------------------------- /model/main/traditional_ffnn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ------------------------------------------------------------------------------------------------------% 3 | # Created by "Thieu Nguyen" at 01:51, 29/03/2020 % 4 | # % 5 | # Email: nguyenthieu2102@gmail.com % 6 | # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % 7 | # Github: https://github.com/thieunguyen5991 % 8 | # -------------------------------------------------------------------------------------------------------% 9 | 10 | from keras.models import Sequential 11 | from keras.layers import Dense 12 | from model.root.traditional.root_mlnn import RootMlnn 13 | 14 | 15 | class Mlnn1HL(RootMlnn): 16 | def __init__(self, root_base_paras=None, root_mlnn_paras=None): 17 | RootMlnn.__init__(self, root_base_paras, root_mlnn_paras) 18 | self.filename 
= "MLNN-1H-sliding_{}-{}".format(root_base_paras["sliding"], root_mlnn_paras["paras_name"]) 19 | 20 | def _training__(self): 21 | self.model = Sequential() 22 | self.model.add(Dense(units=self.hidden_sizes[0], input_dim=self.X_train.shape[1], activation=self.activations[0])) 23 | self.model.add(Dense(1, activation=self.activations[1])) 24 | self.model.compile(loss=self.loss, optimizer=self.optimizer) 25 | ml = self.model.fit(self.X_train, self.y_train, epochs=self.epoch, batch_size=self.batch_size, verbose=self.print_train) 26 | self.loss_train = ml.history["loss"] 27 | 28 | 29 | class Mlnn2HL(RootMlnn): 30 | def __init__(self, root_base_paras=None, root_mlnn_paras=None): 31 | RootMlnn.__init__(self, root_base_paras, root_mlnn_paras) 32 | self.filename = "MLNN-2H-sliding_{}-{}".format(root_base_paras["sliding"], root_mlnn_paras["paras_name"]) 33 | 34 | def _training__(self): 35 | self.model = Sequential() 36 | self.model.add(Dense(self.hidden_sizes[0], input_dim=self.X_train.shape[1], activation=self.activations[0])) 37 | self.model.add(Dense(self.hidden_sizes[1], activation=self.activations[1])) 38 | self.model.add(Dense(1, activation=self.activations[2])) 39 | self.model.compile(loss=self.loss, optimizer=self.optimizer) 40 | ml = self.model.fit(self.X_train, self.y_train, epochs=self.epoch, batch_size=self.batch_size, verbose=self.print_train) 41 | self.loss_train = ml.history["loss"] 42 | -------------------------------------------------------------------------------- /model/main/traditional_rnn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ------------------------------------------------------------------------------------------------------% 3 | # Created by "Thieu Nguyen" at 02:51, 29/03/2020 % 4 | # % 5 | # Email: nguyenthieu2102@gmail.com % 6 | # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % 7 | # Github: https://github.com/thieunguyen5991 % 8 | # 
from keras.models import Sequential
from keras.layers import Dense, LSTM, GRU, Dropout
from model.root.traditional.root_rnn import RootRnn


class Rnn1HL(RootRnn):
    """Single-hidden-layer recurrent model (LSTM cell + dropout), Keras backend."""

    def __init__(self, root_base_paras=None, root_rnn_paras=None):
        RootRnn.__init__(self, root_base_paras, root_rnn_paras)
        self.filename = "RNN-1HL-sliding_{}-{}".format(root_base_paras["sliding"], root_rnn_paras["paras_name"])

    def _training__(self):
        # Architecture: LSTM(h0) -> Dropout -> Dense(1)
        network = Sequential()
        network.add(LSTM(units=self.hidden_sizes[0], activation=self.activations[0],
                         input_shape=(self.X_train.shape[1], 1)))
        network.add(Dropout(self.dropouts[0]))
        network.add(Dense(units=1, activation=self.activations[1]))
        network.compile(loss=self.loss, optimizer=self.optimizer)
        history = network.fit(self.X_train, self.y_train, epochs=self.epoch,
                              batch_size=self.batch_size, verbose=self.print_train)
        self.model = network
        self.loss_train = history.history["loss"]
class Lstm1HL(RootRnn):
    """Single-hidden-layer LSTM (no dropout), Keras backend."""

    def __init__(self, root_base_paras=None, root_rnn_paras=None):
        RootRnn.__init__(self, root_base_paras, root_rnn_paras)
        self.filename = "LSTM-1HL-sliding_{}-{}".format(root_base_paras["sliding"], root_rnn_paras["paras_name"])

    def _training__(self):
        # Architecture: LSTM(h0) -> Dense(1); input_shape=(None, 1) accepts any sequence length.
        network = Sequential()
        network.add(LSTM(units=self.hidden_sizes[0], input_shape=(None, 1), activation=self.activations[0]))
        network.add(Dense(units=1, activation=self.activations[1]))
        network.compile(loss=self.loss, optimizer=self.optimizer)
        history = network.fit(self.X_train, self.y_train, epochs=self.epoch,
                              batch_size=self.batch_size, verbose=self.print_train)
        self.model = network
        self.loss_train = history.history["loss"]


class Lstm2HL(RootRnn):
    """Two-hidden-layer stacked LSTM, Keras backend."""

    def __init__(self, root_base_paras=None, root_rnn_paras=None):
        RootRnn.__init__(self, root_base_paras, root_rnn_paras)
        self.filename = "LSTM-2HL-sliding_{}-{}".format(root_base_paras["sliding"], root_rnn_paras["paras_name"])

    def _training__(self):
        # Architecture: LSTM(h0, seq) -> LSTM(h1) -> Dense(1)
        network = Sequential()
        network.add(LSTM(units=self.hidden_sizes[0], return_sequences=True,
                         input_shape=(None, 1), activation=self.activations[0]))
        network.add(LSTM(units=self.hidden_sizes[1], activation=self.activations[1]))
        network.add(Dense(units=1, activation=self.activations[2]))
        network.compile(loss=self.loss, optimizer=self.optimizer)
        history = network.fit(self.X_train, self.y_train, epochs=self.epoch,
                              batch_size=self.batch_size, verbose=self.print_train)
        self.model = network
        self.loss_train = history.history["loss"]
"GRU-1HL-sliding_{}-{}".format(root_base_paras["sliding"], root_rnn_paras["paras_name"]) 84 | 85 | def _training__(self): 86 | # The GRU architecture 87 | self.model = Sequential() 88 | self.model.add(GRU(units=self.hidden_sizes[0], input_shape=(self.X_train.shape[1], 1), activation=self.activations[0])) 89 | self.model.add(Dropout(self.dropouts[0])) 90 | self.model.add(Dense(units=1, activation=self.activations[1])) 91 | self.model.compile(loss=self.loss, optimizer=self.optimizer) 92 | ml = self.model.fit(self.X_train, self.y_train, epochs=self.epoch, batch_size=self.batch_size, verbose=self.print_train) 93 | self.loss_train = ml.history["loss"] 94 | 95 | 96 | class Gru2HL(RootRnn): 97 | def __init__(self, root_base_paras=None, root_rnn_paras=None): 98 | RootRnn.__init__(self, root_base_paras, root_rnn_paras) 99 | self.filename = "GRU-2HL-sliding_{}-{}".format(root_base_paras["sliding"], root_rnn_paras["paras_name"]) 100 | 101 | def _training__(self): 102 | # The GRU architecture 103 | self.model = Sequential() 104 | self.model.add(GRU(units=self.hidden_sizes[0], return_sequences=True, input_shape=(self.X_train.shape[1], 1), activation=self.activations[0])) 105 | self.model.add(Dropout(self.dropouts[0])) 106 | self.model.add(GRU(units=self.hidden_sizes[1], activation=self.activations[1])) 107 | self.model.add(Dropout(self.dropouts[1])) 108 | self.model.add(Dense(units=1, activation=self.activations[2])) 109 | self.model.compile(loss=self.loss, optimizer=self.optimizer) 110 | ml = self.model.fit(self.X_train, self.y_train, epochs=self.epoch, batch_size=self.batch_size, verbose=self.print_train) 111 | self.loss_train = ml.history["loss"] 112 | -------------------------------------------------------------------------------- /model/root/hybrid/root_hybrid_mlnn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 
from sklearn.metrics import mean_squared_error
from numpy import reshape, add, matmul
from time import time
from model.root.root_base import RootBase
import utils.MathUtil as my_math


class RootHybridMlnn(RootBase):
    """
    Root of all hybrid multi-layer neural networks (meta-heuristics + MLNN).

    The network weights (w1, b1, w2, b2) are flattened into one solution vector
    that the meta-heuristic optimizes; _get_model__ unpacks that vector back
    into the weight/bias matrices.
    """

    def __init__(self, root_base_paras=None, root_hybrid_paras=None):
        RootBase.__init__(self, root_base_paras)
        self.domain_range = root_hybrid_paras["domain_range"]
        self.activations = root_hybrid_paras["activations"]
        # hidden_size is a (value, flag) pair: truthy flag -> use the explicit
        # width, otherwise derive it from the window and feature sizes.
        if root_hybrid_paras["hidden_size"][1]:
            self.hidden_size = root_hybrid_paras["hidden_size"][0]
        else:
            self.hidden_size = 2 * root_base_paras["sliding"] * root_base_paras["feature_size"] + 1
        self.problem_size, self.epoch = None, None

    def _setting__(self):
        """Resolve activation callables and compute the flattened problem size."""
        self._activation1__ = getattr(my_math, self.activations[0])
        self._activation2__ = getattr(my_math, self.activations[1])

        self.input_size, self.output_size = self.X_train.shape[1], self.y_train.shape[1]
        self.w1_size = self.input_size * self.hidden_size
        self.b1_size = self.hidden_size
        self.w2_size = self.hidden_size * self.output_size
        self.b2_size = self.output_size
        self.problem_size = self.w1_size + self.b1_size + self.w2_size + self.b2_size

    def _forecasting__(self):
        """Forward-pass the test set; return (true unscaled, pred unscaled, true scaled, pred scaled)."""
        hidd = self._activation1__(add(matmul(self.X_test, self.model["w1"]), self.model["b1"]))
        y_pred = self._activation2__(add(matmul(hidd, self.model["w2"]), self.model["b2"]))
        real_inverse = self.scaler.inverse_transform(self.y_test)
        pred_inverse = self.scaler.inverse_transform(reshape(y_pred, self.y_test.shape))
        return real_inverse, pred_inverse, self.y_test, y_pred

    def _running__(self):
        """Full pipeline: preprocess -> meta-heuristic training -> forecast -> save."""
        self.time_system = time()
        self._preprocessing_2d__()
        self._setting__()
        self.time_total_train = time()
        self._training__()
        self.model = self._get_model__(self.solution)
        self.time_total_train = round(time() - self.time_total_train, 4)
        # BUG FIX: some optimizers (e.g. BfoMlnn) never set self.epoch, which
        # left it None and crashed the division below with a TypeError. Fall
        # back to the number of recorded training-loss entries (at least 1).
        n_epochs = self.epoch if self.epoch else max(len(self.loss_train), 1)
        self.time_epoch = round(self.time_total_train / n_epochs, 4)
        self.time_predict = time()
        y_true_unscaled, y_pred_unscaled, y_true_scaled, y_pred_scaled = self._forecasting__()
        self.time_predict = round(time() - self.time_predict, 8)
        self.time_system = round(time() - self.time_system, 4)
        self._save_results__(y_true_unscaled, y_pred_unscaled, y_true_scaled, y_pred_scaled, self.loss_train, self.n_runs)

    ## Helper functions
    def _get_model__(self, individual=None):
        """Unpack a flat solution vector into the MLNN weight/bias matrices."""
        w1 = reshape(individual[:self.w1_size], (self.input_size, self.hidden_size))
        b1 = reshape(individual[self.w1_size:self.w1_size + self.b1_size], (-1, self.hidden_size))
        w2 = reshape(individual[self.w1_size + self.b1_size: self.w1_size + self.b1_size + self.w2_size], (self.hidden_size, self.output_size))
        b2 = reshape(individual[self.w1_size + self.b1_size + self.w2_size:], (-1, self.output_size))
        return {"w1": w1, "b1": b1, "w2": w2, "b2": b2}

    def _objective_function__(self, solution=None):
        """Fitness of one candidate: train-set MSE of the network it encodes."""
        md = self._get_model__(solution)
        hidd = self._activation1__(add(matmul(self.X_train, md["w1"]), md["b1"]))
        y_pred = self._activation2__(add(matmul(hidd, md["w2"]), md["b2"]))
        # sklearn's signature is (y_true, y_pred); MSE is symmetric so the old
        # swapped order returned the same value, but keep it canonical.
        return mean_squared_error(self.y_train, y_pred)
#!/usr/bin/env python
# Created by "Thieu Nguyen" at 18:10, 06/04/2020
# Email: nguyenthieu2102@gmail.com

from sklearn.preprocessing import MinMaxScaler
from utils.PreprocessingUtil import TimeSeries
from utils.MeasureUtil import MeasureTimeSeries
from utils.IOUtil import save_all_models_to_csv, save_prediction_to_csv, save_loss_train_to_csv
from utils.GraphUtil import draw_predict_with_error


class RootBase:
    """
    Root of all networks.

    Holds the dataset/split configuration, runs the time-series windowing, and
    persists measures/predictions/plots after a subclass has trained and
    forecast. Subclasses override _forecasting__, _training__ and _running__.
    """

    def __init__(self, root_base_paras=None):
        # Data and split configuration.
        self.dataset = root_base_paras["dataset"]
        self.data_idx = root_base_paras["data_idx"]  # train/test split spec; consumed by TimeSeries -- TODO confirm exact shape
        self.sliding = root_base_paras["sliding"]  # sliding-window width
        self.output_idx = root_base_paras["output_idx"]
        self.method_statistic = root_base_paras["method_statistic"]
        self.scaler = MinMaxScaler()

        # Logging / result-persistence configuration.
        self.n_runs = root_base_paras["n_runs"]
        self.path_save_result = root_base_paras["path_save_result"]
        self.log_filename = root_base_paras["log_filename"]
        self.multi_output = root_base_paras["multi_output"]
        self.draw = root_base_paras["draw"]
        self.print_train = root_base_paras["print_train"]  # 0: nothing, 1: full detail, 2: short version

        # Filled in later by preprocessing / training / running.
        self.model, self.solution, self.loss_train, self.filename = None, None, [], None
        self.X_train, self.y_train, self.X_valid, self.y_valid, self.X_test, self.y_test = None, None, None, None, None, None
        self.time_total_train, self.time_epoch, self.time_predict, self.time_system = None, None, None, None

    def _preprocessing_2d__(self):
        """Window the series into 2-D (samples, features) arrays for dense networks."""
        ts = TimeSeries(self.dataset, self.data_idx, self.sliding, self.output_idx, self.method_statistic, self.scaler)
        self.X_train, self.y_train, self.X_valid, self.y_valid, self.X_test, self.y_test, self.scaler = ts._preprocessing_2d__()

    def _preprocessing_3d__(self):
        """Window the series into 3-D arrays for recurrent networks."""
        ts = TimeSeries(self.dataset, self.data_idx, self.sliding, self.output_idx, self.method_statistic, self.scaler)
        self.X_train, self.y_train, self.X_valid, self.y_valid, self.X_test, self.y_test, self.scaler = ts._preprocessing_3d__()

    def _save_results__(self, y_true=None, y_pred=None, y_true_scaled=None, y_pred_scaled=None, loss_train=None, n_runs=1):
        """Compute error measures and persist metrics/predictions/loss/plots.

        Multi-output branch assumes exactly two output columns (CPU, RAM);
        single-output branch reports both scaled and unscaled measures.
        Predictions/plots are only written for single runs (n_runs == 1).
        """
        if self.multi_output:
            # Per-column ("raw_values") measures over the two outputs.
            measure_scaled = MeasureTimeSeries(y_true_scaled, y_pred_scaled, "raw_values", number_rounding=4)
            measure_scaled._fit__()
            data1 = "CPU_"
            data2 = "RAM_"
            item = {'model_name': self.filename, 'total_time_train': self.time_total_train, 'time_epoch': self.time_epoch,
                    'time_predict': self.time_predict, 'time_system': self.time_system,
                    data1 + 'scaled_EV': measure_scaled.score_ev[0], data1 + 'scaled_MSLE': measure_scaled.score_msle[0],
                    data1 + 'scaled_R2': measure_scaled.score_r2[0], data1 + 'scaled_MAE': measure_scaled.score_mae[0],
                    data1 + 'scaled_MSE': measure_scaled.score_mse[0], data1 + 'scaled_RMSE': measure_scaled.score_rmse[0],
                    data1 + 'scaled_MAPE': measure_scaled.score_mape[0], data1 + 'scaled_SMAPE': measure_scaled.score_smape[0],

                    data2 + 'scaled_EV': measure_scaled.score_ev[1], data2 + 'scaled_MSLE': measure_scaled.score_msle[1],
                    data2 + 'scaled_R2': measure_scaled.score_r2[1], data2 + 'scaled_MAE': measure_scaled.score_mae[1],
                    data2 + 'scaled_MSE': measure_scaled.score_mse[1], data2 + 'scaled_RMSE': measure_scaled.score_rmse[1],
                    data2 + 'scaled_MAPE': measure_scaled.score_mape[1], data2 + 'scaled_SMAPE': measure_scaled.score_smape[1]}

            if n_runs == 1:
                save_prediction_to_csv(y_true[:, 0:1], y_pred[:, 0:1], self.filename, self.path_save_result + data1)
                save_prediction_to_csv(y_true[:, 1:2], y_pred[:, 1:2], self.filename, self.path_save_result + data2)
                save_loss_train_to_csv(loss_train, self.filename, self.path_save_result + "Error-")

                if self.draw:
                    # NOTE(review): this branch passes a leading figure-number arg
                    # that the single-output branch below does not — verify the
                    # expected draw_predict_with_error signature in GraphUtil.
                    draw_predict_with_error(1, [y_true[:, 0:1], y_pred[:, 0:1]], [measure_scaled.score_rmse[0], measure_scaled.score_mae[0]], self.filename,
                                            self.path_save_result + data1)
                    draw_predict_with_error(2, [y_true[:, 1:2], y_pred[:, 1:2]], [measure_scaled.score_rmse[1], measure_scaled.score_mae[1]], self.filename,
                                            self.path_save_result + data2)
                if self.print_train:
                    print('Predict DONE - CPU - RMSE: %f, RAM - RMSE: %f' % (measure_scaled.score_rmse[0], measure_scaled.score_rmse[1]))
            save_all_models_to_csv(item, self.log_filename, self.path_save_result)

        else:
            measure_scaled = MeasureTimeSeries(y_true_scaled, y_pred_scaled, None, number_rounding=4)
            measure_scaled._fit__()
            measure_unscaled = MeasureTimeSeries(y_true, y_pred, None, number_rounding=4)
            measure_unscaled._fit__()

            item = {'model_name': self.filename, 'total_time_train': self.time_total_train, 'time_epoch': self.time_epoch,
                    'time_predict': self.time_predict, 'time_system': self.time_system,
                    'scaled_EV': measure_scaled.score_ev, 'scaled_MSLE': measure_scaled.score_msle, 'scaled_R2': measure_scaled.score_r2,
                    'scaled_MAE': measure_scaled.score_mae, 'scaled_MSE': measure_scaled.score_mse, 'scaled_RMSE': measure_scaled.score_rmse,
                    'scaled_MAPE': measure_scaled.score_mape, 'scaled_SMAPE': measure_scaled.score_smape,
                    'unscaled_EV': measure_unscaled.score_ev, 'unscaled_MSLE': measure_unscaled.score_msle, 'unscaled_R2': measure_unscaled.score_r2,
                    'unscaled_MAE': measure_unscaled.score_mae, 'unscaled_MSE': measure_unscaled.score_mse, 'unscaled_RMSE': measure_unscaled.score_rmse,
                    'unscaled_MAPE': measure_unscaled.score_mape, 'unscaled_SMAPE': measure_unscaled.score_smape}

            if n_runs == 1:
                save_prediction_to_csv(y_true, y_pred, self.filename, self.path_save_result)
                save_loss_train_to_csv(loss_train, self.filename, self.path_save_result + "Error-")
                if self.draw:
                    draw_predict_with_error([y_true, y_pred], [measure_unscaled.score_rmse, measure_unscaled.score_mae], self.filename, self.path_save_result)
                if self.print_train:
                    print('Predict DONE - RMSE: %f, MAE: %f' % (measure_unscaled.score_rmse, measure_unscaled.score_mae))

            save_all_models_to_csv(item, self.log_filename, self.path_save_result)

    def _forecasting__(self):
        # Implemented by subclasses.
        pass

    def _training__(self):
        # Implemented by subclasses.
        pass

    def _running__(self):
        # Implemented by subclasses.
        pass
    def _forecasting__(self):
        """Evaluate the trained model on the test set.

        Returns a 4-tuple: (y_true unscaled, y_pred unscaled, y_true scaled, y_pred scaled).
        """
        # Evaluate models on the test set
        y_pred = self.model.predict(self.X_test)
        pred_inverse = self.scaler.inverse_transform(y_pred)
        real_inverse = self.scaler.inverse_transform(self.y_test)
        return real_inverse, pred_inverse, self.y_test, y_pred

    def _running__(self):
        """End-to-end run: preprocess, train (timed), forecast (timed), save results."""
        self.time_system = time.time()
        self._preprocessing_2d__()  # dense nets take 2-D (samples, features) input
        self.time_total_train = time.time()
        self._training__()
        self.time_total_train = round(time.time() - self.time_total_train, 4)
        self.time_epoch = round(self.time_total_train / self.epoch, 4)  # average wall-time per epoch
        self.time_predict = time.time()
        y_true_unscaled, y_pred_unscaled, y_true_scaled, y_pred_scaled = self._forecasting__()
        self.time_predict = round(time.time() - self.time_predict, 8)
        self.time_system = round(time.time() - self.time_system, 4)
        self._save_results__(y_true_unscaled, y_pred_unscaled, y_true_scaled, y_pred_scaled, self.loss_train, self.n_runs)
class RootRnn(RootBase):
    """Shared root for recurrent models (RNN/LSTM with one hidden layer).

    Unpacks the network hyper-parameters from ``root_rnn_paras`` and drives the
    common pipeline: preprocess (3-D sliding windows) -> train -> forecast -> save.
    """

    def __init__(self, root_base_paras=None, root_rnn_paras=None):
        RootBase.__init__(self, root_base_paras)
        # Copy the scalar hyper-parameters straight onto the instance.
        for key in ("epoch", "batch_size", "learning_rate", "activations", "optimizer", "loss", "dropouts"):
            setattr(self, key, root_rnn_paras[key])
        sizes = root_rnn_paras["hidden_sizes"]
        if sizes[-1]:
            # Last element is a truthy flag (e.g. (10, True)): explicit sizes were given.
            self.hidden_sizes = sizes[:-1]
        else:
            # Flag is falsy: derive one size per hidden layer from the flattened
            # input width (sliding window length * number of features).
            num_hid = len(sizes) - 1
            base_width = root_base_paras["sliding"] * root_base_paras["feature_size"]
            self.hidden_sizes = [(num_hid - i) * base_width + 1 for i in range(num_hid)]

    def _forecasting__(self):
        """Predict on the test split; return (true, pred) pairs both unscaled and scaled."""
        scaled_pred = self.model.predict(self.X_test)
        unscaled_pred = self.scaler.inverse_transform(scaled_pred)
        unscaled_true = self.scaler.inverse_transform(self.y_test)
        return unscaled_true, unscaled_pred, self.y_test, scaled_pred

    def _running__(self):
        """Run the whole experiment, recording wall-clock timings for each stage."""
        self.time_system = time.time()
        self._preprocessing_3d__()

        t_train = time.time()
        self._training__()
        self.time_total_train = round(time.time() - t_train, 4)
        self.time_epoch = round(self.time_total_train / self.epoch, 4)

        t_predict = time.time()
        y_true_unscaled, y_pred_unscaled, y_true_scaled, y_pred_scaled = self._forecasting__()
        self.time_predict = round(time.time() - t_predict, 8)

        self.time_system = round(time.time() - self.time_system, 4)
        self._save_results__(y_true_unscaled, y_pred_unscaled, y_true_scaled, y_pred_scaled, self.loss_train, self.n_runs)
------------------------------------------------------------------------------------------------------% 3 | # Created by "Thieu Nguyen" at 00:51, 29/03/2020 % 4 | # % 5 | # Email: nguyenthieu2102@gmail.com % 6 | # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % 7 | # Github: https://github.com/thieunguyen5991 % 8 | # -------------------------------------------------------------------------------------------------------% 9 | 10 | from os.path import splitext, basename, realpath 11 | from sklearn.model_selection import ParameterGrid 12 | from model.main.hybrid_mlnn import OCroMlnn 13 | from utils.SettingPaper import ocro_mlnn_paras_final as param_grid 14 | from utils.SettingPaper import * 15 | from utils.IOUtil import load_dataset 16 | 17 | if SP_RUN_TIMES == 1: 18 | all_model_file_name = SP_LOG_FILENAME 19 | else: # If runs with more than 1, like stability test --> name of the models ==> such as: rnn1hl.csv 20 | all_model_file_name = str(splitext(basename(realpath(__file__)))[0]) 21 | 22 | 23 | def train_model(item): 24 | root_base_paras = { 25 | "dataset": dataset, 26 | "feature_size": feature_size, 27 | "data_idx": SP_DATA_SPLIT_INDEX_2, 28 | "sliding": item["sliding"], 29 | "multi_output": multi_output, 30 | "output_idx": output_index, 31 | "method_statistic": SP_PREPROCESSING_METHOD, 32 | "log_filename": all_model_file_name, 33 | "n_runs": SP_RUN_TIMES, # 1 or others 34 | "path_save_result": SP_PATH_SAVE_BASE + SP_DATA_FILENAME[loop] + "/", 35 | "draw": SP_DRAW, 36 | "print_train": SP_PRINT_TRAIN, # 0: nothing, 1 : full detail, 2: short version 37 | } 38 | paras_name = "hs_{}-ep_{}-act_{}-ps_{}-po_{}-Fb_{}-Fa_{}-Fd_{}-Pd_{}-G_{}-GCR_{}-k_{}-rc_{}".format(item["hidden_size"], item["epoch"], item["activations"], 39 | item["pop_size"], item["po"], item["Fb"], item["Fa"], item["Fd"], item["Pd"], item["G"], item["GCR"], item["k"], item["restart_count"]) 40 | root_hybrid_paras = { 41 | "hidden_size": item["hidden_size"], "activations": 
item["activations"], "epoch": item["epoch"], "domain_range": item["domain_range"], 42 | "paras_name": paras_name 43 | } 44 | ocro_paras = { 45 | "epoch": item["epoch"], "pop_size": item["pop_size"], "po": item["po"], "Fb": item["Fb"], "Fa": item["Fa"], 46 | "Fd": item["Fd"], "Pd": item["Pd"], "G": item["G"], "GCR": item["GCR"], "k": item["k"], "restart_count": item["restart_count"] 47 | } 48 | md = OCroMlnn(root_base_paras=root_base_paras, root_hybrid_paras=root_hybrid_paras, ocro_paras=ocro_paras) 49 | md._running__() 50 | 51 | 52 | for _ in range(SP_RUN_TIMES): 53 | for loop in range(len(SP_DATA_FILENAME)): 54 | filename = SP_LOAD_DATA_FROM + SP_DATA_FILENAME[loop] 55 | dataset = load_dataset(filename, cols=SP_DATA_COLS[loop]) 56 | feature_size = len(SP_DATA_COLS[loop]) 57 | multi_output = SP_DATA_MULTI_OUTPUT[loop] 58 | output_index = SP_OUTPUT_INDEX[loop] 59 | # Create combination of params. 60 | for item in list(ParameterGrid(param_grid)): 61 | train_model(item) 62 | -------------------------------------------------------------------------------- /project_note.md: -------------------------------------------------------------------------------- 1 | ## Temp 2 | 1. Accuracy 3 | ```code 4 | MLNN, RNN, LSTM, GA-MLNN, PSO-MLNN, ABFO-MLNN, CRO-MLNN, OCRO-MLNN 5 | ``` 6 | 7 | 2. Stability 8 | ```code 9 | MLNN, RNN, LSTM, GA-MLNN, PSO-MLNN, ABFO-MLNN, CRO-MLNN, OCRO-MLNN 10 | ``` 11 | 12 | # Project structure 13 | 1. General view class 14 | ![Our model](history/images/code/all_code_wrapper.png) 15 | 16 | 2. 
Details view class 17 | * root files 18 | 19 | ![](history/images/code/root_ann.png) ![](history/images/code/root_rnn.png) ![](history/images/code/root_hybrid_mlnn.png) 20 | 21 | * algorithm files 22 | 23 | ![](history/images/code/GA.png) ![](history/images/code/DE.png) ![](history/images/code/PSO.png) 24 | 25 | ![](history/images/code/CRO.png) ![](history/images/code/BFO.png) 26 | 27 | * main files 28 | 29 | ![Our model](history/images/code/hybrid_mlnn.png) 30 | 31 | ![Our model](history/images/code/neural_network.png) 32 | -------------------------------------------------------------------------------- /pso_mlnn_script.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ------------------------------------------------------------------------------------------------------% 3 | # Created by "Thieu Nguyen" at 00:51, 29/03/2020 % 4 | # % 5 | # Email: nguyenthieu2102@gmail.com % 6 | # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % 7 | # Github: https://github.com/thieunguyen5991 % 8 | # -------------------------------------------------------------------------------------------------------% 9 | 10 | from os.path import splitext, basename, realpath 11 | from sklearn.model_selection import ParameterGrid 12 | from model.main.hybrid_mlnn import PsoMlnn 13 | from utils.SettingPaper import pso_mlnn_paras_final as param_grid 14 | from utils.SettingPaper import * 15 | from utils.IOUtil import load_dataset 16 | 17 | if SP_RUN_TIMES == 1: 18 | all_model_file_name = SP_LOG_FILENAME 19 | else: # If runs with more than 1, like stability test --> name of the models ==> such as: rnn1hl.csv 20 | all_model_file_name = str(splitext(basename(realpath(__file__)))[0]) 21 | 22 | 23 | def train_model(item): 24 | root_base_paras = { 25 | "dataset": dataset, 26 | "feature_size": feature_size, 27 | "data_idx": SP_DATA_SPLIT_INDEX_2, 28 | "sliding": item["sliding"], 29 | "multi_output": multi_output, 30 | 
"output_idx": output_index, 31 | "method_statistic": SP_PREPROCESSING_METHOD, 32 | "log_filename": all_model_file_name, 33 | "n_runs": SP_RUN_TIMES, # 1 or others 34 | "path_save_result": SP_PATH_SAVE_BASE + SP_DATA_FILENAME[loop] + "/", 35 | "draw": SP_DRAW, 36 | "print_train": SP_PRINT_TRAIN, # 0: nothing, 1 : full detail, 2: short version 37 | } 38 | paras_name = "hs_{}-ep_{}-act_{}-ps_{}-pc_{}-pm_{}".format(item["hidden_size"], item["epoch"], item["activation"], item["pop_size"], item["c_minmax"], 39 | item["w_minmax"]) 40 | root_hybrid_paras = { 41 | "hidden_size": item["hidden_size"], "activations": item["activations"], "epoch": item["epoch"], "domain_range": item["domain_range"], 42 | "paras_name": paras_name 43 | } 44 | pso_paras = { 45 | "epoch": item["epoch"], "pop_size": item["pop_size"], "c_minmax": item["c_minmax"], "w_minmax": item["w_minmax"] 46 | } 47 | md = PsoMlnn(root_base_paras=root_base_paras, root_hybrid_paras=root_hybrid_paras, pso_paras=pso_paras) 48 | md._running__() 49 | 50 | 51 | for _ in range(SP_RUN_TIMES): 52 | for loop in range(len(SP_DATA_FILENAME)): 53 | filename = SP_LOAD_DATA_FROM + SP_DATA_FILENAME[loop] 54 | dataset = load_dataset(filename, cols=SP_DATA_COLS[loop]) 55 | feature_size = len(SP_DATA_COLS[loop]) 56 | multi_output = SP_DATA_MULTI_OUTPUT[loop] 57 | output_index = SP_OUTPUT_INDEX[loop] 58 | # Create combination of params. 
59 | for item in list(ParameterGrid(param_grid)): 60 | train_model(item) 61 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Efficient Time-series Forecasting using Neural Network and Opposition-based Coral Reefs Optimization 2 | 3 | [![GitHub release](https://img.shields.io/badge/release-2.0.0-yellow.svg)]() 4 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3742045.svg)](https://doi.org/10.5281/zenodo.3742045) 5 | [![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)](https://opensource.org/licenses/Apache-2.0) 6 | 7 | ## Dear friends and followers 8 | * I updated to repository to the newest version (version 2.0.0), which is very easy to read and reproduce. 9 | * All of our optimizers are (meta-heuristics) now deleted and taken the new one from my newest library: 10 | 11 | https://pypi.org/project/mealpy/ 12 | 13 | * If you use my code or library in your project, I would appreciate the cites: 14 | * Nguyen, T., Nguyen, T., Nguyen, B. M., & Nguyen, G. (2019). Efficient Time-Series Forecasting Using Neural Network and Opposition-Based Coral Reefs Optimization. International Journal of Computational Intelligence Systems, 12(2), 1144-1161. 15 | 16 | * Nguyen, T., Nguyen, B. M., & Nguyen, G. (2019, April). Building Resource Auto-scaler with Functional-Link Neural Network and Adaptive Bacterial Foraging Optimization. In International Conference on Theory and Applications of Models of Computation (pp. 501-517). Springer, Cham. 17 | 18 | * Nguyen, T., Tran, N., Nguyen, B. M., & Nguyen, G. (2018, November). A Resource Usage Prediction System Using Functional-Link and Genetic Algorithm Neural Network for Multivariate Cloud Metrics. In 2018 IEEE 11th Conference on Service-Oriented Computing and Applications (SOCA) (pp. 49-56). IEEE. 
19 | 20 | * If you want to know more about code, or want a pdf of both above paper, contact me: nguyenthieu2102@gmail.com 21 | 22 | 23 | ## How to read my repository 24 | 1. data: include formatted data 25 | 2. utils: Helped functions such as IO, Draw, Math, Settings (for all model and parameters), Preprocessing... 26 | 3. paper: include 2 main folders: 27 | * results: forecasting results of all models (3 folders inside) 28 | * final: final forecasting results (runs on server) 29 | * stability: final stability results(runs on server) 30 | 4. model: (2 folders) 31 | * root: (want to understand the code, read this classes first) 32 | * root_base.py: root for all models (traditional, hybrid and variants...) 33 | * traditional: root for all traditional models (inherit: root_base) 34 | * hybrid: root for all hybrid models (inherit: root_base) 35 | * main: (final models) 36 | * this classes will use those optimizer above and those root (traditional, hybrid) above 37 | * the running files (outside with the orginial folder: cro_mlnn_script.py, ...) will call this classes 38 | * the traditional models will use single file such as: traditional_ffnn, traditional_rnn,... 39 | * the hybrid models will use 2 files, example: hybrid_ffnn.py and GA.py (optimizer files) 40 | 41 | 42 | ## Notes 43 | 44 | 1. To improve the speed of Pycharm when opening (because Pycharm will indexing when opening), you should right click to 45 | paper and data folder => Mark Directory As => Excluded 46 | 47 | 2. How to run models? 48 | ```code 49 | 1. Before runs the models, make sure you clone this repository in your laptop: 50 | https://github.com/chasebk/code_ocro_mlnn 51 | 52 | 2. Then open it in your editor like Pycharm or Spider... 53 | 54 | 3. Now you need to create python environment using conda (assumpted that you have already had it). 
   Open a terminal:
        conda env create -n your_environment_name -f envs/env.yml    (go to the root project folder and create a new environment from the file envs/env.yml)

4. Now you can activate your environment and run the models
        conda activate your_environment_name      # First, activate your environment to get the needed libraries.
        python model_name

    For example:
        conda env create -n ai_env -f envs/env.yml
        conda activate ai_env
        python lstm1hl_script.py

5. My model names:

    1. MLNN (1 HL)  => mlnn1hl_script.py
    2. RNN (1 HL)   => rnn1hl_script.py
    3. LSTM (1 HL)  => lstm1hl_script.py
    4. GA-MLNN      => ga_mlnn_script.py
    5. PSO-MLNN     => pso_mlnn_script.py
    6. ABFO-MLNN    => abfo_mlnn_script.py
    7. CRO-MLNN     => cro_mlnn_script.py
    8. OCRO-MLNN    => ocro_mlnn_script.py

6. Multi-output meaning?

    cpu: input model would be cpu, output model would be cpu
    ram: same as cpu
    multi_cpu : input model would be cpu and ram, output model would be cpu
    multi_ram : input model would be cpu and ram, output model would be ram
    multi : input model would be cpu and ram, output model would be cpu and ram
```

### How to change model's parameters?
88 | 89 | ```code 90 | You can change the model's parameters in file: utils/SettingPaper.py 91 | 92 | For example: 93 | 94 | +) For traditional models: MLNN, RNN, LSTM 95 | 96 | ####: MLNN-1HL 97 | mlnn1hl_paras_final = { 98 | "sliding": [2, 5, 10], 99 | "hidden_sizes" : [(10, True) ], 100 | "activations": [("elu", "elu")], # 0: elu, 1:relu, 2:tanh, 3:sigmoid 101 | "learning_rate": [0.0001], 102 | "epoch": [1000], 103 | "batch_size": [128], 104 | "optimizer": ["adam"], # GradientDescentOptimizer, AdamOptimizer, AdagradOptimizer, AdadeltaOptimizer 105 | "loss": ["mse"] 106 | } 107 | 108 | - If you want to tune the parameters, you can adding more value in each parameters like this: 109 | 110 | - sliding: [1, 2, 3, 4] or you want just 1 parameter: [12] 111 | - hidden_sizes: [ (5, True), (10, True), (1000, True) ] or [ (14, True) ] 112 | - activations: [ ("elu", "relu"), ("elu", "tanh") ] or just: [ ("elu", "elu") ] 113 | - learning_rate: [0.1, 0.01, 0.001] or just: [0.1] 114 | .... 115 | 116 | 117 | + For hybrid models: GA_MLNN, PSO_MLNN, CRO_MLNN, OCRO_MLNN 118 | 119 | #### : GA-MLNN 120 | ga_mlnn_paras_final = { 121 | "sliding": [2, 5, 10], 122 | "hidden_size" : [(10, True) ], 123 | "activations": [(0, 0)], # 0: elu, 1:relu, 2:tanh, 3:sigmoid 124 | "train_valid_rate": [(0.6, 0.4)], 125 | 126 | "epoch": [1000], 127 | "pop_size": [100], # 100 -> 900 128 | "pc": [0.95], # 0.85 -> 0.97 129 | "pm": [0.025], # 0.005 -> 0.10 130 | "domain_range": [(-1, 1)] # lower and upper bound 131 | } 132 | 133 | - Same as traditional models. 134 | ``` 135 | 136 | ### Where is the results folder? 137 | ```code 138 | - Look at the running file, for example: ga_elm_script.py 139 | 140 | +) For 1-time runs (Only run 1 time for each model). 
141 | _There are 3 type of results file include: model_name.csv file, model_name.png file and Error-model_name.csv file 142 | _model_name.csv included: y_true and y_predict 143 | _model_name.png is visualized of: y_true and y_predict (test dataset) 144 | _Error-model_name.csv included errors of training dataset after epoch. 1st, 2nd column are: MSE errors 145 | 146 | => All 3 type of files above is automatically generated in folder which you can set in SettingPaper 147 | 148 | +) For stability runs (Run each model n-time with same parameters). 149 | Because in this test, we don't need to visualize the y_true and y_predict and also don't need to save y_true and y_predict 150 | So I just save n-time running in the same csv files in folder 151 | 152 | - Noted: 153 | 154 | + In the training set we use MSE. But for the testing set, we can use so much more error like: R2, MAPE, ... 155 | You can find the function code described it in file: 156 | model/root/root_base.py 157 | + _save_results__: 158 | ``` 159 | 160 | * Take a look at project_structure.md file. Describe how the project was built. 
161 | 162 | 163 | 164 | ### Publications 165 | * If you see our work is useful, please cite us as follows: 166 | ```code 167 | @article{Nguyen2019, 168 | title={Efficient Time-Series Forecasting Using Neural Network and Opposition-Based Coral Reefs Optimization}, 169 | author={Thieu Nguyen and Tu Nguyen and Binh Minh Nguyen and Giang Nguyen}, 170 | year={2019}, 171 | journal={International Journal of Computational Intelligence Systems}, 172 | volume={12}, 173 | issue={2}, 174 | pages={1144-1161}, 175 | issn={1875-6883}, 176 | url={https://doi.org/10.2991/ijcis.d.190930.003}, 177 | doi={https://doi.org/10.2991/ijcis.d.190930.003} 178 | } 179 | ``` 180 | 181 | * Link full PDF: 182 | => [PDF](https://download.atlantis-press.com/article/125921354.pdf) 183 | -------------------------------------------------------------------------------- /rnn1hl_script.py: -------------------------------------------------------------------------------- 1 | # !/usr/bin/env python 2 | # ------------------------------------------------------------------------------------------------------% 3 | # Created by "Thieu Nguyen" at 00:51, 29/03/2020 % 4 | # % 5 | # Email: nguyenthieu2102@gmail.com % 6 | # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % 7 | # Github: https://github.com/thieunguyen5991 % 8 | # -------------------------------------------------------------------------------------------------------% 9 | 10 | from os.path import splitext, basename, realpath 11 | from sklearn.model_selection import ParameterGrid 12 | from model.main.traditional_rnn import Rnn1HL 13 | from utils.SettingPaper import rnn1hl_paras_final as param_grid 14 | from utils.SettingPaper import * 15 | from utils.IOUtil import load_dataset 16 | 17 | if SP_RUN_TIMES == 1: 18 | all_model_file_name = SP_LOG_FILENAME 19 | else: # If runs with more than 1, like stability test --> name of the models ==> such as: rnn1hl.csv 20 | all_model_file_name = str(splitext(basename(realpath(__file__)))[0]) 21 | 22 | 23 
def train_model(item):
    """Train one RNN (1 hidden layer) configuration and persist its results.

    Reads the module-level names (dataset, feature_size, multi_output,
    output_index, loop) set by the driver loop below.
    """
    root_base_paras = {
        "dataset": dataset,
        "feature_size": feature_size,
        "data_idx": SP_DATA_SPLIT_INDEX,
        "sliding": item["sliding"],
        "multi_output": multi_output,
        "output_idx": output_index,
        "method_statistic": SP_PREPROCESSING_METHOD,
        "log_filename": all_model_file_name,
        "n_runs": SP_RUN_TIMES,  # 1 or others
        "path_save_result": SP_PATH_SAVE_BASE + SP_DATA_FILENAME[loop] + "/",
        "draw": SP_DRAW,
        "print_train": SP_PRINT_TRAIN,  # 0: nothing, 1: full detail, 2: short version
    }
    # Encode the tuned hyper-parameters into the result-file name.
    paras_name = "hs_{}-ep_{}-bs_{}-lr_{}-ac_{}-op_{}-lo_{}".format(
        item["hidden_sizes"], item["epoch"], item["batch_size"], item["learning_rate"],
        item["activations"], item["optimizer"], item["loss"])
    root_rnn_paras = {
        "hidden_sizes": item["hidden_sizes"], "epoch": item["epoch"],
        "batch_size": item["batch_size"], "learning_rate": item["learning_rate"],
        "activations": item["activations"], "optimizer": item["optimizer"],
        "loss": item["loss"], "dropouts": item["dropouts"],
        "paras_name": paras_name,
    }
    network = Rnn1HL(root_base_paras=root_base_paras, root_rnn_paras=root_rnn_paras)
    network._running__()


for _ in range(SP_RUN_TIMES):
    for loop in range(len(SP_DATA_FILENAME)):
        # Load the current dataset; train_model reads these module-level names.
        filename = SP_LOAD_DATA_FROM + SP_DATA_FILENAME[loop]
        dataset = load_dataset(filename, cols=SP_DATA_COLS[loop])
        feature_size = len(SP_DATA_COLS[loop])
        multi_output = SP_DATA_MULTI_OUTPUT[loop]
        output_index = SP_OUTPUT_INDEX[loop]
        # Create combination of params.
56 | for item in list(ParameterGrid(param_grid)): 57 | train_model(item) 58 | -------------------------------------------------------------------------------- /utils/GraphUtil.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ------------------------------------------------------------------------------------------------------% 3 | # Created by "Thieu Nguyen" at 00:51, 29/03/2020 % 4 | # % 5 | # Email: nguyenthieu2102@gmail.com % 6 | # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % 7 | # Github: https://github.com/thieunguyen5991 % 8 | # -------------------------------------------------------------------------------------------------------% 9 | 10 | #import matplotlib as mpl 11 | #mpl.use('Agg') 12 | import matplotlib.pyplot as plt 13 | 14 | 15 | def draw_predict(y_test=None, y_pred=None, filename=None, pathsave=None): 16 | plt.plot(y_test) 17 | plt.plot(y_pred) 18 | plt.ylabel('CPU') 19 | plt.xlabel('Timestamp') 20 | plt.legend(['Actual', 'Predict'], loc='upper right') 21 | plt.savefig(pathsave + filename + ".png") 22 | plt.close() 23 | return None 24 | 25 | 26 | def draw_predict_with_error(data=None, error=None, filename=None, pathsave=None): 27 | plt.plot(data[0]) 28 | plt.plot(data[1]) 29 | plt.ylabel('Real value') 30 | plt.xlabel('Point') 31 | plt.legend(['Predict y... RMSE= ' + str(error[0]), 'Test y... 
MAE= ' + str(error[1])], loc='upper right') 32 | plt.savefig(pathsave + filename + ".png") 33 | plt.close() 34 | return None 35 | 36 | 37 | def draw_raw_time_series_data(data=None, label=None, title=None, filename=None, pathsave=None): 38 | plt.plot(data) 39 | plt.xlabel(label["y"]) 40 | plt.ylabel(label["x"]) 41 | plt.title(title, fontsize=8) 42 | plt.savefig(pathsave + filename + ".pdf") 43 | plt.close() 44 | return None 45 | 46 | 47 | def draw_raw_time_series_data_and_show(data=None, label=None, title=None): 48 | plt.plot(data) 49 | plt.xlabel(label["y"]) 50 | plt.ylabel(label["x"]) 51 | plt.title(title, fontsize=8) 52 | plt.show() 53 | return None 54 | 55 | -------------------------------------------------------------------------------- /utils/IOUtil.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ------------------------------------------------------------------------------------------------------% 3 | # Created by "Thieu Nguyen" at 00:51, 29/03/2020 % 4 | # % 5 | # Email: nguyenthieu2102@gmail.com % 6 | # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % 7 | # Github: https://github.com/thieunguyen5991 % 8 | # -------------------------------------------------------------------------------------------------------% 9 | 10 | import numpy as np 11 | import pandas as pd 12 | from csv import DictWriter 13 | from os import getcwd, path, makedirs 14 | 15 | 16 | def save_all_models_to_csv(item=None, log_filename=None, pathsave=None): 17 | check_directory = getcwd() + "/" + pathsave 18 | if not path.exists(check_directory): 19 | makedirs(check_directory) 20 | with open(pathsave + log_filename + ".csv", 'a') as file: 21 | w = DictWriter(file, delimiter=',', lineterminator='\n', fieldnames=item.keys()) 22 | if file.tell() == 0: 23 | w.writeheader() 24 | w.writerow(item) 25 | 26 | 27 | def save_prediction_to_csv(y_test=None, y_pred=None, filename=None, pathsave=None): 28 | check_directory = 
getcwd() + "/" + pathsave 29 | if not path.exists(check_directory): 30 | makedirs(check_directory) 31 | 32 | temp = np.concatenate((y_test, y_pred), axis=1) 33 | np.savetxt(pathsave + filename + ".csv", temp, delimiter=",") 34 | return None 35 | 36 | 37 | def save_loss_train_to_csv(error=None, filename=None, pathsave=None): 38 | np.savetxt(pathsave + filename + ".csv", np.array(error), delimiter=",") 39 | return None 40 | 41 | 42 | def load_dataset(path_to_data=None, cols=None): 43 | df = pd.read_csv(path_to_data + ".csv", usecols=cols) 44 | return df.values 45 | 46 | 47 | def save_run_test(num_run_test=None, data=None, filepath=None): 48 | t0 = np.reshape(data, (num_run_test, -1)) 49 | np.savetxt(filepath, t0, delimiter=",") 50 | 51 | 52 | def load_prediction_results(pathfile=None, delimiter=",", header=None): 53 | df = pd.read_csv(pathfile, sep=delimiter, header=header) 54 | return df.values[:, 0:1], df.values[:, 1:2] 55 | 56 | 57 | def save_number_of_vms(data=None, pathfile=None): 58 | t0 = np.reshape(data, (-1, 1)) 59 | np.savetxt(pathfile, t0, delimiter=",") 60 | 61 | 62 | def load_number_of_vms(pathfile=None, delimiter=",", header=None): 63 | df = pd.read_csv(pathfile, sep=delimiter, header=header) 64 | return df.values[:, 0:1] 65 | 66 | 67 | def save_scaling_results_to_csv(data=None, path_file=None): 68 | np.savetxt(path_file + ".csv", np.array(data), delimiter=",") 69 | return None 70 | 71 | 72 | def read_dataset_file(filepath=None, usecols=None, header=0, index_col=False, inplace=True): 73 | df = pd.read_csv(filepath, usecols=usecols, header=header, index_col=index_col) 74 | df.dropna(inplace=inplace) 75 | return df.values 76 | 77 | 78 | def save_formatted_data_csv(dataset=None, filename=None, pathsave=None): 79 | np.savetxt(pathsave + filename + ".csv", dataset, delimiter=",") 80 | return None -------------------------------------------------------------------------------- /utils/MathUtil.py: 
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 00:51, 29/03/2020                                                        %
#                                                                                                       %
#       Email:      nguyenthieu2102@gmail.com                                                           %
#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                  %
#       Github:     https://github.com/thieunguyen5991                                                  %
# -------------------------------------------------------------------------------------------------------%
"""Element-wise activation functions and their derivatives (NumPy-vectorised)."""

from numpy import where, maximum, power, multiply, exp
from numpy import tanh as mytanh


def itself(x):
    """Identity activation: returns ``x`` unchanged."""
    return x


def elu(x, alpha=1):
    """Exponential linear unit: ``alpha * (exp(x) - 1)`` where x < 0, else x."""
    return where(x < 0, alpha * (exp(x) - 1), x)


def relu(x):
    """Rectified linear unit: clamps negative values to 0."""
    return maximum(0, x)


def tanh(x):
    """Hyperbolic tangent (thin wrapper around ``numpy.tanh``)."""
    return mytanh(x)


def sigmoid(x):
    """Logistic sigmoid: maps any real input into (0, 1)."""
    return 1.0 / (1.0 + exp(-x))


# NOTE(review): derivative_tanh / derivative_sigmoid / derivative_elu appear to
# be written in terms of the activation OUTPUT (e.g. derivative_tanh(y) = 1 - y**2),
# not the pre-activation input — confirm against the back-propagation caller.

def derivative_self(x):
    """Derivative of the identity: constant 1."""
    return 1


def derivative_elu(x, alpha=1):
    """ELU derivative (via the ELU output): ``x + alpha`` where x < 0, else 1."""
    return where(x < 0, x + alpha, 1)


def derivative_relu(x):
    """ReLU derivative: 0 where x < 0, otherwise 1."""
    return where(x < 0, 0, 1)


def derivative_tanh(x):
    """Tanh derivative (via the tanh output): ``1 - x**2``."""
    return 1 - power(x, 2)


def derivative_sigmoid(x):
    """Sigmoid derivative (via the sigmoid output): ``x * (1 - x)``."""
    return multiply(x, 1 - x)
median_absolute_error, r2_score 11 | import numpy as np 12 | 13 | 14 | class MeasureTimeSeries: 15 | def __init__(self, y_true, y_pred, multi_output=None, number_rounding=3): 16 | """ 17 | :param y_true: 18 | :param y_pred: 19 | :param multi_output: string in [‘raw_values’, ‘uniform_average’, ‘variance_weighted’] or array-like of shape (n_outputs) 20 | :param number_rounding: 21 | """ 22 | self.y_true = y_true 23 | self.y_pred = y_pred 24 | self.multi_output = multi_output 25 | self.number_rounding = number_rounding 26 | self.score_ev, self.score_mae, self.score_mse, self.score_msle, self.score_meae = None, None, None, None, None 27 | self.score_r2, self.score_rmse, self.score_mape, self.score_smape = None, None, None, None 28 | 29 | def explained_variance_score(self): 30 | temp = explained_variance_score(self.y_true, self.y_pred, multioutput=self.multi_output) 31 | self.score_ev = np.round(temp, self.number_rounding) 32 | 33 | def mean_absolute_error(self): 34 | temp = mean_absolute_error(self.y_true, self.y_pred, multioutput=self.multi_output) 35 | self.score_mae = np.round(temp, self.number_rounding) 36 | 37 | def mean_squared_error(self): 38 | temp = mean_squared_error(self.y_true, self.y_pred, multioutput=self.multi_output) 39 | self.score_mse = np.round(temp, self.number_rounding) 40 | 41 | def mean_squared_log_error(self): 42 | y_true = np.where(self.y_true < 0, 0, self.y_true) 43 | y_pred = np.where(self.y_pred < 0, 0, self.y_pred) 44 | temp = mean_squared_log_error(y_true, y_pred, multioutput=self.multi_output) 45 | self.score_msle = np.round(temp, self.number_rounding) 46 | 47 | def median_absolute_error(self): 48 | if self.multi_output is not None: 49 | print("Median absolute error is not supported for multi output") 50 | return None 51 | temp = median_absolute_error(self.y_true, self.y_pred) 52 | self.score_meae = np.round(temp, self.number_rounding) 53 | 54 | def r2_score_error(self): 55 | temp = r2_score(self.y_true, self.y_pred, 
multioutput=self.multi_output) 56 | self.score_r2 = np.round(temp, self.number_rounding) 57 | 58 | def root_mean_squared_error(self): 59 | temp = np.sqrt(mean_squared_error(self.y_true, self.y_pred, multioutput=self.multi_output)) 60 | self.score_rmse = np.round(temp, self.number_rounding) 61 | 62 | def mean_absolute_percentage_error(self): 63 | temp = np.mean(np.abs((self.y_true - self.y_pred) / self.y_true), axis=0) * 100 64 | self.score_mape = np.round(temp, self.number_rounding) 65 | 66 | def symmetric_mean_absolute_percentage_error(self): 67 | temp = np.mean(2*np.abs(self.y_pred - self.y_true) / (np.abs(self.y_true) + np.abs(self.y_pred)), axis=0) * 100 68 | self.score_smape = np.round(temp, self.number_rounding) 69 | 70 | def _fit__(self): 71 | self.explained_variance_score() 72 | self.mean_absolute_error() 73 | self.mean_squared_error() 74 | self.mean_squared_log_error() 75 | self.r2_score_error() 76 | self.root_mean_squared_error() 77 | self.mean_absolute_percentage_error() 78 | self.symmetric_mean_absolute_percentage_error() -------------------------------------------------------------------------------- /utils/PreprocessingUtil.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # ------------------------------------------------------------------------------------------------------% 3 | # Created by "Thieu Nguyen" at 00:51, 29/03/2020 % 4 | # % 5 | # Email: nguyenthieu2102@gmail.com % 6 | # Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 % 7 | # Github: https://github.com/thieunguyen5991 % 8 | # -------------------------------------------------------------------------------------------------------% 9 | 10 | import numpy as np 11 | from copy import deepcopy 12 | 13 | 14 | class TimeSeries: 15 | def __init__(self, dataset=None, data_idx=None, sliding=None, output_index=None, method_statistic=0, minmax_scaler=None): 16 | ''' 17 | :param data_idx: 18 | :param sliding: 19 | :param 
output_index: 20 | :param method_statistic: 21 | :param minmax_scaler: 22 | ''' 23 | self.original_dataset = dataset 24 | self.dimension = dataset.shape[1] # The real number of features 25 | self.original_dataset_len = len(dataset) 26 | self.dataset_len = self.original_dataset_len - sliding 27 | 28 | self.train_idx = int(data_idx[0] * self.dataset_len) 29 | self.train_len = self.train_idx 30 | self.valid_idx = self.train_idx + int(data_idx[1] * self.dataset_len) 31 | self.valid_len = self.valid_idx - self.train_idx 32 | self.test_idx = self.dataset_len 33 | self.test_len = self.dataset_len - self.train_len - self.valid_len 34 | self.sliding = sliding 35 | self.output_index = output_index 36 | self.method_statistic = method_statistic 37 | self.minmax_scaler = minmax_scaler 38 | 39 | def __get_dataset_X__(self, list_transform=None): 40 | """ 41 | :param list_transform: [ x1 | t1 ] => Make a window slides 42 | :return: dataset_sliding = [ x1 | x2 | x3| t1 | t2 | t3 | ... ] 43 | """ 44 | dataset_sliding = np.zeros(shape=(self.test_idx, 1)) #[ 0 | x1 | x2 | x3| t1 | t2 | t3 | ... ] 45 | for i in range(self.dimension): 46 | for j in range(self.sliding): 47 | temp = np.array(list_transform[j: self.test_idx + j, i:i + 1]) 48 | dataset_sliding = np.concatenate((dataset_sliding, temp), axis=1) 49 | dataset_sliding = dataset_sliding[:, 1:] #[ x1 | x2 | x3| t1 | t2 | t3 | ... ] 50 | 51 | ## Find the dataset_X by using different statistic method on above window slides 52 | if self.method_statistic == 0: # default 53 | dataset_X = deepcopy(dataset_sliding) 54 | else: 55 | dataset_X = np.zeros(shape=(self.test_idx, 1)) 56 | if self.method_statistic == 1: 57 | """ 58 | mean(x1, x2, x3, ...), mean(t1, t2, t3,...) 
59 | """ 60 | for i in range(self.dimension): 61 | meanx = np.reshape(np.mean(dataset_sliding[:, i * self.sliding:(i + 1) * self.sliding], axis=1), (-1, 1)) 62 | dataset_X = np.concatenate((dataset_X, meanx), axis=1) 63 | 64 | if self.method_statistic == 2: 65 | """ 66 | min(x1, x2, x3, ...), mean(x1, x2, x3, ...), max(x1, x2, x3, ....) 67 | """ 68 | for i in range(self.dimension): 69 | minx = np.reshape(np.amin(dataset_sliding[:, i * self.sliding:(i + 1) * self.sliding], axis=1), (-1, 1)) 70 | meanx = np.reshape(np.mean(dataset_sliding[:, i * self.sliding:(i + 1) * self.sliding], axis=1), (-1, 1)) 71 | maxx = np.reshape(np.amax(dataset_sliding[:, i * self.sliding:(i + 1) * self.sliding], axis=1), (-1, 1)) 72 | dataset_X = np.concatenate((dataset_X, minx, meanx, maxx), axis=1) 73 | 74 | if self.method_statistic == 3: 75 | """ 76 | min(x1, x2, x3, ...), median(x1, x2, x3, ...), max(x1, x2, x3, ....), min(t1, t2, t3, ...), median(t1, t2, t3, ...), max(t1, t2, t3, ....) 77 | """ 78 | for i in range(self.dimension): 79 | minx = np.reshape(np.amin(dataset_sliding[:, i * self.sliding:(i + 1) * self.sliding], axis=1), (-1, 1)) 80 | medix = np.reshape(np.median(dataset_sliding[:, i * self.sliding:(i + 1) * self.sliding], axis=1), (-1, 1)) 81 | maxx = np.reshape(np.amax(dataset_sliding[:, i * self.sliding:(i + 1) * self.sliding], axis=1), (-1, 1)) 82 | dataset_X = np.concatenate((dataset_X, minx, medix, maxx), axis=1) 83 | dataset_X = dataset_X[:, 1:] 84 | return dataset_X 85 | 86 | def _preprocessing_2d__(self): 87 | """ 88 | output_index = None 89 | + single input => single output 90 | + multiple input => multiple output 91 | 92 | output_index = number (index) 93 | + single input => single output index 94 | + multiple input => single output index 95 | 96 | valid_idx = 0 ==> No validate data || cpu(t), cpu(t-1), ..., ram(t), ram(t-1),... 
97 | """ 98 | 99 | if self.output_index is None: 100 | list_transform = self.minmax_scaler.fit_transform(self.original_dataset) 101 | # print(preprocessing.MinMaxScaler().data_max_) 102 | dataset_y = deepcopy(list_transform[self.sliding:]) # Now we need to find dataset_X 103 | else: 104 | # Example : data [0, 1, 2, 3] 105 | # output_index = 2 ==> Loop scale through 3, 0, 1, 2 106 | # [ cpu, ram, disk_io, disk_space ] 107 | # list_transform: [ 0, disk_space, cpu, ram, disk_io ] 108 | # Cut list_transform: [ disk_space, cpu, ram, disk_io ] 109 | # Dataset y = list_transform[-1] 110 | 111 | list_transform = np.zeros(shape=(self.original_dataset_len, 1)) 112 | for i in range(0, self.dimension): 113 | t = self.output_index - (self.dimension - 1) + i 114 | d1 = self.minmax_scaler.fit_transform( 115 | self.original_dataset[:self.original_dataset_len, t].reshape(-1, 1)) 116 | list_transform = np.concatenate((list_transform, d1), axis=1) 117 | # print(minmax_scaler.data_max_) 118 | list_transform = list_transform[:, 1:] 119 | dataset_y = deepcopy(list_transform[self.sliding:, -1:]) # Now we need to find dataset_X 120 | 121 | dataset_X = self.__get_dataset_X__(list_transform) 122 | 123 | ## Split data to set train and set test 124 | if self.valid_len == 0: 125 | X_train, y_train = dataset_X[0:self.train_idx], dataset_y[0:self.train_idx] 126 | X_test, y_test = dataset_X[self.train_idx:self.test_idx], dataset_y[self.train_idx:self.test_idx] 127 | # print("Processing data done!!!") 128 | return X_train, y_train, None, None, X_test, y_test, self.minmax_scaler 129 | else: 130 | X_train, y_train = dataset_X[0:self.train_idx], dataset_y[0:self.train_idx] 131 | X_valid, y_valid = dataset_X[self.train_idx:self.valid_idx], dataset_y[self.train_idx:self.valid_idx] 132 | X_test, y_test = dataset_X[self.valid_idx:self.test_idx], dataset_y[self.valid_idx:self.test_idx] 133 | # print("Processing data done!!!") 134 | return X_train, y_train, X_valid, y_valid, X_test, y_test, 
self.minmax_scaler 135 | 136 | def _preprocessing_3d__(self): 137 | if self.output_index is None: 138 | list_transform = self.minmax_scaler.fit_transform(self.original_dataset) 139 | dataset_y = deepcopy(list_transform[self.sliding:]) # Now we need to find dataset_X 140 | else: 141 | list_transform = np.zeros(shape=(self.original_dataset_len, 1)) 142 | for i in range(0, self.dimension): 143 | t = self.output_index - (self.dimension - 1) + i 144 | d1 = self.minmax_scaler.fit_transform( 145 | self.original_dataset[:self.original_dataset_len, t].reshape(-1, 1)) 146 | list_transform = np.concatenate((list_transform, d1), axis=1) 147 | list_transform = list_transform[:, 1:] 148 | dataset_y = deepcopy(list_transform[self.sliding:, -1:]) # Now we need to find dataset_X 149 | dataset_X = self.__get_dataset_X__(list_transform) 150 | ## Split data to set train and set test 151 | if self.valid_len == 0: 152 | X_train, y_train = dataset_X[0:self.train_idx], dataset_y[0:self.train_idx] 153 | X_valid, y_valid = None, None 154 | X_test, y_test = dataset_X[self.train_idx:self.test_idx], dataset_y[self.train_idx:self.test_idx] 155 | 156 | X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1 )) 157 | X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1)) 158 | # y_train = y_train.flatten() 159 | # y_test = y_test.flatten() 160 | else: 161 | X_train, y_train = dataset_X[0:self.train_idx], dataset_y[0:self.train_idx] 162 | X_valid, y_valid = dataset_X[self.train_idx:self.valid_idx], dataset_y[self.train_idx:self.valid_idx] 163 | X_test, y_test = dataset_X[self.valid_idx:self.test_idx], dataset_y[self.valid_idx:self.test_idx] 164 | 165 | X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1)) 166 | X_valid = np.reshape(X_valid, (X_valid.shape[0], X_valid.shape[1], 1)) 167 | X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1)) 168 | # y_train = y_train.flatten() 169 | # y_valid = y_valid.flatten() 170 | # y_test = y_test.flatten() 
171 | return X_train, y_train, X_valid, y_valid, X_test, y_test, self.minmax_scaler 172 | -------------------------------------------------------------------------------- /utils/SettingPaper.py: -------------------------------------------------------------------------------- 1 | ###### Config for test 2 | 3 | SP_RUN_TIMES = 3 4 | SP_LOG_FILENAME = "LOG_MODELS" 5 | SP_PATH_SAVE_BASE = "history/results/" 6 | SP_DRAW = True 7 | SP_PRINT_TRAIN = 2 # 0: nothing, 1 : full detail, 2: short version 8 | SP_PREPROCESSING_METHOD = 0 # 0: sliding window, 1: mean, 2: min-mean-max, 3: min-median-max 9 | 10 | SP_DATA_SPLIT_INDEX = (0.8, 0, 0.2) 11 | SP_DATA_SPLIT_INDEX_2 = (0.75, 0.15, 0.15) 12 | 13 | ### Full avaiable dataset 14 | SP_LOAD_DATA_FROM = "data/formatted/" 15 | SP_DATA_FILENAME = ["it_eu_5m", "it_uk_5m", "worldcup98_5m", "gg_cpu", "gg_ram", "gg_multi_cpu", "gg_multi_ram"] 16 | SP_DATA_COLS = [[4], [2], [2], [1], [2], [1, 2], [1, 2]] 17 | SP_DATA_MULTI_OUTPUT = [False, False, False, False, False, False, False] 18 | SP_OUTPUT_INDEX = [None, None, None, None, None, 0, 1] 19 | 20 | 21 | ######################## Paras according to the paper 22 | 23 | ####: MLNN-1HL 24 | mlnn1hl_paras_final = { 25 | "sliding": [2, 5, 10], 26 | "hidden_sizes": [(10, True) ], 27 | "activations": [("elu", "elu")], 28 | "learning_rate": [0.0001], 29 | "epoch": [1000], 30 | "batch_size": [128], 31 | "optimizer": ["adam"], # GradientDescentOptimizer, AdamOptimizer, AdagradOptimizer, AdadeltaOptimizer 32 | "loss": ["mse"] 33 | } 34 | 35 | ####: RNN-1HL 36 | rnn1hl_paras_final = { 37 | "sliding": [2, 5, 10], 38 | "hidden_sizes": [(10, True)], 39 | "activations": [("elu", "elu")], 40 | "learning_rate": [0.0001], 41 | "epoch": [1000], 42 | "batch_size": [128], 43 | "optimizer": ["adam"], # GradientDescentOptimizer, AdamOptimizer, AdagradOptimizer, AdadeltaOptimizer 44 | "loss": ["mse"], 45 | "dropouts": [[0.2]] 46 | } 47 | 48 | ####: LSTM-1HL 49 | lstm1hl_paras_final = { 50 | "sliding": [2, 5, 10], 
51 | "hidden_sizes": [(10, True)], 52 | "activations": [("elu", "elu")], 53 | "learning_rate": [0.0001], 54 | "epoch": [1000], 55 | "batch_size": [128], 56 | "optimizer": ["adam"], # GradientDescentOptimizer, AdamOptimizer, AdagradOptimizer, AdadeltaOptimizer 57 | "loss": ["mse"], 58 | "dropouts": [[0.2]] 59 | } 60 | 61 | ####: GRU-1HL 62 | gru1hl_paras_final = { 63 | "sliding": [2, 5, 10], 64 | "hidden_sizes": [(10, True)], 65 | "activations": [("elu", "elu")], # 0: elu, 1:relu, 2:tanh, 3:sigmoid 66 | "learning_rate": [0.0001], 67 | "epoch": [1000], 68 | "batch_size": [128], 69 | "optimizer": ["adam"], # GradientDescentOptimizer, AdamOptimizer, AdagradOptimizer, AdadeltaOptimizer 70 | "loss": ["mse"], 71 | "dropouts": [[0.2]] 72 | } 73 | 74 | 75 | # ========================= MLNN ============================== 76 | 77 | #### : GA-MLNN 78 | ga_mlnn_paras_final = { 79 | "sliding": [2, 5, 10], 80 | "hidden_size": [(10, True)], 81 | "activations": [("elu", "elu")], 82 | 83 | "epoch": [1000], 84 | "pop_size": [100], # 100 -> 900 85 | "pc": [0.95], # 0.85 -> 0.97 86 | "pm": [0.025], # 0.005 -> 0.10 87 | "domain_range": [(-1, 1)] # lower and upper bound 88 | } 89 | 90 | #### : DE-MLNN 91 | de_mlnn_paras_final = { 92 | "sliding": [2, 5, 10], 93 | "hidden_size": [(10, True)], 94 | "activations": [("elu", "elu")], 95 | 96 | "epoch": [1000], 97 | "pop_size": [100], # 10 * problem_size 98 | "Wf": [0.8], # Weighting factor 99 | "Cr": [0.9], # Crossover rate 100 | "domain_range": [(-1, 1)] # lower and upper bound 101 | } 102 | 103 | 104 | #### : CRO-MLNN 105 | cro_mlnn_paras_final = { 106 | "sliding": [2, 5, 10], 107 | "hidden_size": [(10, True) ], 108 | "activations": [("elu", "elu")], 109 | 110 | "epoch": [1000], 111 | "pop_size": [100], 112 | "G": [[0.02, 0.2]], 113 | "GCR": [0.1], 114 | "po": [0.4], 115 | "Fb": [0.9], 116 | "Fa": [0.1], 117 | "Fd": [0.1], 118 | "Pd": [0.1], 119 | "k": [3], 120 | "domain_range": [(-1, 1)] # lower and upper bound 121 | } 122 | 123 | #### : 
OCRO-MLNN 124 | ocro_mlnn_paras_final = { 125 | "sliding": [2, 5, 10], 126 | "hidden_size": [(10, True)], 127 | "activations": [("elu", "elu")], 128 | 129 | "epoch": [1000], 130 | "pop_size": [100], 131 | "G": [[0.02, 0.2]], 132 | "GCR": [0.1], 133 | "po": [0.4], 134 | "Fb": [0.8], 135 | "Fa": [0.1], 136 | "Fd": [0.3], 137 | "Pd": [0.1], 138 | "k": [3], 139 | "domain_range": [(-1, 1)], # lower and upper bound 140 | 141 | "restart_count": [55], 142 | } 143 | 144 | 145 | 146 | #### : PSO-MLNN 147 | pso_mlnn_paras_final = { 148 | "sliding": [2, 5, 10], 149 | "hidden_size": [(10, True)], 150 | "activations": [("elu", "elu")], 151 | 152 | "epoch": [1000], 153 | "pop_size": [100], # 100 -> 900 154 | "w_minmax": [(0.4, 0.9)], # [0-1] -> [0.4-0.9] Trong luong cua con chim 155 | "c_minmax": [(1.2, 1.2)], # [(1.2, 1.2), (0.8, 2.0), (1.6, 0.6)] # [0-2] Muc do anh huong cua local va global 156 | # r1, r2 : random theo tung vong lap 157 | # delta(t) = 1 (do do: x(sau) = x(truoc) + van_toc 158 | "domain_range": [(-1, 1)] # lower and upper bound 159 | } 160 | 161 | 162 | #### : BFO-MLNN 163 | bfo_mlnn_paras_final = { 164 | "sliding": [2, 5, 10], 165 | "hidden_size": [(10, True)], 166 | "activations": [("elu", "elu")], 167 | 168 | "pop_size": [100], 169 | "Ci": [0.01], # step_size 170 | "Ped": [0.25], # p_eliminate 171 | "Ns": [4], # swim_length 172 | "Ned": [6], # elim_disp_steps 173 | "Nre": [2], # repro_steps 174 | "Nc": [30], # chem_steps 175 | "attract_repel": [(0.1, 0.2, 0.1, 10)], # [ d_attr, w_attr, h_repel, w_repel ] 176 | 177 | "domain_range": [(-1, 1)] # lower and upper bound 178 | } 179 | 180 | #### : ABFOLS-MLNN 181 | abfols_mlnn_paras_final = { 182 | "sliding": [2, 5, 10], 183 | "hidden_size": [(10, True)], 184 | "activations": [("elu", "elu")], 185 | 186 | "epoch": [1000], 187 | "pop_size": [100], # 100 -> 900 188 | "Ci": [(0.1, 0.00001)], # C_s (start), C_e (end) -=> step size # step size in BFO 189 | "Ped": [0.25], # p_eliminate 190 | "Ns": [4], # swim_length 191 
| "N_minmax": [(3, 40)], # (Dead threshold value, split threshold value) -> N_adapt, N_split 192 | 193 | "domain_range": [(-1, 1)] # lower and upper bound 194 | } 195 | --------------------------------------------------------------------------------