├── disaggregate
│   ├── temp.py
│   ├── __init__.py
│   ├── disaggregator.py
│   ├── util.py
│   ├── WindowGRU.py
│   ├── dsc.py
│   ├── dae.py
│   ├── dldisaggregator.py
│   ├── seq2point.py
│   ├── rnn.py
│   ├── gru.py
│   ├── cnn_rnn.py
│   ├── seq2seq.py
│   └── attention_dae.py
├── .idea
│   ├── .gitignore
│   ├── vcs.xml
│   ├── inspectionProfiles
│   │   └── profiles_settings.xml
│   ├── other.xml
│   ├── modules.xml
│   ├── misc.xml
│   └── nilmtk_dl.iml
├── README.md
├── LICENSE
├── .gitignore
├── ex_house_mc.py
├── ex_house_dw.py
├── ex_house_fr.py
├── metrics.py
├── ex_time_mc.py
├── ex_time_dw.py
├── ex_time_fr.py
├── ex1.py
├── api.py
└── ex2.ipynb
/disaggregate/temp.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /workspace.xml
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/other.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/disaggregate/__init__.py:
--------------------------------------------------------------------------------
1 | from nilmtk.disaggregate import Disaggregator
2 | from .dae import DAE
3 | from .seq2point import Seq2Point
4 | from .seq2seq import Seq2Seq
5 | # from .WindowGRU import WindowGRU
6 | from .rnn import RNN
7 | from .gru import WindowGRU
8 | from .attention_dae import ADAE
9 | from .cnn_rnn import CNN_RNN
10 | from .util import get_activations, config, get_sections_df, get_sections_df_2
11 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # nilmtk-dl
2 | A non-intrusive load monitoring toolkit based on deep learning, built on top of nilmtk-contrib with a few improvements. The main changes are:
3 | 1. Fixed some bugs (although some of them may just have been my own misuse);
4 | 2. Added extra metrics, so results can now be evaluated with event-based criteria in addition to the usual energy-based ones;
5 | 3. Added an activation-extraction feature: when a dataset only provides power readings, the power can be converted into on/off events via activations, which makes it easier to reproduce models that take on/off events as their target (although, after reproducing them, I found those models did not work well);
6 | 4. Because the REDD dataset contains some bad sections, added automatic extraction of the good sections of a nilmtk.DataSet for training and prediction;
7 | 5. Added some visualization utilities;
8 | 6. Extended the save-model and load-model functions of the original deep-learning Disaggregators, so a model can now be saved or reloaded simply by changing the experiment configuration file.
9 | 
10 | ex_time_\*.py and ex_house_\*.py are example experiment configuration files for the REDD dataset, where \* is the appliance abbreviation.
11 | 
12 | The two Jupyter notebooks are scratch pads and can be ignored.
13 | 
14 | Since my own experience is quite limited, the code may contain bugs; for any questions please contact xieck13@gmail.com
15 |
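16 | As a quick orientation, a minimal experiment configuration (a trimmed-down sketch of `ex_house_mc.py`; the HDF5 path and dates are placeholders) looks like this:
17 | 
18 | ```python
19 | from api import API
20 | from disaggregate import DAE
21 | 
22 | method = {
23 |     # 'save-model-path' stores the trained model; pointing 'pretrained-model-path'
24 |     # at the same folder later reloads it instead of retraining.
25 |     'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None}),
26 | }
27 | 
28 | experiment = {
29 |     'power': {'mains': ['apparent', 'active'], 'appliance': ['apparent', 'active']},
30 |     'sample_rate': 6,
31 |     'appliances': ['microwave'],
32 |     'methods': method,
33 |     'isState': False,
34 |     'train': {'datasets': {'redd': {'path': 'redd_data.h5', 'buildings': {
35 |         1: {'start_time': '2011-04-18', 'end_time': '2011-05-24'}}}}},
36 |     'test': {'datasets': {'redd': {'path': 'redd_data.h5', 'buildings': {
37 |         2: {'start_time': '2011-04-17', 'end_time': '2011-05-22'}}}}},
38 | }
39 | 
40 | API(experiment)
41 | ```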
--------------------------------------------------------------------------------
/.idea/nilmtk_dl.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 XckCodeDD
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # vscode
85 | .vscode
86 |
87 | # pyenv
88 | .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98 | __pypackages__/
99 |
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 |
104 | # SageMath parsed files
105 | *.sage.py
106 |
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 |
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 |
120 | # Rope project settings
121 | .ropeproject
122 |
123 | # mkdocs documentation
124 | /site
125 |
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 |
131 | # Pyre type checker
132 | .pyre/
133 |
134 | #
135 | reserved
136 |
137 | #
138 | RNN
139 | DAE
140 | GRU
141 | Seq2Point
142 | Seq2Seq
143 | *.h5
144 | *.hdf5
145 | *.csv
--------------------------------------------------------------------------------
/disaggregate/disaggregator.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | from datetime import datetime
3 | from nilmtk.timeframe import merge_timeframes, TimeFrame
4 |
5 |
6 | class Disaggregator(object):
7 | """Provides a common interface to all disaggregation classes.
8 |
9 | See https://github.com/nilmtk/nilmtk/issues/755 for discussion
10 |
11 | Attributes
12 | ----------
13 | model :
14 | Each subclass should internally store models learned from training.
15 |
16 | MODEL_NAME : string
17 | A short name for this type of model.
18 | e.g. 'CO' for combinatorial optimisation.
19 | """
20 |
21 | def partial_fit(self, train_mains, train_appliances, **load_kwargs):
22 | """ Trains the model given a metergroup containing appliance meters
23 | (supervised) or a site meter (unsupervised). Will have a
24 | default implementation in super class.
25 | train_main: list of pd.DataFrames with pd.DatetimeIndex as index and 1
26 | or more power columns
27 | train_appliances: list of (appliance_name,list of pd.DataFrames) with
28 | the same pd.DatetimeIndex as index as train_main and
29 | the same 1 or more power columns as train_main
30 | """
31 | raise NotImplementedError()
32 |
33 | def disaggregate_chunk(self, test_mains):
34 | """Passes each chunk from mains generator to disaggregate_chunk()
35 | Parameters
36 | ----------
37 | test_mains : list of pd.DataFrames
38 | """
39 | raise NotImplementedError()
40 |
41 | def call_preprocessing(self, train_mains, train_appliances):
42 | """Calls the preprocessing functions of this algorithm and returns the
43 | preprocessed data in the same format
44 | Parameters
45 | ----------
46 | train_main: list of pd.DataFrames with pd.DatetimeIndex as index and 1
47 | or more power columns
48 | train_appliances: list of (appliance_name,list of pd.DataFrames) with
49 | the same pd.DatetimeIndex as index as train_main and the
50 | same 1 or more power columns as train_main
51 | """
52 | return train_mains, train_appliances
53 |
54 | def save_model(self, folder_name):
55 | """Passes each chunk from mains generator to disaggregate_chunk()
56 | Parameters
57 | ----------
58 | test_mains : list of pd.DataFrames
59 | """
60 | raise NotImplementedError()
61 |
62 | def load_model(self, folder_name):
63 | """Passes each chunk from mains generator to disaggregate_chunk()
64 | Parameters
65 | ----------
66 | test_mains : list of pd.DataFrames
67 | """
68 | raise NotImplementedError()
69 |
70 |
--------------------------------------------------------------------------------
/ex_house_mc.py:
--------------------------------------------------------------------------------
1 |
2 | from api import API
3 | from disaggregate import ADAE, DAE, Seq2Point, Seq2Seq, WindowGRU, RNN
4 | import warnings
5 | warnings.filterwarnings("ignore")
6 |
7 | path = 'D:/workspace/nilm/data/redd_data.h5'
8 | # path = 'D:/workspace/nilm/code/databank/redd_data.h5'
9 |
10 |
11 | debug = False
12 | test = False
13 |
14 | if(debug):
15 | method = {
16 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
17 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
18 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
19 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
20 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
21 | }
22 | else:
23 | method = {
24 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None}),
25 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None}),
26 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None}),
27 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None}),
28 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None}),
29 | }
30 | if test:
31 | method = {
32 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': 'DAE'}),
33 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': 'RNN'}),
34 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': 'Seq2Point'}),
35 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': 'Seq2Seq'}),
36 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': 'GRU'}),
37 | }
38 |
39 |
40 | ex_train_microwave = {
41 |
42 | 'power': {
43 | 'mains': ['apparent', 'active'],
44 | 'appliance': ['apparent', 'active']
45 | },
46 | 'sample_rate': 6,
47 |
48 |
49 | 'appliances': ['microwave'],
50 | 'methods': method,
51 | 'isState': False,
52 | 'train': {
53 | 'datasets': {
54 |
55 | 'redd': {
56 | 'path': path,
57 | 'buildings': {
58 | 1: {
59 | 'start_time': '2011-04-18',
60 | 'end_time': '2011-05-24'
61 | },
62 | 3: {
63 | 'start_time': '2011-04-16',
64 | 'end_time': '2011-05-30'
65 | }
66 |
67 | }
68 |
69 |
70 | }
71 | }
72 | },
73 |
74 | 'test': {
75 | 'datasets': {
76 | 'redd': {
77 | 'path': path,
78 | 'buildings': {
79 | 2: {
80 | 'start_time': '2011-04-17',
81 | 'end_time': '2011-05-22'
82 | },
83 | }
84 | }
85 | },
86 | },
87 | }
88 |
89 | #%%
90 |
91 | API(ex_train_microwave)
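92 | 
93 | # Note: the `debug` flag above switches every model to a single training epoch for
94 | # a quick smoke test, while the `test` flag reloads previously saved models via
95 | # 'pretrained-model-path' instead of training from scratch.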
--------------------------------------------------------------------------------
/ex_house_dw.py:
--------------------------------------------------------------------------------
1 |
2 | from api import API
3 | from disaggregate import ADAE, DAE, Seq2Point, Seq2Seq, WindowGRU, RNN
4 | import warnings
5 | warnings.filterwarnings("ignore")
6 |
7 | path = 'D:/workspace/nilm/data/redd_data.h5'
8 | # path = 'D:/workspace/nilm/code/databank/redd_data.h5'
9 |
10 |
11 | debug = False
12 | test = False
13 |
14 | if(debug):
15 | method = {
16 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
17 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
18 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
19 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
20 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
21 | }
22 | else:
23 | method = {
24 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None}),
25 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None}),
26 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None}),
27 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None}),
28 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None}),
29 | }
30 | if test:
31 | method = {
32 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': 'DAE'}),
33 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': 'RNN'}),
34 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': 'Seq2Point'}),
35 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': 'Seq2Seq'}),
36 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': 'GRU'}),
37 | }
38 |
39 |
40 |
41 | ex_train_dish_washer = {
42 |
43 | 'power': {
44 | 'mains': ['apparent', 'active'],
45 | 'appliance': ['apparent', 'active']
46 | },
47 | 'sample_rate': 6,
48 |
49 |
50 | 'appliances': ['dish washer'],
51 | 'methods': method,
52 | 'isState': False,
53 | 'train': {
54 | 'datasets': {
55 |
56 | 'redd': {
57 | 'path': path,
58 | 'buildings': {
59 | 1: {
60 | 'start_time': '2011-04-18',
61 | 'end_time': '2011-05-24'
62 | },
63 | 3: {
64 | 'start_time': '2011-04-16',
65 | 'end_time': '2011-05-30'
66 | }
67 | }
68 | }
69 | }
70 | },
71 |
72 | 'test': {
73 | 'datasets': {
74 | 'redd': {
75 | 'path': path,
76 | 'buildings': {
77 | 2: {
78 | 'start_time': '2011-04-17',
79 | 'end_time': '2011-05-22'
80 | },
81 | 4: {
82 | 'start_time': '2011-04-16',
83 | 'end_time': '2011-06-03'
84 | }
85 | }
86 | }
87 | },
88 | },
89 | }
90 |
91 | API(ex_train_dish_washer)
--------------------------------------------------------------------------------
/ex_house_fr.py:
--------------------------------------------------------------------------------
1 |
2 | from api import API
3 | from disaggregate import ADAE, DAE, Seq2Point, Seq2Seq, WindowGRU, RNN
4 | import warnings
5 | warnings.filterwarnings("ignore")
6 |
7 | path = 'D:/workspace/nilm/data/redd_data.h5'
8 | # path = 'D:/workspace/nilm/code/databank/redd_data.h5'
9 |
10 |
11 | debug = False
12 | test = False
13 |
14 | if(debug):
15 | method = {
16 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
17 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
18 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
19 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
20 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
21 | }
22 | else:
23 | method = {
24 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None}),
25 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None}),
26 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None}),
27 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None}),
28 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None}),
29 | }
30 | if test:
31 | method = {
32 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': 'DAE'}),
33 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': 'RNN'}),
34 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': 'Seq2Point'}),
35 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': 'Seq2Seq'}),
36 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': 'GRU'}),
37 | }
38 |
39 |
40 | ex_train_fridge = {
41 |
42 | 'power': {
43 | 'mains': ['apparent', 'active'],
44 | 'appliance': ['apparent', 'active']
45 | },
46 | 'sample_rate': 6,
47 |
48 |
49 | 'appliances': ['fridge'],
50 | 'methods': method,
51 | 'isState': False,
52 | 'train': {
53 | 'datasets': {
54 |
55 | 'redd': {
56 | 'path': path,
57 | 'buildings': {
58 | 1: {
59 | 'start_time': '2011-04-18',
60 | 'end_time': '2011-05-24'
61 | },
62 | 3: {
63 | 'start_time': '2011-04-16',
64 | 'end_time': '2011-05-30'
65 | }
66 |
67 | }
68 |
69 |
70 | }
71 | }
72 | },
73 |
74 | 'test': {
75 | 'datasets': {
76 | 'redd': {
77 | 'path': path,
78 | 'buildings': {
79 | 2: {
80 | 'start_time': '2011-04-17',
81 | 'end_time': '2011-05-22'
82 | },
83 | 6: {
84 | 'start_time': '2011-05-21',
85 | 'end_time': '2011-06-14'
86 | }
87 | }
88 | }
89 | },
90 | },
91 | }
92 |
93 | #%%
94 |
95 | API(ex_train_fridge)
--------------------------------------------------------------------------------
/metrics.py:
--------------------------------------------------------------------------------
1 | #from nilmtk.electric import get_activations
2 | import pandas as pd
3 | import numpy as np
4 | from disaggregate import get_activations
5 |
6 | from sklearn.metrics import confusion_matrix
7 | from sklearn.metrics import mean_absolute_error, mean_squared_error, f1_score, recall_score, precision_score, accuracy_score
8 |
9 | class Metrics():
10 | def __init__(self, y_true, y_pred, params, isState):
11 | self.params = params
12 | self.s_true = pd.DataFrame(np.zeros_like(y_true), index=y_true.index)
13 | self.s_pred = pd.DataFrame(np.zeros_like(y_pred), index=y_pred.index)
14 | if(isState):
15 | self.e_true = y_true
16 | self.e_pred = y_pred
17 | self.s_true = y_true
18 | self.s_pred[y_pred > 0.5]=1.0 # may be wrong, np.int8?
19 | else:
20 | self.e_true = y_true
21 | self.e_pred = y_pred
22 |
23 | self.calculate_state()
24 |
25 | self.TP = 0
26 | self.TN = 0
27 | self.FP = 0
28 | self.FN = 0
29 | self.precision = 0
30 | self.recall = 0
31 | self.true_on_period = float(len(np.where(self.s_true==1)[0]))
32 | self.true_off_period = float(len(np.where(self.s_true==0)[0]))
33 | self.pred_on_period = float(len(np.where(self.s_pred==1)[0]))
34 | self.pred_off_period = float(len(np.where(self.s_pred==0)[0]))
35 | self.calculate_cf_matrix()
36 |
37 |
38 | def calculate_state(self):
39 | _, self.s_true = get_activations(self.e_true, self.params)
40 | _, self.s_pred = get_activations(self.e_pred, self.params)
41 |
42 | def calculate_cf_matrix(self):
43 | temp = confusion_matrix(self.s_true, self.s_pred)
44 | print(temp)
45 | self.TP = temp[1][1]
46 | self.TN = temp[0][0]
47 | self.FP = temp[0][1]
48 | self.FN = temp[1][0]
49 |
50 | def Accuracy(self):
51 | return accuracy_score(self.s_true, self.s_pred)
52 |
53 | def Precision(self):
54 | if (self.TP+self.FP != 0):
55 | p = self.TP/(self.TP+self.FP)
56 | self.precision = p
57 | else:
58 | p = 0
59 | return p
60 |
61 | def Recall(self):
62 | if (self.TP+self.FN != 0):
63 | r = self.TP/(self.TP+self.FN)
64 | self.recall = r
65 | else:
66 | r = 0
67 | return r
68 |
69 | def F_1_score(self):
70 | if(self.precision == 0 or self.recall == 0):
71 | return 0
72 | else:
73 | return f1_score(self.s_true, self.s_pred)
74 |
75 | def MSE(self):
76 | return mean_squared_error(self.e_true, self.e_pred)
77 |
78 | def MAE(self):
79 | return mean_absolute_error(self.e_true, self.e_pred)
80 |
81 | def sMAE(self,rate=100.0):
82 | error = np.array((self.e_true - self.e_pred)).flatten()
83 | abs_error = np.abs(error)
84 | s = np.array(self.s_true).flatten()
85 | e1 = sum(abs_error * s) / self.true_on_period
86 | e2 = sum(abs_error * (1 - s)) / self.true_off_period
87 | return (e1 * rate + e2) / (1 + rate)
88 |
89 |
90 |
91 | # def RMSE(self):
92 | # '''
93 | # The root mean square error (Chris Holmes, 2014;Batra, Kelly, et al., 2014 ; Mayhorn et al., 2016)
94 | # is the standard deviation of the energy estimation errors. The RMSE reports based on how spread-out
95 | # these errors are. In other words, it tells you how concentrated the estimations are around the true
96 | # values. The RMSE reports on the same unit as the data, thus making it an intuitive metric.
97 | # '''
98 | # MSE = self.MSE()
99 | # E_mean = np.mean(self.e_true)
100 | # return 1 - np.square(MSE)/E_mean
101 |
102 |
103 |
104 |
105 |
106 | # if __name__ = '__main__':
107 | # from nilmtk import DataSet
108 | # ukdale = DataSet(r'D:\workspace\data\ukdale.h5')
109 | # elec = ukdale.buildings[1].elec
110 | # print(elec)
111 | # elec_series = elec[2].power_series_all_data(sample_period=1).head(10800)
112 | # # state = get_activations_2(elec_series,25,300,300)
113 | # # print(state)
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 | # activation = get_activations(elec_series)
128 |
--------------------------------------------------------------------------------
/ex_time_mc.py:
--------------------------------------------------------------------------------
1 | from api import API
2 | from disaggregate import ADAE, DAE, Seq2Point, Seq2Seq, WindowGRU, RNN
3 | import warnings
4 |
5 | warnings.filterwarnings("ignore")
6 |
7 | path = 'D:/workspace/nilm/data/redd_data.h5'
8 | # path = 'D:/workspace/nilm/code/databank/redd_data.h5'
9 |
10 | DEBUG = False
11 | TEST = False
12 |
13 |
14 | def generate_method(debug, test):
15 | if debug:
16 | method = {
17 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
18 | # 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
19 | # 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
20 | # 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
21 | # 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
22 | }
23 | else:
24 | method = {
25 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None}),
26 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None}),
27 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None}),
28 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None}),
29 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None}),
30 | }
31 | if test:
32 | method = {
33 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': 'DAE', 'batch_size': 256}),
34 | # 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': 'RNN', 'batch_size': 256}),
35 | # 'Seq2Point': Seq2Point(
36 | # {'save-model-path': 'Seq2Point', 'pretrained-model-path': 'Seq2Point', 'batch_size': 256}),
37 | # 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': 'Seq2Seq', 'batch_size': 256}),
38 | # 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': 'GRU', 'batch_size': 256}),
39 | }
40 | return method
41 |
42 | time_config = {
43 | 'train': {
44 | 1: {
45 | 'start_time': '2011-04-18',
46 | 'end_time': '2011-05-07'
47 | },
48 | 2: {
49 | 'start_time': '2011-04-17',
50 | 'end_time': '2011-04-25'
51 | },
52 | 3: {
53 | 'start_time': '2011-04-16',
54 | 'end_time': '2011-04-27'
55 | },
56 |
57 | 4: {
58 | 'start_time': '2011-04-16',
59 | 'end_time': '2011-05-22'
60 | },
61 | 6: {
62 | 'start_time': '2011-04-16',
63 | 'end_time': '2011-06-09'
64 | }
65 | },
66 | 'test': {
67 | 1: {
68 | 'start_time': '2011-05-07',
69 | 'end_time': '2011-05-24'
70 | },
71 | 2: {
72 | 'start_time': '2011-04-25',
73 | 'end_time': '2011-05-22'
74 | },
75 | 3: {
76 | 'start_time': '2011-04-27',
77 | 'end_time': '2011-05-30'
78 | },
79 |
80 | 4: {
81 | 'start_time': '2011-05-22',
82 | 'end_time': '2011-06-03'
83 | },
84 | 6: {
85 | 'start_time': '2011-06-09',
86 | 'end_time': '2011-06-13'
87 | }
88 | }
89 | }
90 |
91 | method = generate_method(DEBUG, TEST)
92 |
93 | ex_train_microwave = {
94 |
95 | 'power': {
96 | 'mains': ['apparent', 'active'],
97 | 'appliance': ['apparent', 'active']
98 | },
99 | 'sample_rate': 6,
100 |
101 | 'appliances': ['microwave'],
102 | 'methods': method,
103 | 'isState': False,
104 | 'train': {
105 | 'datasets': {
106 |
107 | 'redd': {
108 | 'path': path,
109 | 'buildings': {
110 | 1: time_config['train'][1],
111 | 2: time_config['train'][2],
112 | 3: time_config['train'][3],
113 | }
114 |
115 | }
116 | }
117 | },
118 |
119 | 'test': {
120 | 'datasets': {
121 | 'redd': {
122 | 'path': path,
123 | 'buildings': {
124 | 1: time_config['test'][1],
125 | # 2: time_config['test'][2],
126 | # 3: time_config['test'][3],
127 | }
128 | }
129 | },
130 | },
131 | }
132 | API(ex_train_microwave)
133 |
--------------------------------------------------------------------------------
/ex_time_dw.py:
--------------------------------------------------------------------------------
1 | from api import API
2 | from disaggregate import ADAE, DAE, Seq2Point, Seq2Seq, WindowGRU, RNN
3 | import warnings
4 |
5 | warnings.filterwarnings("ignore")
6 |
7 | path = 'D:/workspace/nilm/data/redd_data.h5'
8 | path = 'D:/workspace/nilm/code/databank/redd_data.h5'
9 |
10 | DEBUG = False
11 | TEST = True
12 |
13 |
14 | def generate_method(debug, test):
15 | if debug:
16 | method = {
17 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
18 | # 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
19 | # 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
20 | # 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
21 | # 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
22 | }
23 | else:
24 | method = {
25 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None}),
26 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None}),
27 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None}),
28 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None}),
29 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None}),
30 | }
31 | if test:
32 | method = {
33 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': 'DAE', 'batch_size': 256}),
34 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': 'RNN', 'batch_size': 256}),
35 | 'Seq2Point': Seq2Point(
36 | {'save-model-path': 'Seq2Point', 'pretrained-model-path': 'Seq2Point', 'batch_size': 256}),
37 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': 'Seq2Seq', 'batch_size': 256}),
38 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': 'GRU', 'batch_size': 256}),
39 | }
40 | return method
41 |
42 | time_config = {
43 | 'train': {
44 | 1: {
45 | 'start_time': '2011-04-18',
46 | 'end_time': '2011-05-07'
47 | },
48 | 2: {
49 | 'start_time': '2011-04-17',
50 | 'end_time': '2011-04-25'
51 | },
52 | 3: {
53 | 'start_time': '2011-04-16',
54 | 'end_time': '2011-04-27'
55 | },
56 |
57 | 4: {
58 | 'start_time': '2011-04-16',
59 | 'end_time': '2011-05-22'
60 | },
61 | 6: {
62 | 'start_time': '2011-04-16',
63 | 'end_time': '2011-06-09'
64 | }
65 | },
66 | 'test': {
67 | 1: {
68 | 'start_time': '2011-05-07',
69 | 'end_time': '2011-05-24'
70 | },
71 | 2: {
72 | 'start_time': '2011-04-25',
73 | 'end_time': '2011-05-22'
74 | },
75 | 3: {
76 | 'start_time': '2011-04-27',
77 | 'end_time': '2011-05-30'
78 | },
79 |
80 | 4: {
81 | 'start_time': '2011-05-22',
82 | 'end_time': '2011-06-03'
83 | },
84 | 6: {
85 | 'start_time': '2011-06-09',
86 | 'end_time': '2011-06-13'
87 | }
88 | }
89 | }
90 |
91 | method = generate_method(DEBUG, TEST)
92 |
93 | ex_train_dish_washer = {
94 |
95 | 'power': {
96 | 'mains': ['apparent', 'active'],
97 | 'appliance': ['apparent', 'active']
98 | },
99 | 'sample_rate': 6,
100 |
101 | 'appliances': ['dish washer'],
102 | 'methods': method,
103 | 'isState': False,
104 | 'train': {
105 | 'datasets': {
106 | 'redd': {
107 | 'path': path,
108 | 'buildings': {
109 | 1: time_config['train'][1],
110 | 2: time_config['train'][2],
111 | 3: time_config['train'][3],
112 | 4: time_config['train'][4],
113 |
114 | }
115 | }
116 | }
117 | },
118 |
119 | 'test': {
120 | 'datasets': {
121 | 'redd': {
122 | 'path': path,
123 | 'buildings': {
124 | 1: time_config['test'][1],
125 | 2: time_config['test'][2],
126 | 3: time_config['test'][3],
127 | 4: time_config['test'][4],
128 | }
129 | }
130 | }
131 | }
132 | }
133 | API(ex_train_dish_washer)
134 |
--------------------------------------------------------------------------------
/ex_time_fr.py:
--------------------------------------------------------------------------------
1 | from api import API
2 | from disaggregate import ADAE, DAE, Seq2Point, Seq2Seq, WindowGRU, RNN
3 | import warnings
4 |
5 | warnings.filterwarnings("ignore")
6 |
7 | path = 'D:/workspace/nilm/data/redd_data.h5'
8 | # path = 'D:/workspace/nilm/code/databank/redd_data.h5'
9 |
10 | DEBUG = False
11 | TEST = False
12 |
13 |
14 | def generate_method(debug, test):
15 | if debug:
16 | method = {
17 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
18 | # 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
19 | # 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
20 | # 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
21 | # 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
22 | }
23 | else:
24 | method = {
25 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None}),
26 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None}),
27 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None}),
28 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None}),
29 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None}),
30 | }
31 | if test:
32 | method = {
33 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': 'DAE', 'batch_size': 256}),
34 | # 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': 'RNN', 'batch_size': 256}),
35 | # 'Seq2Point': Seq2Point(
36 | # {'save-model-path': 'Seq2Point', 'pretrained-model-path': 'Seq2Point', 'batch_size': 256}),
37 | # 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': 'Seq2Seq', 'batch_size': 256}),
38 | # 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': 'GRU', 'batch_size': 256}),
39 | }
40 | return method
41 |
42 | time_config = {
43 | 'train': {
44 | 1: {
45 | 'start_time': '2011-04-18',
46 | 'end_time': '2011-05-07'
47 | },
48 | 2: {
49 | 'start_time': '2011-04-17',
50 | 'end_time': '2011-04-25'
51 | },
52 | 3: {
53 | 'start_time': '2011-04-16',
54 | 'end_time': '2011-04-27'
55 | },
56 |
57 | 4: {
58 | 'start_time': '2011-04-16',
59 | 'end_time': '2011-05-22'
60 | },
61 | 6: {
62 | 'start_time': '2011-04-16',
63 | 'end_time': '2011-06-09'
64 | }
65 | },
66 | 'test': {
67 | 1: {
68 | 'start_time': '2011-05-07',
69 | 'end_time': '2011-05-24'
70 | },
71 | 2: {
72 | 'start_time': '2011-04-25',
73 | 'end_time': '2011-05-22'
74 | },
75 | 3: {
76 | 'start_time': '2011-04-27',
77 | 'end_time': '2011-05-30'
78 | },
79 |
80 | 4: {
81 | 'start_time': '2011-05-22',
82 | 'end_time': '2011-06-03'
83 | },
84 | 6: {
85 | 'start_time': '2011-06-09',
86 | 'end_time': '2011-06-13'
87 | }
88 | }
89 | }
90 |
91 | method = generate_method(DEBUG, TEST)
92 |
93 |
94 | 
95 | ex_train_fridge = {
96 |
97 | 'power': {
98 | 'mains': ['apparent', 'active'],
99 | 'appliance': ['apparent', 'active']
100 | },
101 | 'sample_rate': 6,
102 |
103 | 'appliances': ['fridge'],
104 | 'methods': method,
105 | 'isState': False,
106 | 'train': {
107 | 'datasets': {
108 |
109 | 'redd': {
110 | 'path': path,
111 | 'buildings': {
112 | 1: time_config['train'][1],
113 | 2: time_config['train'][2],
114 | 3: time_config['train'][3],
115 | 4: time_config['train'][6],
116 |
117 | }
118 |
119 | }
120 | }
121 | },
122 |
123 | 'test': {
124 | 'datasets': {
125 | 'redd': {
126 | 'path': path,
127 | 'buildings': {
128 | 1: time_config['test'][1],
129 | 2: time_config['test'][2],
130 | 3: time_config['test'][3],
131 | 4: time_config['test'][6],
132 | }
133 | }
134 | },
135 | },
136 | }
137 | API(ex_train_fridge)
138 |
--------------------------------------------------------------------------------
/ex1.py:
--------------------------------------------------------------------------------
1 | import warnings
2 |
3 | from api import API
4 | from disaggregate import ADAE, DAE, Seq2Point, Seq2Seq, WindowGRU, RNN, CNN_RNN
5 |
6 |
7 |
8 | warnings.filterwarnings("ignore")
9 |
10 | path = 'D:/workspace/nilm/code/databank/redd_data.h5'
11 |
12 | debug = True
13 | test = False
14 | if debug:
15 | method = {
16 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),
17 | # 'RNN':RNN({'save-model-path':'RNN','pretrained-model-path':None,'n_epochs':1,'batch_size':256}),
18 | # 'Seq2Point':Seq2Point({'save-model-path':'Seq2Point','pretrained-model-path':None,'n_epochs':1,'batch_size':256}),
19 | # 'Seq2Seq':Seq2Seq({'save-model-path':'Seq2Seq','pretrained-model-path':None,'n_epochs':1,'batch_size':256}),
20 | # 'GRU':WindowGRU({'save-model-path':'GRU','pretrained-model-path':None,'n_epochs':1,'batch_size':256}),
21 | }
22 | else:
23 | method = {
24 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None}),
25 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None}),
26 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None}),
27 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None}),
28 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None}),
29 | }
30 | if test:
31 | method = {
32 | 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': 'DAE'}),
33 | 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': 'RNN'}),
34 | 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': 'Seq2Point'}),
35 | 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': 'Seq2Seq'}),
36 | 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': 'GRU'}),
37 | }
38 |
39 | ex1 = {
40 |
41 | 'power': {
42 | 'mains': ['apparent', 'active'],
43 | 'appliance': ['apparent', 'active']
44 | },
45 | 'sample_rate': 6,
46 |
47 | 'appliances': ['dish washer'],
48 | 'methods': method,
49 | 'isState': False,
50 | 'train': {
51 | 'datasets': {
52 |
53 | 'redd': {
54 | 'path': path,
55 | 'buildings': {
56 | 1: {
57 | 'start_time': '2011-04-18',
58 | 'end_time': '2011-05-24'
59 | },
60 | 3: {
61 | 'start_time': '2011-04-16',
62 | 'end_time': '2011-05-30'
63 | }
64 |
65 | }
66 |
67 | }
68 | }
69 | },
70 |
71 | 'test': {
72 | 'datasets': {
73 | 'redd': {
74 | 'path': path,
75 | 'buildings': {
76 | 2: {
77 | 'start_time': '2011-04-17',
78 | 'end_time': '2011-05-22'
79 | },
80 | 4: {
81 | 'start_time': '2011-04-16',
82 | 'end_time': '2011-06-03'
83 | }
84 | }
85 | }
86 | },
87 | },
88 | }
89 |
90 | test = False
91 | ex_train_microwave = {
92 |
93 | 'power': {
94 | 'mains': ['apparent', 'active'],
95 | 'appliance': ['apparent', 'active']
96 | },
97 | 'sample_rate': 6,
98 |
99 | 'appliances': ['microwave'],
100 | 'methods': method,
101 | 'isState': False,
102 | 'train': {
103 | 'datasets': {
104 | 'redd': {
105 | 'path': path,
106 | 'buildings': {
107 | 1: {
108 | 'start_time': '2011-04-18',
109 | 'end_time': '2011-05-08'
110 | },
111 | 2: {
112 | 'start_time': '2011-04-17',
113 | 'end_time': '2011-05-22'
114 | },
115 | 3: {
116 | 'start_time': '2011-04-16',
117 | 'end_time': '2011-05-30'
118 | },
119 |
120 | # 4: {
121 | # 'start_time': '2011-04-16',
122 | # 'end_time': '2011-06-03'
123 | # }
124 |
125 | }
126 | }
127 | }
128 | },
129 |
130 | 'test': {
131 | 'datasets': {
132 | 'redd': {
133 | 'path': path,
134 | 'buildings': {
135 | 1: {
136 | 'start_time': '2011-05-11',
137 | 'end_time': '2011-05-24'
138 | },
139 | 2: {
140 | 'start_time': '2011-04-27',
141 | 'end_time': '2011-05-22'
142 | },
143 | 3: {
144 | 'start_time': '2011-05-23',
145 | 'end_time': '2011-05-30'
146 | },
147 |
148 | # 4: {
149 | # 'start_time': '2011-05-23',
150 | # 'end_time': '2011-06-03'
151 | # }
152 |
153 | }
154 | }
155 | }
156 | }
157 | }
158 |
159 | res = API(ex_train_microwave)
160 |
--------------------------------------------------------------------------------
/disaggregate/util.py:
--------------------------------------------------------------------------------
1 | import json
2 | import numpy as np
3 | import pandas as pd
4 |
5 |
6 | def timedelta64_to_secs(timedelta):
7 | """Convert `timedelta` to seconds.
8 |
9 | Parameters
10 | ----------
11 | timedelta : np.timedelta64
12 |
13 | Returns
14 | -------
15 | float : seconds
16 | """
17 | if len(timedelta) == 0:
18 | return np.array([])
19 | else:
20 | return timedelta / np.timedelta64(1, 's')
21 |
22 |
23 | class NumpyEncoder(json.JSONEncoder):
24 | def default(self, obj):
25 | if isinstance(obj, np.integer):
26 | return int(obj)
27 | elif isinstance(obj, np.floating):
28 | return float(obj)
29 | elif isinstance(obj, np.ndarray):
30 | return obj.tolist()
31 | else:
32 | return json.JSONEncoder.default(self, obj)
33 |
34 |
35 | def get_activations(chunk, params):
36 | """Returns runs of an appliance.
37 |
38 | Most appliances spend a lot of their time off. This function finds
39 | periods when the appliance is on.
40 |
41 | Parameters
42 | ----------
43 | chunk : pd.Series
44 | min_off_duration : int
45 | If min_off_duration > 0 then ignore 'off' periods less than
46 | min_off_duration seconds of sub-threshold power consumption
47 | (e.g. a washing machine might draw no power for a short
48 | period while the clothes soak.) Defaults to 0.
49 | min_on_duration : int
50 | Any activation lasting less seconds than min_on_duration will be
51 | ignored. Defaults to 0.
52 | border : int
53 | Number of rows to include before and after the detected activation
54 | on_power_threshold : int or float
55 | Watts
56 |
57 | Returns
58 | -------
59 | list of pd.Series. Each series contains one activation.
60 | """
61 | min_off_duration = params['N_off']
62 | min_on_duration = params['N_on']
63 | border = params['border']
64 | on_power_threshold = params['p']
65 | when_on = chunk >= on_power_threshold
66 | # print(chunk)
67 | state = pd.DataFrame(np.zeros_like(chunk), index=chunk.index)
68 | # print(state)
69 | # Find state changes
70 | state_changes = when_on.astype(np.float32).diff()
71 |
72 | switch_on_events = np.where(state_changes == 1)[0]
73 | switch_off_events = np.where(state_changes == -1)[0]
74 |
75 | if len(switch_on_events) == 0 or len(switch_off_events) == 0:
76 | if (when_on[0]):
77 | state[:] = 1
78 | return [], state
79 | else:
80 | return [], state
81 |
82 | del when_on
83 | del state_changes
84 |
85 | # Make sure events align
86 | if switch_off_events[0] < switch_on_events[0]:
87 | state[:switch_off_events[0]] = 1
88 | switch_off_events = switch_off_events[1:]
89 | if len(switch_off_events) == 0:
90 | return [], state
91 | if switch_on_events[-1] > switch_off_events[-1]:
92 | state[switch_on_events[-1]:] = 1
93 | switch_on_events = switch_on_events[:-1]
94 | if len(switch_on_events) == 0:
95 | return [], state
96 | assert len(switch_on_events) == len(switch_off_events)
97 |
98 | # Smooth over off-durations less than min_off_duration
99 | if min_off_duration > 0:
100 | off_durations = (chunk.index[switch_on_events[1:]].values -
101 | chunk.index[switch_off_events[:-1]].values)
102 |
103 | off_durations = timedelta64_to_secs(off_durations)
104 |
105 | above_threshold_off_durations = np.where(
106 | off_durations >= min_off_duration)[0]
107 |
108 | # Now remove off_events and on_events
109 | switch_off_events = switch_off_events[
110 | np.concatenate([above_threshold_off_durations,
111 | [len(switch_off_events) - 1]])]
112 | switch_on_events = switch_on_events[
113 | np.concatenate([[0], above_threshold_off_durations + 1])]
114 | assert len(switch_on_events) == len(switch_off_events)
115 |
116 | activations = []
117 | for on, off in zip(switch_on_events, switch_off_events):
118 | duration = (chunk.index[off] - chunk.index[on]).total_seconds()
119 | if duration < min_on_duration:
120 | continue
121 | on -= 1 + border
122 | if on < 0:
123 | on = 0
124 | off += border
125 | activation = chunk.iloc[on:off]
126 | state.iloc[on:off] = 1
127 | # throw away any activation with any NaN values
128 | if not activation.isnull().values.any():
129 | activations.append(activation)
130 |
131 | return activations, state
132 |
133 |
134 | config = {
135 | 'threshold': {
136 | 'microwave': {'p': 50, 'N_off': 10, 'N_on': 10, 'border': 1},
137 | 'fridge': {'p': 5, 'N_off': 60, 'N_on': 60, 'border': 1},
138 | 'dish washer': {'p': 10, 'N_off': 300, 'N_on': 1800, 'border': 1}
139 | },
140 | 'result': {
141 | 'MSE': [],
142 | 'MAE': [],
143 | 'ACC': [],
144 | 'Precision': [],
145 | 'Recall': [],
146 | 'F1': [],
147 | 'sMAE': []
148 | }
149 | }
150 |
151 |
152 | def get_sections_df(chunk, good_section):
153 | result = []
154 | for section in good_section:
155 | temp = chunk[section.start:section.end]
156 | if (temp.shape[0] > 1000):
157 | result.append(temp)
158 | return result
159 |
160 |
161 | def get_sections_df_2(main_section, app_section):
162 | result = []
163 | index = pd.date_range(start=main_section[0].start, end=main_section[-1].end, freq='s')
164 | test = pd.DataFrame(index=index)
165 | test['mains'] = 0
166 | test['apps'] = 0
167 | # print('-')
168 |
169 | for sec in main_section:
170 | test.loc[sec.start:sec.end, 'mains'] = 1
171 | # print('-')
172 | for sec in app_section:
173 | test.loc[sec.start:sec.end, 'apps'] = 1
174 | # print('-')
175 |
176 | test['all'] = 0
177 | test['all'] = ((test['mains'] == 1) & (test['apps'] == 1)).astype(int)
178 | test['start'] = test['all'].diff()
179 | if test['all'].iloc[0] == 1:
180 | test['start'].iloc[0] = 1
181 |
182 | test['end'] = test['all'].diff().fillna(100)
183 | test['end'] = test[['end']].apply(lambda x: x.shift(-1))
184 | if test['all'].iloc[-1] == 1:
185 | test['end'].iloc[-1] = -1
186 | start_index = index[test['start'] == 1]
187 | end_index = index[test['end'] == -1]
188 |
189 | for i in range(len(start_index)):
190 | start = start_index[i]
191 | end = end_index[i]
192 | if (end - start) / np.timedelta64(1, 's') > 3000:
193 | result.append((start, end))
194 | return result
195 |
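196 | # Example (sketch): turning a power series into activations and an on/off state
197 | # vector with the per-appliance thresholds defined in `config` above. `series`
198 | # is assumed to be a pd.Series of microwave power with a DatetimeIndex.
199 | #
200 | #     params = config['threshold']['microwave']
201 | #     activations, state = get_activations(series, params)
202 | #     # `activations`: list of pd.Series, one per detected run of the appliance
203 | #     # `state`: 0/1 DataFrame aligned with `series` (1 while the appliance is on)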
--------------------------------------------------------------------------------
/disaggregate/WindowGRU.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | from warnings import warn, filterwarnings
3 |
4 | from matplotlib import rcParams
5 | import matplotlib.pyplot as plt
6 | from collections import OrderedDict
7 | import random
8 | import sys
9 | import pandas as pd
10 | import numpy as np
11 | import h5py
12 | import os
13 | import pickle
14 |
15 | from keras.models import Sequential
16 | from keras.layers import Dense, Conv1D, GRU, Bidirectional, Dropout
17 | from keras.utils import plot_model
18 | from sklearn.model_selection import train_test_split
19 | from keras.callbacks import ModelCheckpoint
20 | import keras.backend as K
21 | from nilmtk.utils import find_nearest
22 | from nilmtk.feature_detectors import cluster
23 | from nilmtk.disaggregate import Disaggregator
24 | from nilmtk.datastore import HDFDataStore
25 |
26 | import random
27 | import json
28 | from .util import *
29 | random.seed(10)
30 | np.random.seed(10)
31 | class WindowGRU(Disaggregator):
32 |
33 | def __init__(self, params):
34 |
35 | self.MODEL_NAME = "WindowGRU"
36 | self.save_model_path = params.get('save-model-path',None)
37 | self.load_model_path = params.get('pretrained-model-path',None)
38 | self.chunk_wise_training = params.get('chunk_wise_training',False)
39 | self.sequence_length = params.get('sequence_length',99)
40 | self.n_epochs = params.get('n_epochs', 10)
41 | self.models = OrderedDict()
42 | self.max_val = 800
43 | self.batch_size = params.get('batch_size',512)
44 |
45 | def partial_fit(self,train_main,train_appliances,do_preprocessing=True,**load_kwargs):
46 |
47 |
48 | if do_preprocessing:
49 | train_main, train_appliances = self.call_preprocessing(train_main, train_appliances, 'train')
50 |
51 | train_main = pd.concat(train_main,axis=0).values
52 | train_main = train_main.reshape((-1,self.sequence_length,1))
53 |
54 | new_train_appliances = []
55 | for app_name, app_df in train_appliances:
56 | app_df = pd.concat(app_df,axis=0).values
57 | app_df = app_df.reshape((-1,1))
58 | new_train_appliances.append((app_name, app_df))
59 |
60 | train_appliances = new_train_appliances
61 | for app_name, app_df in train_appliances:
62 | if app_name not in self.models:
63 | print("First model training for ", app_name)
64 | self.models[app_name] = self.return_network()
65 | else:
66 | print("Started re-training model for ", app_name)
67 |
68 | model = self.models[app_name]
69 | mains = train_main.reshape((-1,self.sequence_length,1))
70 | app_reading = app_df.reshape((-1,1))
71 | filepath = 'windowgru-temp-weights-'+str(random.randint(0,100000))+'.h5'
72 | checkpoint = ModelCheckpoint(filepath,monitor='val_loss',verbose=1,save_best_only=True,mode='min')
73 | train_x, v_x, train_y, v_y = train_test_split(mains, app_reading, test_size=.15,random_state=10)
74 | model.fit(train_x,train_y,validation_data=[v_x,v_y],epochs=self.n_epochs,callbacks=[checkpoint],shuffle=True,batch_size=self.batch_size)
75 | model.load_weights(filepath)
76 |
77 |
78 | def disaggregate_chunk(self,test_main_list,model=None,do_preprocessing=True):
79 |
80 | if model is not None:
81 | self.models = model
82 |
83 | if do_preprocessing:
84 | test_main_list = self.call_preprocessing(
85 | test_main_list, submeters_lst=None, method='test')
86 |
87 | test_predictions = []
88 | for mains in test_main_list:
89 | disaggregation_dict = {}
90 | mains = mains.values.reshape((-1,self.sequence_length,1))
91 | for appliance in self.models:
92 | prediction = self.models[appliance].predict(mains,batch_size=self.batch_size)
93 | prediction = np.reshape(prediction, len(prediction))
94 | valid_predictions = prediction.flatten()
95 | valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
96 | valid_predictions = self._denormalize(valid_predictions, self.max_val)
97 | df = pd.Series(valid_predictions)
98 | disaggregation_dict[appliance] = df
99 | results = pd.DataFrame(disaggregation_dict, dtype='float32')
100 | test_predictions.append(results)
101 | return test_predictions
102 |
103 | def call_preprocessing(self, mains_lst, submeters_lst, method):
104 | max_val = self.max_val
105 | if method == 'train':
106 | print("Training processing")
107 | processed_mains = []
108 |
109 | for mains in mains_lst:
110 | # add padding values
111 | padding = [0 for i in range(0, self.sequence_length - 1)]
112 | paddf = pd.DataFrame({mains.columns.values[0]: padding})
113 | mains = mains.append(paddf)
114 | mainsarray = self.preprocess_train_mains(mains)
115 | processed_mains.append(pd.DataFrame(mainsarray))
116 |
117 | tuples_of_appliances = []
118 | for (appliance_name, app_dfs_list) in submeters_lst:
119 | processed_app_dfs = []
120 | for app_df in app_dfs_list:
121 | data = self.preprocess_train_appliances(app_df)
122 | processed_app_dfs.append(pd.DataFrame(data))
123 | tuples_of_appliances.append((appliance_name, processed_app_dfs))
124 |
125 | return processed_mains , tuples_of_appliances
126 |
127 | if method == 'test':
128 | processed_mains = []
129 | for mains in mains_lst:
130 | # add padding values
131 | padding = [0 for i in range(0, self.sequence_length - 1)]
132 | paddf = pd.DataFrame({mains.columns.values[0]: padding})
133 | mains = mains.append(paddf)
134 | mainsarray = self.preprocess_test_mains(mains)
135 | processed_mains.append(pd.DataFrame(mainsarray))
136 |
137 | return processed_mains
138 |
139 | def preprocess_test_mains(self, mains):
140 |
141 | mains = self._normalize(mains, self.max_val)
142 | mainsarray = np.array(mains)
143 | indexer = np.arange(self.sequence_length)[
144 | None, :] + np.arange(len(mainsarray) - self.sequence_length + 1)[:, None]
145 | mainsarray = mainsarray[indexer]
146 | mainsarray = mainsarray.reshape((-1,self.sequence_length))
147 | return pd.DataFrame(mainsarray)
148 |
149 | def preprocess_train_appliances(self, appliance):
150 |
151 | appliance = self._normalize(appliance, self.max_val)
152 | appliancearray = np.array(appliance)
153 | appliancearray = appliancearray.reshape((-1,1))
154 | return pd.DataFrame(appliancearray)
155 |
156 | def preprocess_train_mains(self, mains):
157 |
158 | mains = self._normalize(mains, self.max_val)
159 | mainsarray = np.array(mains)
160 | indexer = np.arange(self.sequence_length)[None, :] + np.arange(len(mainsarray) - self.sequence_length + 1)[:, None]
161 | mainsarray = mainsarray[indexer]
162 | mainsarray = mainsarray.reshape((-1,self.sequence_length))
163 | return pd.DataFrame(mainsarray)
164 |
165 | def _normalize(self, chunk, mmax):
166 |
167 | tchunk = chunk / mmax
168 | return tchunk
169 |
170 | def _denormalize(self, chunk, mmax):
171 |
172 | tchunk = chunk * mmax
173 | return tchunk
174 |
175 | def return_network(self):
176 | '''Creates the GRU architecture described in the paper
177 | '''
178 | model = Sequential()
179 | # 1D Conv
180 | model.add(Conv1D(16,4,activation='relu',input_shape=(self.sequence_length,1),padding="same",strides=1))
181 | # Bi-directional GRUs
182 | model.add(Bidirectional(GRU(64, activation='relu',
183 | return_sequences=True), merge_mode='concat'))
184 | model.add(Dropout(0.5))
185 | model.add(Bidirectional(GRU(128, activation='relu',
186 | return_sequences=False), merge_mode='concat'))
187 | model.add(Dropout(0.5))
188 | # Fully Connected Layers
189 | model.add(Dense(128, activation='relu'))
190 | model.add(Dropout(0.5))
191 | model.add(Dense(1, activation='linear'))
192 | model.compile(loss='mse', optimizer='adam')
193 | return model
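194 | 
195 | # Note (sketch): the `indexer` trick in preprocess_*_mains builds overlapping
196 | # sliding windows. For sequence_length = 3 and a mains array of length 5,
197 | #
198 | #     np.arange(3)[None, :] + np.arange(5 - 3 + 1)[:, None]
199 | #
200 | # gives the index matrix [[0,1,2], [1,2,3], [2,3,4]], so each row of the
201 | # resulting mains matrix is one length-3 window used as a single network input.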
--------------------------------------------------------------------------------
/disaggregate/dsc.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | from warnings import warn
3 | from nilmtk.disaggregate import Disaggregator
4 | import pandas as pd
5 | import numpy as np
6 | from collections import OrderedDict
7 | import matplotlib.pyplot as plt
8 | from sklearn.model_selection import train_test_split
9 | from sklearn.decomposition import MiniBatchDictionaryLearning, SparseCoder
10 | from sklearn.metrics import mean_squared_error
11 | import time
12 | import warnings
13 | warnings.filterwarnings("ignore")
14 |
15 | class DSC(Disaggregator):
16 |
17 | def __init__(self, params):
18 |
19 | self.MODEL_NAME = 'DSC' # Add the name for the algorithm
20 | self.chunk_wise_training = False
21 | self.dictionaries = OrderedDict()
22 | self.power = OrderedDict()
23 | self.shape = 60*2
24 | self.learning_rate = 1e-9
25 | self.iterations = 3000
26 | self.sparsity_coef = 20
27 | self.n_components = 10
28 | self.shape = params.get('shape',self.shape)
29 | self.learning_rate = params.get('learning_rate',self.learning_rate)
30 | self.iterations = params.get('iterations',self.iterations)
31 | self.n_epochs = self.iterations
32 | self.n_components = params.get('n_components',self.n_components)
33 |
34 | def learn_dictionary(self, appliance_main, app_name):
35 |
36 | if appliance_main.size%self.shape!=0:
37 | extra_values = self.shape - (appliance_main.size)%(self.shape)
38 | appliance_main = list(appliance_main.values.flatten()) + [0]*extra_values
39 | appliance_main = np.array(appliance_main).reshape((-1,self.shape)).T
40 | self.power[app_name] = appliance_main
41 |
42 | if app_name not in self.dictionaries:
43 | print ("Training First dictionary for ",app_name)
44 | model = MiniBatchDictionaryLearning(n_components=self.n_components,positive_code=True,positive_dict=True,transform_algorithm='lasso_lars',alpha=self.sparsity_coef)
45 |
46 | else:
47 | print ("Re-training dictionary for ",app_name)
48 | model = self.dictionaries[app_name]
49 | model.fit(appliance_main.T)
50 | reconstruction = np.matmul(model.components_.T,model.transform(appliance_main.T).T)
51 | print ("RMSE reconstruction for appliance %s is %s"%(app_name,mean_squared_error(reconstruction,appliance_main)**(.5)))
52 | self.dictionaries[app_name] = model
53 |
54 |
55 | def discriminative_training(self,concatenated_activations,concatenated_bases, verbose = 100):
56 |
57 |
58 | # Making copies of concatenated bases and activation.
59 | optimal_a = np.copy(concatenated_activations)
60 | predicted_b = np.copy(concatenated_bases)
61 |
62 | '''
63 | Next step is to modify bases such that, we get optimal A upon sparse coding
64 | We want to get a_opt on finding activations from b_hat
65 | '''
66 |
67 | alpha = self.learning_rate
68 | least_error = 1e10
69 | total_power = self.total_power
70 | v_size = .20
71 | v_index = int(total_power.shape[1] * v_size)
72 | train_power = total_power[:,:-v_index]
73 | v_power = total_power[:,-v_index:]
74 | train_optimal_a = optimal_a[:,:-v_index]
75 | v_optimal_a = optimal_a[:,-v_index:]
76 |
77 | print ("If Iteration wise errors are not decreasing, then please decrease the learning rate")
78 | for i in range(self.iterations):
79 |
80 | a = time.time()
81 | # Finding activations for the given bases
82 | model = SparseCoder(dictionary=predicted_b.T,positive_code=True,transform_algorithm='lasso_lars',transform_alpha=self.sparsity_coef)
83 | train_predicted_a = model.transform(train_power.T).T
84 | model = SparseCoder(dictionary=predicted_b.T,positive_code=True,transform_algorithm='lasso_lars',transform_alpha=self.sparsity_coef)
85 | val_predicted_a = model.transform(v_power.T).T
86 | err = np.mean(np.abs(val_predicted_a - v_optimal_a))
87 |
88 | if err < least_error:
89 | least_error = err
90 | best_b = np.copy(predicted_b)
91 | 
92 | # Gradient step on the bases (discriminative update: fit the activations
93 | # produced by sparse coding while staying close to the optimal activations)
94 | predicted_b = predicted_b - alpha * (
95 | np.matmul(np.matmul(predicted_b, train_predicted_a) - train_power, train_predicted_a.T)
96 | - np.matmul(np.matmul(predicted_b, train_optimal_a) - train_power, train_optimal_a.T))
97 | predicted_b = np.where(predicted_b > 0, predicted_b, 0)
98 | # Making sure that columns sum to 1
99 | predicted_b = (predicted_b.T/np.linalg.norm(predicted_b.T,axis=1).reshape((-1,1))).T
100 | #if i%verbose==0:
101 | print ("Iteration ",i," Error ",err)
102 |
103 | return best_b
104 |
105 | def print_appliance_wise_errors(self, activations, bases):
106 |
107 | start_comp = 0
108 | for cnt, i in enumerate(self.power):
109 | X = self.power[i]
110 | n_comps = self.dictionaries[i].n_components
111 | pred = np.matmul(bases[:,start_comp:start_comp+n_comps],activations[start_comp:start_comp+n_comps,:])
112 | start_comp+=n_comps
113 | #plt.plot(pred.T[home_id],label=i)
114 | print ("Error for ",i," is ",mean_squared_error(pred, X)**(.5))
115 |
116 | def partial_fit(self, train_main, train_appliances, **load_kwargs):
117 |
118 | print("...............DSC partial_fit running...............")
119 |
120 | #print (train_main[0])
121 |
122 | train_main = pd.concat(train_main,axis=1) #np.array([i.values.reshape((self.sequence_length,1)) for i in train_main])
123 |
124 | if train_main.size%self.shape!=0:
125 | extra_values = self.shape - (train_main.size)%(self.shape)
126 | train_main = list(train_main.values.flatten()) + [0]*extra_values
127 |
128 | train_main = np.array(train_main).reshape((-1,self.shape)).T
129 | self.total_power = train_main
130 | new_train_appliances = []
131 |
132 | for app_name, app_df in train_appliances:
133 | app_df = pd.concat(app_df)
134 | new_train_appliances.append((app_name, app_df))
135 |
136 | train_appliances = new_train_appliances
137 |
138 | if len(train_main)>10:
139 |
140 | for appliance_name, power in train_appliances:
141 | self.learn_dictionary(power, appliance_name)
142 |
143 | concatenated_bases = []
144 | concatenated_activations = []
145 |
146 | for i in self.dictionaries:
147 |
148 | model = self.dictionaries[i]
149 |
150 | concatenated_bases.append(model.components_.T)
151 | concatenated_activations.append(model.transform(self.power[i].T).T)
152 |
153 | concatenated_bases = np.concatenate(concatenated_bases,axis=1)
154 | concatenated_activations = np.concatenate(concatenated_activations,axis=0)
155 | print ("--"*15)
156 | print ("Optimal Errors")
157 | self.print_appliance_wise_errors(concatenated_activations, concatenated_bases)
158 | print ("--"*15)
159 | model = SparseCoder(dictionary=concatenated_bases.T,positive_code=True,transform_algorithm='lasso_lars',transform_alpha=self.sparsity_coef)
160 | predicted_activations = model.transform(train_main.T).T
161 | print ('\n\n')
162 | print ("--"*15)
163 | print ("Error in prediction before discriminative sparse coding")
164 | self.print_appliance_wise_errors(predicted_activations, concatenated_bases)
165 | print ("--"*15)
166 | print ('\n\n')
167 | optimal_b = self.discriminative_training(concatenated_activations,concatenated_bases)
168 | model = SparseCoder(dictionary=optimal_b.T,positive_code=True,transform_algorithm='lasso_lars',transform_alpha=self.sparsity_coef)
169 | self.disggregation_model = model
170 | predicted_activations = model.transform(train_main.T).T
171 | print ("--"*15)
172 | print ("Model Errors after Discriminative Training")
173 | self.print_appliance_wise_errors(predicted_activations, concatenated_bases)
174 | print ("--"*15)
175 | self.disaggregation_bases = optimal_b
176 | self.reconstruction_bases = concatenated_bases
177 |
178 | else:
179 | print ("This chunk has small number of samples, so skipping the training")
180 |
181 | def disaggregate_chunk(self, test_main_list):
182 |
183 | test_predictions = []
184 | for test_main in test_main_list:
185 | if test_main.size%self.shape!=0:
186 | extra_values = self.shape - (test_main.size)%(self.shape)
187 | test_main = list(test_main.values.flatten()) + [0]*extra_values
188 | test_main = np.array(test_main).reshape((-1,self.shape)).T
189 | predicted_activations = self.disggregation_model.transform(test_main.T).T
190 | #predicted_usage = self.reconstruction_bases@predicted_activations
191 | disggregation_dict = {}
192 | start_comp = 0
193 | for cnt, app_name in enumerate(self.power):
194 | n_comps = self.dictionaries[app_name].n_components
195 | predicted_usage = np.matmul(self.reconstruction_bases[:,start_comp:start_comp+n_comps],predicted_activations[start_comp:start_comp+n_comps,:])
196 | start_comp+=n_comps
197 | predicted_usage = predicted_usage.T.flatten()
198 | flat_mains = test_main.T.flatten()
199 | predicted_usage = np.where(predicted_usage>flat_mains,flat_mains,predicted_usage)
200 | disggregation_dict[app_name] = pd.Series(predicted_usage)
201 | results = pd.DataFrame(disggregation_dict, dtype='float32')
202 | test_predictions.append(results)
203 |
204 | return test_predictions
205 |
206 |
--------------------------------------------------------------------------------
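The DSC class above follows the Disaggregator interface: partial_fit takes a list of mains DataFrames plus (appliance_name, [DataFrame, ...]) tuples, and disaggregate_chunk returns one prediction DataFrame per mains chunk. The snippet below is a minimal illustrative sketch, not part of the repository: the import path, the synthetic data, and the small parameter values are assumptions chosen only so the code finishes quickly.

import numpy as np
import pandas as pd
from disaggregate.dsc import DSC   # assumes the repository root is on PYTHONPATH

rng = np.random.RandomState(0)
mains = pd.DataFrame(rng.rand(2500) * 1000.0)     # synthetic aggregate power
fridge = pd.DataFrame(rng.rand(2500) * 200.0)     # one synthetic appliance channel

dsc = DSC({'shape': 120, 'iterations': 20, 'learning_rate': 1e-9})
dsc.partial_fit([mains], [('fridge', [fridge])])
predictions = dsc.disaggregate_chunk([mains])     # list with one DataFrame per chunk
print(predictions[0].head())
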
/disaggregate/dae.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | from warnings import warn
3 | from nilmtk.disaggregate import Disaggregator
4 | from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten
5 | import pandas as pd
6 | import numpy as np
7 | from collections import OrderedDict
8 | from keras.optimizers import SGD
9 | from keras.models import Sequential
10 | import matplotlib.pyplot as plt
11 | from sklearn.model_selection import train_test_split
12 | from keras.callbacks import ModelCheckpoint
13 | import keras.backend as K
14 | from statistics import mean
15 | import os
16 | import pickle
17 | import random
18 | import json
19 | from .util import *
20 |
21 | random.seed(10)
22 | np.random.seed(10)
23 |
24 |
25 | class DAE(Disaggregator):
26 |
27 | def __init__(self, params):
28 | """
29 |         Initialize the model with the given parameters
30 | """
31 | self.MODEL_NAME = "DAE"
32 | self.chunk_wise_training = params.get('chunk_wise_training', False)
33 | self.sequence_length = params.get('sequence_length', 99)
34 | self.n_epochs = params.get('n_epochs', 50)
35 | self.batch_size = params.get('batch_size', 1024)
36 | self.mains_mean = params.get('mains_mean', 1000)
37 | self.mains_std = params.get('mains_std', 600)
38 | self.appliance_params = params.get('appliance_params', {})
39 | self.save_model_path = params.get('save-model-path', None)
40 | self.load_model_path = params.get('pretrained-model-path', None)
41 | self.models = OrderedDict()
42 | if self.load_model_path:
43 | self.load_model()
44 |
45 | def partial_fit(self, train_main, train_appliances, do_preprocessing=True, **load_kwargs):
46 | """
47 | The partial fit function
48 | """
49 |
50 | # If no appliance wise parameters are specified, then they are computed from the data
51 | if len(self.appliance_params) == 0:
52 | self.set_appliance_params(train_appliances)
53 |
54 |         # To preprocess the data and bring it to a valid shape
55 | if do_preprocessing:
56 | print("Doing Preprocessing")
57 | train_main, train_appliances = self.call_preprocessing(train_main, train_appliances, 'train')
58 | train_main = pd.concat(train_main, axis=0).values
59 | print(train_main)
60 | train_main = train_main.reshape((-1, self.sequence_length, 1))
61 | new_train_appliances = []
62 | for app_name, app_df in train_appliances:
63 | app_df = pd.concat(app_df, axis=0).values
64 | app_df = app_df.reshape((-1, self.sequence_length, 1))
65 | new_train_appliances.append((app_name, app_df))
66 | train_appliances = new_train_appliances
67 | for appliance_name, power in train_appliances:
68 | if appliance_name not in self.models:
69 | print("First model training for ", appliance_name)
70 | self.models[appliance_name] = self.return_network()
71 | print(self.models[appliance_name].summary())
72 | print("Started Retraining model for ", appliance_name)
73 | model = self.models[appliance_name]
74 | filepath = 'dae-temp-weights-' + str(random.randint(0, 100000)) + '.h5'
75 | checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
76 | train_x, v_x, train_y, v_y = train_test_split(train_main, power, test_size=.15, random_state=10)
77 | model.fit(train_x, train_y, validation_data=[v_x, v_y], epochs=self.n_epochs, callbacks=[checkpoint],
78 | shuffle=True, batch_size=self.batch_size)
79 | model.load_weights(filepath)
80 |
81 | if self.save_model_path:
82 | self.save_model()
83 |
84 | def load_model(self):
85 | print("Loading the model using the pretrained-weights")
86 | model_folder = self.load_model_path
87 | if os.path.exists(os.path.join(model_folder, "model.json")):
88 | with open(os.path.join(model_folder, "model.json"), "r") as f:
89 | model_string = f.read().strip()
90 | params_to_load = json.loads(model_string)
91 |
92 | self.sequence_length = int(params_to_load['sequence_length'])
93 | self.mains_mean = params_to_load['mains_mean']
94 | self.mains_std = params_to_load['mains_std']
95 | self.appliance_params = params_to_load['appliance_params']
96 |
97 | for appliance_name in self.appliance_params:
98 | self.models[appliance_name] = self.return_network()
99 | self.models[appliance_name].load_weights(os.path.join(model_folder, appliance_name + ".h5"))
100 |
101 | def save_model(self):
102 |
103 | if os.path.exists(self.save_model_path) == False:
104 | os.makedirs(self.save_model_path)
105 | params_to_save = {}
106 | params_to_save['appliance_params'] = self.appliance_params
107 | params_to_save['sequence_length'] = self.sequence_length
108 | params_to_save['mains_mean'] = self.mains_mean
109 | params_to_save['mains_std'] = self.mains_std
110 | for appliance_name in self.models:
111 | print("Saving model for ", appliance_name)
112 | self.models[appliance_name].save_weights(os.path.join(self.save_model_path, appliance_name + ".h5"))
113 |
114 | with open(os.path.join(self.save_model_path, 'model.json'), 'w') as file:
115 | file.write(json.dumps(params_to_save, cls=NumpyEncoder))
116 |
117 | def disaggregate_chunk(self, test_main_list, do_preprocessing=True):
118 | if do_preprocessing:
119 | test_main_list = self.call_preprocessing(test_main_list, submeters_lst=None, method='test')
120 |
121 | test_predictions = []
122 | for test_main in test_main_list:
123 | test_main = test_main.values
124 | test_main = test_main.reshape((-1, self.sequence_length, 1))
125 | disggregation_dict = {}
126 | for appliance in self.models:
127 | prediction = self.models[appliance].predict(test_main, batch_size=self.batch_size)
128 | app_mean = self.appliance_params[appliance]['mean']
129 | app_std = self.appliance_params[appliance]['std']
130 | prediction = self.denormalize_output(prediction, app_mean, app_std)
131 | valid_predictions = prediction.flatten()
132 | valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
133 | series = pd.Series(valid_predictions)
134 | disggregation_dict[appliance] = series
135 | results = pd.DataFrame(disggregation_dict, dtype='float32')
136 | test_predictions.append(results)
137 | return test_predictions
138 |
139 | def return_network(self):
140 | model = Sequential()
141 | model.add(Conv1D(8, 4, activation="linear", input_shape=(self.sequence_length, 1), padding="same", strides=1))
142 | model.add(Flatten())
143 | model.add(Dense((self.sequence_length) * 8, activation='relu'))
144 | model.add(Dense(128, activation='relu'))
145 | model.add(Dense((self.sequence_length) * 8, activation='relu'))
146 | model.add(Reshape(((self.sequence_length), 8)))
147 | model.add(Conv1D(1, 4, activation="linear", padding="same", strides=1))
148 | model.compile(loss='mse', optimizer='adam')
149 | return model
150 |
151 | def call_preprocessing(self, mains_lst, submeters_lst, method):
152 | sequence_length = self.sequence_length
153 | if method == 'train':
154 | processed_mains = []
155 | for mains in mains_lst:
156 | mains = self.normalize_input(mains.values, sequence_length, self.mains_mean, self.mains_std, True)
157 | processed_mains.append(pd.DataFrame(mains))
158 |
159 | tuples_of_appliances = []
160 | for (appliance_name, app_df_list) in submeters_lst:
161 | app_mean = self.appliance_params[appliance_name]['mean']
162 | app_std = self.appliance_params[appliance_name]['std']
163 | processed_app_dfs = []
164 | for app_df in app_df_list:
165 | data = self.normalize_output(app_df.values, sequence_length, app_mean, app_std, True)
166 | processed_app_dfs.append(pd.DataFrame(data))
167 | tuples_of_appliances.append((appliance_name, processed_app_dfs))
168 |
169 | return processed_mains, tuples_of_appliances
170 |
171 | if method == 'test':
172 | processed_mains = []
173 | for mains in mains_lst:
174 | mains = self.normalize_input(mains.values, sequence_length, self.mains_mean, self.mains_std, False)
175 | processed_mains.append(pd.DataFrame(mains))
176 | return processed_mains
177 |
178 | def normalize_input(self, data, sequence_length, mean, std, overlapping=False):
179 | n = sequence_length
180 | excess_entries = sequence_length - (data.size % sequence_length)
181 | lst = np.array([0] * excess_entries)
182 | arr = np.concatenate((data.flatten(), lst), axis=0)
183 | if overlapping:
184 | windowed_x = np.array([arr[i:i + n] for i in range(len(arr) - n + 1)])
185 | else:
186 | windowed_x = arr.reshape((-1, sequence_length))
187 | windowed_x = windowed_x - mean
188 |         windowed_x = windowed_x / std
189 |         return windowed_x.reshape((-1, sequence_length))
190 |
191 | def normalize_output(self, data, sequence_length, mean, std, overlapping=False):
192 | n = sequence_length
193 | excess_entries = sequence_length - (data.size % sequence_length)
194 | lst = np.array([0] * excess_entries)
195 | arr = np.concatenate((data.flatten(), lst), axis=0)
196 | if overlapping:
197 | windowed_y = np.array([arr[i:i + n] for i in range(len(arr) - n + 1)])
198 | else:
199 | windowed_y = arr.reshape((-1, sequence_length))
200 | windowed_y = windowed_y - mean
201 | return (windowed_y / std).reshape((-1, sequence_length))
202 |
203 | def denormalize_output(self, data, mean, std):
204 | return mean + data * std
205 |
206 | def set_appliance_params(self, train_appliances):
207 |
208 | for (app_name, df_list) in train_appliances:
209 | l = np.array(pd.concat(df_list, axis=0))
210 | app_mean = np.mean(l)
211 | app_std = np.std(l)
212 | if app_std < 1:
213 | app_std = 100
214 | self.appliance_params.update({app_name: {'mean': app_mean, 'std': app_std}})
215 | print('the appliance params is set')
216 |
--------------------------------------------------------------------------------
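A note on the save/load hooks used by DAE (and by the other Keras models below): passing 'save-model-path' writes appliance-wise .h5 weights plus a model.json holding the normalisation parameters, and passing 'pretrained-model-path' restores them in __init__. The snippet below is only an illustrative sketch of that parameter wiring; the directory name and the commented-out calls are placeholders, not repository code, and it assumes the nilmtk / Keras stack the repository targets is installed.

from disaggregate import DAE

# First run: train and persist weights + model.json under this directory.
dae = DAE({
    'n_epochs': 5,
    'sequence_length': 99,
    'save-model-path': 'trained_models/dae',        # hypothetical output directory
})
# dae.partial_fit(train_mains_list, train_appliances_list)   # lists of DataFrames

# Later run: restore sequence_length, normalisation stats and weights.
dae_pretrained = DAE({'pretrained-model-path': 'trained_models/dae'})
# predictions = dae_pretrained.disaggregate_chunk(test_mains_list)
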
/disaggregate/dldisaggregator.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | from warnings import warn
3 | from nilmtk.disaggregate import Disaggregator
4 | from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten
5 | import os
6 | import pickle
7 | import pandas as pd
8 | import numpy as np
9 | from collections import OrderedDict
10 | from keras.optimizers import SGD
11 | from keras.models import Sequential, load_model
12 | import matplotlib.pyplot as plt
13 | from sklearn.model_selection import train_test_split
14 | from keras.callbacks import ModelCheckpoint
15 | import keras.backend as K
16 | import random
17 | import sys
18 | import json
19 | from .util import NumpyEncoder
20 | random.seed(10)
21 | np.random.seed(10)
22 |
23 | class SequenceLengthError(Exception):
24 | pass
25 |
26 | class ApplianceNotFoundError(Exception):
27 | pass
28 |
29 | class DL_disagregator(Disaggregator):
30 |
31 | def __init__(self, params):
32 | """
33 | Parameters to be specified for the model
34 | """
35 |
36 | self.MODEL_NAME = " "
37 | self.models = OrderedDict()
38 | self.sequence_length = params.get('sequence_length',99)
39 | self.n_epochs = params.get('n_epochs', 50 )
40 | self.batch_size = params.get('batch_size',1024)
41 | self.mains_mean = params.get('mains_mean',1800)
42 | self.mains_std = params.get('mains_std',600)
43 | self.appliance_params = params.get('appliance_params',{})
44 | self.save_model_path = params.get('save-model-path', None)
45 | self.load_model_path = params.get('pretrained-model-path',None)
46 | self.models = OrderedDict()
47 | if self.load_model_path:
48 | self.load_model()
49 | if self.sequence_length%2==0:
50 | print ("Sequence length should be odd!")
51 | raise (SequenceLengthError)
52 |
53 | def partial_fit(self,train_main,train_appliances,do_preprocessing=True,
54 | **load_kwargs):
55 |
56 |         # If no appliance-wise parameters are provided, then compute them using the first chunk
57 | if len(self.appliance_params) == 0:
58 | self.set_appliance_params(train_appliances)
59 |
60 | print("...............Seq2Point partial_fit running...............")
61 | # Do the pre-processing, such as windowing and normalizing
62 |
63 | if do_preprocessing:
64 | train_main, train_appliances = self.call_preprocessing(
65 | train_main, train_appliances, 'train') #480374,1 -> 480374,99, 480374,1 -> 480374,1
66 |
67 | train_main = pd.concat(train_main,axis=0) #480374,99
68 | train_main = train_main.values.reshape((-1,self.sequence_length,1))
69 |
70 | new_train_appliances = []
71 | for app_name, app_df in train_appliances:
72 | app_df = pd.concat(app_df,axis=0)
73 | app_df_values = app_df.values.reshape((-1,1))
74 | new_train_appliances.append((app_name, app_df_values))
75 | train_appliances = new_train_appliances
76 |
77 | for appliance_name, power in train_appliances:
78 | # Check if the appliance was already trained. If not then create a new model for it
79 | if appliance_name not in self.models:
80 | print("First model training for ", appliance_name)
81 | self.models[appliance_name] = self.return_network()
82 | # Retrain the particular appliance
83 | else:
84 | print("Started Retraining model for ", appliance_name)
85 |
86 | model = self.models[appliance_name]
87 | if train_main.size > 0:
88 | # Sometimes chunks can be empty after dropping NANS
89 | if len(train_main) > 10:
90 | # Do validation when you have sufficient samples
91 | filepath = 'seq2point-temp-weights-'+str(random.randint(0,100000))+'.h5'
92 | checkpoint = ModelCheckpoint(filepath,monitor='val_loss',verbose=1,save_best_only=True,mode='min')
93 | train_x, v_x, train_y, v_y = train_test_split(train_main, power, test_size=.15,random_state=10)
94 | model.fit(train_x,train_y,validation_data=[v_x,v_y],epochs=self.n_epochs,callbacks=[checkpoint],batch_size=self.batch_size)
95 | model.load_weights(filepath)
96 | if self.save_model_path:
97 | self.save_model()
98 |
99 | def load_model(self):
100 | print ("Loading the model using the pretrained-weights")
101 | model_folder = self.load_model_path
102 | if os.path.exists(os.path.join(model_folder, "model.json")):
103 | with open(os.path.join(model_folder, "model.json"), "r") as f:
104 | model_string = f.read().strip()
105 | params_to_load = json.loads(model_string)
106 |
107 |
108 | self.sequence_length = int(params_to_load['sequence_length'])
109 | self.mains_mean = params_to_load['mains_mean']
110 | self.mains_std = params_to_load['mains_std']
111 | self.appliance_params = params_to_load['appliance_params']
112 |
113 | for appliance_name in self.appliance_params:
114 | self.models[appliance_name] = self.return_network()
115 | self.models[appliance_name].load_weights(os.path.join(model_folder,appliance_name+".h5"))
116 |
117 |
118 | def save_model(self):
119 | if (os.path.exists(self.save_model_path) == False):
120 | os.makedirs(self.save_model_path)
121 | params_to_save = {}
122 | params_to_save['appliance_params'] = self.appliance_params
123 | params_to_save['sequence_length'] = self.sequence_length
124 | params_to_save['mains_mean'] = self.mains_mean
125 | params_to_save['mains_std'] = self.mains_std
126 | for appliance_name in self.models:
127 | print ("Saving model for ", appliance_name)
128 | self.models[appliance_name].save_weights(os.path.join(self.save_model_path,appliance_name+".h5"))
129 |
130 | with open(os.path.join(self.save_model_path,'model.json'),'w') as file:
131 | file.write(json.dumps(params_to_save, cls=NumpyEncoder))
132 |
133 | def disaggregate_chunk(self,test_main_list,model=None,do_preprocessing=True):
134 |
135 | if model is not None:
136 | self.models = model
137 |
138 | # Preprocess the test mains such as windowing and normalizing
139 |
140 | if do_preprocessing:
141 | test_main_list = self.call_preprocessing(test_main_list, submeters_lst=None, method='test')
142 |
143 | test_predictions = []
144 | for test_main in test_main_list:
145 | test_main = test_main.values
146 | test_main = test_main.reshape((-1, self.sequence_length, 1))
147 | disggregation_dict = {}
148 | for appliance in self.models:
149 | prediction = self.models[appliance].predict(test_main,batch_size=self.batch_size)
150 | prediction = self.appliance_params[appliance]['mean'] + prediction * self.appliance_params[appliance]['std']
151 | valid_predictions = prediction.flatten()
152 | valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
153 | df = pd.Series(valid_predictions)
154 | disggregation_dict[appliance] = df
155 | results = pd.DataFrame(disggregation_dict, dtype='float32')
156 | test_predictions.append(results)
157 | return test_predictions
158 |
159 | def return_network(self):
160 | # Model architecture
161 | model = Sequential()
162 | model.add(Conv1D(30,10,activation="relu",input_shape=(self.sequence_length,1),strides=1))
163 | model.add(Conv1D(30, 8, activation='relu', strides=1))
164 | model.add(Conv1D(40, 6, activation='relu', strides=1))
165 | model.add(Conv1D(50, 5, activation='relu', strides=1))
166 | model.add(Dropout(.2))
167 | model.add(Conv1D(50, 5, activation='relu', strides=1))
168 | model.add(Dropout(.2))
169 | model.add(Flatten())
170 | model.add(Dense(1024, activation='relu'))
171 | model.add(Dropout(.2))
172 | model.add(Dense(1))
173 | model.compile(loss='mse', optimizer='adam') # ,metrics=[self.mse])
174 | return model
175 |
176 | def call_preprocessing(self, mains_lst, submeters_lst, method):
177 |
178 | if method == 'train':
179 | # Preprocessing for the train data
180 | mains_df_list = []
181 | for mains in mains_lst:
182 | new_mains = mains.values.flatten()
183 | n = self.sequence_length
184 | units_to_pad = n // 2
185 | new_mains = np.pad(new_mains,(units_to_pad,units_to_pad),'constant',constant_values=(0,0))
186 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)]) ####################
187 | new_mains = (new_mains - self.mains_mean) / self.mains_std
188 | mains_df_list.append(pd.DataFrame(new_mains))
189 |
190 | appliance_list = []
191 | for app_index, (app_name, app_df_list) in enumerate(submeters_lst):
192 | if app_name in self.appliance_params:
193 | app_mean = self.appliance_params[app_name]['mean']
194 | app_std = self.appliance_params[app_name]['std']
195 | else:
196 | print ("Parameters for ", app_name ," were not found!")
197 | raise ApplianceNotFoundError()
198 |
199 | processed_appliance_dfs = []
200 |
201 | for app_df in app_df_list:
202 | new_app_readings = app_df.values.reshape((-1, 1))
203 | # This is for choosing windows
204 | new_app_readings = (new_app_readings - app_mean) / app_std
205 | # Return as a list of dataframe
206 | processed_appliance_dfs.append(pd.DataFrame(new_app_readings))
207 | appliance_list.append((app_name, processed_appliance_dfs))
208 | return mains_df_list, appliance_list
209 |
210 | else:
211 | # Preprocessing for the test data
212 | mains_df_list = []
213 |
214 | for mains in mains_lst:
215 | new_mains = mains.values.flatten()
216 | n = self.sequence_length
217 | units_to_pad = n // 2
218 | new_mains = np.pad(new_mains,(units_to_pad,units_to_pad),'constant',constant_values=(0,0))
219 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
220 | new_mains = (new_mains - self.mains_mean) / self.mains_std
221 | mains_df_list.append(pd.DataFrame(new_mains))
222 | return mains_df_list
223 |
224 | def set_appliance_params(self,train_appliances):
225 |         # Find the parameters using the first chunk
226 | for (app_name,df_list) in train_appliances:
227 | l = np.array(pd.concat(df_list,axis=0))
228 | app_mean = np.mean(l)
229 | app_std = np.std(l)
230 | if app_std<1:
231 | app_std = 100
232 | self.appliance_params.update({app_name:{'mean':app_mean,'std':app_std}})
233 | print (self.appliance_params)
--------------------------------------------------------------------------------
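DL_disagregator above is a reusable base class: it already contains the chunked training loop, checkpointing, windowed preprocessing and the JSON save/load logic, so a new point-wise model mainly needs to supply its own return_network(). The class below is a hypothetical example of that pattern (the import path and the tiny architecture are assumptions, not repository code).

from keras.layers import Conv1D, Dense, Flatten
from keras.models import Sequential
from disaggregate.dldisaggregator import DL_disagregator   # assumed import path

class TinyCNN(DL_disagregator):
    """A deliberately small seq2point-style network reusing the base-class plumbing."""

    def __init__(self, params):
        super(TinyCNN, self).__init__(params)
        self.MODEL_NAME = "TinyCNN"

    def return_network(self):
        model = Sequential()
        model.add(Conv1D(16, 5, activation='relu', padding='same',
                         input_shape=(self.sequence_length, 1)))
        model.add(Flatten())
        model.add(Dense(64, activation='relu'))
        model.add(Dense(1))
        model.compile(loss='mse', optimizer='adam')
        return model
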
/disaggregate/seq2point.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | from warnings import warn
3 | from nilmtk.disaggregate import Disaggregator
4 | from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten
5 | import os
6 | import pickle
7 | import pandas as pd
8 | import numpy as np
9 | from collections import OrderedDict
10 | from keras.optimizers import SGD
11 | from keras.models import Sequential, load_model
12 | import matplotlib.pyplot as plt
13 | from sklearn.model_selection import train_test_split
14 | from keras.callbacks import ModelCheckpoint
15 | import keras.backend as K
16 | import random
17 | import sys
18 | import json
19 | from .util import NumpyEncoder
20 | random.seed(10)
21 | np.random.seed(10)
22 |
23 | class SequenceLengthError(Exception):
24 | pass
25 |
26 | class ApplianceNotFoundError(Exception):
27 | pass
28 |
29 | class Seq2Point(Disaggregator):
30 |
31 | def __init__(self, params):
32 | """
33 | Parameters to be specified for the model
34 | """
35 |
36 | self.MODEL_NAME = "Seq2Point"
37 | self.models = OrderedDict()
38 | self.chunk_wise_training = params.get('chunk_wise_training',False)
39 | self.sequence_length = params.get('sequence_length',99)
40 | self.n_epochs = params.get('n_epochs', 50 )
41 | self.batch_size = params.get('batch_size',1024)
42 | self.mains_mean = params.get('mains_mean',1800)
43 | self.mains_std = params.get('mains_std',600)
44 | self.appliance_params = params.get('appliance_params',{})
45 | self.save_model_path = params.get('save-model-path', None)
46 | self.load_model_path = params.get('pretrained-model-path',None)
47 | self.models = OrderedDict()
48 | if self.load_model_path:
49 | self.load_model()
50 | if self.sequence_length%2==0:
51 | print ("Sequence length should be odd!")
52 | raise (SequenceLengthError)
53 |
54 | def partial_fit(self,train_main,train_appliances,do_preprocessing=True,
55 | **load_kwargs):
56 |
57 |         # If no appliance-wise parameters are provided, then compute them using the first chunk
58 | if len(self.appliance_params) == 0:
59 | self.set_appliance_params(train_appliances)
60 |
61 | print("...............Seq2Point partial_fit running...............")
62 | # Do the pre-processing, such as windowing and normalizing
63 |
64 | if do_preprocessing:
65 | train_main, train_appliances = self.call_preprocessing(
66 | train_main, train_appliances, 'train') #480374,1 -> 480374,99, 480374,1 -> 480374,1
67 |
68 | train_main = pd.concat(train_main,axis=0) #480374,99
69 | train_main = train_main.values.reshape((-1,self.sequence_length,1))
70 |
71 | new_train_appliances = []
72 | for app_name, app_df in train_appliances:
73 | app_df = pd.concat(app_df,axis=0)
74 | app_df_values = app_df.values.reshape((-1,1))
75 | new_train_appliances.append((app_name, app_df_values))
76 | train_appliances = new_train_appliances
77 |
78 | for appliance_name, power in train_appliances:
79 | # Check if the appliance was already trained. If not then create a new model for it
80 | if appliance_name not in self.models:
81 | print("First model training for ", appliance_name)
82 | self.models[appliance_name] = self.return_network()
83 | # Retrain the particular appliance
84 | else:
85 | print("Started Retraining model for ", appliance_name)
86 |
87 | model = self.models[appliance_name]
88 | if train_main.size > 0:
89 | # Sometimes chunks can be empty after dropping NANS
90 | if len(train_main) > 10:
91 | # Do validation when you have sufficient samples
92 | filepath = 'seq2point-temp-weights-'+str(random.randint(0,100000))+'.h5'
93 | checkpoint = ModelCheckpoint(filepath,monitor='val_loss',verbose=1,save_best_only=True,mode='min')
94 | train_x, v_x, train_y, v_y = train_test_split(train_main, power, test_size=.15,random_state=10)
95 | model.fit(train_x,train_y,validation_data=[v_x,v_y],epochs=self.n_epochs,callbacks=[checkpoint],batch_size=self.batch_size)
96 | model.load_weights(filepath)
97 | if self.save_model_path:
98 | self.save_model()
99 |
100 | def load_model(self):
101 | print ("Loading the model using the pretrained-weights")
102 | model_folder = self.load_model_path
103 | if os.path.exists(os.path.join(model_folder, "model.json")):
104 | with open(os.path.join(model_folder, "model.json"), "r") as f:
105 | model_string = f.read().strip()
106 | params_to_load = json.loads(model_string)
107 |
108 |
109 | self.sequence_length = int(params_to_load['sequence_length'])
110 | self.mains_mean = params_to_load['mains_mean']
111 | self.mains_std = params_to_load['mains_std']
112 | self.appliance_params = params_to_load['appliance_params']
113 |
114 | for appliance_name in self.appliance_params:
115 | self.models[appliance_name] = self.return_network()
116 | self.models[appliance_name].load_weights(os.path.join(model_folder,appliance_name+".h5"))
117 |
118 |
119 | def save_model(self):
120 | if (os.path.exists(self.save_model_path) == False):
121 | os.makedirs(self.save_model_path)
122 | params_to_save = {}
123 | params_to_save['appliance_params'] = self.appliance_params
124 | params_to_save['sequence_length'] = self.sequence_length
125 | params_to_save['mains_mean'] = self.mains_mean
126 | params_to_save['mains_std'] = self.mains_std
127 | for appliance_name in self.models:
128 | print ("Saving model for ", appliance_name)
129 | self.models[appliance_name].save_weights(os.path.join(self.save_model_path,appliance_name+".h5"))
130 |
131 | with open(os.path.join(self.save_model_path,'model.json'),'w') as file:
132 | file.write(json.dumps(params_to_save, cls=NumpyEncoder))
133 |
134 | def disaggregate_chunk(self,test_main_list,model=None,do_preprocessing=True):
135 |
136 | if model is not None:
137 | self.models = model
138 |
139 | # Preprocess the test mains such as windowing and normalizing
140 |
141 | if do_preprocessing:
142 | test_main_list = self.call_preprocessing(test_main_list, submeters_lst=None, method='test')
143 |
144 | test_predictions = []
145 | for test_main in test_main_list:
146 | test_main = test_main.values
147 | test_main = test_main.reshape((-1, self.sequence_length, 1))
148 | disggregation_dict = {}
149 | for appliance in self.models:
150 | prediction = self.models[appliance].predict(test_main,batch_size=self.batch_size)
151 | prediction = self.appliance_params[appliance]['mean'] + prediction * self.appliance_params[appliance]['std']
152 | valid_predictions = prediction.flatten()
153 | valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
154 | df = pd.Series(valid_predictions)
155 | disggregation_dict[appliance] = df
156 | results = pd.DataFrame(disggregation_dict, dtype='float32')
157 | test_predictions.append(results)
158 | return test_predictions
159 |
160 | def return_network(self):
161 | # Model architecture
162 | model = Sequential()
163 | model.add(Conv1D(30,10,activation="relu",input_shape=(self.sequence_length,1),strides=1))
164 | model.add(Conv1D(30, 8, activation='relu', strides=1))
165 | model.add(Conv1D(40, 6, activation='relu', strides=1))
166 | model.add(Conv1D(50, 5, activation='relu', strides=1))
167 | model.add(Dropout(.2))
168 | model.add(Conv1D(50, 5, activation='relu', strides=1))
169 | model.add(Dropout(.2))
170 | model.add(Flatten())
171 | model.add(Dense(1024, activation='relu'))
172 | model.add(Dropout(.2))
173 | model.add(Dense(1))
174 | model.compile(loss='mse', optimizer='adam') # ,metrics=[self.mse])
175 | return model
176 |
177 | def call_preprocessing(self, mains_lst, submeters_lst, method):
178 |
179 | if method == 'train':
180 | # Preprocessing for the train data
181 | mains_df_list = []
182 | for mains in mains_lst:
183 | new_mains = mains.values.flatten()
184 | n = self.sequence_length
185 | units_to_pad = n // 2
186 | new_mains = np.pad(new_mains,(units_to_pad,units_to_pad),'constant',constant_values=(0,0))
187 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)]) ####################
188 | new_mains = (new_mains - self.mains_mean) / self.mains_std
189 | mains_df_list.append(pd.DataFrame(new_mains))
190 |
191 | appliance_list = []
192 | for app_index, (app_name, app_df_list) in enumerate(submeters_lst):
193 | if app_name in self.appliance_params:
194 | app_mean = self.appliance_params[app_name]['mean']
195 | app_std = self.appliance_params[app_name]['std']
196 | else:
197 | print ("Parameters for ", app_name ," were not found!")
198 | raise ApplianceNotFoundError()
199 |
200 | processed_appliance_dfs = []
201 |
202 | for app_df in app_df_list:
203 | new_app_readings = app_df.values.reshape((-1, 1))
204 | # This is for choosing windows
205 | new_app_readings = (new_app_readings - app_mean) / app_std
206 | # Return as a list of dataframe
207 | processed_appliance_dfs.append(pd.DataFrame(new_app_readings))
208 | appliance_list.append((app_name, processed_appliance_dfs))
209 | return mains_df_list, appliance_list
210 |
211 | else:
212 | # Preprocessing for the test data
213 | mains_df_list = []
214 |
215 | for mains in mains_lst:
216 | new_mains = mains.values.flatten()
217 | n = self.sequence_length
218 | units_to_pad = n // 2
219 | new_mains = np.pad(new_mains,(units_to_pad,units_to_pad),'constant',constant_values=(0,0))
220 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
221 | new_mains = (new_mains - self.mains_mean) / self.mains_std
222 | mains_df_list.append(pd.DataFrame(new_mains))
223 | return mains_df_list
224 |
225 | def set_appliance_params(self,train_appliances):
226 |         # Find the parameters using the first chunk
227 | for (app_name,df_list) in train_appliances:
228 | l = np.array(pd.concat(df_list,axis=0))
229 | app_mean = np.mean(l)
230 | app_std = np.std(l)
231 | if app_std<1:
232 | app_std = 100
233 | self.appliance_params.update({app_name:{'mean':app_mean,'std':app_std}})
234 | print (self.appliance_params)
--------------------------------------------------------------------------------
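The call_preprocessing above pads the mains with sequence_length//2 zeros on each side and then slides a window of length sequence_length, so every original sample gets exactly one window centred on it (which is why the point-wise targets keep their original length). A standalone sketch of that arithmetic, independent of the repository code:

import numpy as np

n = 5                                    # sequence_length (must be odd)
mains = np.arange(1, 9, dtype=float)     # 8 mains samples
pad = n // 2
padded = np.pad(mains, (pad, pad), 'constant', constant_values=(0, 0))
windows = np.array([padded[i:i + n] for i in range(len(padded) - n + 1)])

assert windows.shape == (len(mains), n)  # one centred window per sample
print(windows[0])                        # [0. 0. 1. 2. 3.] -> window centred on the first sample
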
/disaggregate/rnn.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | from warnings import warn
3 | import numpy as np
4 | from nilmtk.disaggregate import Disaggregator
5 | from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten, Bidirectional, LSTM
6 | import os
7 | import pickle
8 | import pandas as pd
9 | import numpy as np
10 | from collections import OrderedDict
11 | from keras.optimizers import SGD
12 | from keras.models import Sequential, load_model
13 | import matplotlib.pyplot as plt
14 | from sklearn.model_selection import train_test_split
15 | from keras.callbacks import ModelCheckpoint
16 | import keras.backend as K
17 | import random
18 | import sys
19 | import json
20 | from .util import *
21 | random.seed(10)
22 | np.random.seed(10)
23 |
24 | class SequenceLengthError(Exception):
25 | pass
26 |
27 | class ApplianceNotFoundError(Exception):
28 | pass
29 |
30 | class RNN(Disaggregator):
31 |
32 | def __init__(self, params):
33 | """
34 | Parameters to be specified for the model
35 | """
36 |
37 | self.MODEL_NAME = "RNN"
38 | self.models = OrderedDict()
39 | self.chunk_wise_training = params.get('chunk_wise_training',False)
40 | self.sequence_length = params.get('sequence_length',99)
41 | self.n_epochs = params.get('n_epochs', 50 )
42 | self.batch_size = params.get('batch_size',1024)
43 | self.mains_mean = params.get('mains_mean',1800)
44 | self.mains_std = params.get('mains_std',600)
45 | self.appliance_params = params.get('appliance_params',{})
46 | self.save_model_path = params.get('save-model-path', None)
47 | self.load_model_path = params.get('pretrained-model-path',None)
48 | self.models = OrderedDict()
49 | if self.load_model_path:
50 | self.load_model()
51 | if self.sequence_length%2==0:
52 | print ("Sequence length should be odd!")
53 | raise (SequenceLengthError)
54 |
55 | def partial_fit(self,train_main,train_appliances,do_preprocessing=True,
56 | **load_kwargs):
57 |
58 |         # If no appliance-wise parameters are provided, then compute them using the first chunk
59 | if len(self.appliance_params) == 0:
60 | self.set_appliance_params(train_appliances)
61 |
62 |
63 | print("...............RNN partial_fit running...............")
64 | # Do the pre-processing, such as windowing and normalizing
65 |
66 | if do_preprocessing:
67 | train_main, train_appliances = self.call_preprocessing(
68 | train_main, train_appliances, 'train')
69 |
70 | train_main = pd.concat(train_main,axis=0)
71 | train_main = train_main.values.reshape((-1,self.sequence_length,1))
72 |
73 | new_train_appliances = []
74 | for app_name, app_df in train_appliances:
75 | app_df = pd.concat(app_df,axis=0)
76 | app_df_values = app_df.values.reshape((-1,1))
77 | new_train_appliances.append((app_name, app_df_values))
78 | train_appliances = new_train_appliances
79 |
80 | for appliance_name, power in train_appliances:
81 | # Check if the appliance was already trained. If not then create a new model for it
82 | if appliance_name not in self.models:
83 | print("First model training for ", appliance_name)
84 | self.models[appliance_name] = self.return_network()
85 | # Retrain the particular appliance
86 | else:
87 | print("Started Retraining model for ", appliance_name)
88 |
89 | model = self.models[appliance_name]
90 | if train_main.size > 0:
91 | # Sometimes chunks can be empty after dropping NANS
92 | if len(train_main) > 10:
93 | # Do validation when you have sufficient samples
94 | filepath = 'RNN-temp-weights-'+str(random.randint(0,100000))+'.h5'
95 | checkpoint = ModelCheckpoint(filepath,monitor='val_loss',verbose=1,save_best_only=True,mode='min')
96 | train_x, v_x, train_y, v_y = train_test_split(train_main, power, test_size=.15,random_state=10)
97 | model.fit(train_x,train_y,validation_data=[v_x,v_y],epochs=self.n_epochs,callbacks=[checkpoint],batch_size=self.batch_size)
98 | model.load_weights(filepath)
99 | if self.save_model_path:
100 | self.save_model()
101 |
102 | def load_model(self):
103 | print ("Loading the model using the pretrained-weights")
104 | model_folder = self.load_model_path
105 | if os.path.exists(os.path.join(model_folder, "model.json")):
106 | with open(os.path.join(model_folder, "model.json"), "r") as f:
107 | model_string = f.read().strip()
108 | params_to_load = json.loads(model_string)
109 |
110 |
111 | self.sequence_length = int(params_to_load['sequence_length'])
112 | self.mains_mean = params_to_load['mains_mean']
113 | self.mains_std = params_to_load['mains_std']
114 | self.appliance_params = params_to_load['appliance_params']
115 |
116 | for appliance_name in self.appliance_params:
117 | self.models[appliance_name] = self.return_network()
118 | self.models[appliance_name].load_weights(os.path.join(model_folder,appliance_name+".h5"))
119 |
120 |
121 | def save_model(self):
122 | if (os.path.exists(self.save_model_path) == False):
123 | os.makedirs(self.save_model_path)
124 | params_to_save = {}
125 | params_to_save['appliance_params'] = self.appliance_params
126 | params_to_save['sequence_length'] = self.sequence_length
127 | params_to_save['mains_mean'] = self.mains_mean
128 | params_to_save['mains_std'] = self.mains_std
129 | for appliance_name in self.models:
130 | print ("Saving model for ", appliance_name)
131 | self.models[appliance_name].save_weights(os.path.join(self.save_model_path,appliance_name+".h5"))
132 |
133 | with open(os.path.join(self.save_model_path,'model.json'),'w') as file:
134 | file.write(json.dumps(params_to_save, cls=NumpyEncoder))
135 |
136 | def disaggregate_chunk(self,test_main_list,model=None,do_preprocessing=True):
137 |
138 | if model is not None:
139 | self.models = model
140 |
141 | # Preprocess the test mains such as windowing and normalizing
142 |
143 | if do_preprocessing:
144 | test_main_list = self.call_preprocessing(
145 | test_main_list, submeters_lst=None, method='test')
146 |
147 | test_predictions = []
148 | for test_main in test_main_list:
149 | # print(test_main.shape)
150 | # if(test_main.shape[0] < 100):
151 | # continue
152 | test_main = test_main.values
153 | test_main = test_main.reshape((-1, self.sequence_length, 1))
154 | disggregation_dict = {}
155 | for appliance in self.models:
156 | prediction = self.models[appliance].predict(test_main,batch_size=self.batch_size)
157 | prediction = self.appliance_params[appliance]['mean'] + prediction * self.appliance_params[appliance]['std']
158 | valid_predictions = prediction.flatten()
159 | valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
160 | df = pd.Series(valid_predictions)
161 | disggregation_dict[appliance] = df
162 | results = pd.DataFrame(disggregation_dict, dtype='float32')
163 | test_predictions.append(results)
164 | return test_predictions
165 |
166 | def return_network(self):
167 | '''Creates the RNN module described in the paper
168 | '''
169 | model = Sequential()
170 |
171 | # 1D Conv
172 | model.add(Conv1D(16,4,activation="linear",input_shape=(self.sequence_length,1),padding="same",strides=1))
173 |
174 | # Bi-directional LSTMs
175 | model.add(Bidirectional(LSTM(128,return_sequences=True,stateful=False),merge_mode='concat'))
176 | model.add(Bidirectional(LSTM(256,return_sequences=False,stateful=False),merge_mode='concat'))
177 |
178 | # Fully Connected Layers
179 | model.add(Dense(128, activation='tanh'))
180 | model.add(Dense(1, activation='linear'))
181 |
182 | model.compile(loss='mse', optimizer='adam', metrics=['mse'])
183 |
184 | return model
185 |
186 | def call_preprocessing(self, mains_lst, submeters_lst, method):
187 |
188 | if method == 'train':
189 | mains_df_list = []
190 | for mains in mains_lst:
191 | new_mains = mains.values.flatten()
192 | n = self.sequence_length
193 | units_to_pad = n - 1
194 | new_mains = np.pad(new_mains,(units_to_pad,0),'constant',constant_values=(0,0))
195 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
196 | new_mains = (new_mains - self.mains_mean) / self.mains_std
197 | mains_df_list.append(pd.DataFrame(new_mains))
198 |
199 | appliance_list = []
200 | for app_index, (app_name, app_df_list) in enumerate(submeters_lst):
201 | if app_name in self.appliance_params:
202 | app_mean = self.appliance_params[app_name]['mean']
203 | app_std = self.appliance_params[app_name]['std']
204 | else:
205 | print ("Parameters for ", app_name ," were not found!")
206 | raise ApplianceNotFoundError()
207 |
208 | processed_appliance_dfs = []
209 |
210 | for app_df in app_df_list:
211 | new_app_readings = app_df.values.reshape((-1, 1))
212 | # This is for choosing windows
213 | new_app_readings = (new_app_readings - app_mean) / app_std
214 | # Return as a list of dataframe
215 | processed_appliance_dfs.append(pd.DataFrame(new_app_readings))
216 | appliance_list.append((app_name, processed_appliance_dfs))
217 | return mains_df_list, appliance_list
218 |
219 | else:
220 | mains_df_list = []
221 |
222 | for mains in mains_lst:
223 | new_mains = mains.values.flatten()
224 | n = self.sequence_length
225 | # units_to_pad = n // 2
226 | # new_mains = np.pad(new_mains,(units_to_pad,units_to_pad),'constant',constant_values=(0,0))
227 | # new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
228 | units_to_pad = n - 1
229 | new_mains = np.pad(new_mains,(units_to_pad,0),'constant',constant_values=(0,0))
230 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
231 | new_mains = (new_mains - self.mains_mean) / self.mains_std
232 | mains_df_list.append(pd.DataFrame(new_mains))
233 | return mains_df_list
234 |
235 | def set_appliance_params(self,train_appliances):
236 |         # Find the parameters using the first chunk
237 | for (app_name,df_list) in train_appliances:
238 | l = np.array(pd.concat(df_list,axis=0))
239 | app_mean = np.mean(l)
240 | app_std = np.std(l)
241 | if app_std<1:
242 | app_std = 100
243 | self.appliance_params.update({app_name:{'mean':app_mean,'std':app_std}})
244 | print (self.appliance_params)
245 |
--------------------------------------------------------------------------------
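Unlike Seq2Point, the RNN above (and WindowGRU below) pad sequence_length-1 zeros on the left only, so each window ends at the sample being predicted rather than being centred on it. A short standalone sketch of the difference, again not repository code:

import numpy as np

n = 5
mains = np.arange(1, 9, dtype=float)
padded = np.pad(mains, (n - 1, 0), 'constant', constant_values=(0, 0))
windows = np.array([padded[i:i + n] for i in range(len(padded) - n + 1)])

assert windows.shape == (len(mains), n)
print(windows[0])   # [0. 0. 0. 0. 1.] -> the window ends at the first sample
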
/disaggregate/gru.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | from warnings import warn
3 | import numpy as np
4 | from nilmtk.disaggregate import Disaggregator
5 | from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten, Bidirectional, GRU
6 | import os
7 | import pickle
8 | import pandas as pd
9 | import numpy as np
10 | from collections import OrderedDict
11 | from keras.optimizers import SGD
12 | from keras.models import Sequential, load_model
13 | import matplotlib.pyplot as plt
14 | from sklearn.model_selection import train_test_split
15 | from keras.callbacks import ModelCheckpoint
16 | import keras.backend as K
17 | import random
18 | import sys
19 | import json
20 | from .util import *
21 |
22 | random.seed(10)
23 | np.random.seed(10)
24 |
25 |
26 | class SequenceLengthError(Exception):
27 | pass
28 |
29 |
30 | class ApplianceNotFoundError(Exception):
31 | pass
32 |
33 |
34 | class WindowGRU(Disaggregator):
35 |
36 | def __init__(self, params):
37 | """
38 | Parameters to be specified for the model
39 | """
40 |
41 | self.MODEL_NAME = "WindowGRU"
42 | self.models = OrderedDict()
43 | self.chunk_wise_training = params.get('chunk_wise_training', False)
44 | self.sequence_length = params.get('sequence_length', 99)
45 | self.n_epochs = params.get('n_epochs', 30)
46 | self.batch_size = params.get('batch_size', 1024)
47 | self.mains_mean = params.get('mains_mean', 1800)
48 | self.mains_std = params.get('mains_std', 600)
49 | self.appliance_params = params.get('appliance_params', {})
50 | self.save_model_path = params.get('save-model-path', None)
51 | self.load_model_path = params.get('pretrained-model-path', None)
52 | self.models = OrderedDict()
53 | if self.load_model_path:
54 | self.load_model()
55 | if self.sequence_length % 2 == 0:
56 | print("Sequence length should be odd!")
57 | raise (SequenceLengthError)
58 |
59 | def partial_fit(self, train_main, train_appliances, do_preprocessing=True,
60 | **load_kwargs):
61 |
62 |         # If no appliance-wise parameters are provided, then compute them using the first chunk
63 | if len(self.appliance_params) == 0:
64 | self.set_appliance_params(train_appliances)
65 |
66 | print("...............GRU partial_fit running...............")
67 | # Do the pre-processing, such as windowing and normalizing
68 |
69 | if do_preprocessing:
70 | train_main, train_appliances = self.call_preprocessing(
71 | train_main, train_appliances, 'train')
72 |
73 | train_main = pd.concat(train_main, axis=0)
74 | train_main = train_main.values.reshape((-1, self.sequence_length, 1))
75 |
76 | new_train_appliances = []
77 | for app_name, app_df in train_appliances:
78 | app_df = pd.concat(app_df, axis=0)
79 | app_df_values = app_df.values.reshape((-1, 1))
80 | new_train_appliances.append((app_name, app_df_values))
81 | train_appliances = new_train_appliances
82 |
83 | for appliance_name, power in train_appliances:
84 | # Check if the appliance was already trained. If not then create a new model for it
85 | if appliance_name not in self.models:
86 | print("First model training for ", appliance_name)
87 | self.models[appliance_name] = self.return_network()
88 | # Retrain the particular appliance
89 | else:
90 | print("Started Retraining model for ", appliance_name)
91 |
92 | model = self.models[appliance_name]
93 | if train_main.size > 0:
94 | # Sometimes chunks can be empty after dropping NANS
95 | if len(train_main) > 10:
96 | # Do validation when you have sufficient samples
97 | filepath = 'GRU-temp-weights-' + str(random.randint(0, 100000)) + '.h5'
98 | checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True,
99 | mode='min')
100 | train_x, v_x, train_y, v_y = train_test_split(train_main, power, test_size=.15, random_state=10)
101 | model.fit(train_x, train_y, validation_data=[v_x, v_y], epochs=self.n_epochs,
102 | callbacks=[checkpoint], batch_size=self.batch_size)
103 | model.load_weights(filepath)
104 | if self.save_model_path:
105 | self.save_model()
106 |
107 | def load_model(self):
108 | print("Loading the model using the pretrained-weights")
109 | model_folder = self.load_model_path
110 | if os.path.exists(os.path.join(model_folder, "model.json")):
111 | with open(os.path.join(model_folder, "model.json"), "r") as f:
112 | model_string = f.read().strip()
113 | params_to_load = json.loads(model_string)
114 |
115 | self.sequence_length = int(params_to_load['sequence_length'])
116 | self.mains_mean = params_to_load['mains_mean']
117 | self.mains_std = params_to_load['mains_std']
118 | self.appliance_params = params_to_load['appliance_params']
119 |
120 | for appliance_name in self.appliance_params:
121 | self.models[appliance_name] = self.return_network()
122 | self.models[appliance_name].load_weights(os.path.join(model_folder, appliance_name + ".h5"))
123 |
124 | def save_model(self):
125 | if (os.path.exists(self.save_model_path) == False):
126 | os.makedirs(self.save_model_path)
127 | params_to_save = {}
128 | params_to_save['appliance_params'] = self.appliance_params
129 | params_to_save['sequence_length'] = self.sequence_length
130 | params_to_save['mains_mean'] = self.mains_mean
131 | params_to_save['mains_std'] = self.mains_std
132 | for appliance_name in self.models:
133 | print("Saving model for ", appliance_name)
134 | self.models[appliance_name].save_weights(os.path.join(self.save_model_path, appliance_name + ".h5"))
135 |
136 | with open(os.path.join(self.save_model_path, 'model.json'), 'w') as file:
137 | file.write(json.dumps(params_to_save, cls=NumpyEncoder))
138 |
139 | def disaggregate_chunk(self, test_main_list, model=None, do_preprocessing=True):
140 |
141 | if model is not None:
142 | self.models = model
143 |
144 | # Preprocess the test mains such as windowing and normalizing
145 |
146 | if do_preprocessing:
147 | test_main_list = self.call_preprocessing(
148 | test_main_list, submeters_lst=None, method='test')
149 |
150 | test_predictions = []
151 | for test_main in test_main_list:
152 | # if(test_main.shape[0] < 100):
153 | # continue
154 | test_main = test_main.values
155 | test_main = test_main.reshape((-1, self.sequence_length, 1))
156 | disggregation_dict = {}
157 | for appliance in self.models:
158 | prediction = self.models[appliance].predict(test_main, batch_size=self.batch_size)
159 | prediction = self.appliance_params[appliance]['mean'] + prediction * self.appliance_params[appliance][
160 | 'std']
161 | valid_predictions = prediction.flatten()
162 | valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
163 | df = pd.Series(valid_predictions)
164 | disggregation_dict[appliance] = df
165 | results = pd.DataFrame(disggregation_dict, dtype='float32')
166 | test_predictions.append(results)
167 | return test_predictions
168 |
169 | def return_network(self):
170 | '''Creates the GRU architecture described in the paper
171 | '''
172 | model = Sequential()
173 | # 1D Conv
174 | model.add(Conv1D(16, 4, activation='relu', input_shape=(self.sequence_length, 1), padding="same", strides=1))
175 | # Bi-directional GRUs
176 | model.add(Bidirectional(GRU(64, activation='relu',
177 | return_sequences=True), merge_mode='concat'))
178 | model.add(Dropout(0.5))
179 | model.add(Bidirectional(GRU(128, activation='relu',
180 | return_sequences=False), merge_mode='concat'))
181 | model.add(Dropout(0.5))
182 | # Fully Connected Layers
183 | model.add(Dense(128, activation='relu'))
184 | model.add(Dropout(0.5))
185 | model.add(Dense(1, activation='linear'))
186 | model.compile(loss='mse', optimizer='adam')
187 | return model
188 |
189 | def call_preprocessing(self, mains_lst, submeters_lst, method):
190 |
191 | if method == 'train':
192 | mains_df_list = []
193 | for mains in mains_lst:
194 | new_mains = mains.values.flatten()
195 | n = self.sequence_length
196 | units_to_pad = n - 1
197 | new_mains = np.pad(new_mains, (units_to_pad, 0), 'constant', constant_values=(0, 0))
198 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
199 | new_mains = (new_mains - self.mains_mean) / self.mains_std
200 | mains_df_list.append(pd.DataFrame(new_mains))
201 |
202 | appliance_list = []
203 | for app_index, (app_name, app_df_list) in enumerate(submeters_lst):
204 | if app_name in self.appliance_params:
205 | app_mean = self.appliance_params[app_name]['mean']
206 | app_std = self.appliance_params[app_name]['std']
207 | else:
208 | print("Parameters for ", app_name, " were not found!")
209 | raise ApplianceNotFoundError()
210 |
211 | processed_appliance_dfs = []
212 |
213 | for app_df in app_df_list:
214 | new_app_readings = app_df.values.reshape((-1, 1))
215 | # This is for choosing windows
216 | new_app_readings = (new_app_readings - app_mean) / app_std
217 | # Return as a list of dataframe
218 | processed_appliance_dfs.append(pd.DataFrame(new_app_readings))
219 | appliance_list.append((app_name, processed_appliance_dfs))
220 | return mains_df_list, appliance_list
221 |
222 | else:
223 | mains_df_list = []
224 |
225 | for mains in mains_lst:
226 | new_mains = mains.values.flatten()
227 | n = self.sequence_length
228 | # units_to_pad = n // 2
229 | # new_mains = np.pad(new_mains, (units_to_pad, units_to_pad), 'constant', constant_values=(0, 0))
230 | units_to_pad = n - 1
231 | new_mains = np.pad(new_mains, (units_to_pad, 0), 'constant', constant_values=(0, 0))
232 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
233 | new_mains = (new_mains - self.mains_mean) / self.mains_std
234 | mains_df_list.append(pd.DataFrame(new_mains))
235 | return mains_df_list
236 |
237 | def set_appliance_params(self, train_appliances):
238 |         # Find the parameters using the first chunk
239 | for (app_name, df_list) in train_appliances:
240 | l = np.array(pd.concat(df_list, axis=0))
241 | app_mean = np.mean(l)
242 | app_std = np.std(l)
243 | if app_std < 1:
244 | app_std = 100
245 | self.appliance_params.update({app_name: {'mean': app_mean, 'std': app_std}})
246 | print(self.appliance_params)
247 |
--------------------------------------------------------------------------------
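All of the Keras models above share the same normalisation convention: inputs and targets are scaled as (x - mean) / std with the per-appliance statistics from set_appliance_params, and disaggregate_chunk maps predictions back with mean + y * std and clips them at zero. A small standalone check of that round trip (synthetic numbers, not repository data):

import numpy as np

app_mean, app_std = 55.0, 120.0                  # per-appliance statistics
raw = np.array([0.0, 30.0, 500.0])

normalised = (raw - app_mean) / app_std          # what the networks are trained on
restored = app_mean + normalised * app_std       # what disaggregate_chunk undoes
assert np.allclose(restored, raw)

restored = np.where(restored > 0, restored, 0)   # negative watts are clipped to zero
print(restored)
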
/disaggregate/cnn_rnn.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | from warnings import warn
3 | from nilmtk.disaggregate import Disaggregator
4 | from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten, MaxPooling1D, LSTM
5 | import os
6 | import pickle
7 | import pandas as pd
8 | import numpy as np
9 | from collections import OrderedDict
10 | from keras.optimizers import SGD
11 | from keras.models import Sequential, load_model
12 | import matplotlib.pyplot as plt
13 | from sklearn.model_selection import train_test_split
14 | from keras.callbacks import ModelCheckpoint, LearningRateScheduler
15 | import keras.backend as K
16 | import random
17 | import sys
18 | import json
19 | from .util import *
20 | random.seed(10)
21 | np.random.seed(10)
22 |
23 | def lr_schedule(epoch):
24 | """Learning Rate Schedule
25 |
26 |     Learning rate is scheduled to be reduced after 5, 10, 20 and 30 epochs.
27 | Called automatically every epoch as part of callbacks during training.
28 |
29 | # Arguments
30 |         epoch (int): the index of the current epoch
31 |
32 | # Returns
33 | lr (float32): learning rate
34 | """
35 | lr = 1e-5
36 | if epoch > 30:
37 | lr *= 0.5e-3
38 | elif epoch > 20:
39 | lr *= 1e-3
40 | elif epoch > 10:
41 | lr *= 1e-2
42 | elif epoch > 5:
43 | lr *= 1e-1
44 | print('Learning rate: ', lr)
45 | return lr
46 |
47 |
48 | class SequenceLengthError(Exception):
49 | pass
50 |
51 | class ApplianceNotFoundError(Exception):
52 | pass
53 |
54 | class CNN_RNN(Disaggregator):
55 |
56 | def __init__(self, params):
57 | """
58 | Parameters to be specified for the model
59 | """
60 |
61 | self.MODEL_NAME = "CNN_RNN"
62 | self.models = OrderedDict()
63 | self.chunk_wise_training = params.get('chunk_wise_training',False)
64 | self.sequence_length = params.get('sequence_length',100)
65 | self.n_epochs = params.get('n_epochs', 10 )
66 | self.batch_size = params.get('batch_size',512)
67 | self.appliance_params = params.get('appliance_params',{})
68 | self.mains_mean = params.get('mains_mean',1800)
69 | self.mains_std = params.get('mains_std',600)
70 | self.appliance_params = params.get('appliance_params',{})
71 | self.save_model_path = params.get('save-model-path', None)
72 | self.load_model_path = params.get('pretrained-model-path',None)
73 | self.models = OrderedDict()
74 | if self.load_model_path:
75 | self.load_model()
76 |
77 | def partial_fit(self,train_main,train_appliances,do_preprocessing=True,
78 | **load_kwargs):
79 |
80 | # If no appliance-wise parameters are provided, then compute them using the first chunk
81 | if len(self.appliance_params) == 0:
82 | self.set_appliance_params(train_appliances)
83 |
84 | print("...............Seq2Point partial_fit running...............")
85 | # Do the pre-processing, such as windowing and normalizing
86 |
87 | if do_preprocessing:
88 | train_main, train_appliances = self.call_preprocessing(
89 | train_main, train_appliances, 'train') #480374,1 -> 480374,99, 480374,1 -> 480374,1
90 |
91 | train_main = pd.concat(train_main,axis=0) #480374,99
92 | train_main = train_main.values.reshape((-1,self.sequence_length,1))
93 |
94 | new_train_appliances = []
95 | for app_name, app_df in train_appliances:
96 | app_df = pd.concat(app_df,axis=0)
97 | app_df_values = app_df.values.reshape((-1,1))
98 | new_train_appliances.append((app_name, app_df_values))
99 | train_appliances = new_train_appliances
100 |
101 | for appliance_name, power in train_appliances:
102 | # Check if the appliance was already trained. If not then create a new model for it
103 | if appliance_name not in self.models:
104 | print("First model training for ", appliance_name)
105 | self.models[appliance_name] = self.return_network()
106 | # Retrain the particular appliance
107 | else:
108 | print("Started Retraining model for ", appliance_name)
109 |
110 | model = self.models[appliance_name]
111 | if train_main.size > 0:
112 | # Sometimes chunks can be empty after dropping NANS
113 | if len(train_main) > 10:
114 | # Do validation when you have sufficient samples
115 | filepath = 'cnn-rnn-temp-weights-'+str(random.randint(0,100000))+'.h5'
116 | checkpoint = ModelCheckpoint(filepath,monitor='val_loss',verbose=1,save_best_only=True,mode='min')
117 | train_x, v_x, train_y, v_y = train_test_split(train_main, power, test_size=.15,random_state=10)
118 | lr_scheduler = LearningRateScheduler(lr_schedule)
119 | model.fit(train_x,train_y,validation_data=[v_x,v_y],epochs=self.n_epochs,callbacks=[checkpoint, lr_scheduler],batch_size=self.batch_size)
120 | model.load_weights(filepath)
121 | if self.save_model_path:
122 | self.save_model()
123 |
124 | def load_model(self):
125 | print ("Loading the model using the pretrained-weights")
126 | model_folder = self.load_model_path
127 | if os.path.exists(os.path.join(model_folder, "model.json")):
128 | with open(os.path.join(model_folder, "model.json"), "r") as f:
129 | model_string = f.read().strip()
130 | params_to_load = json.loads(model_string)
131 |
132 |
133 | self.sequence_length = int(params_to_load['sequence_length'])
134 | self.mains_mean = params_to_load['mains_mean']
135 | self.mains_std = params_to_load['mains_std']
136 | self.appliance_params = params_to_load['appliance_params']
137 |
138 | for appliance_name in self.appliance_params:
139 | self.models[appliance_name] = self.return_network()
140 | self.models[appliance_name].load_weights(os.path.join(model_folder,appliance_name+".h5"))
141 |
142 |
143 | def save_model(self):
144 | if (os.path.exists(self.save_model_path) == False):
145 | os.makedirs(self.save_model_path)
146 | params_to_save = {}
147 | params_to_save['appliance_params'] = self.appliance_params
148 | params_to_save['sequence_length'] = self.sequence_length
149 | params_to_save['mains_mean'] = self.mains_mean
150 | params_to_save['mains_std'] = self.mains_std
151 | for appliance_name in self.models:
152 | print ("Saving model for ", appliance_name)
153 | self.models[appliance_name].save_weights(os.path.join(self.save_model_path,appliance_name+".h5"))
154 |
155 | with open(os.path.join(self.save_model_path,'model.json'),'w') as file:
156 | file.write(json.dumps(params_to_save, cls=NumpyEncoder))
157 |
158 | def disaggregate_chunk(self,test_main_list,model=None,do_preprocessing=True):
159 |
160 | if model is not None:
161 | self.models = model
162 |
163 | # Preprocess the test mains such as windowing and normalizing
164 |
165 | if do_preprocessing:
166 | test_main_list = self.call_preprocessing(test_main_list, submeters_lst=None, method='test')
167 |
168 | test_predictions = []
169 | for test_main in test_main_list:
170 | test_main = test_main.values
171 | test_main = test_main.reshape((-1, self.sequence_length, 1))
172 | disggregation_dict = {}
173 | for appliance in self.models:
174 | prediction = self.models[appliance].predict(test_main,batch_size=self.batch_size)
175 | prediction = self.appliance_params[appliance]['mean'] + prediction * self.appliance_params[appliance]['std']
176 | valid_predictions = prediction.flatten()
177 | valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
178 | df = pd.Series(valid_predictions)
179 | disggregation_dict[appliance] = df
180 | results = pd.DataFrame(disggregation_dict, dtype='float32')
181 | test_predictions.append(results)
182 | return test_predictions
183 |
184 | def return_network(self):
185 | # Model architecture
186 | model = Sequential()
187 | model.add(Conv1D(32, 3, activation="relu", input_shape=(self.sequence_length, 1), strides=1))
188 | model.add(Conv1D(32, 3, activation="relu", strides=1))
189 | model.add(MaxPooling1D(pool_size=2))
190 | model.add(LSTM(32,return_sequences=False,stateful=False))
191 | # model.add(Flatten())
192 | model.add(Dense(1,activation='sigmoid'))  # single output unit: sigmoid (a 1-unit softmax is always 1) for binary_crossentropy
193 |
194 | model.compile(loss='binary_crossentropy', optimizer='adam') # ,metrics=[self.mse])
195 | return model
196 |
197 | def call_preprocessing(self, mains_lst, submeters_lst, method):
198 |
199 | if method == 'train':
200 | # Preprocessing for the train data
201 | mains_df_list = []
202 | for mains in mains_lst:
203 |
204 | new_mains = mains.values.flatten()
205 | n = self.sequence_length
206 | units_to_pad = n
207 | new_mains = np.pad(new_mains,(units_to_pad,0),'constant',constant_values=(0,0))
208 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n)]) ####################
209 | new_mains = (new_mains - self.mains_mean) / self.mains_std
210 | mains_df_list.append(pd.DataFrame(new_mains))
211 |
212 | appliance_list = []
213 | for app_index, (app_name, app_df_list) in enumerate(submeters_lst):
214 | if app_name in self.appliance_params:
215 | app_mean = self.appliance_params[app_name]['mean']
216 | app_std = self.appliance_params[app_name]['std']
217 | else:
218 | print ("Parameters for ", app_name ," were not found!")
219 | raise ApplianceNotFoundError()
220 |
221 | processed_appliance_dfs = []
222 |
223 | for app_df in app_df_list:
224 | new_app_readings = app_df.values.reshape((-1, 1))
225 | # This is for choosing windows
226 | # new_app_readings = (new_app_readings - app_mean) / app_std
227 | # Return as a list of dataframe
228 | processed_appliance_dfs.append(pd.DataFrame(new_app_readings))
229 | appliance_list.append((app_name, processed_appliance_dfs))
230 | return mains_df_list, appliance_list
231 |
232 | else:
233 | # Preprocessing for the test data
234 | mains_df_list = []
235 |
236 | for mains in mains_lst:
237 | new_mains = mains.values.flatten()
238 | n = self.sequence_length
239 | units_to_pad = n // 2
240 | new_mains = np.pad(new_mains,(units_to_pad,units_to_pad+1),'constant',constant_values=(0,0))
241 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n)]) ####################
242 | new_mains = (new_mains - self.mains_mean) / self.mains_std
243 | mains_df_list.append(pd.DataFrame(new_mains))
244 | return mains_df_list
245 |
246 | def set_appliance_params(self,train_appliances):
247 | # Find the mean and std of each appliance using the first chunk of data
248 | for (app_name,df_list) in train_appliances:
249 | l = np.array(pd.concat(df_list,axis=0))
250 | app_mean = np.mean(l)
251 | app_std = np.std(l)
252 | if app_std<1:
253 | app_std = 100
254 | self.appliance_params.update({app_name:{'mean':app_mean,'std':app_std}})
255 | print (self.appliance_params)
--------------------------------------------------------------------------------
/disaggregate/seq2seq.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | from warnings import warn
3 |
4 | from nilmtk.disaggregate import Disaggregator
5 | from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten
6 |
7 | import os
8 | import pandas as pd
9 | import numpy as np
10 | import pickle
11 | from collections import OrderedDict
12 |
13 | from keras.optimizers import SGD
14 | from keras.models import Sequential, load_model
15 | import matplotlib.pyplot as plt
16 | from sklearn.model_selection import train_test_split
17 | from keras.callbacks import ModelCheckpoint
18 | import keras.backend as K
19 | import random
20 | import json
21 | from .util import *
22 | random.seed(10)
23 | np.random.seed(10)
24 |
25 |
26 | class SequenceLengthError(Exception):
27 | pass
28 |
29 | class ApplianceNotFoundError(Exception):
30 | pass
31 |
32 |
33 |
34 | class Seq2Seq(Disaggregator):
35 |
36 | def __init__(self, params):
37 |
38 | self.MODEL_NAME = "Seq2Seq"
39 | self.chunk_wise_training = params.get('chunk_wise_training',False)
40 | self.sequence_length = params.get('sequence_length',99)
41 | self.n_epochs = params.get('n_epochs', 50)
42 | self.models = OrderedDict()
43 | self.mains_mean = 1800
44 | self.mains_std = 600
45 | self.batch_size = params.get('batch_size',1024)
46 | self.appliance_params = params.get('appliance_params',{})
47 | self.save_model_path = params.get('save-model-path', None)
48 | self.load_model_path = params.get('pretrained-model-path',None)
49 | self.models = OrderedDict()
50 | if self.load_model_path:
51 | self.load_model()
52 | if self.sequence_length%2==0:
53 | print ("Sequence length should be odd!")
54 | raise (SequenceLengthError)
55 |
56 | def partial_fit(self,train_main,train_appliances,do_preprocessing=True,**load_kwargs):
57 |
58 | print("...............Seq2Seq partial_fit running...............")
59 | if len(self.appliance_params) == 0:
60 | self.set_appliance_params(train_appliances)
61 |
62 | print (len(train_main))
63 | if do_preprocessing:
64 | train_main, train_appliances = self.call_preprocessing(
65 | train_main, train_appliances, 'train')
66 | train_main = pd.concat(train_main,axis=0)
67 | train_main = train_main.values.reshape((-1,self.sequence_length,1))
68 |
69 | new_train_appliances = []
70 | for app_name, app_dfs in train_appliances:
71 | app_df = pd.concat(app_dfs,axis=0)
72 | app_df_values = app_df.values.reshape((-1,self.sequence_length))
73 | new_train_appliances.append((app_name, app_df_values))
74 | train_appliances = new_train_appliances
75 |
76 | for appliance_name, power in train_appliances:
77 | if appliance_name not in self.models:
78 | print("First model training for ", appliance_name)
79 | self.models[appliance_name] = self.return_network()
80 | else:
81 | print("Started Retraining model for ", appliance_name)
82 |
83 | model = self.models[appliance_name]
84 | if train_main.size > 0:
85 | # Sometimes chunks can be empty after dropping NANS
86 | if len(train_main) > 10:
87 | # Do validation when you have sufficient samples
88 | filepath = 'seq2seq-temp-weights-'+str(random.randint(0,100000))+'.h5'
89 | checkpoint = ModelCheckpoint(filepath,monitor='val_loss',verbose=1,save_best_only=True,mode='min')
90 | train_x, v_x, train_y, v_y = train_test_split(train_main, power, test_size=.15,random_state=10)
91 | model.fit(train_x,train_y,validation_data=[v_x,v_y],epochs=self.n_epochs,callbacks=[checkpoint],batch_size=self.batch_size)
92 | model.load_weights(filepath)
93 | if self.save_model_path:
94 | self.save_model()
95 |
96 | def load_model(self):
97 | print ("Loading the model using the pretrained-weights")
98 | model_folder = self.load_model_path
99 | if os.path.exists(os.path.join(model_folder, "model.json")):
100 | with open(os.path.join(model_folder, "model.json"), "r") as f:
101 | model_string = f.read().strip()
102 | params_to_load = json.loads(model_string)
103 |
104 |
105 | self.sequence_length = int(params_to_load['sequence_length'])
106 | self.mains_mean = params_to_load['mains_mean']
107 | self.mains_std = params_to_load['mains_std']
108 | self.appliance_params = params_to_load['appliance_params']
109 |
110 | for appliance_name in self.appliance_params:
111 | self.models[appliance_name] = self.return_network()
112 | self.models[appliance_name].load_weights(os.path.join(model_folder,appliance_name+".h5"))
113 |
114 |
115 | def save_model(self):
116 | if (os.path.exists(self.save_model_path) == False):
117 | os.makedirs(self.save_model_path)
118 | params_to_save = {}
119 | params_to_save['appliance_params'] = self.appliance_params
120 | params_to_save['sequence_length'] = self.sequence_length
121 | params_to_save['mains_mean'] = self.mains_mean
122 | params_to_save['mains_std'] = self.mains_std
123 | for appliance_name in self.models:
124 | print ("Saving model for ", appliance_name)
125 | self.models[appliance_name].save_weights(os.path.join(self.save_model_path,appliance_name+".h5"))
126 |
127 | with open(os.path.join(self.save_model_path,'model.json'),'w') as file:
128 | file.write(json.dumps(params_to_save, cls=NumpyEncoder))
129 |
130 | def disaggregate_chunk(self,test_main_list,model=None,do_preprocessing=True):
131 |
132 | if model is not None:
133 | self.models = model
134 |
135 | if do_preprocessing:
136 | test_main_list = self.call_preprocessing(
137 | test_main_list, submeters_lst=None, method='test')
138 |
139 | test_predictions = []
140 | for test_mains_df in test_main_list:
141 | # if test_mains_df.shape[0] < 100:
142 | # continue
143 |
144 | disggregation_dict = {}
145 | test_main_array = test_mains_df.values.reshape((-1, self.sequence_length, 1))
146 |
147 | for appliance in self.models:
148 |
149 | prediction = []
150 | model = self.models[appliance]
151 | prediction = model.predict(test_main_array ,batch_size=self.batch_size)
152 |
153 | #####################
154 | # This block is for creating the average of predictions over the different sequences
155 | # counts_arr keeps the number of overlapping windows that cover each timestamp
156 | # sum_arr accumulates the predicted values for each timestamp across those windows
157 | # the predictions are summed for a given time and divided by the number of windows covering it
158 |
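# A tiny illustrative example of the averaging below (hypothetical values): with l = 3 and
# two overlapping window predictions [p0, p1, p2] and [q1, q2, q3] (the second window starts
# one step later), sum_arr becomes [p0, p1+q1, p2+q2, q3] and counts_arr is [1, 2, 2, 1],
# so the averaged series is [p0, (p1+q1)/2, (p2+q2)/2, q3].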
159 | l = self.sequence_length
160 | n = len(prediction) + l - 1
161 | sum_arr = np.zeros((n))
162 | counts_arr = np.zeros((n))
163 | o = len(sum_arr)
164 | for i in range(len(prediction)):
165 | sum_arr[i:i + l] += prediction[i].flatten()
166 | counts_arr[i:i + l] += 1
167 | for i in range(len(sum_arr)):
168 | sum_arr[i] = sum_arr[i] / counts_arr[i]
169 |
170 | #################
171 | prediction = self.appliance_params[appliance]['mean'] + (sum_arr * self.appliance_params[appliance]['std'])
172 | valid_predictions = prediction.flatten()
173 | valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
174 | df = pd.Series(valid_predictions)
175 | disggregation_dict[appliance] = df
176 | results = pd.DataFrame(disggregation_dict, dtype='float32')
177 | test_predictions.append(results)
178 |
179 | return test_predictions
180 |
181 | def return_network(self):
182 |
183 | model = Sequential()
184 | # 1D Conv
185 | model.add(Conv1D(30,10,activation="relu",input_shape=(self.sequence_length,1),strides=2))
186 | model.add(Conv1D(30, 8, activation='relu', strides=2))
187 | model.add(Conv1D(40, 6, activation='relu', strides=1))
188 | model.add(Conv1D(50, 5, activation='relu', strides=1))
189 | model.add(Dropout(.2))
190 | model.add(Conv1D(50, 5, activation='relu', strides=1))
191 | model.add(Dropout(.2))
192 | model.add(Flatten())
193 | model.add(Dense(1024, activation='relu'))
194 | model.add(Dropout(.2))
195 | model.add(Dense(self.sequence_length))
196 | model.compile(loss='mse', optimizer='adam')
197 |
198 | return model
199 |
200 | def call_preprocessing(self, mains_lst, submeters_lst, method):
201 |
202 | if method == 'train':
203 | processed_mains_lst = []
204 | for mains in mains_lst:
205 | new_mains = mains.values.flatten()
206 | n = self.sequence_length
207 | units_to_pad = n // 2
208 | new_mains = np.pad(new_mains, (units_to_pad,units_to_pad),'constant',constant_values = (0,0))
209 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
210 | new_mains = (new_mains - self.mains_mean) / self.mains_std
211 | processed_mains_lst.append(pd.DataFrame(new_mains))
212 | #new_mains = pd.DataFrame(new_mains)
213 | appliance_list = []
214 | for app_index, (app_name, app_df_lst) in enumerate(submeters_lst):
215 |
216 | if app_name in self.appliance_params:
217 | app_mean = self.appliance_params[app_name]['mean']
218 | app_std = self.appliance_params[app_name]['std']
219 | else:
220 | print ("Parameters for ", app_name ," were not found!")
221 | raise ApplianceNotFoundError()
222 |
223 |
224 | processed_app_dfs = []
225 | for app_df in app_df_lst:
226 | new_app_readings = app_df.values.flatten()
227 | new_app_readings = np.pad(new_app_readings, (units_to_pad,units_to_pad),'constant',constant_values = (0,0))
228 | new_app_readings = np.array([new_app_readings[i:i + n] for i in range(len(new_app_readings) - n + 1)])
229 | new_app_readings = (new_app_readings - app_mean) / app_std # /self.max_val
230 | processed_app_dfs.append(pd.DataFrame(new_app_readings))
231 |
232 |
233 | appliance_list.append((app_name, processed_app_dfs))
234 | #new_app_readings = np.array([ new_app_readings[i:i+n] for i in range(len(new_app_readings)-n+1) ])
235 | #print (new_mains.shape, new_app_readings.shape, app_name)
236 |
237 | return processed_mains_lst, appliance_list
238 |
239 | else:
240 | processed_mains_lst = []
241 | for mains in mains_lst:
242 | new_mains = mains.values.flatten()
243 | n = self.sequence_length
244 | units_to_pad = n // 2
245 | #new_mains = np.pad(new_mains, (units_to_pad,units_to_pad),'constant',constant_values = (0,0))
246 | new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
247 | new_mains = (new_mains - self.mains_mean) / self.mains_std
248 | new_mains = new_mains.reshape((-1, self.sequence_length))
249 | processed_mains_lst.append(pd.DataFrame(new_mains))
250 | return processed_mains_lst
251 |
252 | def set_appliance_params(self,train_appliances):
253 |
254 | for (app_name,df_list) in train_appliances:
255 | l = np.array(pd.concat(df_list,axis=0))
256 | app_mean = np.mean(l)
257 | app_std = np.std(l)
258 | if app_std<1:
259 | app_std = 100
260 | self.appliance_params.update({app_name:{'mean':app_mean,'std':app_std}})
261 |
--------------------------------------------------------------------------------
/disaggregate/attention_dae.py:
--------------------------------------------------------------------------------
1 | #! -*- coding: utf-8 -*-
2 | #%%
3 | from __future__ import print_function, division
4 | from keras import backend as K
5 | from keras.models import Model
6 | from keras.engine.topology import Layer
7 | from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten, add, MaxPooling1D, Input, UpSampling1D, BatchNormalization, GaussianNoise, Activation
8 | from keras.regularizers import l2
9 |
10 | class Position_Embedding(Layer):
11 |
12 | def __init__(self, size=None, mode='sum', **kwargs):
13 | self.size = size  # must be an even number
14 | self.mode = mode
15 | super(Position_Embedding, self).__init__(**kwargs)
16 |
17 | def call(self, x):
18 | if (self.size == None) or (self.mode == 'sum'):
19 | self.size = int(x.shape[-1])
20 | batch_size,seq_len = K.shape(x)[0],K.shape(x)[1]
21 | position_j = 1. / K.pow(10000., \
22 | 2 * K.arange(self.size / 2, dtype='float32' \
23 | ) / self.size)
24 | position_j = K.expand_dims(position_j, 0)
25 | position_i = K.cumsum(K.ones_like(x[:,:,0]), 1)-1  # K.arange does not support variable lengths, so positions are generated this way
26 | position_i = K.expand_dims(position_i, 2)
27 | position_ij = K.dot(position_i, position_j)
28 | position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
29 | if self.mode == 'sum':
30 | return position_ij + x
31 | elif self.mode == 'concat':
32 | return K.concatenate([position_ij, x], 2)
33 |
34 | def compute_output_shape(self, input_shape):
35 | if self.mode == 'sum':
36 | return input_shape
37 | elif self.mode == 'concat':
38 | return (input_shape[0], input_shape[1], input_shape[2]+self.size)
39 |
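# Roughly, the layer above computes, for each position p and each frequency index
# k = 0 .. size/2 - 1, w_k = 1 / 10000**(2k / size) and emits
# [cos(p*w_0), ..., cos(p*w_{size/2-1}), sin(p*w_0), ..., sin(p*w_{size/2-1})],
# which is added to the input in 'sum' mode or concatenated to it in 'concat' mode.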
40 |
41 | class Attention(Layer):
42 |
43 | def __init__(self, nb_head, size_per_head, **kwargs):
44 | self.nb_head = nb_head
45 | self.size_per_head = size_per_head
46 | self.output_dim = nb_head*size_per_head
47 | super(Attention, self).__init__(**kwargs)
48 |
49 | def build(self, input_shape):
50 | self.WQ = self.add_weight(name='WQ',
51 | shape=(input_shape[0][-1], self.output_dim),
52 | initializer='glorot_uniform',
53 | trainable=True)
54 | self.WK = self.add_weight(name='WK',
55 | shape=(input_shape[1][-1], self.output_dim),
56 | initializer='glorot_uniform',
57 | trainable=True)
58 | self.WV = self.add_weight(name='WV',
59 | shape=(input_shape[2][-1], self.output_dim),
60 | initializer='glorot_uniform',
61 | trainable=True)
62 | super(Attention, self).build(input_shape)
63 |
64 | def Mask(self, inputs, seq_len, mode='mul'):
65 | if seq_len == None:
66 | return inputs
67 | else:
68 | mask = K.one_hot(seq_len[:,0], K.shape(inputs)[1])
69 | mask = 1 - K.cumsum(mask, 1)
70 | for _ in range(len(inputs.shape)-2):
71 | mask = K.expand_dims(mask, 2)
72 | if mode == 'mul':
73 | return inputs * mask
74 | if mode == 'add':
75 | return inputs - (1 - mask) * 1e12
76 |
77 | def call(self, x):
78 | # If only Q_seq, K_seq and V_seq are passed in, no mask is applied
79 | # If Q_seq, K_seq, V_seq, Q_len and V_len are all passed in, the padded part is masked
80 | if len(x) == 3:
81 | Q_seq,K_seq,V_seq = x
82 | Q_len,V_len = None,None
83 | elif len(x) == 5:
84 | Q_seq,K_seq,V_seq,Q_len,V_len = x
85 | # Apply linear projections to Q, K and V
86 | Q_seq = K.dot(Q_seq, self.WQ)
87 | Q_seq = K.reshape(Q_seq, (-1, K.shape(Q_seq)[1], self.nb_head, self.size_per_head))
88 | Q_seq = K.permute_dimensions(Q_seq, (0,2,1,3))
89 | K_seq = K.dot(K_seq, self.WK)
90 | K_seq = K.reshape(K_seq, (-1, K.shape(K_seq)[1], self.nb_head, self.size_per_head))
91 | K_seq = K.permute_dimensions(K_seq, (0,2,1,3))
92 | V_seq = K.dot(V_seq, self.WV)
93 | V_seq = K.reshape(V_seq, (-1, K.shape(V_seq)[1], self.nb_head, self.size_per_head))
94 | V_seq = K.permute_dimensions(V_seq, (0,2,1,3))
95 | # Compute the scaled dot products, then mask, then softmax
96 | A = K.batch_dot(Q_seq, K_seq, axes=[3,3]) / self.size_per_head**0.5
97 | A = K.permute_dimensions(A, (0,3,2,1))
98 | A = self.Mask(A, V_len, 'add')
99 | A = K.permute_dimensions(A, (0,3,2,1))
100 | A = K.softmax(A)
101 | # Compute the output and apply the mask
102 | O_seq = K.batch_dot(A, V_seq, axes=[3,2])
103 | O_seq = K.permute_dimensions(O_seq, (0,2,1,3))
104 | O_seq = K.reshape(O_seq, (-1, K.shape(O_seq)[1], self.output_dim))
105 | O_seq = self.Mask(O_seq, Q_len, 'mul')
106 | return O_seq
107 |
108 | def compute_output_shape(self, input_shape):
109 | return (input_shape[0][0], input_shape[0][1], self.output_dim)
110 |
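# Illustrative usage (shapes are hypothetical): for a tensor x of shape (batch, seq_len, 256),
# Attention(8, 32)([x, x, x]) performs 8-head self-attention with 32 dimensions per head and
# returns a tensor of shape (batch, seq_len, 8*32) = (batch, seq_len, 256), which is how
# midden() below uses it before the residual add.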
111 |
112 | def resnet_layer(inputs,
113 | filters=16,
114 | kernel_size=3,
115 | strides=1,
116 | batch_normalization=True,
117 | conv_first=True,
118 | dilation_rate=7,
119 | std = 1e-4):
120 | conv = Conv1D(filters,
121 | kernel_size=kernel_size,
122 | strides=strides,
123 | padding='same',
124 | kernel_initializer='he_normal',
125 | kernel_regularizer=l2(1e-4),
126 | dilation_rate = dilation_rate)
127 |
128 | x = inputs
129 | x = conv(x)
130 | x = Activation('relu')(x)
131 | x = BatchNormalization()(x)
132 | x = GaussianNoise(stddev = std)(x)
133 | return x
134 |
135 | def coder(input,
136 | filters,
137 | kernel_size = 3,
138 | down_sample = True,
139 | size = 2):
140 | input = resnet_layer(input,filters=filters,kernel_size=kernel_size)
141 | x = resnet_layer(input,filters=filters,kernel_size=kernel_size)
142 | input = add([input, x])
143 | x = resnet_layer(input,filters=filters,kernel_size=kernel_size)
144 | x = add([input, x])
145 | if(down_sample):
146 | x = MaxPooling1D(pool_size=size)(x)
147 | else:
148 | x = UpSampling1D(size=size)(x)
149 | return x
150 |
151 | def begin(input, std=1e-5):
152 | x = Conv1D(filters=64, kernel_size=1, activation='sigmoid',padding='same')(input)
153 | x = BatchNormalization()(x)
154 | x = GaussianNoise(std)(x)
155 | return x
156 |
157 | def end(input):
158 | x = Conv1D(filters=1, kernel_size=3, activation='sigmoid', dilation_rate=7,padding='same')(input)
159 | x = BatchNormalization()(x)
160 | return x
161 |
162 | def midden(input):
163 | x = Attention(8,32)([input,input,input])
164 | x = add([x,input])
165 | return x
166 |
167 | def lr_schedule(epoch):
168 | """Learning Rate Schedule
169 |
170 | Learning rate is scheduled to be reduced after 20, 40, 60 and 80 epochs.
171 | Called automatically every epoch as part of callbacks during training.
172 |
173 | # Arguments
174 | epoch (int): The number of epochs
175 |
176 | # Returns
177 | lr (float32): learning rate
178 | """
179 | lr = 1e-5
180 | if epoch > 80:
181 | lr *= 0.5e-3
182 | elif epoch > 60:
183 | lr *= 1e-3
184 | elif epoch > 40:
185 | lr *= 1e-2
186 | elif epoch > 20:
187 | lr *= 1e-1
188 | print('Learning rate: ', lr)
189 | return lr
190 |
191 | def generate_slide_window(arr, step=1, n=1800):
192 | i = 0
193 | result = []
194 | while(i < (len(arr)-n+1)):
195 | result.append(arr[i:i+n])
196 | i+=step
197 | return np.array(result)
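# Illustrative example: generate_slide_window(np.arange(10), step=2, n=4) slides a window of
# length 4 with stride 2 over the 10 samples, producing windows that start at indices
# 0, 2, 4 and 6, i.e. an array of shape (4, 4).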
198 |
199 |
200 |
201 |
202 |
203 |
204 | # %%
205 |
206 |
207 |
208 |
209 |
210 | from warnings import warn
211 | from nilmtk.disaggregate import Disaggregator
212 | from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten, add, MaxPooling1D, Input
213 | import keras
214 | import pandas as pd
215 | import numpy as np
216 | from collections import OrderedDict
217 | from keras.optimizers import SGD
218 | import matplotlib.pyplot as plt
219 | from sklearn.model_selection import train_test_split
220 | from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
221 | import keras.backend as K
222 | from statistics import mean
223 | import os
224 | import pickle
225 | import random
226 | import json
227 | from .util import *
228 |
229 |
230 |
231 | random.seed(10)
232 | np.random.seed(10)
233 |
234 | class ADAE(Disaggregator):
235 |
236 | def __init__(self, params):
237 | """
238 | Initialize the model with the given parameters
239 | """
240 | self.MODEL_NAME = "ADAE"
241 | self.chunk_wise_training = params.get('chunk_wise_training',False)
242 | self.sequence_length = params.get('sequence_length',1800)
243 | self.stride_length = params.get('stride_length',self.sequence_length)
244 | self.n_epochs = params.get('n_epochs', 10)
245 | self.batch_size = params.get('batch_size',16)
246 | self.mains_mean = params.get('mains_mean',1000)
247 | self.mains_std = params.get('mains_std',600)
248 | self.appliance_params = params.get('appliance_params',{})
249 | self.save_model_path = params.get('save-model-path', None)
250 | self.load_model_path = params.get('pretrained-model-path',None)
251 | self.models = OrderedDict()
252 | if self.load_model_path:
253 | self.load_model()
254 |
255 |
256 |
257 | def partial_fit(self, train_main, train_appliances, do_preprocessing=True,**load_kwargs):
258 | """
259 | The partial fit function
260 | """
261 |
262 | # If no appliance wise parameters are specified, then they are computed from the data
263 | if len(self.appliance_params) == 0:
264 | self.set_appliance_params(train_appliances)
265 |
266 | # Preprocess the data and bring it to a valid shape
267 | if do_preprocessing:
268 | print ("Doing Preprocessing")
269 | train_main,train_appliances = self.call_preprocessing(train_main,train_appliances,'train')
270 | train_main = pd.concat(train_main,axis=0).values
271 | train_main = train_main.reshape((-1,self.sequence_length,1))
272 | new_train_appliances = []
273 | for app_name, app_df in train_appliances:
274 | app_df = pd.concat(app_df,axis=0).values
275 | app_df = app_df.reshape((-1,self.sequence_length,1))
276 | new_train_appliances.append((app_name, app_df))
277 | train_appliances = new_train_appliances
278 | for appliance_name, power in train_appliances:
279 | if appliance_name not in self.models:
280 | print ("First model training for ",appliance_name)
281 | self.models[appliance_name] = self.return_network()
282 | print (self.models[appliance_name].summary())
283 | print ("Started Retraining model for ",appliance_name)
284 | model = self.models[appliance_name]
285 | filepath = 'adae-temp-weights-'+str(random.randint(0,100000))+'.h5'
286 | checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
287 | train_x,v_x,train_y,v_y = train_test_split(train_main,power,test_size=.15,random_state=10)
288 | def data_generator(data, targets, batch_size):
289 | batches = (len(data) + batch_size - 1)//batch_size
290 | while(True):
291 | for i in range(batches):
292 | X = data[i*batch_size : (i+1)*batch_size]
293 | Y = targets[i*batch_size : (i+1)*batch_size]
294 | yield (X, Y)
295 |
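# The generator above simply cycles over the training arrays in fixed-size slices forever;
# steps_per_epoch in the fit_generator call below is chosen so that each epoch consumes
# exactly one pass over train_x.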
296 | lr_scheduler = LearningRateScheduler(lr_schedule)
297 |
298 | lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
299 | cooldown=0,
300 | patience=3,
301 | min_lr=0.5e-6)
302 |
303 | model.fit_generator(generator = data_generator(train_x, train_y, self.batch_size),
304 | steps_per_epoch = (len(train_x) + self.batch_size - 1) // self.batch_size,
305 | epochs = self.n_epochs,
306 | verbose = 1,
307 | callbacks = [checkpoint,lr_scheduler,lr_reducer],
308 | validation_data = (v_x, v_y)
309 | )
310 | # model.fit(train_x,train_y,validation_data = [v_x,v_y],epochs = self.n_epochs, callbacks = [checkpoint,lr_scheduler,lr_reducer],shuffle=True,batch_size=self.batch_size)
311 | model.load_weights(filepath)
312 |
313 | if self.save_model_path:
314 | self.save_model()
315 |
316 | def load_model(self):
317 | print ("Loading the model using the pretrained-weights")
318 | model_folder = self.load_model_path
319 | with open(os.path.join(model_folder, "model.json"), "r") as f:
320 | model_string = f.read().strip()
321 | params_to_load = json.loads(model_string)
322 |
323 |
324 | self.sequence_length = int(params_to_load['sequence_length'])
325 | self.mains_mean = params_to_load['mains_mean']
326 | self.mains_std = params_to_load['mains_std']
327 | self.appliance_params = params_to_load['appliance_params']
328 |
329 | for appliance_name in self.appliance_params:
330 | self.models[appliance_name] = self.return_network()
331 | self.models[appliance_name].load_weights(os.path.join(model_folder,appliance_name+".h5"))
332 |
333 |
334 | def save_model(self):
335 |
336 | if os.path.exists(self.save_model_path) == False:
337 | os.makedirs(self.save_model_path)
338 | params_to_save = {}
339 | params_to_save['appliance_params'] = self.appliance_params
340 | params_to_save['sequence_length'] = self.sequence_length
341 | params_to_save['mains_mean'] = self.mains_mean
342 | params_to_save['mains_std'] = self.mains_std
343 | for appliance_name in self.models:
344 | print ("Saving model for ", appliance_name)
345 | self.models[appliance_name].save_weights(os.path.join(self.save_model_path,appliance_name+".h5"))
346 |
347 | with open(os.path.join(self.save_model_path,'model.json'),'w') as file:
348 | file.write(json.dumps(params_to_save, cls=NumpyEncoder))
349 |
350 |
351 |
352 | def disaggregate_chunk(self, test_main_list, do_preprocessing=True):
353 | if do_preprocessing:
354 | test_main_list = self.call_preprocessing(test_main_list,submeters_lst=None,method='test')
355 |
356 | test_predictions = []
357 | for test_main in test_main_list:
358 | test_main = test_main.values
359 | test_main = test_main.reshape((-1,self.sequence_length,1))
360 | disggregation_dict = {}
361 | for appliance in self.models:
362 | prediction = self.models[appliance].predict(test_main,batch_size=self.batch_size)
363 | app_mean = self.appliance_params[appliance]['mean']
364 | app_std = self.appliance_params[appliance]['std']
365 | prediction = self.denormalize_output(prediction,app_mean,app_std)
366 | valid_predictions = prediction.flatten()
367 | valid_predictions = np.where(valid_predictions>0,valid_predictions,0)
368 | series = pd.Series(valid_predictions)
369 | disggregation_dict[appliance] = series
370 | results = pd.DataFrame(disggregation_dict,dtype='float32')
371 | test_predictions.append(results)
372 | return test_predictions
373 |
374 | def return_network(self):
375 |
376 | inputs = Input(shape=(self.sequence_length,1), dtype='float32')  # use the configured sequence length instead of a hard-coded 1800
377 | x = begin(inputs)
378 | x = coder(x, filters=128)
379 | x = coder(x, filters=256)
380 | x = midden(x)
381 | x = midden(x)
382 | x = coder(x, filters=256, down_sample=False)
383 | x = coder(x, filters=128, down_sample=False)
384 | outputs = end(x)
385 | model = Model(inputs=inputs, outputs=outputs)
386 | model.compile(loss='mse', optimizer='Adam')
387 | print(model)
388 |
389 | return model
390 |
391 |
392 | def call_preprocessing(self, mains_lst, submeters_lst, method):
393 | sequence_length = self.sequence_length
394 | if method=='train':
395 | processed_mains = []
396 | for mains in mains_lst:
397 | mains = self.normalize_input(mains.values,sequence_length,self.mains_mean,self.mains_std, True)
398 | processed_mains.append(pd.DataFrame(mains))
399 |
400 | tuples_of_appliances = []
401 | for (appliance_name,app_df_list) in submeters_lst:
402 | app_mean = self.appliance_params[appliance_name]['mean']
403 | app_std = self.appliance_params[appliance_name]['std']
404 | processed_app_dfs = []
405 | for app_df in app_df_list:
406 | data = self.normalize_output(app_df.values, sequence_length,app_mean,app_std, True)
407 | processed_app_dfs.append(pd.DataFrame(data))
408 | tuples_of_appliances.append((appliance_name, processed_app_dfs))
409 |
410 | return processed_mains, tuples_of_appliances
411 |
412 | if method=='test':
413 | processed_mains = []
414 | for mains in mains_lst:
415 | mains = self.normalize_input(mains.values,sequence_length,self.mains_mean,self.mains_std,False)
416 | processed_mains.append(pd.DataFrame(mains))
417 | return processed_mains
418 |
419 |
420 | def normalize_input(self,data,sequence_length, mean, std, overlapping=False):
421 | n = sequence_length
422 | excess_entries = sequence_length - (data.size % sequence_length)
423 | lst = np.array([0] * excess_entries)
424 | arr = np.concatenate((data.flatten(), lst),axis=0)
425 | if overlapping:
426 | # windowed_x = np.array([ arr[i:i+n] for i in range(len(arr)-n+1) ])
427 | windowed_x = generate_slide_window(arr, self.stride_length, n)
428 | else:
429 | windowed_x = arr.reshape((-1,sequence_length))
430 | windowed_x = windowed_x - mean
431 | windowed_x = windowed_x/std
432 | return windowed_x.reshape((-1,sequence_length))  # std already applied once above, matching denormalize_output
433 |
434 | def normalize_output(self,data,sequence_length, mean, std, overlapping=False):
435 | n = sequence_length
436 | excess_entries = sequence_length - (data.size % sequence_length)
437 | lst = np.array([0] * excess_entries)
438 | arr = np.concatenate((data.flatten(), lst),axis=0)
439 | if overlapping:
440 | # windowed_y = np.array([ arr[i:i+n] for i in range(len(arr)-n+1) ])
441 | windowed_y = generate_slide_window(arr, self.stride_length, n)
442 | else:
443 | windowed_y = arr.reshape((-1,sequence_length))
444 | windowed_y = windowed_y - mean
445 | return (windowed_y/std).reshape((-1,sequence_length))
446 |
447 | def denormalize_output(self,data,mean,std):
448 | return mean + data*std
449 |
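# Illustrative round trip (hypothetical values): with app_mean = 50 and app_std = 100, a reading
# of 250 is normalised by normalize_output to (250 - 50) / 100 = 2.0, and
# denormalize_output(2.0, 50, 100) recovers 50 + 2.0 * 100 = 250.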
450 | def set_appliance_params(self,train_appliances):
451 |
452 | for (app_name,df_list) in train_appliances:
453 | l = np.array(pd.concat(df_list,axis=0))
454 | app_mean = np.mean(l)
455 | app_std = np.std(l)
456 | if app_std<1:
457 | app_std = 100
458 | self.appliance_params.update({app_name:{'mean':app_mean,'std':app_std}})
459 |
460 |
461 |
462 |
463 | # %%
464 |
465 |
466 |
467 |
468 |
469 |
470 |
471 | # %%
472 |
--------------------------------------------------------------------------------
/api.py:
--------------------------------------------------------------------------------
1 | import os
2 | import warnings
3 |
4 | warnings.filterwarnings("ignore")
5 | from nilmtk.dataset import DataSet
6 | from nilmtk.metergroup import MeterGroup
7 | import pandas as pd
8 | from nilmtk.losses import *
9 | import numpy as np
10 | import matplotlib.pyplot as plt
11 | import datetime
12 | from IPython.display import clear_output
13 | from metrics import Metrics
14 | from disaggregate import config, get_activations, get_sections_df, get_sections_df_2
15 | import copy
16 | import joblib
17 |
18 |
19 | class API():
20 | """
21 | The API is designed for rapid experimentation with NILM Algorithms.
22 | """
23 |
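# A minimal sketch of the `params` dict that __init__ below expects; the keys are taken from the
# code, the values are only illustrative:
#   {
#       'power': {'mains': ['apparent'], 'appliance': ['active']},
#       'sample_rate': 6,
#       'appliances': ['fridge'],
#       'methods': {'Seq2Point': Seq2Point({...})},
#       'isState': False,
#       'train': {'datasets': {...}},
#       'test': {'datasets': {...}},
#   }
# Optional keys read via params.get(): 'artificial_aggregate', 'activation_profile', 'pre_trained'.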
24 | def __init__(self, params):
25 |
26 | """
27 | Initializes the API with default parameters
28 | """
29 | self.power = {}
30 | self.sample_period = 1
31 | self.appliances = []
32 | self.methods = {}
33 | self.chunk_size = None
34 | self.pre_trained = False
35 | self.metrics = []
36 | self.train_datasets_dict = {}
37 | self.test_datasets_dict = {}
38 | self.artificial_aggregate = False
39 | self.train_submeters = []
40 | self.train_mains = pd.DataFrame()
41 | self.test_submeters = []
42 | self.test_mains = pd.DataFrame()
43 | self.test_sections = []
44 | self.gt_overall = {}
45 | self.pred_overall = {}
46 | self.classifiers = []
47 | self.DROP_ALL_NANS = True
48 | self.mae = pd.DataFrame()
49 | self.rmse = pd.DataFrame()
50 | self.errors = pd.DataFrame()
51 | self.predictions = []
52 | self.errors_keys = []
53 | self.predictions_keys = []
54 | self.params = params
55 | # required experiment settings taken directly from the params dictionary
56 | self.power = params['power']
57 | self.sample_period = params['sample_rate']
58 | for elems in params['appliances']:
59 | self.appliances.append(elems)
60 |
61 | self.pre_trained = params.get('pre_trained', False)  # keep the default set above when the key is absent
62 | self.train_datasets_dict = params['train']['datasets']
63 | self.test_datasets_dict = params['test']['datasets']
64 | # self.metrics = params['test']['metrics']
65 | self.methods = params['methods']
66 | self.artificial_aggregate = params.get('artificial_aggregate', self.artificial_aggregate)
67 | self.activation_profile = params.get('activation_profile', config['threshold'])
68 | self.isState = params.get('isState', False)
69 | self.sec_dict = {}
70 | self.experiment(params)
71 |
72 | def experiment(self, params):
73 | """
74 | Calls the Experiments with the specified parameters
75 | """
76 |
77 | self.store_classifier_instances()
78 | d = self.train_datasets_dict
79 |
80 | for model_name, clf in self.classifiers:
81 | # If the model is a neural net, it has an attribute n_epochs, Ex: DAE, Seq2Point
82 | print("Started training for ", clf.MODEL_NAME)
83 |
84 | # If the model has the filename specified for loading the pretrained model, then we don't need to load training data
85 |
86 | if hasattr(clf, 'load_model_path'):
87 | if clf.load_model_path:
88 | print(clf.MODEL_NAME, " is loading the pretrained model")
89 | continue
90 |
91 | print("Joint training for ", clf.MODEL_NAME)
92 | self.train_jointly(clf, d)
93 |
94 | # if it doesn't support chunk wise training
95 | else:
96 | print("Joint training for ", clf.MODEL_NAME)
97 | self.train_jointly(clf, d)
98 |
99 | print("Finished training for ", clf.MODEL_NAME)
100 | clear_output()
101 |
102 | d = self.test_datasets_dict
103 |
104 | print("Joint Testing for all algorithms")
105 | self.test_jointly(d)
106 |
107 | def train_jointly(self, clf, d):
108 |
109 | # This function has a few issues, which should be addressed soon
110 | print("............... Loading Data for training ...................")
111 | # store the train_main readings for all buildings
112 | self.train_mains = []
113 | self.train_submeters = [[] for i in range(len(self.appliances))]
114 | self.sec_dict = {}
115 | for dataset in d:
116 | print("Loading data for ", dataset, " dataset")
117 | train = DataSet(d[dataset]['path'])
118 | for building in d[dataset]['buildings']:
119 | print("Loading building ... ", building)
120 | train.set_window(start=d[dataset]['buildings'][building]['start_time'],
121 | end=d[dataset]['buildings'][building]['end_time'])
122 | main_meter = train.buildings[building].elec.mains()
123 | good_sections = train.buildings[building].elec.mains().good_sections()
124 | main_df = next(main_meter.load(physical_quantity='power', ac_type=self.power['mains'],
125 | sample_period=self.sample_period))
126 | # train_df = train_df[[list(train_df.columns)[0]]]
127 |
128 | # main_df_list = get_sections_df(main_df, good_sections) # train_df
129 | appliance_readings = []
130 |
131 | for appliance_name in self.appliances:
132 | app_meter = train.buildings[building].elec[appliance_name]
133 | app_df = next(app_meter.load(physical_quantity='power', ac_type=self.power['appliance'],
134 | sample_period=self.sample_period))
135 | # app_df_list = get_sections_df(app_df, good_sections)
136 |
137 | if building not in self.sec_dict:
138 | self.sec_dict[building] = get_sections_df_2(good_sections, app_meter.good_sections())
139 |
140 | main_df_list = [main_df[sec[0]:sec[1]] for sec in self.sec_dict[building]]
141 | app_df_list = [app_df[sec[0]:sec[1]] for sec in self.sec_dict[building]]
142 |
143 | appliance_readings.append(app_df_list) # appliance_readings->app_df_list->app_df
144 |
145 | if self.DROP_ALL_NANS:
146 | main_df_list, appliance_readings = self.dropna(main_df_list,
147 | appliance_readings) # Ttrain_list: [pd[sec],pd[sec]..]
148 |
149 | if self.artificial_aggregate:
150 | print("Creating an Artificial Aggregate")
151 | train_df = pd.DataFrame(np.zeros(appliance_readings[0].shape), index=appliance_readings[0].index,
152 | columns=appliance_readings[0].columns)
153 | for app_reading in appliance_readings:
154 | train_df += app_reading
155 |
156 | print("Train Jointly")
157 |
158 | self.train_mains += main_df_list # [[sec],[sec]...]]
159 | train_submeters = appliance_readings.copy()
160 | for j, appliance_name in enumerate(self.appliances):
161 | if self.isState:
162 | for i, app_df in enumerate(appliance_readings[j]):
163 | _, train_submeters[j][i] = get_activations(app_df, config['threshold'][appliance_name])
164 | self.train_submeters[j] += train_submeters[j]
165 |
166 | appliance_readings = []
167 | for i, appliance_name in enumerate(self.appliances):
168 | appliance_readings.append((appliance_name, self.train_submeters[i]))
169 |
170 | self.train_submeters = appliance_readings # [(app_name, [[sec],[sec]...])...]
171 | clf.partial_fit(self.train_mains, self.train_submeters)
172 |
173 | def test_jointly(self, d):
174 |
175 | # store the test_main readings for all buildings
176 | for dataset in d:
177 | print("Loading data for ", dataset, " dataset")
178 | test = DataSet(d[dataset]['path'])
179 | self.sec_dict = {}
180 | for building in d[dataset]['buildings']:
181 | self.test_mains = []
182 | self.test_submeters = [[] for i in range(len(self.appliances))]
183 |
184 | test.set_window(start=d[dataset]['buildings'][building]['start_time'],
185 | end=d[dataset]['buildings'][building]['end_time'])
186 | test_meter = test.buildings[building].elec.mains()
187 | good_sections = test.buildings[building].elec.mains().good_sections()
188 | # self.test_sections = good_sections
189 | main_df = next(test_meter.load(physical_quantity='power', ac_type=self.power['mains'],
190 | sample_period=self.sample_period))
191 |
192 | main_df_list = get_sections_df(main_df, good_sections) # train_df
193 | appliance_readings = []
194 |
195 | for appliance_name in self.appliances:
196 | app_meter = test.buildings[building].elec[appliance_name]
197 |
198 | if building not in self.sec_dict:
199 | self.sec_dict[building] = get_sections_df_2(good_sections, app_meter.good_sections())
200 |
201 | app_df = next(app_meter.load(physical_quantity='power', ac_type=self.power['appliance'],
202 | sample_period=self.sample_period))
203 |
204 | main_df_list = [main_df[sec[0]:sec[1]] for sec in self.sec_dict[building]]
205 | app_df_list = [app_df[sec[0]:sec[1]] for sec in self.sec_dict[building]]
206 | appliance_readings.append(app_df_list)
207 |
208 | if self.DROP_ALL_NANS:
209 | main_df_list, appliance_readings = self.dropna(main_df_list, appliance_readings)
210 |
211 | if self.artificial_aggregate:
212 | print("Creating an Artificial Aggregate")
213 | test_mains = pd.DataFrame(np.zeros(appliance_readings[0].shape), index=appliance_readings[0].index,
214 | columns=appliance_readings[0].columns)
215 | for app_reading in appliance_readings:
216 | test_mains += app_reading
217 |
218 | print("Test Jointly")
219 |
220 | self.test_mains = (main_df_list)
221 | test_submeters = appliance_readings.copy()
222 |
223 | for j, appliance_name in enumerate(self.appliances):
224 | if self.isState:
225 | for i, app_df in enumerate(appliance_readings[j]):
226 | _, test_submeters[j][i] = get_activations(app_df, config['threshold'][appliance_name])
227 | self.test_submeters[j] = (appliance_name, test_submeters[j])
228 |
229 | self.storing_key = str(dataset) + "_" + str(building)
230 | self.call_predict(self.classifiers, building)
231 |
232 | def dropna(self, mains_list, appliance_readings):
233 | """
234 | Drops the missing values in the mains and appliance readings and returns consistent data by computing the intersection of their indices
235 | """
236 | print("Dropping missing values")
237 |
238 | # The below steps are for making sure that data is consistent by doing intersection across appliances
239 | new_main_list = mains_list.copy()
240 | new_appliances_list = appliance_readings.copy()
241 | for j, mains_df in enumerate(mains_list):
242 | mains_df = mains_df.dropna()
243 | # if mains_df.shape[0] < 10:
244 | # continue
245 | for i in range(len(appliance_readings)):
246 | if max(appliance_readings[i][j]) > max(mains_df):
247 | appliance_readings[i][j] = np.nan
248 | print('wrong')
249 | appliance_readings[i][j] = appliance_readings[i][j].dropna()
250 | ix = mains_df.index
251 | for app_df in appliance_readings:
252 | ix = ix.intersection(app_df[j].index)
253 | new_main_list[j] = mains_df.loc[ix]
254 | for i, app_df in enumerate(appliance_readings):
255 | new_appliances_list[i][j] = app_df[j].loc[ix]
256 | j = 0
257 | while j < len(new_main_list):
258 | if new_main_list[j].shape[0] < 99:
259 | del new_main_list[j]
260 | for i in range(len(new_appliances_list)):
261 | del new_appliances_list[i][j]
262 | else:
263 | j += 1
264 | print('dropna finished')
265 | return new_main_list, new_appliances_list
266 |
267 | def store_classifier_instances(self):
268 |
269 | """
270 | This function is responsible for initializing the models with the specified model parameters
271 | """
272 | for name in self.methods:
273 | try:
274 |
275 | clf = self.methods[name]
276 | self.classifiers.append((name, clf))
277 |
278 | except Exception as e:
279 | print("\n\nThe method {model_name} specied does not exist. \n\n".format(model_name=name))
280 | print(e)
281 |
282 | def call_predict(self, classifiers, building):
283 | """
284 | This function computes the predictions on self.test_mains using all the trained models and then compares the learnt models using the specified metrics
285 | """
286 |
287 | pred_overall = {}
288 | gt_overall = {}
289 | for name, clf in classifiers:
290 | gt_overall, pred_overall[name] = self.predict(clf, self.test_mains, self.test_submeters, self.sample_period,
291 | 'Europe/London')
292 |
293 | self.gt_overall = gt_overall
294 | self.pred_overall = pred_overall
295 | test_mains = pd.concat(self.test_mains, axis=0)
296 | if gt_overall.size == 0:
297 | print("No samples found in ground truth")
298 | return None
299 |
300 | for i in gt_overall.columns:
301 | for clf in pred_overall:
302 | if not os.path.exists('result/' + self.storing_key + '/' + str(i) + '/' + str(clf) + '/section_image'):
303 | os.makedirs('result/' + self.storing_key + '/' + str(i) + '/' + str(clf) + '/section_image')
304 | if not os.path.exists('result/' + self.storing_key + '/' + str(i)):
305 | os.makedirs('result/' + self.storing_key + '/' + str(i))
306 | if not os.path.exists('result/' + self.storing_key + '/' + str(i) + '/' + str(clf) + '/section_df'):
307 | os.makedirs('result/' + self.storing_key + '/' + str(i) + '/' + str(clf) + '/section_df')
308 |
309 | print('section_plot:')
310 |
311 | sec_list = self.sec_dict[building]
312 |
313 | for i in gt_overall.columns:
314 | gt_overall_list = [gt_overall[i][sec[0]:sec[1]] for sec in sec_list]
315 | # get_sections_df(gt_overall[i], self.test_sections)
316 | for j, gt in enumerate(gt_overall_list):
317 | for clf in pred_overall:
318 | pred = pred_overall[clf][i]
319 | pred_df_list = [pred[sec[0]:sec[1]] for sec in sec_list]
320 | plt.figure(figsize=(6, 3))
321 | temp_test_main = self.test_mains[j]
322 | temp_gt_overall = gt_overall_list[j]
323 | temp_pred_df = pred_df_list[j]
324 | plt.plot(temp_test_main)
325 | plt.plot(temp_gt_overall)
326 | plt.plot(temp_pred_df)
327 | plt.savefig('result/' + self.storing_key + '/' + str(i) + '/' + str(clf) + '/section_image/' + str(
328 | j) + '.png')
329 | # plt.show()
330 |
331 | p = plt.figure(figsize=(6, 9))
332 | ax1 = p.add_subplot(3, 1, 1)
333 | ax1.plot(temp_test_main)
334 | plt.title('mains')
335 | ax2 = p.add_subplot(3, 1, 2)
336 | ax2.plot(temp_gt_overall)
337 | plt.title('appliance')
338 | ax3 = p.add_subplot(3, 1, 3)
339 | plt.title('predict')
340 | ax3.plot(temp_pred_df)
341 | plt.savefig(
342 | 'result/' + self.storing_key + '/' + str(i) + '/' + str(clf) + '/section_image/' + '_' + str(
343 | j) + '.png')
344 | # plt.show()
345 |
346 | temp_result = pd.DataFrame([], index=temp_pred_df.index, columns=['mains', 'gt', 'predict'])
347 | temp_result['mains'] = temp_test_main.values
348 | temp_result['gt'] = temp_gt_overall
349 | temp_result['predict'] = temp_pred_df
350 |
351 | temp_result.to_csv(
352 | 'result/' + self.storing_key + '/' + str(i) + '/' + str(clf) + '/section_df/' + str(
353 | j) + '.csv')
354 |
355 | # plt.show()
356 |
357 | for i in gt_overall.columns:
358 | temp_result = copy.deepcopy(config['result'])
359 | plt.figure()
360 | if not self.isState:
361 | plt.plot(test_mains, label='Mains reading')
362 | plt.plot(gt_overall[i], label='Truth')
363 | for clf in pred_overall:
364 | plt.plot(pred_overall[clf][i], label=clf)
365 | plt.title(i)
366 | plt.legend()
367 | plt.savefig('result/' + self.storing_key + '/' + str(i) + '/' + str(clf) + '/' + 'all.png')
368 |
369 | for clf in pred_overall:
370 | temp_metrics = Metrics(gt_overall[i], pred_overall[clf][i], self.activation_profile[i], self.isState)
371 | temp_result['MSE'].append(temp_metrics.MSE())
372 | temp_result['MAE'].append(temp_metrics.MAE())
373 | temp_result['ACC'].append(temp_metrics.Accuracy())
374 | temp_result['Precision'].append(temp_metrics.Precision())
375 | temp_result['Recall'].append(temp_metrics.Recall())
376 | temp_result['F1'].append(temp_metrics.F_1_score())
377 | temp_result['sMAE'].append(temp_metrics.sMAE(100.0))
378 | # temp_df_result = pd.DataFrame(temp_result, index=[0])
379 |
380 | # plot
381 | for clf in pred_overall:
382 | plt.figure()
383 | plt.plot(gt_overall[i], label='Truth')
384 | plt.plot(pred_overall[clf][i], label=clf)
385 | plt.legend()
386 | plt.savefig('result/' + self.storing_key + '/' + str(i) + '/' + str(clf) + '/' + str(clf) + '.png')
387 |
388 | clfs = [clf for clf in pred_overall]
389 | df_result = pd.DataFrame(temp_result, index=clfs)
390 | df_result.to_csv('result/' + self.storing_key + '/' + str(i) + '/metrics.csv')
391 | print(df_result)
392 | self.errors = df_result
393 |
394 | # for metric in self.metrics:
395 | # try:
396 | # loss_function = globals()[metric]
397 | # except:
398 | # print("Loss function ",metric, " is not supported currently!")
399 | # continue
400 |
401 | # computed_metric={}
402 | # for clf_name,clf in classifiers:
403 | # computed_metric[clf_name] = self.compute_loss(gt_overall, pred_overall[clf_name], loss_function)
404 | # computed_metric = pd.DataFrame(computed_metric)
405 | # print("............ " ,metric," ..............")
406 | # print(temp_df_result)
407 | # self.errors.append(computed_metric)
408 | # self.errors_keys.append(self.storing_key + "_" + metric)
409 |
410 | def predict(self, clf, test_elec, test_submeters, sample_period, timezone):
411 | """
412 | Generates predictions on the test dataset using the specified classifier.
413 | """
414 |
415 | print("Generating predictions for :", clf.MODEL_NAME)
416 | # "ac_type" varies according to the dataset used.
417 | # Make sure to use the correct ac_type before using the default parameters in this code.
418 |
419 | unvalid_pred_list = clf.disaggregate_chunk(test_elec)
420 | pred_list = []
421 | # The raw predictions may not carry timestamps, since the neural nets work on windowed arrays
422 | # Each prediction DataFrame holds the readings for all the appliances
423 |
424 | # Trim each prediction chunk to the length of the corresponding ground-truth section
425 | len_list = []
426 | for meter, data in test_submeters:
427 | for d in data:
428 | len_list.append(d.shape[0])
429 | break
430 | for i, pred in enumerate(unvalid_pred_list):
431 | valid_pred = pred.iloc[:len_list[i], :]
432 | pred_list.append(valid_pred)
433 |
434 | for pred in pred_list:
435 | print(pred.shape)
436 |
437 |
438 |
439 |
440 | concat_pred_df = pd.concat(pred_list, axis=0)
441 | # print('='*40)
442 | # print(concat_pred_df.shape)
443 | # print('='*40)
444 |
445 | gt = {}
446 | for meter, data in test_submeters:
447 | concatenated_df_app = pd.concat(data, axis=0)
448 | index = concatenated_df_app.index
449 | gt[meter] = pd.Series(concatenated_df_app.values.flatten(), index=index)
450 | # print('=' * 40)
451 | # print(gt[meter].shape)
452 | # print('=' * 40)
453 |
454 | gt_overall = pd.DataFrame(gt, dtype='float32')
455 | pred = {}
456 | for app_name in concat_pred_df.columns:
457 | app_series_values = concat_pred_df[app_name].values.flatten()
458 | # Neural nets do extra padding sometimes, to fit, so get rid of extra predictions
459 | app_series_values = app_series_values[:len(gt_overall[app_name])]
460 | pred[app_name] = pd.Series(app_series_values, index=gt_overall.index)
461 | # print('=' * 40)
462 | # print(pred[app_name].shape)
463 | # print('=' * 40)
464 | pred_overall = pd.DataFrame(pred, dtype='float32')
465 | return gt_overall, pred_overall
466 |
467 | # metrics
468 | def compute_loss(self, gt, clf_pred, loss_function):
469 | error = {}
470 | for app_name in gt.columns:
471 | error[app_name] = loss_function(gt[app_name], clf_pred[app_name])
472 | return pd.Series(error)
473 |
--------------------------------------------------------------------------------
/ex2.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "ExecuteTime": {
8 | "end_time": "2020-03-18T12:11:04.298982Z",
9 | "start_time": "2020-03-18T12:11:01.856433Z"
10 | }
11 | },
12 | "outputs": [
13 | {
14 | "name": "stderr",
15 | "output_type": "stream",
16 | "text": [
17 | "Using TensorFlow backend.\n"
18 | ]
19 | }
20 | ],
21 | "source": [
22 | "from api import API\n",
23 | "from disaggregate import ADAE, DAE, Seq2Point, Seq2Seq, WindowGRU, RNN\n",
24 | "import warnings\n",
25 | "warnings.filterwarnings(\"ignore\")\n",
26 | "\n",
27 | "path = 'D:/workspace/nilm/data/redd_data.h5'\n",
28 | "path = 'D:/workspace/nilm/code/databank/redd_data.h5'"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": 2,
34 | "metadata": {
35 | "ExecuteTime": {
36 | "end_time": "2020-03-18T12:11:05.512088Z",
37 | "start_time": "2020-03-18T12:11:05.491712Z"
38 | }
39 | },
40 | "outputs": [],
41 | "source": [
42 | "debug = True\n",
43 | "test = False\n",
44 | "\n",
45 | "if(debug):\n",
46 | " method = {\n",
47 | " 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),\n",
48 | " 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),\n",
49 | " 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),\n",
50 | " 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),\n",
51 | " 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None, 'n_epochs': 1, 'batch_size': 256}),\n",
52 | " }\n",
53 | "else:\n",
54 | " method = {\n",
55 | " 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': None}),\n",
56 | " 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': None}),\n",
57 | " 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': None}),\n",
58 | " 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': None}),\n",
59 | " 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': None}),\n",
60 | " }\n",
61 | "if test:\n",
62 | " method = {\n",
63 | " 'DAE': DAE({'save-model-path': 'DAE', 'pretrained-model-path': 'DAE'}),\n",
64 | " 'RNN': RNN({'save-model-path': 'RNN', 'pretrained-model-path': 'RNN'}),\n",
65 | " 'Seq2Point': Seq2Point({'save-model-path': 'Seq2Point', 'pretrained-model-path': 'Seq2Point'}),\n",
66 | " 'Seq2Seq': Seq2Seq({'save-model-path': 'Seq2Seq', 'pretrained-model-path': 'Seq2Seq'}),\n",
67 | " 'GRU': WindowGRU({'save-model-path': 'GRU', 'pretrained-model-path': 'GRU'}),\n",
68 | " }"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": 3,
74 | "metadata": {
75 | "ExecuteTime": {
76 | "end_time": "2020-03-18T12:11:10.077563Z",
77 | "start_time": "2020-03-18T12:11:10.069711Z"
78 | },
79 | "code_folding": [
80 | 3
81 | ]
82 | },
83 | "outputs": [],
84 | "source": [
85 | "test = False\n",
86 | "ex_train_fridge = {\n",
87 | "\n",
88 | " 'power': {\n",
89 | " 'mains': ['apparent', 'active'],\n",
90 | " 'appliance': ['apparent', 'active']\n",
91 | " },\n",
92 | " 'sample_rate': 6,\n",
93 | "\n",
94 | "\n",
95 | " 'appliances': ['fridge'],\n",
96 | " 'methods': method,\n",
97 | " 'isState': False,\n",
98 | " 'train': {\n",
99 | " 'datasets': {\n",
100 | "\n",
101 | " 'redd': {\n",
102 | " 'path': path,\n",
103 | " 'buildings': {\n",
104 | " 1: {\n",
105 | " 'start_time': '2011-04-18',\n",
106 | " 'end_time': '2011-05-24'\n",
107 | " },\n",
108 | " 3: {\n",
109 | " 'start_time': '2011-04-16',\n",
110 | " 'end_time': '2011-05-30'\n",
111 | " }\n",
112 | "\n",
113 | " }\n",
114 | "\n",
115 | "\n",
116 | " }\n",
117 | " }\n",
118 | " },\n",
119 | "\n",
120 | " 'test': {\n",
121 | " 'datasets': {\n",
122 | " 'redd': {\n",
123 | " 'path': path,\n",
124 | " 'buildings': {\n",
125 | " 2: {\n",
126 | " 'start_time': '2011-04-17',\n",
127 | " 'end_time': '2011-05-22'\n",
128 | " },\n",
129 | " 4: {\n",
130 | " 'start_time': '2011-04-16',\n",
131 | " 'end_time': '2011-06-03'\n",
132 | " }\n",
133 | " }\n",
134 | " }\n",
135 | " },\n",
136 | " },\n",
137 | "}"
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": 4,
143 | "metadata": {
144 | "ExecuteTime": {
145 | "end_time": "2020-03-18T12:11:32.883403Z",
146 | "start_time": "2020-03-18T12:11:17.269505Z"
147 | }
148 | },
149 | "outputs": [
150 | {
151 | "name": "stdout",
152 | "output_type": "stream",
153 | "text": [
154 | "Started training for DAE\n",
155 | "Joint training for DAE\n",
156 | "............... Loading Data for training ...................\n",
157 | "Loading data for redd dataset\n",
158 | "Loading building ... 1\n",
159 | "Loading data for meter ElecMeterID(instance=2, building=1, dataset='REDD') \n",
160 | "Done loading data all meters for this chunk.\n",
161 | "Dropping missing values\n",
162 | "Train Jointly\n",
163 | "Loading building ... 3\n",
164 | "Loading data for meter ElecMeterID(instance=2, building=3, dataset='REDD') \n",
165 | "Done loading data all meters for this chunk.\n",
166 | "Dropping missing values\n",
167 | "Train Jointly\n",
168 | "Doing Preprocessing\n",
169 | "[[-0.0018285 -0.00182979 -0.00182951 ... -0.00183251 -0.00183083\n",
170 | " -0.00183173]\n",
171 | " [-0.00182979 -0.00182951 -0.00182284 ... -0.00183083 -0.00183173\n",
172 | " -0.0018314 ]\n",
173 | " [-0.00182951 -0.00182284 -0.00182591 ... -0.00183173 -0.0018314\n",
174 | " -0.0018315 ]\n",
175 | " ...\n",
176 | " [-0.00212788 -0.00213013 -0.00213155 ... -0.00277778 -0.00277778\n",
177 | " -0.00277778]\n",
178 | " [-0.00213013 -0.00213155 -0.00213157 ... -0.00277778 -0.00277778\n",
179 | " -0.00277778]\n",
180 | " [-0.00213155 -0.00213157 -0.00212882 ... -0.00277778 -0.00277778\n",
181 | " -0.00277778]]\n",
182 | "First model training for fridge\n",
183 | "WARNING:tensorflow:From D:\\Users\\86789\\AppData\\Local\\Continuum\\anaconda3\\envs\\nilmtk-contrib\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:74: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n",
184 | "\n",
185 | "WARNING:tensorflow:From D:\\Users\\86789\\AppData\\Local\\Continuum\\anaconda3\\envs\\nilmtk-contrib\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:517: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n",
186 | "\n",
187 | "WARNING:tensorflow:From D:\\Users\\86789\\AppData\\Local\\Continuum\\anaconda3\\envs\\nilmtk-contrib\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:4138: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.\n",
188 | "\n",
189 | "WARNING:tensorflow:From D:\\Users\\86789\\AppData\\Local\\Continuum\\anaconda3\\envs\\nilmtk-contrib\\lib\\site-packages\\keras\\optimizers.py:790: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n",
190 | "\n",
191 | "_________________________________________________________________\n",
192 | "Layer (type) Output Shape Param # \n",
193 | "=================================================================\n",
194 | "conv1d_1 (Conv1D) (None, 99, 8) 40 \n",
195 | "_________________________________________________________________\n",
196 | "flatten_1 (Flatten) (None, 792) 0 \n",
197 | "_________________________________________________________________\n",
198 | "dense_1 (Dense) (None, 792) 628056 \n",
199 | "_________________________________________________________________\n",
200 | "dense_2 (Dense) (None, 128) 101504 \n",
201 | "_________________________________________________________________\n",
202 | "dense_3 (Dense) (None, 792) 102168 \n",
203 | "_________________________________________________________________\n",
204 | "reshape_1 (Reshape) (None, 99, 8) 0 \n",
205 | "_________________________________________________________________\n",
206 | "conv1d_2 (Conv1D) (None, 99, 1) 33 \n",
207 | "=================================================================\n",
208 | "Total params: 831,801\n",
209 | "Trainable params: 831,801\n",
210 | "Non-trainable params: 0\n",
211 | "_________________________________________________________________\n",
212 | "None\n",
213 | "Started Retraining model for fridge\n",
214 | "WARNING:tensorflow:From D:\\Users\\86789\\AppData\\Local\\Continuum\\anaconda3\\envs\\nilmtk-contrib\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:986: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead.\n",
215 | "\n",
216 | "WARNING:tensorflow:From D:\\Users\\86789\\AppData\\Local\\Continuum\\anaconda3\\envs\\nilmtk-contrib\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:973: The name tf.assign is deprecated. Please use tf.compat.v1.assign instead.\n",
217 | "\n",
218 | "Train on 608693 samples, validate on 107417 samples\n",
219 | "Epoch 1/1\n"
220 | ]
221 | },
222 | {
223 | "ename": "UnknownError",
224 | "evalue": "2 root error(s) found.\n (0) Unknown: Failed to get convolution algorithm. This is probably because cuDNN failed to initialize, so try looking to see if a warning log message was printed above.\n\t [[{{node conv1d_1/convolution}}]]\n\t [[loss/mul/_119]]\n (1) Unknown: Failed to get convolution algorithm. This is probably because cuDNN failed to initialize, so try looking to see if a warning log message was printed above.\n\t [[{{node conv1d_1/convolution}}]]\n0 successful operations.\n0 derived errors ignored.",
225 | "output_type": "error",
226 | "traceback": [
227 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
228 | "\u001b[1;31mUnknownError\u001b[0m Traceback (most recent call last)",
229 | "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mAPI\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mex_train_fridge\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
230 | "\u001b[1;32mD:\\workspace\\nilm\\code\\nilmtk-dl\\api.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, params)\u001b[0m\n\u001b[0;32m 68\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0misState\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mparams\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'isState'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 69\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 70\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mexperiment\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mparams\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 71\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 72\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
231 | "\u001b[1;32mD:\\workspace\\nilm\\code\\nilmtk-dl\\api.py\u001b[0m in \u001b[0;36mexperiment\u001b[1;34m(self, params)\u001b[0m\n\u001b[0;32m 92\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 93\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Joint training for \"\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mclf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mMODEL_NAME\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 94\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain_jointly\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mclf\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0md\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 95\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 96\u001b[0m \u001b[1;31m# if it doesn't support chunk wise training\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
232 | "\u001b[1;32mD:\\workspace\\nilm\\code\\nilmtk-dl\\api.py\u001b[0m in \u001b[0;36mtrain_jointly\u001b[1;34m(self, clf, d)\u001b[0m\n\u001b[0;32m 160\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 161\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain_submeters\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mappliance_readings\u001b[0m \u001b[1;31m#[(app_name, [[sec],[sec]...])...]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 162\u001b[1;33m \u001b[0mclf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpartial_fit\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain_mains\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain_submeters\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 163\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 164\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mtest_jointly\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0md\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
233 | "\u001b[1;32mD:\\workspace\\nilm\\code\\nilmtk-dl\\disaggregate\\dae.py\u001b[0m in \u001b[0;36mpartial_fit\u001b[1;34m(self, train_main, train_appliances, do_preprocessing, **load_kwargs)\u001b[0m\n\u001b[0;32m 75\u001b[0m \u001b[0mcheckpoint\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mModelCheckpoint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmonitor\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'val_loss'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mverbose\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msave_best_only\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'min'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 76\u001b[0m \u001b[0mtrain_x\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mv_x\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtrain_y\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mv_y\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtrain_test_split\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtrain_main\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mpower\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtest_size\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m.15\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mrandom_state\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m10\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 77\u001b[1;33m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtrain_x\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtrain_y\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mvalidation_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[0mv_x\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mv_y\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mepochs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mn_epochs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcallbacks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[0mcheckpoint\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mshuffle\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 78\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mload_weights\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 79\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
234 | "\u001b[1;32mD:\\Users\\86789\\AppData\\Local\\Continuum\\anaconda3\\envs\\nilmtk-contrib\\lib\\site-packages\\keras\\engine\\training.py\u001b[0m in \u001b[0;36mfit\u001b[1;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)\u001b[0m\n\u001b[0;32m 1037\u001b[0m \u001b[0minitial_epoch\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0minitial_epoch\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1038\u001b[0m \u001b[0msteps_per_epoch\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0msteps_per_epoch\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1039\u001b[1;33m validation_steps=validation_steps)\n\u001b[0m\u001b[0;32m 1040\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1041\u001b[0m def evaluate(self, x=None, y=None,\n",
235 | "\u001b[1;32mD:\\Users\\86789\\AppData\\Local\\Continuum\\anaconda3\\envs\\nilmtk-contrib\\lib\\site-packages\\keras\\engine\\training_arrays.py\u001b[0m in \u001b[0;36mfit_loop\u001b[1;34m(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)\u001b[0m\n\u001b[0;32m 197\u001b[0m \u001b[0mins_batch\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mins_batch\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtoarray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 198\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 199\u001b[1;33m \u001b[0mouts\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mins_batch\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 200\u001b[0m \u001b[0mouts\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mto_list\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mouts\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 201\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0ml\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mo\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mout_labels\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mouts\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
236 | "\u001b[1;32mD:\\Users\\86789\\AppData\\Local\\Continuum\\anaconda3\\envs\\nilmtk-contrib\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, inputs)\u001b[0m\n\u001b[0;32m 2713\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_legacy_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2714\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2715\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2716\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2717\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mpy_any\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mis_tensor\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mx\u001b[0m \u001b[1;32min\u001b[0m \u001b[0minputs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
237 | "\u001b[1;32mD:\\Users\\86789\\AppData\\Local\\Continuum\\anaconda3\\envs\\nilmtk-contrib\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\u001b[0m in \u001b[0;36m_call\u001b[1;34m(self, inputs)\u001b[0m\n\u001b[0;32m 2673\u001b[0m \u001b[0mfetched\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_callable_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0marray_vals\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_metadata\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2674\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2675\u001b[1;33m \u001b[0mfetched\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_callable_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0marray_vals\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2676\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mfetched\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2677\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
238 | "\u001b[1;32mD:\\Users\\86789\\AppData\\Local\\Continuum\\anaconda3\\envs\\nilmtk-contrib\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1456\u001b[0m ret = tf_session.TF_SessionRunCallable(self._session._session,\n\u001b[0;32m 1457\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_handle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1458\u001b[1;33m run_metadata_ptr)\n\u001b[0m\u001b[0;32m 1459\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1460\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
239 | "\u001b[1;31mUnknownError\u001b[0m: 2 root error(s) found.\n (0) Unknown: Failed to get convolution algorithm. This is probably because cuDNN failed to initialize, so try looking to see if a warning log message was printed above.\n\t [[{{node conv1d_1/convolution}}]]\n\t [[loss/mul/_119]]\n (1) Unknown: Failed to get convolution algorithm. This is probably because cuDNN failed to initialize, so try looking to see if a warning log message was printed above.\n\t [[{{node conv1d_1/convolution}}]]\n0 successful operations.\n0 derived errors ignored."
240 | ]
241 | }
242 | ],
243 | "source": [
244 | "API(ex_train_fridge)"
245 | ]
246 | },
247 | {
248 | "cell_type": "code",
249 | "execution_count": null,
250 | "metadata": {
251 | "code_folding": [
252 | 13,
253 | 35
254 | ]
255 | },
256 | "outputs": [],
257 | "source": [
258 | "test = False\n",
259 | "ex_train_dish_washer = {\n",
260 | "\n",
261 | " 'power': {\n",
262 | " 'mains': ['apparent', 'active'],\n",
263 | " 'appliance': ['apparent', 'active']\n",
264 | " },\n",
265 | " 'sample_rate': 6,\n",
266 | "\n",
267 | "\n",
268 | " 'appliances': ['dish washer'],\n",
269 | " 'methods': method,\n",
270 | " 'isState': False,\n",
271 | " 'train': {\n",
272 | " 'datasets': {\n",
273 | "\n",
274 | " 'redd': {\n",
275 | " 'path': path,\n",
276 | " 'buildings': {\n",
277 | " 1: {\n",
278 | " 'start_time': '2011-04-18',\n",
279 | " 'end_time': '2011-05-24'\n",
280 | " },\n",
281 | " 3: {\n",
282 | " 'start_time': '2011-04-16',\n",
283 | " 'end_time': '2011-05-30'\n",
284 | " }\n",
285 | "\n",
286 | " }\n",
287 | "\n",
288 | "\n",
289 | " }\n",
290 | " }\n",
291 | " },\n",
292 | "\n",
293 | " 'test': {\n",
294 | " 'datasets': {\n",
295 | " 'redd': {\n",
296 | " 'path': path,\n",
297 | " 'buildings': {\n",
298 | " 2: {\n",
299 | " 'start_time': '2011-04-17',\n",
300 | " 'end_time': '2011-05-22'\n",
301 | " },\n",
302 | " 6: {\n",
303 | " 'start_time': '2011-05-21',\n",
304 | " 'end_time': '2011-06-14'\n",
305 | " }\n",
306 | " }\n",
307 | " }\n",
308 | " },\n",
309 | " },\n",
310 | "}"
311 | ]
312 | },
313 | {
314 | "cell_type": "code",
315 | "execution_count": null,
316 | "metadata": {},
317 | "outputs": [],
318 | "source": [
319 | "API(ex_train_dish_washer)"
320 | ]
321 | },
322 | {
323 | "cell_type": "code",
324 | "execution_count": null,
325 | "metadata": {
326 | "code_folding": [
327 | 14,
328 | 35
329 | ]
330 | },
331 | "outputs": [],
332 | "source": [
333 | "test = False\n",
    334 | "ex_train_microwave = {\n",
335 | "\n",
336 | " 'power': {\n",
337 | " 'mains': ['apparent', 'active'],\n",
338 | " 'appliance': ['apparent', 'active']\n",
339 | " },\n",
340 | " 'sample_rate': 6,\n",
341 | "\n",
342 | "\n",
343 | " 'appliances': ['microwave'],\n",
344 | " 'methods': method,\n",
345 | " 'isState': False,\n",
346 | " 'train': {\n",
347 | " 'datasets': {\n",
348 | "\n",
349 | " 'redd': {\n",
350 | " 'path': path,\n",
351 | " 'buildings': {\n",
352 | " 1: {\n",
353 | " 'start_time': '2011-04-18',\n",
354 | " 'end_time': '2011-05-24'\n",
355 | " },\n",
356 | " 3: {\n",
357 | " 'start_time': '2011-04-16',\n",
358 | " 'end_time': '2011-05-30'\n",
359 | " }\n",
360 | "\n",
361 | " }\n",
362 | "\n",
363 | "\n",
364 | " }\n",
365 | " }\n",
366 | " },\n",
367 | "\n",
368 | " 'test': {\n",
369 | " 'datasets': {\n",
370 | " 'redd': {\n",
371 | " 'path': path,\n",
372 | " 'buildings': {\n",
373 | " 2: {\n",
374 | " 'start_time': '2011-04-17',\n",
375 | " 'end_time': '2011-05-22'\n",
376 | " },\n",
377 | " }\n",
378 | " }\n",
379 | " },\n",
380 | " },\n",
381 | "}"
382 | ]
383 | },
384 | {
385 | "cell_type": "code",
386 | "execution_count": null,
387 | "metadata": {},
388 | "outputs": [],
389 | "source": [
    390 | "API(ex_train_microwave)"
391 | ]
392 | },
393 | {
394 | "cell_type": "code",
395 | "execution_count": null,
396 | "metadata": {},
397 | "outputs": [],
398 | "source": []
399 | }
400 | ],
401 | "metadata": {
402 | "kernelspec": {
403 | "display_name": "Python 3.6.10 64-bit ('nilmtk-contrib': conda)",
404 | "language": "python",
405 | "name": "python361064bitnilmtkcontribconda9f2f8c889e6b4d21b5c8df5f0970f2ed"
406 | },
407 | "language_info": {
408 | "codemirror_mode": {
409 | "name": "ipython",
410 | "version": 3
411 | },
412 | "file_extension": ".py",
413 | "mimetype": "text/x-python",
414 | "name": "python",
415 | "nbconvert_exporter": "python",
416 | "pygments_lexer": "ipython3",
417 | "version": "3.6.10"
418 | },
419 | "toc": {
420 | "base_numbering": 1,
421 | "nav_menu": {},
422 | "number_sections": true,
423 | "sideBar": true,
424 | "skip_h1_title": false,
425 | "title_cell": "Table of Contents",
426 | "title_sidebar": "Contents",
427 | "toc_cell": false,
428 | "toc_position": {},
429 | "toc_section_display": true,
430 | "toc_window_display": false
431 | },
432 | "varInspector": {
433 | "cols": {
434 | "lenName": 16,
435 | "lenType": 16,
436 | "lenVar": 40
437 | },
438 | "kernels_config": {
439 | "python": {
440 | "delete_cmd_postfix": "",
441 | "delete_cmd_prefix": "del ",
442 | "library": "var_list.py",
443 | "varRefreshCmd": "print(var_dic_list())"
444 | },
445 | "r": {
446 | "delete_cmd_postfix": ") ",
447 | "delete_cmd_prefix": "rm(",
448 | "library": "var_list.r",
449 | "varRefreshCmd": "cat(var_dic_list()) "
450 | }
451 | },
452 | "types_to_exclude": [
453 | "module",
454 | "function",
455 | "builtin_function_or_method",
456 | "instance",
457 | "_Feature"
458 | ],
459 | "window_display": false
460 | }
461 | },
462 | "nbformat": 4,
463 | "nbformat_minor": 4
464 | }
465 |
--------------------------------------------------------------------------------
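The UnknownError captured in the first training cell above ("Failed to get convolution algorithm ... cuDNN failed to initialize") is a common TensorFlow 1.x GPU initialisation problem rather than an error in the experiment configuration. A frequently used workaround, sketched below under the assumption that the environment runs standalone Keras on the TF 1.x backend (as the deprecation warnings in the cell output suggest), is to enable on-demand GPU memory growth before any model is built:

    # Common workaround sketch (not part of this repository): let TensorFlow
    # allocate GPU memory on demand so cuDNN can initialise its convolution kernels.
    import tensorflow as tf
    from keras import backend as K

    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True            # grow GPU memory as needed
    K.set_session(tf.compat.v1.Session(config=config))

Running this at the top of the notebook, before any model is created, is usually enough for the DAE training cell to proceed; the error also does not occur when TensorFlow falls back to CPU execution.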