├── .gitignore
├── LICENSE
├── README.md
├── examples
    ├── mdn.py
    ├── mdn_recurrent.py
    └── recurrent_subsampling.py
├── neuralnilm
    ├── __init__.py
    ├── batch_norm.py
    ├── combinatorial_optimisation.py
    ├── disaggregate.py
    ├── experiment.py
    ├── init.py
    ├── layers.py
    ├── metrics.py
    ├── net.py
    ├── objectives.py
    ├── plot.py
    ├── rectangulariser.py
    ├── source.py
    ├── tests
    │   ├── __init__.py
    │   └── test_objectives.py
    ├── updates.py
    └── utils.py
├── notebooks
    ├── 3wolfmoon.jpg
    ├── BuildSys_results_plot.ipynb
    ├── ExploreData.ipynb
    ├── GenTestingDataForBuildSysPaper.ipynb
    ├── Rectangulariser.ipynb
    ├── RectangulariserSource.ipynb
    ├── StartAndEndAndMeanSource.ipynb
    ├── TestMultiSource.ipynb
    ├── deconv.ipynb
    ├── experiment_001.ipynb
    ├── experiment_002.ipynb
    ├── experiment_003.ipynb
    ├── experiment_004.ipynb
    ├── experiment_005.ipynb
    ├── experiment_006.ipynb
    ├── experiment_007.ipynb
    ├── experiment_008-Copy0.ipynb
    ├── experiment_008.ipynb
    ├── experiment_009.ipynb
    ├── experiment_010.ipynb
    ├── experiment_011.ipynb
    ├── experiment_012.ipynb
    ├── experiment_013.ipynb
    ├── experiment_014.ipynb
    ├── experiment_015.ipynb
    ├── experiment_016.ipynb
    ├── experiment_017.ipynb
    ├── experiment_018.ipynb
    ├── experiment_019.ipynb
    ├── experiment_020.ipynb
    ├── experiment_021.ipynb
    ├── experiment_022.ipynb
    ├── experiment_023.ipynb
    ├── experiment_024.ipynb
    ├── experiment_025.ipynb
    ├── experiment_026.ipynb
    ├── experiment_027.ipynb
    ├── gen_data_for_exp35_upwards.ipynb
    ├── log_likelihood.ipynb
    ├── neuronilm_estimates_001.hdf
    ├── neuronilm_estimates_002.hdf
    ├── neuronilm_estimates_003.hdf
    ├── neuronilm_estimates_004.hdf
    ├── neuronilm_estimates_006.hdf
    ├── neuronilm_estimates_007.hdf
    ├── neuronilm_estimates_009.hdf
    ├── neuronilm_estimates_010.hdf
    ├── neuronilm_estimates_011.hdf
    ├── neuronilm_estimates_012.hdf
    ├── neuronilm_estimates_013.hdf
    ├── neuronilm_estimates_014.hdf
    ├── neuronilm_estimates_016.hdf
    ├── neuronilm_estimates_017.hdf
    ├── neuronilm_estimates_018.hdf
    ├── neuronilm_estimates_019.hdf
    ├── neuronilm_estimates_021.hdf
    ├── neuronilm_estimates_022.hdf
    ├── neuronilm_estimates_023.hdf
    ├── neuronilm_estimates_024.hdf
    ├── neuronilm_estimates_025.hdf
    ├── neuronilm_estimates_026.hdf
    ├── neuronilm_estimates_027.hdf
    ├── scale_cost.ipynb
    ├── scale_cost2.ipynb
    ├── scaled_cost3.ipynb
    ├── synthetic_data_generator.ipynb
    ├── test RandomSegments.ipynb
    ├── test_nilmtk_source.ipynb
    ├── test_real_appliance_source.ipynb
    ├── test_toy_source.ipynb
    └── visualise_activations.ipynb
├── scripts
    ├── __init__.py
    ├── benchmark_other_nilm_algos.py
    ├── disag_534.py
    ├── disag_544.py
    ├── disag_545a.py
    ├── disag_545b.py
    ├── disag_545c.py
    ├── disag_545d.py
    ├── disag_566.py
    ├── disag_567.py
    ├── e100.py
    ├── e101.py
    ├── e102.py
    ├── e103.py
    ├── e104.py
    ├── e105.py
    ├── e106.py
    ├── e107.py
    ├── e108.py
    ├── e109.py
    ├── e110.py
    ├── e111.py
    ├── e112.py
    ├── e113.py
    ├── e114.py
    ├── e115.py
    ├── e116.py
    ├── e117.py
    ├── e118.py
    ├── e119.py
    ├── e120.py
    ├── e121.py
    ├── e122.py
    ├── e123.py
    ├── e124.py
    ├── e125.py
    ├── e126.py
    ├── e127.py
    ├── e128.py
    ├── e129.py
    ├── e130.py
    ├── e131.py
    ├── e132.py
    ├── e133.py
    ├── e134.py
    ├── e135.py
    ├── e136.py
    ├── e137.py
    ├── e138.py
    ├── e139.py
    ├── e140.py
    ├── e141.py
    ├── e142.py
    ├── e143.py
    ├── e144.py
    ├── e145.py
    ├── e146.py
    ├── e147.py
    ├── e148.py
    ├── e149.py
    ├── e150.py
    ├── e151.py
    ├── e152.py
    ├── e153.py
    ├── e154.py
    ├── e155.py
    ├── e156.py
    ├── e157.py
    ├── e158.py
    ├── e159.py
    ├── e160.py
    ├── e161.py
    ├── e162.py
    ├── e163.py
    ├── e164.py
    ├── e165.py
    ├── e166.py
    ├── e167.py
    ├── e168.py
    ├── e169.py
    ├── e170.py
    ├── e171.py
    ├── e172.py
    ├── e173.py
    ├── e174.py
    ├── e175.py
    ├── e176.py
    ├── e177.py
    ├── e178.py
    ├── e179.py
    ├── e180.py
    ├── e181.py
    ├── e182.py
    ├── e183.py
    ├── e184.py
    ├── e185.py
    ├── e186.py
    ├── e187.py
    ├── e188.py
    ├── e189.py
    ├── e190.py
    ├── e191.py
    ├── e192.py
    ├── e193.py
    ├── e194.py
    ├── e195.py
    ├── e196.py
    ├── e197.py
    ├── e198.py
    ├── e199.py
    ├── e200.py
    ├── e201.py
    ├── e202.py
    ├── e203.py
    ├── e204.py
    ├── e205.py
    ├── e206.py
    ├── e207.py
    ├── e208.py
    ├── e209.py
    ├── e210.py
    ├── e211.py
    ├── e212.py
    ├── e213.py
    ├── e214.py
    ├── e215.py
    ├── e216.py
    ├── e217.py
    ├── e218.py
    ├── e219.py
    ├── e220.py
    ├── e221.py
    ├── e222.py
    ├── e223.py
    ├── e224.py
    ├── e225.py
    ├── e226.py
    ├── e227.py
    ├── e228.py
    ├── e229.py
    ├── e230.py
    ├── e231.py
    ├── e232.py
    ├── e233.py
    ├── e234.py
    ├── e235.py
    ├── e236.py
    ├── e237.py
    ├── e238.py
    ├── e239.py
    ├── e240.py
    ├── e241.py
    ├── e242.py
    ├── e243.py
    ├── e244.py
    ├── e245.py
    ├── e246.py
    ├── e247.py
    ├── e248.py
    ├── e249.py
    ├── e250.py
    ├── e251.py
    ├── e252.py
    ├── e253.py
    ├── e254.py
    ├── e255.py
    ├── e256.py
    ├── e257.py
    ├── e258.py
    ├── e259.py
    ├── e260.py
    ├── e261.py
    ├── e262.py
    ├── e263.py
    ├── e264.py
    ├── e265.py
    ├── e266.py
    ├── e267.py
    ├── e268.py
    ├── e269.py
    ├── e270.py
    ├── e271.py
    ├── e272.py
    ├── e273.py
    ├── e274.py
    ├── e275.py
    ├── e276.py
    ├── e277.py
    ├── e278.py
    ├── e279.py
    ├── e280.py
    ├── e281.py
    ├── e282.py
    ├── e283.py
    ├── e284.py
    ├── e285.py
    ├── e286.py
    ├── e287.py
    ├── e288.py
    ├── e289.py
    ├── e290.py
    ├── e291.py
    ├── e292.py
    ├── e293.py
    ├── e294.py
    ├── e295.py
    ├── e296.py
    ├── e297.py
    ├── e298.py
    ├── e299.py
    ├── e300.py
    ├── e301.py
    ├── e302.py
    ├── e303.py
    ├── e304.py
    ├── e305.py
    ├── e306.py
    ├── e307.py
    ├── e308.py
    ├── e309.py
    ├── e310.py
    ├── e311.py
    ├── e312.py
    ├── e313.py
    ├── e314.py
    ├── e315.py
    ├── e316.py
    ├── e317.py
    ├── e318.py
    ├── e319.py
    ├── e320.py
    ├── e321.py
    ├── e322.py
    ├── e323.py
    ├── e324.py
    ├── e325.py
    ├── e326.py
    ├── e327.py
    ├── e328.py
    ├── e329.py
    ├── e330.py
    ├── e331.py
    ├── e332.py
    ├── e333.py
    ├── e334.py
    ├── e335.py
    ├── e336.py
    ├── e337.py
    ├── e338.py
    ├── e339.py
    ├── e340.py
    ├── e341.py
    ├── e342.py
    ├── e343.py
    ├── e344.py
    ├── e345.py
    ├── e346.py
    ├── e347.py
    ├── e348.py
    ├── e349.py
    ├── e350.py
    ├── e351.py
    ├── e352.py
    ├── e353.py
    ├── e354.py
    ├── e355.py
    ├── e356.py
    ├── e357.py
    ├── e358.py
    ├── e359.py
    ├── e36.py
    ├── e360.py
    ├── e361.py
    ├── e362.py
    ├── e363.py
    ├── e364.py
    ├── e365.py
    ├── e366.py
    ├── e367.py
    ├── e368.py
    ├── e369.py
    ├── e37.py
    ├── e370.py
    ├── e371.py
    ├── e372.py
    ├── e373.py
    ├── e374.py
    ├── e375.py
    ├── e376.py
    ├── e377.py
    ├── e378.py
    ├── e379.py
    ├── e38.py
    ├── e380.py
    ├── e381.py
    ├── e382.py
    ├── e383.py
    ├── e384.py
    ├── e385.py
    ├── e386.py
    ├── e387.py
    ├── e388.py
    ├── e389.py
    ├── e39.py
    ├── e390.py
    ├── e391.py
    ├── e392.py
    ├── e393.py
    ├── e394.py
    ├── e395.py
    ├── e396.py
    ├── e397.py
    ├── e398.py
    ├── e399.py
    ├── e40.py
    ├── e400.py
    ├── e401.py
    ├── e402.py
    ├── e403.py
    ├── e404.py
    ├── e405.py
    ├── e406.py
    ├── e407.py
    ├── e408.py
    ├── e409.py
    ├── e41.py
    ├── e410.py
    ├── e411.py
    ├── e412.py
    ├── e413.py
    ├── e414.py
    ├── e415.py
    ├── e416.py
    ├── e417.py
    ├── e418.py
    ├── e419.py
    ├── e42.py
    ├── e420.py
    ├── e421.py
    ├── e422.py
    ├── e423.py
    ├── e424.py
    ├── e425.py
    ├── e426.py
    ├── e427.py
    ├── e428.py
    ├── e429.py
    ├── e430.py
    ├── e431.py
    ├── e432.py
    ├── e433.py
    ├── e434.py
    ├── e435.py
    ├── e436.py
    ├── e437.py
    ├── e438.py
    ├── e439.py
    ├── e43a.py
    ├── e43b.py
    ├── e44.py
    ├── e440.py
    ├── e441.py
    ├── e442.py
    ├── e443.py
    ├── e444.py
    ├── e445.py
    ├── e446.py
    ├── e447.py
    ├── e448.py
    ├── e449.py
    ├── e45.py
    ├── e450.py
    ├── e451.py
    ├── e452.py
    ├── e453.py
    ├── e454.py
    ├── e455.py
    ├── e456.py
    ├── e457.py
    ├── e458.py
    ├── e459.py
    ├── e45b.py
    ├── e46.py
    ├── e460.py
    ├── e461.py
    ├── e462.py
    ├── e463.py
    ├── e464.py
    ├── e465.py
    ├── e466.py
    ├── e467.py
    ├── e468.py
    ├── e469.py
    ├── e47.py
    ├── e470.py
    ├── e471.py
    ├── e472.py
    ├── e473.py
    ├── e474.py
    ├── e475.py
    ├── e476.py
    ├── e477.py
    ├── e478.py
    ├── e479.py
    ├── e48.py
    ├── e480.py
    ├── e481.py
    ├── e482.py
    ├── e483.py
    ├── e484.py
    ├── e485.py
    ├── e486.py
    ├── e487.py
    ├── e488.py
    ├── e489.py
    ├── e49.py
    ├── e490.py
    ├── e491.py
    ├── e492.py
    ├── e493.py
    ├── e494.py
    ├── e495.py
    ├── e496.py
    ├── e497.py
    ├── e498.py
    ├── e499.py
    ├── e50.py
    ├── e500.py
    ├── e501.py
    ├── e502.py
    ├── e503.py
    ├── e504.py
    ├── e505.py
    ├── e506.py
    ├── e507.py
    ├── e508.py
    ├── e509.py
    ├── e51.py
    ├── e510.py
    ├── e511.py
    ├── e512.py
    ├── e513.py
    ├── e514.py
    ├── e515.py
    ├── e516.py
    ├── e517.py
    ├── e518.py
    ├── e519.py
    ├── e520.py
    ├── e521.py
    ├── e522.py
    ├── e523.py
    ├── e524.py
    ├── e525.py
    ├── e526.py
    ├── e527.py
    ├── e528.py
    ├── e529.py
    ├── e52a.py
    ├── e530.py
    ├── e531.py
    ├── e532.py
    ├── e533.py
    ├── e534.py
    ├── e535.py
    ├── e536.py
    ├── e537.py
    ├── e538.py
    ├── e539.py
    ├── e53a.py
    ├── e540.py
    ├── e541.py
    ├── e542.py
    ├── e543.py
    ├── e544.py
    ├── e545.py
    ├── e546.py
    ├── e547.py
    ├── e548.py
    ├── e549.py
    ├── e54a.py
    ├── e550.py
    ├── e551.py
    ├── e552.py
    ├── e553.py
    ├── e554.py
    ├── e555.py
    ├── e556.py
    ├── e557.py
    ├── e558.py
    ├── e559.py
    ├── e55a.py
    ├── e560.py
    ├── e561.py
    ├── e562.py
    ├── e563.py
    ├── e564.py
    ├── e565.py
    ├── e566.py
    ├── e567.py
    ├── e568.py
    ├── e569.py
    ├── e56a
    ├── e570.py
    ├── e571.py
    ├── e572.py
    ├── e573.py
    ├── e574.py
    ├── e57a.py
    ├── e58a.py
    ├── e58b.py
    ├── e58c.py
    ├── e59a.py
    ├── e60a.py
    ├── e61a.py
    ├── e62.py
    ├── e63.py
    ├── e64.py
    ├── e65.py
    ├── e66.py
    ├── e67.py
    ├── e68.py
    ├── e69.py
    ├── e69b.py
    ├── e70.py
    ├── e71.py
    ├── e72.py
    ├── e73.py
    ├── e74.py
    ├── e75.py
    ├── e76.py
    ├── e77.py
    ├── e78.py
    ├── e79.py
    ├── e80.py
    ├── e81.py
    ├── e82.py
    ├── e83.py
    ├── e83c.py
    ├── e83d.py
    ├── e83e.py
    ├── e84.py
    ├── e85.py
    ├── e85b.py
    ├── e86.py
    ├── e87.py
    ├── e88.py
    ├── e89.py
    ├── e90.py
    ├── e91.py
    ├── e92.py
    ├── e93.py
    ├── e94.py
    ├── e95.py
    ├── e96.py
    ├── e97.py
    ├── e98.py
    ├── e99.py
    ├── experiment028.py
    ├── experiment029.py
    ├── experiment030.py
    ├── experiment031.py
    ├── experiment032.py
    ├── experiment033.py
    ├── experiment034.py
    ├── experiment035.py
    ├── gen_data_029.py
    ├── metrics.py
    ├── notes.txt
    ├── plot_metrics.py
    ├── threading_tests.ipynb
    ├── ukdale_appliances.txt
    └── visualise_gen_data.ipynb
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Rope
.ropeproject/

# IPython notebooks
.ipynb_checkpoints/

# Data
*.npy
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
The MIT License (MIT)

Copyright (c) 2015 Jack Kelly

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Neural NILM Prototype

Early prototype of the Neural NILM (non-intrusive load monitoring)
software. This software will be completely re-written as the [Neural
NILM project](https://github.com/JackKelly/neuralnilm).

This is the software that was used to run the experiments for our
[Neural NILM paper](http://arxiv.org/abs/1507.06594).

Note that `Neural NILM Prototype` is completely unsupported and is a
bit of a mess!

If you really want to re-implement my Neural NILM ideas, then I
recommend that you start from scratch using a modern deep-learning
framework like TensorFlow. Honestly, it will be easier in the long run!

Directories:

* `neuralnilm` contains re-usable library code
* `scripts` contains runnable experiments
* `notebooks` contains IPython Notebooks (mostly for testing stuff out)

The script which specified the experiments I ran in my paper is
[e567.py](https://github.com/JackKelly/neuralnilm_prototype/blob/master/scripts/e567.py).
(It's a pretty horrible bit of code! Written in a rush!) In that
script, you can see the `SEQ_LENGTH` for each appliance and the
`N_SEQ_PER_BATCH` (the number of training examples per batch).
Basically, the sequence length varied from 128 (for the kettle) up to
1536 (for the dish washer).
And the number of sequences per batch was usually 64, although I had to
reduce that to 16 for the RNN on the longer sequences.

The nets took a long time to train (I don't remember exactly how long,
but it was on the order of one day per net per appliance). You can see
exactly how long I trained each net in that `e567.py` script (look at
the `def net_dict_` functions and look for `epochs`... that's the number
of batches (not epochs!) given to the net during training). It's 300,000
for the rectangles net, 100,000 for the AE and 10,000 for the RNN
(because the RNN was a *lot* slower to train... I chose these numbers
because the nets appeared to stop learning after this number of training
iterations).
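
To keep those numbers straight, here is a hedged summary sketch (these
names are illustrative, not the actual variables in `e567.py`):

```python
# Illustrative summary of the training set-up described above.
# These names are hypothetical; the real configuration lives in scripts/e567.py.
SEQ_LENGTH = {'kettle': 128, 'dish washer': 1536}  # varied per appliance
N_SEQ_PER_BATCH = 64  # reduced to 16 for the RNN on the longer sequences
TRAINING_ITERATIONS = {  # 'epochs' in the code actually means batches
    'rectangles': 300000,
    'autoencoder': 100000,
    'rnn': 10000,  # the RNN was much slower to train
}
```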
--------------------------------------------------------------------------------
/examples/mdn.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt

import lasagne
from lasagne.utils import floatX
from lasagne.layers import InputLayer, DenseLayer
from lasagne.nonlinearities import tanh

from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import mdn_nll

# Number of hidden (dense) layers and number of units per layer
N_HIDDEN_LAYERS = 2
N_UNITS_PER_LAYER = 5
N_COMPONENTS = 3
# Shape of each batch: (number of training examples, number of features)
SHAPE = (128, 1)
# SGD learning rate
LEARNING_RATE = 0.001
# Number of iterations to train the net
N_ITERATIONS = 5000

np.random.seed(42)


def gen_data():
    '''
    Generate toy data, from Bishop p273.

    :returns:
        - X : np.ndarray, shape=(n_batch, 1)
            Input sequence
        - t : np.ndarray, shape=(n_batch, 1)
            Target sequence
    '''
    t = np.random.uniform(low=0.1, high=0.9, size=SHAPE)
    noise = np.random.uniform(low=-0.1, high=0.1, size=SHAPE)
    X = t + (0.3 * np.sin(2 * np.pi * t)) + noise
    return floatX(X), floatX(t)


X_val, t_val = gen_data()

# Configure layers
layers = [InputLayer(shape=SHAPE)]
for i in range(N_HIDDEN_LAYERS):
    layer = DenseLayer(layers[-1], N_UNITS_PER_LAYER, nonlinearity=tanh)
    layers.append(layer)
layers.append(MixtureDensityLayer(
    layers[-1], num_units=t_val.shape[-1], num_components=N_COMPONENTS))

print("Total parameters: {}".format(
    sum([p.get_value().size
         for p in lasagne.layers.get_all_params(layers[-1])])))

X = T.matrix('X')
t = T.matrix('t')

# add test values
X.tag.test_value = floatX(np.random.rand(*SHAPE))
t.tag.test_value = floatX(np.random.rand(*SHAPE))

loss_func = mdn_nll
y = layers[-1].get_output(X)
loss = loss_func(y, t)

all_params = lasagne.layers.get_all_params(layers[-1])
updates = lasagne.updates.nesterov_momentum(loss, all_params, LEARNING_RATE)

# Theano functions for training, getting output, and computing loss
print("Compiling Theano functions...")
train = theano.function([X, t], loss, updates=updates)
y_pred = theano.function([X], layers[-1].get_output(X))
compute_loss = theano.function([X, t], loss)
print("Done compiling Theano functions.")

# Train the net
print("Starting training...")
costs = np.zeros(N_ITERATIONS)
for n in range(N_ITERATIONS):
    X, t = gen_data()
    costs[n] = train(X, t)
    if not n % 100:
        cost_val = compute_loss(X_val, t_val)
        print("Iteration {} validation cost = {}".format(n, cost_val))

# Plot means
ax = plt.gca()
y = y_pred(X_val)
for i in range(N_COMPONENTS):
    ax.scatter(X_val[:,0], y[0][:,0,i], s=y[2][:,i].mean() * 50)
ax.scatter(X_val[:,0], t_val[:,0], color='g')
plt.show()
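
The loss minimised above is the standard mixture-density negative
log-likelihood from Bishop (1994): NLL = -log Σ_k π_k N(t | μ_k, σ_k).
As a reference point, here is a minimal NumPy sketch of that quantity
(an assumption about what `mdn_nll` computes, not the actual Theano
implementation in `neuralnilm/objectives.py`):

```python
import numpy as np


def mdn_nll_numpy(mu, sigma, mixing, t):
    """Mixture density negative log-likelihood (Bishop, 1994).

    mu, sigma, mixing : arrays of shape (n_samples, n_components)
    t : targets, shape (n_samples, 1)
    """
    # Gaussian density of each target under each mixture component
    density = (np.exp(-0.5 * ((t - mu) / sigma) ** 2)
               / (sigma * np.sqrt(2 * np.pi)))
    # Weight by the mixing coefficients, sum over components,
    # then average the negative log-likelihood over samples.
    return -np.log((mixing * density).sum(axis=1)).mean()
```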
--------------------------------------------------------------------------------
/neuralnilm/__init__.py:
--------------------------------------------------------------------------------
from net import Net
# from layers import (BLSTMLayer,
#                     BidirectionalLayer, BidirectionalRecurrentLayer)
from source import ToySource, NILMTKSource, RealApplianceSource
--------------------------------------------------------------------------------
/neuralnilm/combinatorial_optimisation.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
import theano
import numpy as np


def combinatorial_optimisation(network_input,
                               network_output,
                               input_normalisation_stats,
                               output_normalisation_stats):
    """
    network_input :
        shape = (n_seq_per_batch, seq_length, n_inputs)
    network_output :
        shape = (n_seq_per_batch, seq_length, n_outputs, n_components, 3)
    input_normalisation_stats, output_normalisation_stats :
        dicts with keys {'mean', 'std'}.  Each is a 1D numpy array with
        values for each appliance.
    """

    """
    For each time step:
      get modal values (means) from network output
      convert modal values to power in watts (I think Source scales twice!
        Once to [0,1], then to standardised)
      go through each combination of modal power (watts) values:
        for each appliance, consider 3 states: off, mean1, mean2
        if the sum is above network input + margin of error then discard
        otherwise get mean negative log likelihood for this combination.
        if the NLL is lower than the previous lowest then store this
        combination and set the lowest NLL.
    """

    mu = network_output[:, :, :, :, 0]
    sigma = network_output[:, :, :, :, 1]
    mixing = network_output[:, :, :, :, 2]
    n_appliances = mu.shape[2]
    n_components = mu.shape[3]
    mu_watts = un_normalise(mu, output_normalisation_stats)


def un_normalise(normalised, stats):
    """
    To un-normalise:
    1. multiply by stdev
    2. add mean

    Parameters
    ----------
    normalised :
        shape = (n_seq_per_batch, seq_length, n_outputs, ...)
    stats :
        dict with keys {'mean', 'std'}.  Each is a 1D numpy array with
        values for each appliance.

    Returns
    -------
    watts
    """
    n_appliances = normalised.shape[2]
    # Copy the normalised values before scaling in place (np.empty here
    # would scale uninitialised memory instead of the inputs).
    watts = np.array(normalised, dtype=theano.config.floatX)
    for appliance_i in range(n_appliances):
        watts_appliance = watts[:, :, appliance_i]
        watts_appliance *= stats['std'][appliance_i]
        watts_appliance += stats['mean'][appliance_i]
    return watts
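
As dumped above, `combinatorial_optimisation` extracts the mixture
parameters and then stops; the per-time-step search described in its
docstring was never implemented. Here is a minimal sketch of that search
under the docstring's assumptions (three candidate states per appliance;
`nll_per_state_t` and `margin` are hypothetical names for quantities the
docstring implies, not part of the prototype):

```python
from itertools import product

import numpy as np


def search_combinations(mu_watts_t, nll_per_state_t, mains_t, margin=100):
    """Brute-force combinatorial search for a single time step.

    mu_watts_t : array, shape = (n_appliances, n_states)
        Candidate power (watts) per appliance; column 0 is 'off' (0 W).
    nll_per_state_t : array, shape = (n_appliances, n_states)
        Negative log-likelihood of each candidate state.
    mains_t : float
        Mains power (watts) at this time step.
    """
    n_appliances, n_states = mu_watts_t.shape
    best_nll, best_combo = np.inf, None
    for combo in product(range(n_states), repeat=n_appliances):
        total = sum(mu_watts_t[i, s] for i, s in enumerate(combo))
        if total > mains_t + margin:
            continue  # discard combinations above network input + margin
        nll = np.mean([nll_per_state_t[i, s] for i, s in enumerate(combo)])
        if nll < best_nll:  # store the lowest-NLL combination so far
            best_nll, best_combo = nll, combo
    return best_combo, best_nll
```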
--------------------------------------------------------------------------------
/neuralnilm/experiment.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
import os
import logging
from sys import stdout


def change_dir(base_path, full_exp_name):
    path = os.path.join(base_path, full_exp_name)
    try:
        os.mkdir(path)
    except OSError as exception:
        if exception.errno == 17:
            print(path, "already exists.  Reusing directory.")
        else:
            raise
    os.chdir(path)


def configure_logger(full_exp_name):
    logger = logging.getLogger(full_exp_name)
    if not logger.handlers:
        fh = logging.FileHandler(full_exp_name + '.log')
        formatter = logging.Formatter('%(asctime)s %(message)s')
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        logger.addHandler(logging.StreamHandler(stream=stdout))
        logger.setLevel(logging.DEBUG)
    logger.info("***********************************")
    logger.info("Preparing " + full_exp_name + "...")


def init_experiment(base_path, experiment, full_exp_name):
    """
    Parameters
    ----------
    base_path : str
    experiment : str
    full_exp_name : str

    Returns
    -------
    func_call : str
    """
    change_dir(base_path, full_exp_name)
    func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
    configure_logger(full_exp_name)
    return func_call


def run_experiment(net, epochs):
    net.print_net()
    net.compile()
    fit(net, epochs)


def fit(net, epochs):
    print("Running net.fit for", net.experiment_name)
    try:
        net.fit(epochs)
    except KeyboardInterrupt:
        print("Keyboard interrupt received.")
        menu(net, epochs)


def menu(net, epochs):
    # Print menu
    print("")
    print("------------------ OPTIONS ------------------")
    print("d: Enter debugger.")
    print("s: Save plots and params.")
    print("q: Quit all experiments.")
    print("e: Change number of epochs to train this net (currently {})."
          .format(epochs))
    print("c: Continue training.")
    print("")

    # Get input
    selection_str = raw_input("Please enter one or more letters: ")

    # Handle input
    for selection in selection_str:
        if selection == 'd':
            import ipdb
            ipdb.set_trace()
        elif selection == 's':
            net.save()
        elif selection == 'q':
            sure = raw_input("Are you sure you want to quit [Y/n]? ")
            if sure.lower() != 'n':
                raise  # re-raise the KeyboardInterrupt that got us here
        elif selection == 'e':
            new_epochs = raw_input("New number of epochs (or 'None'): ")
            if new_epochs == 'None':
                epochs = None
            else:
                try:
                    epochs = int(new_epochs)
                except ValueError:
                    print("'{}' not an integer!".format(new_epochs))
        elif selection == 'c':
            break
        else:
            print("Selection '{}' not recognised!".format(selection))
            break
    print("Continuing training for {} epochs...".format(epochs))
    fit(net, epochs)
--------------------------------------------------------------------------------
/neuralnilm/init.py:
--------------------------------------------------------------------------------
"""
Functions to create initializers for parameter variables
"""

import numpy as np

from lasagne.utils import floatX
from lasagne.init import Initializer


class Identity(Initializer):
    """Initialise with the identity matrix.  Can be used to initialise
    ReLU RNNs as per [#le2015ReLU-RNNs]_.

    :references:
        .. [#le2015ReLU-RNNs] Le, Jaitly, Hinton, "A Simple Way to
           Initialize Recurrent Networks of Rectified Linear Units" (2015)
    """

    def __init__(self, scale=1):
        """
        :parameters:
            - scale : int or float
              To quote from [#le2015ReLU-RNNs]_:
              "for tasks that exhibit less long range dependencies, scaling
              the identity matrix by a small scalar is an effective
              mechanism to forget long range effects.  This is the same
              behavior as LSTMs when their forget gates are set so that
              the memory decays fast."
        """
        self.scale = scale

    def sample(self, shape):
        if len(shape) != 2 or shape[0] != shape[1]:
            raise RuntimeError(
                "Identity initialisation only works with"
                " square weight matrices.")
        return floatX(np.identity(n=shape[0]) * self.scale)
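
A quick usage sketch for the `Identity` initialiser on its own (the
assertion shows the scaled identity matrix it samples):

```python
import numpy as np

from neuralnilm.init import Identity

# Sample a scaled identity matrix, e.g. for the hidden-to-hidden weight
# matrix of a 4-unit ReLU RNN (Le, Jaitly & Hinton, 2015).
init = Identity(scale=0.5)
W_hid_to_hid = init.sample((4, 4))
assert np.allclose(W_hid_to_hid, 0.5 * np.eye(4))
```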
--------------------------------------------------------------------------------
/neuralnilm/metrics.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
import numpy as np
import sklearn.metrics as metrics


METRICS = {
    'classification': [
        'accuracy_score',
        'f1_score',
        'precision_score',
        'recall_score'
    ],
    'regression': [
        'mean_absolute_error'
    ]
}


def run_metrics(y_true, y_pred, mains, on_power_threshold=4):
    """
    Parameters
    ----------
    on_power_threshold : int
    """
    # Truncate
    n = min(len(y_true), len(y_pred))
    y_true = y_true[:n]
    y_pred = y_pred[:n]

    y_true[y_true <= on_power_threshold] = 0
    y_true_class = y_true > on_power_threshold
    y_pred_class = y_pred > on_power_threshold

    ARGS = {
        'classification': '(y_true_class, y_pred_class)',
        'regression': '(y_true, y_pred)'
    }

    scores = {}
    for metric_type, metric_list in METRICS.iteritems():
        args = ARGS[metric_type]
        for metric in metric_list:
            score = eval('metrics.' + metric + args)
            scores[metric] = float(score)

    sum_y_true = np.sum(y_true)
    sum_y_pred = np.sum(y_pred)
    # negative means underestimates
    relative_error_in_total_energy = float(
        (sum_y_pred - sum_y_true) / max(sum_y_true, sum_y_pred))

    # For total energy correctly assigned
    denominator = 2 * np.sum(mains)
    abs_diff = np.fabs(y_pred - y_true)
    sum_abs_diff = np.sum(abs_diff)
    total_energy_correctly_assigned = 1 - (sum_abs_diff / denominator)
    total_energy_correctly_assigned = float(total_energy_correctly_assigned)

    scores.update({
        'relative_error_in_total_energy': relative_error_in_total_energy,
        'total_energy_correctly_assigned': total_energy_correctly_assigned,
        'sum_abs_diff': float(sum_abs_diff)
    })

    return scores


def across_all_appliances(scores, mains, aggregate_predictions):
    total_sum_abs_diff = 0.0
    for appliance_scores in scores.values():
        total_sum_abs_diff += appliance_scores['sum_abs_diff']

    # Total energy correctly assigned
    # See Eq(1) on p5 of Kolter & Johnson 2011
    denominator = 2 * np.sum(mains)
    total_energy_correctly_assigned = 1 - (total_sum_abs_diff / denominator)
    total_energy_correctly_assigned = float(total_energy_correctly_assigned)

    # explained variance
    n = min(len(mains), len(aggregate_predictions))
    mains = mains[:n]
    aggregate_predictions = aggregate_predictions[:n]

    # Snapshot the per-appliance keys before adding the aggregate key,
    # so the averages below don't try to index the aggregate entry itself.
    appliances = list(scores.keys())
    scores['across all appliances'] = {
        'total_energy_correctly_assigned': total_energy_correctly_assigned,
        'explained_variance_score': float(
            metrics.explained_variance_score(mains, aggregate_predictions)),
        'mean_absolute_error': float(
            np.mean(
                [scores[app]['mean_absolute_error']
                 for app in appliances])),
        'relative_error_in_total_energy': float(
            np.mean(
                [scores[app]['relative_error_in_total_energy']
                 for app in appliances])),
    }
    scores['across all appliances'].update({
        metric: float(np.mean([scores[app][metric] for app in appliances]))
        for metric in METRICS['classification']
    })

    return scores
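
The string-building `eval` dispatch in `run_metrics` can be written
without `eval` by looking the scoring function up on the module. A
minimal equivalent sketch (`run_named_metrics` is a hypothetical helper,
not part of the library):

```python
import sklearn.metrics as metrics


def run_named_metrics(metric_names, y_true, y_pred):
    """Look each sklearn scoring function up by name and apply it."""
    scores = {}
    for name in metric_names:
        metric_func = getattr(metrics, name)  # e.g. metrics.f1_score
        scores[name] = float(metric_func(y_true, y_pred))
    return scores

# e.g. run_named_metrics(METRICS['classification'], y_true_class, y_pred_class)
```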
--------------------------------------------------------------------------------
/neuralnilm/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/neuralnilm/tests/__init__.py
--------------------------------------------------------------------------------
/neuralnilm/tests/test_objectives.py:
--------------------------------------------------------------------------------
#!/usr/bin/python
from __future__ import print_function, division
import unittest
from timeit import default_timer as timer
import numpy as np
import theano
import theano.tensor as T
from neuralnilm import objectives
from neuralnilm.utils import gen_pulse

SEQ_LENGTH = 512
N_SEQ_PER_BATCH = 8
N_OUTPUTS = 5
TARGET_SHAPE = (N_SEQ_PER_BATCH, SEQ_LENGTH, N_OUTPUTS)
DURATIONS = (0, 10, 100, 300, SEQ_LENGTH)
STARTS = (0, 10, 10, 10, 0)
DTYPE = np.float32


def gen_target():
    t = np.zeros(shape=TARGET_SHAPE, dtype=DTYPE)
    for seq_i in range(N_SEQ_PER_BATCH):
        for output_i in range(N_OUTPUTS):
            pulse = gen_pulse(amplitude=1,
                              duration=DURATIONS[output_i],
                              start_index=STARTS[output_i],
                              seq_length=SEQ_LENGTH,
                              dtype=DTYPE)
            t[seq_i, :, output_i] = pulse
    return t


class TestObjectives(unittest.TestCase):

    def test_scaled_cost3(self):
        t = theano.shared(gen_target())
        y = theano.shared(np.zeros(shape=TARGET_SHAPE, dtype=DTYPE))
        self.assertEqual(t.dtype, y.dtype)
        start_time = timer()
        # import ipdb; ipdb.set_trace()  # debugging leftover; disabled so
        # the test can run unattended.
        cost = objectives.scaled_cost3(y, t)
        end_time = timer()
        print("Time: {:.3f}s".format(end_time - start_time))
        print(cost.eval())
        cost = objectives.scaled_cost3(y, t, ignore_inactive=False)
        print(cost.eval())


if __name__ == '__main__':
    unittest.main()
--------------------------------------------------------------------------------
/neuralnilm/updates.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
import numpy as np

import theano
import theano.tensor as T
from theano.gradient import grad_clip

from neuralnilm.utils import sfloatX


def clipped_nesterov_momentum(loss, all_params, learning_rate,
                              clip_range, momentum=0.9):
    # Adapted from Lasagne/lasagne/updates.py
    all_grads = theano.grad(grad_clip(loss, clip_range[0], clip_range[1]),
                            all_params)

    updates = []

    for param_i, grad_i in zip(all_params, all_grads):
        mparam_i = theano.shared(np.zeros(param_i.get_value().shape,
                                          dtype=theano.config.floatX))
        v = momentum * mparam_i - learning_rate * grad_i  # new momentum
        w = param_i + momentum * v - learning_rate * grad_i  # new param values
        updates.append((mparam_i, v))
        updates.append((param_i, w))

    return updates


def anneal_learning_rate(initial_learning_rate, normaliser, iteration):
    return sfloatX(initial_learning_rate / (1 + (iteration / normaliser)))
--------------------------------------------------------------------------------
/neuralnilm/utils.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
import theano
import theano.tensor as T
import numpy as np


def remove_nones(*args):
    return [a for a in args if a is not None]


def sfloatX(data):
    """Convert scalar to floatX"""
    return getattr(np, theano.config.floatX)(data)


def none_to_dict(data):
    return {} if data is None else data


def ndim_tensor(name, ndim, dtype=theano.config.floatX):
    tensor_type = T.TensorType(dtype=dtype, broadcastable=((False,) * ndim))
    return tensor_type(name=name)


def gen_pulse(amplitude, duration, start_index, seq_length,
              dtype=theano.config.floatX):
    sequence = np.zeros(seq_length, dtype=dtype)
    end_index = start_index + duration
    sequence[start_index:end_index] += amplitude
    return sequence
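
`anneal_learning_rate` implements hyperbolic (1/t-style) decay: the rate
halves once `iteration` reaches `normaliser`. A quick usage sketch (pass
`normaliser` as a float so Python 2 integer division doesn't bite):

```python
from neuralnilm.updates import anneal_learning_rate

# initial_learning_rate / (1 + iteration / normaliser):
# 0.1 at iteration 0, 0.05 at iteration 1000, 0.025 at iteration 3000.
for iteration in (0, 1000, 3000):
    print(iteration, anneal_learning_rate(0.1, 1000.0, iteration))
```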
--------------------------------------------------------------------------------
/notebooks/3wolfmoon.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/3wolfmoon.jpg
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_001.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_001.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_002.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_002.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_003.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_003.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_004.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_004.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_006.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_006.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_007.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_007.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_009.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_009.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_010.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_010.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_011.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_011.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_012.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_012.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_013.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_013.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_014.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_014.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_016.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_016.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_017.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_017.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_018.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_018.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_019.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_019.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_021.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_021.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_022.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_022.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_023.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_023.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_024.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_024.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_025.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_025.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_026.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_026.hdf
--------------------------------------------------------------------------------
/notebooks/neuronilm_estimates_027.hdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/notebooks/neuronilm_estimates_027.hdf
--------------------------------------------------------------------------------
/notebooks/test_toy_source.ipynb:
--------------------------------------------------------------------------------
{
 "metadata": {
  "name": "",
  "signature": "sha256:883bbfbcfbc283640d58e13d2b9821a3bb79324d9890851d2387bd0a7187c0f9"
 },
 "nbformat": 3,
 "nbformat_minor": 0,
 "worksheets": [
  {
   "cells": [
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "from neuralnilm.source import ToySource\n",
      "import matplotlib.pyplot as plt\n",
      "from Queue import Empty"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 15
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "source = ToySource(100, 1)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 2
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "X, y = source.gen_data()"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 3
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "fig, axes = plt.subplots(2, sharex=True)\n",
      "axes[0].plot(X[0,:,0])\n",
      "axes[1].plot(y[0,:,0])\n",
      "plt.show()"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 6
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "try:\n",
      "    source.queue.get(block=False)\n",
      "except Empty:\n",
      "    print \"empty yo\""
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "empty yo\n"
       ]
      }
     ],
     "prompt_number": 16
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [],
     "language": "python",
     "metadata": {},
     "outputs": []
    }
   ],
   "metadata": {}
  }
 ]
}
--------------------------------------------------------------------------------
/notebooks/visualise_activations.ipynb:
--------------------------------------------------------------------------------
{
 "metadata": {
  "name": "",
  "signature": "sha256:6c9ea8f4031fcac1b599003534d0033f709269dc20c20ea9df61dd090c5fe696"
 },
 "nbformat": 3,
 "nbformat_minor": 0,
 "worksheets": [
  {
   "cells": [
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "from __future__ import print_function, division\n",
      "from neuralnilm.plot import plot_activations\n",
      "import matplotlib.pyplot as plt"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "ename": "ImportError",
       "evalue": "No module named h5py",
       "output_type": "pyerr",
       "traceback": [
        "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m\n\u001b[1;31mImportError\u001b[0m Traceback (most recent call last)",
        "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mneuralnilm\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mplot\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mplot_activations\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
        "\u001b[1;32m/home/jack/workspace/python/neuralnilm/neuralnilm/__init__.py\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mnet\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mNet\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mBLSTMLayer\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mDimshuffleLayer\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0msource\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mToySource\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mNILMTKSource\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mRealApplianceSource\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
        "\u001b[1;32m/home/jack/workspace/python/neuralnilm/neuralnilm/net.py\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mpandas\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 6\u001b[1;33m \u001b[1;32mimport\u001b[0m \u001b[0mh5py\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 7\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mdatetime\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mdatetime\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtimedelta\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 8\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mnumpy\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrandom\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mrand\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
        "\u001b[1;31mImportError\u001b[0m: No module named h5py"
       ]
      }
     ],
     "prompt_number": 1
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "plot_activations('/mnt/sshfs/imperal/workspace/python/neuralnilm/figures/')"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    }
   ],
   "metadata": {}
  }
 ]
}
--------------------------------------------------------------------------------
/scripts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JackKelly/neuralnilm_prototype/2119292e7d5c8a137797ad3c9abf9f37e7f749af/scripts/__init__.py
--------------------------------------------------------------------------------
/scripts/e103.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg')  # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__

NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100

"""
e103
Discovered that bottom layer is hardly changing.  So will try
just a single lstm layer
"""


def exp_a(name):
    source = RealApplianceSource(
        filename='/data/dk3810/ukdale.h5',
        appliances=[
            ['fridge freezer', 'fridge', 'freezer'],
            'hair straighteners',
            'television'
            # 'dish washer',
            # ['washer dryer', 'washing machine']
        ],
        max_appliance_powers=[300, 500, 200],  # , 2500, 2400],
        on_power_thresholds=[20, 20, 20],  # , 20, 20],
        max_input_power=1000,
        min_on_durations=[60, 60, 60],  # , 1800, 1800],
        window=("2013-06-01", "2014-07-01"),
        seq_length=1000,
        output_one_appliance=False,
        boolean_targets=False,
        min_off_duration=60,
        train_buildings=[1],
        validation_buildings=[1],
        skip_probability=0,
        n_seq_per_batch=50
    )

    net = Net(
        experiment_name=name,
        source=source,
        save_plot_interval=SAVE_PLOT_INTERVAL,
        loss_function=crossentropy,
        updates=partial(nesterov_momentum, learning_rate=0.1),
        layers_config=[
            {
                'type': BLSTMLayer,
                'num_units': 50,
                'W_in_to_cell': Uniform(5),
                'gradient_steps': GRADIENT_STEPS
            },
            {
                'type': DenseLayer,
                'num_units': source.n_outputs,
                'nonlinearity': sigmoid
            }
        ]
    )
    return net


def init_experiment(experiment):
    full_exp_name = NAME + experiment
    func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    net = eval(func_call)
    return net


def main():
    for experiment in list('a'):
        full_exp_name = NAME + experiment
        path = os.path.join(PATH, full_exp_name)
        try:
            net = init_experiment(experiment)
            run_experiment(net, path, epochs=3000)
        except KeyboardInterrupt:
            break
        except TrainingError as e:
            print("EXCEPTION:", e)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/scripts/e105.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg')  # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__

NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100

"""
e103
Discovered that bottom layer is hardly changing.  So will try
just a single lstm layer

e104
standard init
lower learning rate
"""


def exp_a(name):
    # Same as 104b but with two BLSTM layers
    source = RealApplianceSource(
        filename='/data/dk3810/ukdale.h5',
        appliances=[
            ['fridge freezer', 'fridge', 'freezer'],
            'hair straighteners',
            'television'
            # 'dish washer',
            # ['washer dryer', 'washing machine']
        ],
        max_appliance_powers=[300, 500, 200],  # , 2500, 2400],
        on_power_thresholds=[20, 20, 20],  # , 20, 20],
        max_input_power=1000,
        min_on_durations=[60, 60, 60],  # , 1800, 1800],
        window=("2013-06-01", "2014-07-01"),
        seq_length=1000,
        output_one_appliance=False,
        boolean_targets=False,
        min_off_duration=60,
        train_buildings=[1],
        validation_buildings=[1],
        skip_probability=0,
        n_seq_per_batch=50
    )

    net = Net(
        experiment_name=name,
        source=source,
        save_plot_interval=SAVE_PLOT_INTERVAL,
        loss_function=crossentropy,
        updates=partial(nesterov_momentum, learning_rate=0.01),
        layers_config=[
            {
                'type': BLSTMLayer,
                'num_units': 50,
                'W_in_to_cell': Uniform(5),
                'gradient_steps': GRADIENT_STEPS
            },
            {
                'type': BLSTMLayer,
                'num_units': 50,
                'W_in_to_cell': Uniform(1),
                'gradient_steps': GRADIENT_STEPS
            },
            {
                'type': DenseLayer,
                'num_units': source.n_outputs,
                'nonlinearity': sigmoid
            }
        ]
    )
    return net


def init_experiment(experiment):
    full_exp_name = NAME + experiment
    func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    net = eval(func_call)
    return net


def main():
    for experiment in list('a'):
        full_exp_name = NAME + experiment
        path = os.path.join(PATH, full_exp_name)
        try:
            net = init_experiment(experiment)
            run_experiment(net, path, epochs=None)
        except KeyboardInterrupt:
            break
        except TrainingError as e:
            print("EXCEPTION:", e)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/scripts/e106.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg')  # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__

NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100

"""
e103
Discovered that bottom layer is hardly changing.  So will try
just a single lstm layer

e104
standard init
lower learning rate

e106
lower learning rate to 0.001

"""


def exp_a(name):
    # Same as 104b but with two BLSTM layers
    source = RealApplianceSource(
        filename='/data/dk3810/ukdale.h5',
        appliances=[
            ['fridge freezer', 'fridge', 'freezer'],
            'hair straighteners',
            'television'
            # 'dish washer',
            # ['washer dryer', 'washing machine']
        ],
        max_appliance_powers=[300, 500, 200],  # , 2500, 2400],
        on_power_thresholds=[20, 20, 20],  # , 20, 20],
        max_input_power=1000,
        min_on_durations=[60, 60, 60],  # , 1800, 1800],
        window=("2013-06-01", "2014-07-01"),
        seq_length=1000,
        output_one_appliance=False,
        boolean_targets=False,
        min_off_duration=60,
        train_buildings=[1],
        validation_buildings=[1],
        skip_probability=0,
        n_seq_per_batch=50
    )

    net = Net(
        experiment_name=name,
        source=source,
        save_plot_interval=SAVE_PLOT_INTERVAL,
        loss_function=crossentropy,
        updates=partial(nesterov_momentum, learning_rate=0.001),
        layers_config=[
            {
                'type': BLSTMLayer,
                'num_units': 50,
                'W_in_to_cell': Uniform(5),
                'gradient_steps': GRADIENT_STEPS
            },
            {
                'type': BLSTMLayer,
                'num_units': 50,
                'W_in_to_cell': Uniform(1),
                'gradient_steps': GRADIENT_STEPS
            },
            {
                'type': DenseLayer,
                'num_units': source.n_outputs,
                'nonlinearity': sigmoid
            }
        ]
    )
    return net


def init_experiment(experiment):
    full_exp_name = NAME + experiment
    func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    net = eval(func_call)
    return net


def main():
    for experiment in list('a'):
        full_exp_name = NAME + experiment
        path = os.path.join(PATH, full_exp_name)
        try:
            net = init_experiment(experiment)
            run_experiment(net, path, epochs=None)
        except KeyboardInterrupt:
            break
        except TrainingError as e:
            print("EXCEPTION:", e)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/scripts/e114.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg')  # Must be before importing matplotlib.pyplot or pylab!
4 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 5 | from lasagne.nonlinearities import sigmoid, rectify 6 | from lasagne.objectives import crossentropy, mse 7 | from lasagne.init import Uniform, Normal 8 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 9 | from lasagne.updates import adagrad, nesterov_momentum 10 | from functools import partial 11 | import os 12 | from neuralnilm.source import standardise 13 | from neuralnilm.experiment import run_experiment 14 | from neuralnilm.net import TrainingError 15 | import __main__ 16 | 17 | NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] 18 | PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" 19 | SAVE_PLOT_INTERVAL = 250 20 | GRADIENT_STEPS = 100 21 | 22 | """ 23 | e103 24 | Discovered that bottom layer is hardly changing. So will try 25 | just a single lstm layer 26 | 27 | e104 28 | standard init 29 | lower learning rate 30 | 31 | e106 32 | lower learning rate to 0.001 33 | 34 | e108 35 | is e107 but with batch size of 5 36 | 37 | e109 38 | Normal(1) for LSTM 39 | 40 | e110 41 | * Back to Uniform(5) for LSTM 42 | * Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f 43 | RESULTS: Seems to run fine again! 44 | 45 | e111 46 | * Try with nntools head 47 | * peepholes=False 48 | RESULTS: appears to be working well. Haven't seen a NaN, 49 | even with training rate of 0.1 50 | 51 | e112 52 | * n_seq_per_batch = 50 53 | 54 | e114 55 | * Trying looking at layer by layer training again. 56 | * Start with single LSTM layer 57 | 58 | """ 59 | 60 | def exp_a(name): 61 | source = RealApplianceSource( 62 | filename='/data/dk3810/ukdale.h5', 63 | appliances=[ 64 | ['fridge freezer', 'fridge', 'freezer'], 65 | 'hair straighteners', 66 | 'television' 67 | # 'dish washer', 68 | # ['washer dryer', 'washing machine'] 69 | ], 70 | max_appliance_powers=[300, 500, 200], #, 2500, 2400], 71 | on_power_thresholds=[20, 20, 20], #, 20, 20], 72 | max_input_power=1000, 73 | min_on_durations=[60, 60, 60], #, 1800, 1800], 74 | window=("2013-06-01", "2014-07-01"), 75 | seq_length=1000, 76 | output_one_appliance=False, 77 | boolean_targets=False, 78 | min_off_duration=60, 79 | train_buildings=[1], 80 | validation_buildings=[1], 81 | skip_probability=0, 82 | n_seq_per_batch=50 83 | ) 84 | 85 | net = Net( 86 | experiment_name=name, 87 | source=source, 88 | save_plot_interval=SAVE_PLOT_INTERVAL, 89 | loss_function=crossentropy, 90 | updates=partial(nesterov_momentum, learning_rate=0.1), 91 | layers_config=[ 92 | { 93 | 'type': LSTMLayer, 94 | 'num_units': 50, 95 | 'W_in_to_cell': Uniform(5), 96 | 'gradient_steps': GRADIENT_STEPS, 97 | 'peepholes': False 98 | }, 99 | { 100 | 'type': DenseLayer, 101 | 'num_units': source.n_outputs, 102 | 'nonlinearity': sigmoid 103 | } 104 | ] 105 | ) 106 | return net 107 | 108 | 109 | def init_experiment(experiment): 110 | full_exp_name = NAME + experiment 111 | func_call = 'exp_{:s}(full_exp_name)'.format(experiment) 112 | print("***********************************") 113 | print("Preparing", full_exp_name, "...") 114 | net = eval(func_call) 115 | return net 116 | 117 | 118 | def main(): 119 | for experiment in list('a'): 120 | full_exp_name = NAME + experiment 121 | path = os.path.join(PATH, full_exp_name) 122 | try: 123 | net = init_experiment(experiment) 124 | run_experiment(net, path, epochs=5000) 125 | except KeyboardInterrupt: 126 | break 127 | except TrainingError as e: 128 | print("EXCEPTION:", e) 129 | 130 | 131 | if __name__ == 
"__main__": 132 | main() 133 | -------------------------------------------------------------------------------- /scripts/e115.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | import matplotlib 3 | matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! 4 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 5 | from lasagne.nonlinearities import sigmoid, rectify 6 | from lasagne.objectives import crossentropy, mse 7 | from lasagne.init import Uniform, Normal 8 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 9 | from lasagne.updates import adagrad, nesterov_momentum 10 | from functools import partial 11 | import os 12 | from neuralnilm.source import standardise 13 | from neuralnilm.experiment import run_experiment 14 | from neuralnilm.net import TrainingError 15 | import __main__ 16 | 17 | NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] 18 | PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" 19 | SAVE_PLOT_INTERVAL = 250 20 | GRADIENT_STEPS = 100 21 | 22 | """ 23 | e103 24 | Discovered that bottom layer is hardly changing. So will try 25 | just a single lstm layer 26 | 27 | e104 28 | standard init 29 | lower learning rate 30 | 31 | e106 32 | lower learning rate to 0.001 33 | 34 | e108 35 | is e107 but with batch size of 5 36 | 37 | e109 38 | Normal(1) for LSTM 39 | 40 | e110 41 | * Back to Uniform(5) for LSTM 42 | * Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f 43 | RESULTS: Seems to run fine again! 44 | 45 | e111 46 | * Try with nntools head 47 | * peepholes=False 48 | RESULTS: appears to be working well. Haven't seen a NaN, 49 | even with training rate of 0.1 50 | 51 | e112 52 | * n_seq_per_batch = 50 53 | 54 | e114 55 | * Trying looking at layer by layer training again. 
56 | * Start with single LSTM layer 57 | 58 | e115 59 | * Learning rate = 1 60 | 61 | """ 62 | 63 | def exp_a(name): 64 | source = RealApplianceSource( 65 | filename='/data/dk3810/ukdale.h5', 66 | appliances=[ 67 | ['fridge freezer', 'fridge', 'freezer'], 68 | 'hair straighteners', 69 | 'television' 70 | # 'dish washer', 71 | # ['washer dryer', 'washing machine'] 72 | ], 73 | max_appliance_powers=[300, 500, 200], #, 2500, 2400], 74 | on_power_thresholds=[20, 20, 20], #, 20, 20], 75 | max_input_power=1000, 76 | min_on_durations=[60, 60, 60], #, 1800, 1800], 77 | window=("2013-06-01", "2014-07-01"), 78 | seq_length=1000, 79 | output_one_appliance=False, 80 | boolean_targets=False, 81 | min_off_duration=60, 82 | train_buildings=[1], 83 | validation_buildings=[1], 84 | skip_probability=0, 85 | n_seq_per_batch=50 86 | ) 87 | 88 | net = Net( 89 | experiment_name=name, 90 | source=source, 91 | save_plot_interval=SAVE_PLOT_INTERVAL, 92 | loss_function=crossentropy, 93 | updates=partial(nesterov_momentum, learning_rate=1.0), 94 | layers_config=[ 95 | { 96 | 'type': LSTMLayer, 97 | 'num_units': 50, 98 | 'W_in_to_cell': Uniform(5), 99 | 'gradient_steps': GRADIENT_STEPS, 100 | 'peepholes': False 101 | }, 102 | { 103 | 'type': DenseLayer, 104 | 'num_units': source.n_outputs, 105 | 'nonlinearity': sigmoid 106 | } 107 | ] 108 | ) 109 | return net 110 | 111 | 112 | def init_experiment(experiment): 113 | full_exp_name = NAME + experiment 114 | func_call = 'exp_{:s}(full_exp_name)'.format(experiment) 115 | print("***********************************") 116 | print("Preparing", full_exp_name, "...") 117 | net = eval(func_call) 118 | return net 119 | 120 | 121 | def main(): 122 | for experiment in list('a'): 123 | full_exp_name = NAME + experiment 124 | path = os.path.join(PATH, full_exp_name) 125 | try: 126 | net = init_experiment(experiment) 127 | run_experiment(net, path, epochs=5000) 128 | except KeyboardInterrupt: 129 | break 130 | except TrainingError as e: 131 | print("EXCEPTION:", e) 132 | 133 | 134 | if __name__ == "__main__": 135 | main() 136 | -------------------------------------------------------------------------------- /scripts/e116.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | import matplotlib 3 | matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! 4 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 5 | from lasagne.nonlinearities import sigmoid, rectify 6 | from lasagne.objectives import crossentropy, mse 7 | from lasagne.init import Uniform, Normal 8 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 9 | from lasagne.updates import adagrad, nesterov_momentum 10 | from functools import partial 11 | import os 12 | from neuralnilm.source import standardise 13 | from neuralnilm.experiment import run_experiment 14 | from neuralnilm.net import TrainingError 15 | import __main__ 16 | 17 | NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] 18 | PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" 19 | SAVE_PLOT_INTERVAL = 250 20 | GRADIENT_STEPS = 100 21 | 22 | """ 23 | e103 24 | Discovered that bottom layer is hardly changing. 
So will try 25 | just a single lstm layer 26 | 27 | e104 28 | standard init 29 | lower learning rate 30 | 31 | e106 32 | lower learning rate to 0.001 33 | 34 | e108 35 | is e107 but with batch size of 5 36 | 37 | e109 38 | Normal(1) for LSTM 39 | 40 | e110 41 | * Back to Uniform(5) for LSTM 42 | * Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f 43 | RESULTS: Seems to run fine again! 44 | 45 | e111 46 | * Try with nntools head 47 | * peepholes=False 48 | RESULTS: appears to be working well. Haven't seen a NaN, 49 | even with training rate of 0.1 50 | 51 | e112 52 | * n_seq_per_batch = 50 53 | 54 | e114 55 | * Trying looking at layer by layer training again. 56 | * Start with single LSTM layer 57 | 58 | e115 59 | * Learning rate = 1 60 | 61 | e116 62 | * Standard inits 63 | 64 | """ 65 | 66 | def exp_a(name): 67 | source = RealApplianceSource( 68 | filename='/data/dk3810/ukdale.h5', 69 | appliances=[ 70 | ['fridge freezer', 'fridge', 'freezer'], 71 | 'hair straighteners', 72 | 'television' 73 | # 'dish washer', 74 | # ['washer dryer', 'washing machine'] 75 | ], 76 | max_appliance_powers=[300, 500, 200], #, 2500, 2400], 77 | on_power_thresholds=[20, 20, 20], #, 20, 20], 78 | max_input_power=1000, 79 | min_on_durations=[60, 60, 60], #, 1800, 1800], 80 | window=("2013-06-01", "2014-07-01"), 81 | seq_length=1000, 82 | output_one_appliance=False, 83 | boolean_targets=False, 84 | min_off_duration=60, 85 | train_buildings=[1], 86 | validation_buildings=[1], 87 | skip_probability=0, 88 | n_seq_per_batch=50 89 | ) 90 | 91 | net = Net( 92 | experiment_name=name, 93 | source=source, 94 | save_plot_interval=SAVE_PLOT_INTERVAL, 95 | loss_function=crossentropy, 96 | updates=partial(nesterov_momentum, learning_rate=1.0), 97 | layers_config=[ 98 | { 99 | 'type': LSTMLayer, 100 | 'num_units': 50, 101 | # 'W_in_to_cell': Uniform(5), 102 | 'gradient_steps': GRADIENT_STEPS, 103 | 'peepholes': False 104 | }, 105 | { 106 | 'type': DenseLayer, 107 | 'num_units': source.n_outputs, 108 | 'nonlinearity': sigmoid 109 | } 110 | ] 111 | ) 112 | return net 113 | 114 | 115 | def init_experiment(experiment): 116 | full_exp_name = NAME + experiment 117 | func_call = 'exp_{:s}(full_exp_name)'.format(experiment) 118 | print("***********************************") 119 | print("Preparing", full_exp_name, "...") 120 | net = eval(func_call) 121 | return net 122 | 123 | 124 | def main(): 125 | for experiment in list('a'): 126 | full_exp_name = NAME + experiment 127 | path = os.path.join(PATH, full_exp_name) 128 | try: 129 | net = init_experiment(experiment) 130 | run_experiment(net, path, epochs=5000) 131 | except KeyboardInterrupt: 132 | break 133 | except TrainingError as e: 134 | print("EXCEPTION:", e) 135 | 136 | 137 | if __name__ == "__main__": 138 | main() 139 | -------------------------------------------------------------------------------- /scripts/e117.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | import matplotlib 3 | matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! 
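# Editor's note (not in the original script): this experiment's change is
# `W_in_to_cell=Uniform(1)` below, which re-initialises only the LSTM's
# input-to-cell weight matrix; lasagne.init.Uniform(r) with a scalar r samples
# uniformly from [-r, r]. A hedged numpy equivalent, for illustration only:
import numpy as np

def _uniform_init_sketch(shape, init_range=1.0, rng=np.random):
    """Sample weights uniformly from [-init_range, init_range] (sketch)."""
    return rng.uniform(-init_range, init_range, size=shape).astype('float32')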
4 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 5 | from lasagne.nonlinearities import sigmoid, rectify 6 | from lasagne.objectives import crossentropy, mse 7 | from lasagne.init import Uniform, Normal 8 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 9 | from lasagne.updates import adagrad, nesterov_momentum 10 | from functools import partial 11 | import os 12 | from neuralnilm.source import standardise 13 | from neuralnilm.experiment import run_experiment 14 | from neuralnilm.net import TrainingError 15 | import __main__ 16 | 17 | NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] 18 | PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" 19 | SAVE_PLOT_INTERVAL = 250 20 | GRADIENT_STEPS = 100 21 | 22 | """ 23 | e103 24 | Discovered that bottom layer is hardly changing. So will try 25 | just a single lstm layer 26 | 27 | e104 28 | standard init 29 | lower learning rate 30 | 31 | e106 32 | lower learning rate to 0.001 33 | 34 | e108 35 | is e107 but with batch size of 5 36 | 37 | e109 38 | Normal(1) for LSTM 39 | 40 | e110 41 | * Back to Uniform(5) for LSTM 42 | * Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f 43 | RESULTS: Seems to run fine again! 44 | 45 | e111 46 | * Try with nntools head 47 | * peepholes=False 48 | RESULTS: appears to be working well. Haven't seen a NaN, 49 | even with training rate of 0.1 50 | 51 | e112 52 | * n_seq_per_batch = 50 53 | 54 | e114 55 | * Trying looking at layer by layer training again. 56 | * Start with single LSTM layer 57 | 58 | e115 59 | * Learning rate = 1 60 | 61 | e116 62 | * Standard inits 63 | 64 | e117 65 | * Uniform(1) init 66 | 67 | """ 68 | 69 | def exp_a(name): 70 | source = RealApplianceSource( 71 | filename='/data/dk3810/ukdale.h5', 72 | appliances=[ 73 | ['fridge freezer', 'fridge', 'freezer'], 74 | 'hair straighteners', 75 | 'television' 76 | # 'dish washer', 77 | # ['washer dryer', 'washing machine'] 78 | ], 79 | max_appliance_powers=[300, 500, 200], #, 2500, 2400], 80 | on_power_thresholds=[20, 20, 20], #, 20, 20], 81 | max_input_power=1000, 82 | min_on_durations=[60, 60, 60], #, 1800, 1800], 83 | window=("2013-06-01", "2014-07-01"), 84 | seq_length=1000, 85 | output_one_appliance=False, 86 | boolean_targets=False, 87 | min_off_duration=60, 88 | train_buildings=[1], 89 | validation_buildings=[1], 90 | skip_probability=0, 91 | n_seq_per_batch=50 92 | ) 93 | 94 | net = Net( 95 | experiment_name=name, 96 | source=source, 97 | save_plot_interval=SAVE_PLOT_INTERVAL, 98 | loss_function=crossentropy, 99 | updates=partial(nesterov_momentum, learning_rate=1.0), 100 | layers_config=[ 101 | { 102 | 'type': LSTMLayer, 103 | 'num_units': 50, 104 | 'W_in_to_cell': Uniform(1), 105 | 'gradient_steps': GRADIENT_STEPS, 106 | 'peepholes': False 107 | }, 108 | { 109 | 'type': DenseLayer, 110 | 'num_units': source.n_outputs, 111 | 'nonlinearity': sigmoid 112 | } 113 | ] 114 | ) 115 | return net 116 | 117 | 118 | def init_experiment(experiment): 119 | full_exp_name = NAME + experiment 120 | func_call = 'exp_{:s}(full_exp_name)'.format(experiment) 121 | print("***********************************") 122 | print("Preparing", full_exp_name, "...") 123 | net = eval(func_call) 124 | return net 125 | 126 | 127 | def main(): 128 | for experiment in list('a'): 129 | full_exp_name = NAME + experiment 130 | path = os.path.join(PATH, full_exp_name) 131 | try: 132 | net = init_experiment(experiment) 133 | run_experiment(net, path, epochs=5000) 134 | except 
KeyboardInterrupt: 135 | break 136 | except TrainingError as e: 137 | print("EXCEPTION:", e) 138 | 139 | 140 | if __name__ == "__main__": 141 | main() 142 | -------------------------------------------------------------------------------- /scripts/e119.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | import matplotlib 3 | matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! 4 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 5 | from lasagne.nonlinearities import sigmoid, rectify 6 | from lasagne.objectives import crossentropy, mse 7 | from lasagne.init import Uniform, Normal 8 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 9 | from lasagne.updates import adagrad, nesterov_momentum 10 | from functools import partial 11 | import os 12 | from neuralnilm.source import standardise 13 | from neuralnilm.experiment import run_experiment 14 | from neuralnilm.net import TrainingError 15 | import __main__ 16 | 17 | NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] 18 | PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" 19 | SAVE_PLOT_INTERVAL = 250 20 | GRADIENT_STEPS = 100 21 | 22 | """ 23 | e103 24 | Discovered that bottom layer is hardly changing. So will try 25 | just a single lstm layer 26 | 27 | e104 28 | standard init 29 | lower learning rate 30 | 31 | e106 32 | lower learning rate to 0.001 33 | 34 | e108 35 | is e107 but with batch size of 5 36 | 37 | e109 38 | Normal(1) for LSTM 39 | 40 | e110 41 | * Back to Uniform(5) for LSTM 42 | * Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f 43 | RESULTS: Seems to run fine again! 44 | 45 | e111 46 | * Try with nntools head 47 | * peepholes=False 48 | RESULTS: appears to be working well. Haven't seen a NaN, 49 | even with training rate of 0.1 50 | 51 | e112 52 | * n_seq_per_batch = 50 53 | 54 | e114 55 | * Trying looking at layer by layer training again. 
56 | * Start with single LSTM layer 57 | 58 | e115 59 | * Learning rate = 1 60 | 61 | e116 62 | * Standard inits 63 | 64 | e117 65 | * Uniform(1) init 66 | 67 | e119 68 | * Learning rate 10 69 | 70 | """ 71 | 72 | def exp_a(name): 73 | source = RealApplianceSource( 74 | filename='/data/dk3810/ukdale.h5', 75 | appliances=[ 76 | ['fridge freezer', 'fridge', 'freezer'], 77 | 'hair straighteners', 78 | 'television' 79 | # 'dish washer', 80 | # ['washer dryer', 'washing machine'] 81 | ], 82 | max_appliance_powers=[300, 500, 200], #, 2500, 2400], 83 | on_power_thresholds=[20, 20, 20], #, 20, 20], 84 | max_input_power=1000, 85 | min_on_durations=[60, 60, 60], #, 1800, 1800], 86 | window=("2013-06-01", "2014-07-01"), 87 | seq_length=1000, 88 | output_one_appliance=False, 89 | boolean_targets=False, 90 | min_off_duration=60, 91 | train_buildings=[1], 92 | validation_buildings=[1], 93 | skip_probability=0, 94 | n_seq_per_batch=50 95 | ) 96 | 97 | net = Net( 98 | experiment_name=name, 99 | source=source, 100 | save_plot_interval=SAVE_PLOT_INTERVAL, 101 | loss_function=crossentropy, 102 | updates=partial(nesterov_momentum, learning_rate=10.0), 103 | layers_config=[ 104 | { 105 | 'type': LSTMLayer, 106 | 'num_units': 50, 107 | 'W_in_to_cell': Uniform(1), 108 | 'gradient_steps': GRADIENT_STEPS, 109 | 'peepholes': False 110 | }, 111 | { 112 | 'type': DenseLayer, 113 | 'num_units': source.n_outputs, 114 | 'nonlinearity': sigmoid 115 | } 116 | ] 117 | ) 118 | return net 119 | 120 | 121 | def init_experiment(experiment): 122 | full_exp_name = NAME + experiment 123 | func_call = 'exp_{:s}(full_exp_name)'.format(experiment) 124 | print("***********************************") 125 | print("Preparing", full_exp_name, "...") 126 | net = eval(func_call) 127 | return net 128 | 129 | 130 | def main(): 131 | for experiment in list('a'): 132 | full_exp_name = NAME + experiment 133 | path = os.path.join(PATH, full_exp_name) 134 | try: 135 | net = init_experiment(experiment) 136 | run_experiment(net, path, epochs=5000) 137 | except KeyboardInterrupt: 138 | break 139 | except TrainingError as e: 140 | print("EXCEPTION:", e) 141 | 142 | 143 | if __name__ == "__main__": 144 | main() 145 | -------------------------------------------------------------------------------- /scripts/e36.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, ToySource 3 | from lasagne.nonlinearities import sigmoid 4 | 5 | source = ToySource( 6 | seq_length=300, 7 | n_seq_per_batch=30 8 | ) 9 | 10 | net = Net( 11 | source=source, 12 | n_cells_per_hidden_layer=[10], 13 | output_nonlinearity=sigmoid, 14 | learning_rate=1e-1 15 | ) 16 | 17 | net.fit(n_iterations=1000) 18 | net.plot_costs() 19 | net.plot_estimates() 20 | -------------------------------------------------------------------------------- /scripts/e37.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource 3 | from lasagne.nonlinearities import sigmoid 4 | 5 | source = RealApplianceSource( 6 | '/data/dk3810/ukdale.h5', 7 | ['fridge freezer', 'hair straighteners', 'television'], 8 | max_input_power=1000, max_output_power=300, 9 | window=("2013-06-01", "2014-06-01") 10 | ) 11 | 12 | net = Net( 13 | source=source, 14 | n_cells_per_hidden_layer=[50,50,50], 15 | output_nonlinearity=sigmoid, 16 | learning_rate=1e-1, 17 | n_dense_cells_per_layer=50 18 | ) 
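# Editor's note (not in the original script): a hedged sketch of the loop that
# net.fit(n_iterations=1600) below presumably runs; the real loop lives in
# neuralnilm.net and also handles validation and plot saving. The names used
# here (get_batch, train_fn) are hypothetical, for illustration only:
#
#     for iteration in range(1600):
#         X, y = source.get_batch()   # (n_seq_per_batch, seq_length, ...)
#         cost = train_fn(X, y)       # one gradient step on the batch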
19 | 20 | net.fit(n_iterations=1600) 21 | net.plot_costs() 22 | net.plot_estimates() 23 | -------------------------------------------------------------------------------- /scripts/e38.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource 3 | from lasagne.nonlinearities import sigmoid 4 | 5 | """ 6 | Changes: 7 | * in_to_cell init weights are now Normal(1.0) 8 | * output all appliances 9 | """ 10 | 11 | source = RealApplianceSource( 12 | '/data/dk3810/ukdale.h5', 13 | ['fridge freezer', 'hair straighteners', 'television'], 14 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 15 | window=("2013-06-01", "2014-06-01"), 16 | output_one_appliance=False 17 | # sample_period=15, seq_length=400 18 | ) 19 | 20 | net = Net( 21 | experiment_name="e38b", 22 | source=source, 23 | n_cells_per_hidden_layer=[50,50,50], 24 | output_nonlinearity=sigmoid, 25 | learning_rate=1e-1, 26 | n_dense_cells_per_layer=50, 27 | # validation_interval=2, 28 | save_plot_interval=250 29 | ) 30 | 31 | net.fit() 32 | 33 | #net.plot_costs() 34 | #net.plot_estimates() 35 | -------------------------------------------------------------------------------- /scripts/e39.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource 3 | from lasagne.nonlinearities import sigmoid 4 | 5 | """ 6 | Changes: 7 | * in_to_cell init weights are now Normal(1.0) 8 | * output all appliances 9 | """ 10 | 11 | source = RealApplianceSource( 12 | '/data/dk3810/ukdale.h5', 13 | ['fridge freezer', 'hair straighteners', 'television'], 14 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 15 | window=("2013-06-01", "2014-12-01"), 16 | output_one_appliance=False 17 | # sample_period=15, seq_length=400 18 | ) 19 | 20 | net = Net( 21 | experiment_name="e39a", 22 | source=source, 23 | n_cells_per_hidden_layer=[200,100,100], 24 | output_nonlinearity=sigmoid, 25 | learning_rate=1e-1, 26 | n_dense_cells_per_layer=200, 27 | # validation_interval=2, 28 | save_plot_interval=250 29 | ) 30 | 31 | # [200,200,200] n_dense_cells=200 got killed before training 32 | 33 | net.fit() 34 | 35 | #net.plot_costs() 36 | #net.plot_estimates() 37 | -------------------------------------------------------------------------------- /scripts/e40.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource 3 | from lasagne.nonlinearities import sigmoid 4 | 5 | """ 6 | Changes: 7 | * in_to_cell init weights are now Normal(1.0) 8 | * output all appliances 9 | * boolean targets 10 | 11 | Results: 12 | * I think there was a bug in Source because it was 13 | spitting out lots of empty training examples. 14 | So ignore the results! 
15 | """ 16 | 17 | source = RealApplianceSource( 18 | '/data/dk3810/ukdale.h5', 19 | ['fridge freezer', 'hair straighteners', 'television'], 20 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 21 | window=("2013-06-01", "2014-07-01"), 22 | output_one_appliance=False, 23 | boolean_targets=True 24 | # sample_period=15, seq_length=400 25 | ) 26 | 27 | net = Net( 28 | experiment_name="e40a", 29 | source=source, 30 | n_cells_per_hidden_layer=[50,50,50] 31 | output_nonlinearity=sigmoid, 32 | learning_rate=1e-1, 33 | n_dense_cells_per_layer=50, 34 | # validation_interval=2, 35 | save_plot_interval=250 36 | ) 37 | 38 | # [200,200,200] n_dense_cells=200 got killed before training 39 | 40 | net.fit() 41 | 42 | #net.plot_costs() 43 | #net.plot_estimates() 44 | -------------------------------------------------------------------------------- /scripts/e41.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | 6 | """ 7 | Setup: 8 | * in_to_cell init weights are now Normal(1.0) 9 | * output all appliances 10 | * boolean targets 11 | 12 | Changes: 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | 17 | Results: 18 | """ 19 | 20 | source = RealApplianceSource( 21 | '/data/dk3810/ukdale.h5', 22 | ['fridge freezer', 'hair straighteners', 'television'], 23 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 24 | window=("2013-06-01", "2014-07-01"), 25 | output_one_appliance=False, 26 | boolean_targets=True, 27 | min_on_duration=60 28 | # sample_period=15, seq_length=400 29 | ) 30 | 31 | net = Net( 32 | experiment_name="e41a", 33 | source=source, 34 | n_cells_per_hidden_layer=[50,50,50], 35 | output_nonlinearity=sigmoid, 36 | learning_rate=1e-1, 37 | n_dense_cells_per_layer=50, 38 | # validation_interval=2, 39 | save_plot_interval=50, 40 | loss_function=crossentropy 41 | ) 42 | 43 | # [200,200,200] n_dense_cells=200 got killed before training 44 | 45 | net.fit() 46 | 47 | #net.plot_costs() 48 | #net.plot_estimates() 49 | -------------------------------------------------------------------------------- /scripts/e42.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | 6 | """ 7 | Setup: 8 | * in_to_cell init weights are now Normal(1.0) 9 | * output all appliances 10 | * fix bug in RealApplianceSource 11 | * use cross-entropy 12 | * smaller network 13 | 14 | Changes: 15 | * swapped from boolean to power targets 16 | 17 | Results: 18 | """ 19 | 20 | source = RealApplianceSource( 21 | '/data/dk3810/ukdale.h5', 22 | ['fridge freezer', 'hair straighteners', 'television'], 23 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 24 | window=("2013-06-01", "2014-07-01"), 25 | output_one_appliance=False, 26 | boolean_targets=False, 27 | min_on_duration=60 28 | # sample_period=15, seq_length=400 29 | ) 30 | 31 | net = Net( 32 | experiment_name="e42a", 33 | source=source, 34 | n_cells_per_hidden_layer=[50,50,50], 35 | output_nonlinearity=sigmoid, 36 | learning_rate=1e-1, 37 | n_dense_cells_per_layer=50, 38 | # validation_interval=2, 39 | save_plot_interval=50, 40 | loss_function=crossentropy 41 
| ) 42 | 43 | # [200,200,200] n_dense_cells=200 got killed before training 44 | 45 | net.fit() 46 | 47 | #net.plot_costs() 48 | #net.plot_estimates() 49 | -------------------------------------------------------------------------------- /scripts/e43a.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | 6 | """ 7 | Setup: 8 | * in_to_cell init weights are now Normal(1.0) 9 | * output all appliances 10 | * fix bug in RealApplianceSource 11 | * use cross-entropy 12 | * smaller network 13 | * power targets 14 | 15 | Changes: 16 | * trying without first two sigmoid layers. 17 | 18 | Results: 19 | """ 20 | 21 | source = RealApplianceSource( 22 | '/data/dk3810/ukdale.h5', 23 | ['fridge freezer', 'hair straighteners', 'television'], 24 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 25 | window=("2013-06-01", "2014-07-01"), 26 | output_one_appliance=False, 27 | boolean_targets=False, 28 | min_on_duration=60 29 | # sample_period=15, seq_length=400 30 | ) 31 | 32 | net = Net( 33 | experiment_name="e43a", 34 | source=source, 35 | n_cells_per_hidden_layer=[50,50,50], 36 | output_nonlinearity=sigmoid, 37 | learning_rate=1e-1, 38 | n_dense_cells_per_layer=0, 39 | # validation_interval=2, 40 | save_plot_interval=50, 41 | loss_function=crossentropy 42 | ) 43 | 44 | # [200,200,200] n_dense_cells=200 got killed before training 45 | 46 | net.fit() 47 | 48 | #net.plot_costs() 49 | #net.plot_estimates() 50 | -------------------------------------------------------------------------------- /scripts/e43b.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | 6 | """ 7 | Setup: 8 | * in_to_cell init weights are now Normal(1.0) 9 | * output all appliances 10 | * fix bug in RealApplianceSource 11 | * use cross-entropy 12 | * smaller network 13 | * power targets 14 | * trying without first two sigmoid layers. 15 | 16 | Changes: 17 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 18 | which fixes LSTM bug. 
19 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 20 | 21 | Results: 22 | """ 23 | 24 | source = RealApplianceSource( 25 | '/data/dk3810/ukdale.h5', 26 | ['fridge freezer', 'hair straighteners', 'television'], 27 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 28 | window=("2013-06-01", "2014-07-01"), 29 | output_one_appliance=False, 30 | boolean_targets=False, 31 | min_on_duration=60 32 | # sample_period=15, seq_length=400 33 | ) 34 | 35 | net = Net( 36 | experiment_name="e43b", 37 | source=source, 38 | n_cells_per_hidden_layer=[50,50,50], 39 | output_nonlinearity=sigmoid, 40 | learning_rate=1e-1, 41 | n_dense_cells_per_layer=0, 42 | # validation_interval=2, 43 | save_plot_interval=50, 44 | loss_function=crossentropy 45 | ) 46 | 47 | # [200,200,200] n_dense_cells=200 got killed before training 48 | 49 | net.fit() 50 | 51 | #net.plot_costs() 52 | #net.plot_estimates() 53 | -------------------------------------------------------------------------------- /scripts/e44.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | 22 | Changes: 23 | * Dense layer, BLSTM, Dense, BLSTM, Dense, BLSTM, Output 24 | 25 | """ 26 | 27 | source = RealApplianceSource( 28 | '/data/dk3810/ukdale.h5', 29 | ['fridge freezer', 'hair straighteners', 'television'], 30 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 31 | window=("2013-06-01", "2014-07-01"), 32 | output_one_appliance=False, 33 | boolean_targets=False, 34 | min_on_duration=60 35 | ) 36 | 37 | net = Net( 38 | experiment_name="e44a", 39 | source=source, 40 | learning_rate=1e-1, 41 | save_plot_interval=50, 42 | loss_function=crossentropy, 43 | layers_config=[ 44 | { 45 | 'type': DenseLayer, 46 | 'num_units': 50, 47 | 'nonlinearity': sigmoid, 48 | 'b': Uniform(25), 49 | 'W': Uniform(25) 50 | }, 51 | { 52 | 'type': BLSTMLayer, 53 | 'num_units': 50, 54 | 'W_in_to_cell': Normal(1.0) 55 | }, 56 | { 57 | 'type': DenseLayer, 58 | 'num_units': 50, 59 | 'nonlinearity': sigmoid, 60 | 'b': Uniform(1), 61 | 'W': Uniform(1) 62 | }, 63 | { 64 | 'type': BLSTMLayer, 65 | 'num_units': 50, 66 | 'W_in_to_cell': Normal(1.0) 67 | }, 68 | { 69 | 'type': DenseLayer, 70 | 'num_units': 50, 71 | 'nonlinearity': sigmoid, 72 | 'b': Uniform(1), 73 | 'W': Uniform(1) 74 | }, 75 | { 76 | 'type': BLSTMLayer, 77 | 'num_units': 50, 78 | 'W_in_to_cell': Normal(1.0) 79 | }, 80 | { 81 | 'type': DenseLayer, 82 | 'num_units': source.n_outputs, 83 | 'nonlinearity': sigmoid, 84 | 'b': Uniform(1), 85 | 'W': Uniform(1) 86 | } 87 | ] 88 | ) 89 | 90 | net.fit() 91 | 92 | -------------------------------------------------------------------------------- /scripts/e45.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | 22 | Changes: 23 | * Subsampling single-directional LSTM 24 | 25 | """ 26 | 27 | source = RealApplianceSource( 28 | '/data/dk3810/ukdale.h5', 29 | ['fridge freezer', 'hair straighteners', 'television'], 30 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 31 | window=("2013-06-01", "2013-07-01"), 32 | output_one_appliance=False, 33 | boolean_targets=False, 34 | min_on_duration=60, 35 | subsample_target=5*5 36 | ) 37 | 38 | net = Net( 39 | experiment_name="e45a", 40 | source=source, 41 | learning_rate=1e-1, 42 | save_plot_interval=50, 43 | loss_function=crossentropy, 44 | layers_config=[ 45 | { 46 | 'type': LSTMLayer, 47 | 'num_units': 5, 48 | 'W_in_to_cell': Normal(1.0) 49 | }, 50 | { 51 | 'type': SubsampleLayer, 52 | 'stride': 5 53 | }, 54 | { 55 | 'type': LSTMLayer, 56 | 'num_units': 5, 57 | 'W_in_to_cell': Normal(1.0) 58 | }, 59 | { 60 | 'type': SubsampleLayer, 61 | 'stride': 5 62 | }, 63 | { 64 | 'type': LSTMLayer, 65 | 'num_units': 5, 66 | 'W_in_to_cell': Normal(1.0) 67 | }, 68 | { 69 | 'type': DenseLayer, 70 | 'num_units': source.n_outputs, 71 | 'nonlinearity': sigmoid 72 | } 73 | ] 74 | ) 75 | 76 | net.fit() 77 | 78 | -------------------------------------------------------------------------------- /scripts/e45b.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | 22 | Changes: 23 | * Subsampling single-directional LSTM 24 | 25 | """ 26 | 27 | source = RealApplianceSource( 28 | '/data/dk3810/ukdale.h5', 29 | ['fridge freezer', 'hair straighteners', 'television'], 30 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 31 | window=("2013-06-01", "2014-07-01"), 32 | output_one_appliance=False, 33 | boolean_targets=False, 34 | min_on_duration=60, 35 | subsample_target=5*5 36 | ) 37 | 38 | net = Net( 39 | experiment_name="e45b", 40 | source=source, 41 | learning_rate=1e-1, 42 | save_plot_interval=50, 43 | loss_function=crossentropy, 44 | layers_config=[ 45 | { 46 | 'type': LSTMLayer, 47 | 'num_units': 20, 48 | 'W_in_to_cell': Normal(1.0) 49 | }, 50 | { 51 | 'type': SubsampleLayer, 52 | 'stride': 5 53 | }, 54 | { 55 | 'type': LSTMLayer, 56 | 'num_units': 40, 57 | 'W_in_to_cell': Normal(1.0) 58 | }, 59 | { 60 | 'type': SubsampleLayer, 61 | 'stride': 5 62 | }, 63 | { 64 | 'type': LSTMLayer, 65 | 'num_units': 80, 66 | 'W_in_to_cell': Normal(1.0) 67 | }, 68 | { 69 | 'type': DenseLayer, 70 | 'num_units': source.n_outputs, 71 | 'nonlinearity': sigmoid 72 | } 73 | ] 74 | ) 75 | 76 | net.fit() 77 | 78 | -------------------------------------------------------------------------------- /scripts/e46.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | 22 | Changes: 23 | * Subsampling *bidirectional* LSTM 24 | 25 | """ 26 | 27 | source = RealApplianceSource( 28 | '/data/dk3810/ukdale.h5', 29 | ['fridge freezer', 'hair straighteners', 'television'], 30 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 31 | window=("2013-06-01", "2014-07-01"), 32 | output_one_appliance=False, 33 | boolean_targets=False, 34 | min_on_duration=60, 35 | subsample_target=5*5 36 | ) 37 | 38 | net = Net( 39 | experiment_name="e46", 40 | source=source, 41 | learning_rate=1e-1, 42 | save_plot_interval=50, 43 | loss_function=crossentropy, 44 | layers_config=[ 45 | { 46 | 'type': BLSTMLayer, 47 | 'num_units': 20, 48 | 'W_in_to_cell': Normal(1.0) 49 | }, 50 | { 51 | 'type': SubsampleLayer, 52 | 'stride': 5 53 | }, 54 | { 55 | 'type': BLSTMLayer, 56 | 'num_units': 40, 57 | 'W_in_to_cell': Normal(1.0) 58 | }, 59 | { 60 | 'type': SubsampleLayer, 61 | 'stride': 5 62 | }, 63 | { 64 | 'type': BLSTMLayer, 65 | 'num_units': 80, 66 | 'W_in_to_cell': Normal(1.0) 67 | }, 68 | { 69 | 'type': DenseLayer, 70 | 'num_units': source.n_outputs, 71 | 'nonlinearity': sigmoid 72 | } 73 | ] 74 | ) 75 | 76 | net.fit() 77 | 78 | -------------------------------------------------------------------------------- /scripts/e47.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | 23 | Changes: 24 | * Output every sequence in the batch 25 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 26 | 27 | """ 28 | 29 | source = RealApplianceSource( 30 | '/data/dk3810/ukdale.h5', 31 | ['fridge freezer', 'hair straighteners', 'television'], 32 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 33 | window=("2013-06-01", "2014-07-01"), 34 | output_one_appliance=False, 35 | boolean_targets=False, 36 | min_on_duration=60, 37 | subsample_target=5*5 38 | ) 39 | 40 | net = Net( 41 | experiment_name="e47", 42 | source=source, 43 | learning_rate=1e-1, 44 | save_plot_interval=50, 45 | loss_function=crossentropy, 46 | layers_config=[ 47 | { 48 | 'type': BLSTMLayer, 49 | 'num_units': 20, 50 | 'W_in_to_cell': Uniform(5) 51 | }, 52 | { 53 | 'type': SubsampleLayer, 54 | 'stride': 5 55 | }, 56 | { 57 | 'type': BLSTMLayer, 58 | 'num_units': 40, 59 | 'W_in_to_cell': Uniform(5) 60 | }, 61 | { 62 | 'type': SubsampleLayer, 63 | 'stride': 5 64 | }, 65 | { 66 | 'type': BLSTMLayer, 67 | 'num_units': 80, 68 | 'W_in_to_cell': Uniform(5) 69 | }, 70 | { 71 | 'type': DenseLayer, 72 | 'num_units': source.n_outputs, 73 | 'nonlinearity': sigmoid 74 | } 75 | ] 76 | ) 77 | 78 | net.fit() 79 | 80 | -------------------------------------------------------------------------------- /scripts/e48.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | 25 | Changes: 26 | * put back the two sigmoid layers 27 | 28 | """ 29 | 30 | source = RealApplianceSource( 31 | '/data/dk3810/ukdale.h5', 32 | ['fridge freezer', 'hair straighteners', 'television'], 33 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 34 | window=("2013-06-01", "2014-07-01"), 35 | output_one_appliance=False, 36 | boolean_targets=False, 37 | min_on_duration=60, 38 | subsample_target=5*5 39 | ) 40 | 41 | net = Net( 42 | experiment_name="e48", 43 | source=source, 44 | learning_rate=1e-1, 45 | save_plot_interval=50, 46 | loss_function=crossentropy, 47 | layers_config=[ 48 | { 49 | 'type': DenseLayer, 50 | 'num_units': 50, 51 | 'nonlinearity': sigmoid, 52 | 'W': Uniform(25), 53 | 'b': Uniform(25) 54 | }, 55 | { 56 | 'type': DenseLayer, 57 | 'num_units': 50, 58 | 'nonlinearity': sigmoid, 59 | 'W': Uniform(10), 60 | 'b': Uniform(10) 61 | }, 62 | { 63 | 'type': BLSTMLayer, 64 | 'num_units': 20, 65 | 'W_in_to_cell': Uniform(5) 66 | }, 67 | { 68 | 'type': SubsampleLayer, 69 | 'stride': 5 70 | }, 71 | { 72 | 'type': BLSTMLayer, 73 | 'num_units': 40, 74 | 'W_in_to_cell': Uniform(5) 75 | }, 76 | { 77 | 'type': SubsampleLayer, 78 | 'stride': 5 79 | }, 80 | { 81 | 'type': BLSTMLayer, 82 | 'num_units': 80, 83 | 'W_in_to_cell': Uniform(5) 84 | }, 85 | { 86 | 'type': DenseLayer, 87 | 'num_units': source.n_outputs, 88 | 'nonlinearity': sigmoid 89 | } 90 | ] 91 | ) 92 | 93 | net.fit() 94 | 95 | -------------------------------------------------------------------------------- /scripts/e49.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | 26 | Changes: 27 | * use Conv1D to create a hierarchical subsampling LSTM 28 | * Using LSTM (not BLSTM) to speed up training while testing 29 | 30 | """ 31 | 32 | source = RealApplianceSource( 33 | '/data/dk3810/ukdale.h5', 34 | ['fridge freezer', 'hair straighteners', 'television'], 35 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 36 | window=("2013-06-01", "2014-07-01"), 37 | output_one_appliance=False, 38 | boolean_targets=False, 39 | min_on_duration=60, 40 | subsample_target=5*5 41 | ) 42 | 43 | net = Net( 44 | experiment_name="e49b", 45 | source=source, 46 | learning_rate=1e-1, 47 | save_plot_interval=50, 48 | loss_function=crossentropy, 49 | layers_config=[ 50 | { 51 | 'type': DenseLayer, 52 | 'num_units': 50, 53 | 'nonlinearity': sigmoid, 54 | 'W': Uniform(25), 55 | 'b': Uniform(25) 56 | }, 57 | { 58 | 'type': DenseLayer, 59 | 'num_units': 50, 60 | 'nonlinearity': sigmoid, 61 | 'W': Uniform(10), 62 | 'b': Uniform(10) 63 | }, 64 | { 65 | 'type': LSTMLayer, 66 | 'num_units': 20, 67 | 'W_in_to_cell': Uniform(5) 68 | }, 69 | { 70 | 'type': ReshapeLayer, 71 | 'shape': (5, 20, 1000) 72 | }, 73 | { 74 | 'type': Conv1DLayer, 75 | 'num_filters': 20, 76 | 'filter_length': 5, 77 | 'stride': 5 78 | }, 79 | { 80 | 'type': ReshapeLayer, 81 | 'shape': (5, 200, 20) 82 | }, 83 | { 84 | 'type': LSTMLayer, 85 | 'num_units': 40, 86 | 'W_in_to_cell': Uniform(5) 87 | }, 88 | { 89 | 'type': ReshapeLayer, 90 | 'shape': (5, 40, 200) 91 | }, 92 | { 93 | 'type': Conv1DLayer, 94 | 'num_filters': 40, 95 | 'filter_length': 5, 96 | 'stride': 5 97 | }, 98 | { 99 | 'type': ReshapeLayer, 100 | 'shape': (5, 40, 40) 101 | }, 102 | { 103 | 'type': LSTMLayer, 104 | 'num_units': 80, 105 | 'W_in_to_cell': Uniform(5) 106 | }, 107 | { 108 | 'type': DenseLayer, 109 | 'num_units': source.n_outputs, 110 | 'nonlinearity': sigmoid 111 | } 112 | ] 113 | ) 114 | 115 | net.compile() 116 | net.fit() 117 | 118 | -------------------------------------------------------------------------------- /scripts/e50.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | 28 | Changes: 29 | * simplify. 
Just use conv at input 30 | 31 | """ 32 | 33 | source = RealApplianceSource( 34 | '/data/dk3810/ukdale.h5', 35 | ['fridge freezer', 'hair straighteners', 'television'], 36 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 37 | window=("2013-06-01", "2014-07-01"), 38 | output_one_appliance=False, 39 | boolean_targets=False, 40 | min_on_duration=60, 41 | subsample_target=5 42 | ) 43 | 44 | net = Net( 45 | experiment_name="e50a", 46 | source=source, 47 | learning_rate=1e-1, 48 | save_plot_interval=50, 49 | loss_function=crossentropy, 50 | layers_config=[ 51 | { 52 | 'type': ReshapeLayer, 53 | 'shape': (5, 1, 1000) 54 | }, 55 | { 56 | 'type': Conv1DLayer, 57 | 'num_filters': 20, 58 | 'filter_length': 5, 59 | 'stride': 5 60 | }, 61 | { # TODO: I think this should perhaps be dimshuffle, not reshape??? 62 | 'type': ReshapeLayer, 63 | 'shape': (5, 200, 20) 64 | }, 65 | { 66 | 'type': LSTMLayer, 67 | 'num_units': 40, 68 | 'W_in_to_cell': Uniform(5) 69 | }, 70 | { 71 | 'type': LSTMLayer, 72 | 'num_units': 80, 73 | 'W_in_to_cell': Uniform(5) 74 | }, 75 | { 76 | 'type': DenseLayer, 77 | 'num_units': source.n_outputs, 78 | 'nonlinearity': sigmoid 79 | } 80 | ] 81 | ) 82 | 83 | net.print_net() 84 | net.compile() 85 | net.fit() 86 | 87 | -------------------------------------------------------------------------------- /scripts/e51.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * simplify. 
Just use conv at input 28 | 29 | * Use dimshuffle not reshape 30 | 31 | """ 32 | 33 | source = RealApplianceSource( 34 | '/data/dk3810/ukdale.h5', 35 | ['fridge freezer', 'hair straighteners', 'television'], 36 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 37 | window=("2013-06-01", "2013-07-01"), 38 | output_one_appliance=False, 39 | boolean_targets=False, 40 | min_on_duration=60, 41 | subsample_target=5 42 | ) 43 | 44 | net = Net( 45 | experiment_name="e51a", 46 | source=source, 47 | learning_rate=1e-1, 48 | save_plot_interval=50, 49 | loss_function=crossentropy, 50 | layers_config=[ 51 | { 52 | 'type': DimshuffleLayer, 53 | 'pattern': (0, 2, 1) 54 | }, 55 | { 56 | 'type': Conv1DLayer, 57 | 'num_filters': 20, 58 | 'filter_length': 5, 59 | 'stride': 5 60 | }, 61 | { 62 | 'type': DimshuffleLayer, 63 | 'pattern': (0, 2, 1) 64 | }, 65 | { 66 | 'type': LSTMLayer, 67 | 'num_units': 40, 68 | 'W_in_to_cell': Uniform(5) 69 | }, 70 | { 71 | 'type': LSTMLayer, 72 | 'num_units': 80, 73 | 'W_in_to_cell': Uniform(5) 74 | }, 75 | { 76 | 'type': DenseLayer, 77 | 'num_units': source.n_outputs, 78 | 'nonlinearity': sigmoid 79 | } 80 | ] 81 | ) 82 | 83 | net.print_net() 84 | net.compile() 85 | net.fit() 86 | 87 | -------------------------------------------------------------------------------- /scripts/e52a.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | 29 | Changes 30 | * init Conv1DLayer with big weights 31 | 32 | """ 33 | 34 | source = RealApplianceSource( 35 | '/data/dk3810/ukdale.h5', 36 | ['fridge freezer', 'hair straighteners', 'television'], 37 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 38 | window=("2013-06-01", "2013-07-01"), 39 | output_one_appliance=False, 40 | boolean_targets=False, 41 | min_on_duration=60, 42 | subsample_target=5 43 | ) 44 | 45 | net = Net( 46 | experiment_name="e52a", 47 | source=source, 48 | learning_rate=1e-1, 49 | save_plot_interval=50, 50 | loss_function=crossentropy, 51 | layers_config=[ 52 | { 53 | 'type': DimshuffleLayer, 54 | 'pattern': (0, 2, 1) 55 | }, 56 | { 57 | 'type': Conv1DLayer, 58 | 'num_filters': 20, 59 | 'filter_length': 5, 60 | 'stride': 5, 61 | 'W': Uniform(10), 62 | 'b': Uniform(5), 63 | 'nonlinearity': sigmoid 64 | }, 65 | { 66 | 'type': DimshuffleLayer, 67 | 'pattern': (0, 2, 1) 68 | }, 69 | { 70 | 'type': LSTMLayer, 71 | 'num_units': 40, 72 | 'W_in_to_cell': Uniform(5) 73 | }, 74 | { 75 | 'type': LSTMLayer, 76 | 'num_units': 80, 77 | 'W_in_to_cell': Uniform(5) 78 | }, 79 | { 80 | 'type': DenseLayer, 81 | 'num_units': source.n_outputs, 82 | 'nonlinearity': sigmoid 83 | } 84 | ] 85 | ) 86 | 87 | net.print_net() 88 | net.compile() 89 | net.fit() 90 | 91 | -------------------------------------------------------------------------------- /scripts/e53a.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | 29 | Changes 30 | * 2 dense layers back 31 | * back to default init 32 | * conv between LSTMs. 
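(Editor's note, not part of the original log: the conv-between-LSTMs pattern
needs the surrounding DimshuffleLayers because Conv1DLayer expects
(batch, channels, time) while the recurrent layers produce
(batch, time, features). A sketch of the shape bookkeeping in this script,
assuming a batch of 5 sequences, as the reshapes in e49/e50 assume, and
seq_length=1000, the default used elsewhere in these scripts:

    (5, 1000, 40)  output of the first LSTMLayer   (batch, time, units)
    (5, 40, 1000)  after DimshuffleLayer (0, 2, 1) (batch, channels, time)
    (5, 20, 200)   after Conv1DLayer: 20 filters, filter_length=5, stride=5
    (5, 200, 20)   after the second DimshuffleLayer, ready for the next LSTM

The stride-5 convolution is what delivers the subsample_target=5
downsampling of the time axis.)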
33 | 34 | Results 35 | * Works pretty well ;) 36 | """ 37 | 38 | source = RealApplianceSource( 39 | '/data/dk3810/ukdale.h5', 40 | ['fridge freezer', 'hair straighteners', 'television'], 41 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 42 | window=("2013-06-01", "2013-07-01"), 43 | output_one_appliance=False, 44 | boolean_targets=False, 45 | min_on_duration=60, 46 | subsample_target=5 47 | ) 48 | 49 | net = Net( 50 | experiment_name="e53a", 51 | source=source, 52 | learning_rate=1e-1, 53 | save_plot_interval=50, 54 | loss_function=crossentropy, 55 | layers_config=[ 56 | { 57 | 'type': DenseLayer, 58 | 'num_units': 50, 59 | 'nonlinearity': sigmoid, 60 | 'W': Uniform(25), 61 | 'b': Uniform(25) 62 | }, 63 | { 64 | 'type': DenseLayer, 65 | 'num_units': 50, 66 | 'nonlinearity': sigmoid, 67 | 'W': Uniform(10), 68 | 'b': Uniform(10) 69 | }, 70 | { 71 | 'type': LSTMLayer, 72 | 'num_units': 40, 73 | 'W_in_to_cell': Uniform(5) 74 | }, 75 | { 76 | 'type': DimshuffleLayer, 77 | 'pattern': (0, 2, 1) 78 | }, 79 | { 80 | 'type': Conv1DLayer, 81 | 'num_filters': 20, 82 | 'filter_length': 5, 83 | 'stride': 5, 84 | 'nonlinearity': sigmoid 85 | }, 86 | { 87 | 'type': DimshuffleLayer, 88 | 'pattern': (0, 2, 1) 89 | }, 90 | { 91 | 'type': LSTMLayer, 92 | 'num_units': 80, 93 | 'W_in_to_cell': Uniform(5) 94 | }, 95 | { 96 | 'type': DenseLayer, 97 | 'num_units': source.n_outputs, 98 | 'nonlinearity': sigmoid 99 | } 100 | ] 101 | ) 102 | 103 | net.print_net() 104 | net.compile() 105 | net.fit() 106 | 107 | -------------------------------------------------------------------------------- /scripts/e54a.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * back to default init 29 | 30 | Changes 31 | * 2 layers of conv at bottom of net, replacing dense layers 32 | """ 33 | 34 | source = RealApplianceSource( 35 | '/data/dk3810/ukdale.h5', 36 | ['fridge freezer', 'hair straighteners', 'television'], 37 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 38 | window=("2013-06-01", "2013-07-01"), 39 | output_one_appliance=False, 40 | boolean_targets=False, 41 | min_on_duration=60, 42 | subsample_target=5, 43 | input_padding=4 44 | ) 45 | 46 | net = Net( 47 | experiment_name="e54a", 48 | source=source, 49 | learning_rate=1e-1, 50 | save_plot_interval=50, 51 | loss_function=crossentropy, 52 | layers_config=[ 53 | { 54 | 'type': DimshuffleLayer, 55 | 'pattern': (0, 2, 1) 56 | }, 57 | { 58 | 'type': Conv1DLayer, 59 | 'num_filters': 20, 60 | 'filter_length': 3, 61 | 'stride': 1, 62 | 'nonlinearity': sigmoid 63 | }, 64 | { 65 | 'type': Conv1DLayer, 66 | 'num_filters': 20, 67 | 'filter_length': 3, 68 | 'stride': 1, 69 | 'nonlinearity': sigmoid 70 | }, 71 | { 72 | 'type': DimshuffleLayer, 73 | 'pattern': (0, 2, 1) 74 | }, 75 | { 76 | 'type': LSTMLayer, 77 | 'num_units': 40, 78 | 'W_in_to_cell': Uniform(5) 79 | }, 80 | { 81 | 'type': DimshuffleLayer, 82 | 'pattern': (0, 2, 1) 83 | }, 84 | { 85 | 'type': Conv1DLayer, 86 | 'num_filters': 20, 87 | 'filter_length': 5, 88 | 'stride': 5, 89 | 'nonlinearity': sigmoid 90 | }, 91 | { 92 | 'type': DimshuffleLayer, 93 | 'pattern': (0, 2, 1) 94 | }, 95 | { 96 | 'type': LSTMLayer, 97 | 'num_units': 80, 98 | 'W_in_to_cell': Uniform(5) 99 | }, 100 | { 101 | 'type': DenseLayer, 102 | 'num_units': source.n_outputs, 103 | 'nonlinearity': sigmoid 104 | } 105 | ] 106 | ) 107 | 108 | net.print_net() 109 | net.compile() 110 | net.fit() 111 | 112 | -------------------------------------------------------------------------------- /scripts/e55a.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * back to default init 29 | 30 | Changes 31 | * 2 dense layers, then 2 conv layers 32 | """ 33 | 34 | source = RealApplianceSource( 35 | '/data/dk3810/ukdale.h5', 36 | ['fridge freezer', 'hair straighteners', 'television'], 37 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 38 | window=("2013-06-01", "2013-07-01"), 39 | output_one_appliance=False, 40 | boolean_targets=False, 41 | min_on_duration=60, 42 | subsample_target=5, 43 | input_padding=4 44 | ) 45 | 46 | net = Net( 47 | experiment_name="e55a", 48 | source=source, 49 | learning_rate=1e-1, 50 | save_plot_interval=50, 51 | loss_function=crossentropy, 52 | layers_config=[ 53 | { 54 | 'type': DenseLayer, 55 | 'num_units': 50, 56 | 'nonlinearity': sigmoid, 57 | 'W': Uniform(25), 58 | 'b': Uniform(25) 59 | }, 60 | { 61 | 'type': DenseLayer, 62 | 'num_units': 50, 63 | 'nonlinearity': sigmoid, 64 | 'W': Uniform(10), 65 | 'b': Uniform(10) 66 | }, 67 | { 68 | 'type': DimshuffleLayer, 69 | 'pattern': (0, 2, 1) 70 | }, 71 | { 72 | 'type': Conv1DLayer, 73 | 'num_filters': 20, 74 | 'filter_length': 3, 75 | 'stride': 1, 76 | 'nonlinearity': sigmoid 77 | }, 78 | { 79 | 'type': Conv1DLayer, 80 | 'num_filters': 20, 81 | 'filter_length': 3, 82 | 'stride': 1, 83 | 'nonlinearity': sigmoid 84 | }, 85 | { 86 | 'type': DimshuffleLayer, 87 | 'pattern': (0, 2, 1) 88 | }, 89 | { 90 | 'type': LSTMLayer, 91 | 'num_units': 40, 92 | 'W_in_to_cell': Uniform(5) 93 | }, 94 | { 95 | 'type': DimshuffleLayer, 96 | 'pattern': (0, 2, 1) 97 | }, 98 | { 99 | 'type': Conv1DLayer, 100 | 'num_filters': 20, 101 | 'filter_length': 5, 102 | 'stride': 5, 103 | 'nonlinearity': sigmoid 104 | }, 105 | { 106 | 'type': DimshuffleLayer, 107 | 'pattern': (0, 2, 1) 108 | }, 109 | { 110 | 'type': LSTMLayer, 111 | 'num_units': 80, 112 | 'W_in_to_cell': Uniform(5) 113 | }, 114 | { 115 | 'type': DenseLayer, 116 | 'num_units': source.n_outputs, 117 | 'nonlinearity': sigmoid 118 | } 119 | ] 120 | ) 121 | 122 | net.print_net() 123 | net.compile() 124 | net.fit() 125 | 126 | -------------------------------------------------------------------------------- /scripts/e56a: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * back to default init 29 | * 2 layers of conv at bottom of net, replacing dense layers 30 | 31 | Changes 32 | * big inits for conv layers 33 | """ 34 | 35 | source = RealApplianceSource( 36 | '/data/dk3810/ukdale.h5', 37 | ['fridge freezer', 'hair straighteners', 'television'], 38 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 39 | window=("2013-06-01", "2013-07-01"), 40 | output_one_appliance=False, 41 | boolean_targets=False, 42 | min_on_duration=60, 43 | subsample_target=5, 44 | input_padding=4 45 | ) 46 | 47 | net = Net( 48 | experiment_name="e56a", 49 | source=source, 50 | learning_rate=1e-1, 51 | save_plot_interval=50, 52 | loss_function=crossentropy, 53 | layers_config=[ 54 | { 55 | 'type': DimshuffleLayer, 56 | 'pattern': (0, 2, 1) 57 | }, 58 | { 59 | 'type': Conv1DLayer, 60 | 'num_filters': 20, 61 | 'filter_length': 3, 62 | 'stride': 1, 63 | 'nonlinearity': sigmoid, 64 | 'W': Uniform(10), 65 | 'b': Uniform(10) 66 | }, 67 | { 68 | 'type': Conv1DLayer, 69 | 'num_filters': 20, 70 | 'filter_length': 3, 71 | 'stride': 1, 72 | 'nonlinearity': sigmoid, 73 | 'W': Uniform(5), 74 | 'b': Uniform(5) 75 | }, 76 | { 77 | 'type': DimshuffleLayer, 78 | 'pattern': (0, 2, 1) 79 | }, 80 | { 81 | 'type': LSTMLayer, 82 | 'num_units': 40, 83 | 'W_in_to_cell': Uniform(5) 84 | }, 85 | { 86 | 'type': DimshuffleLayer, 87 | 'pattern': (0, 2, 1) 88 | }, 89 | { 90 | 'type': Conv1DLayer, 91 | 'num_filters': 20, 92 | 'filter_length': 5, 93 | 'stride': 5, 94 | 'nonlinearity': sigmoid 95 | }, 96 | { 97 | 'type': DimshuffleLayer, 98 | 'pattern': (0, 2, 1) 99 | }, 100 | { 101 | 'type': LSTMLayer, 102 | 'num_units': 80, 103 | 'W_in_to_cell': Uniform(5) 104 | }, 105 | { 106 | 'type': DenseLayer, 107 | 'num_units': source.n_outputs, 108 | 'nonlinearity': sigmoid 109 | } 110 | ] 111 | ) 112 | 113 | net.print_net() 114 | net.compile() 115 | net.fit() 116 | 117 | -------------------------------------------------------------------------------- /scripts/e57a.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * back to default init 29 | * 2 layers of conv at bottom of net, replacing dense layers 30 | 31 | Changes 32 | * three LSTM layers with a stride-5 Conv1DLayer (default init) between each pair, so the time axis is subsampled twice (subsample_target=5*5) 33 | """ 34 | 35 | source = RealApplianceSource( 36 | '/data/dk3810/ukdale.h5', 37 | ['fridge freezer', 'hair straighteners', 'television'], 38 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 39 | window=("2013-06-01", "2013-07-01"), 40 | output_one_appliance=False, 41 | boolean_targets=False, 42 | min_on_duration=60, 43 | subsample_target=5*5 44 | ) 45 | 46 | net = Net( 47 | experiment_name="e57a", 48 | source=source, 49 | learning_rate=1e-1, 50 | save_plot_interval=50, 51 | loss_function=crossentropy, 52 | layers_config=[ 53 | { 54 | 'type': LSTMLayer, 55 | 'num_units': 20, 56 | 'W_in_to_cell': Uniform(5) 57 | }, 58 | { 59 | 'type': DimshuffleLayer, 60 | 'pattern': (0, 2, 1) 61 | }, 62 | { 63 | 'type': Conv1DLayer, 64 | 'num_filters': 20, 65 | 'filter_length': 5, 66 | 'stride': 5, 67 | 'nonlinearity': sigmoid 68 | }, 69 | { 70 | 'type': DimshuffleLayer, 71 | 'pattern': (0, 2, 1) 72 | }, 73 | { 74 | 'type': LSTMLayer, 75 | 'num_units': 40 76 | }, 77 | { 78 | 'type': DimshuffleLayer, 79 | 'pattern': (0, 2, 1) 80 | }, 81 | { 82 | 'type': Conv1DLayer, 83 | 'num_filters': 40, 84 | 'filter_length': 5, 85 | 'stride': 5, 86 | 'nonlinearity': sigmoid 87 | }, 88 | { 89 | 'type': DimshuffleLayer, 90 | 'pattern': (0, 2, 1) 91 | }, 92 | { 93 | 'type': LSTMLayer, 94 | 'num_units': 80 95 | }, 96 | { 97 | 'type': DenseLayer, 98 | 'num_units': source.n_outputs, 99 | 'nonlinearity': sigmoid 100 | } 101 | ] 102 | ) 103 | 104 | net.print_net() 105 | net.compile() 106 | net.fit() 107 | 108 | -------------------------------------------------------------------------------- /scripts/e58a.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs.
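Editor's note (added; not part of the original experiment log): e58a keeps
the two stride-5 Conv1D stages of e57a, so the time axis is subsampled by
5 * 5 = 25 overall; that is why the source below is built with
subsample_target=5*5 -- the targets must be shortened by the same factor
for the network output and target lengths to line up. A quick check of the
bookkeeping (sequence length illustrative):

    seq_len = 1000
    for stride in (5, 5):      # the two stride-5 Conv1D stages
        seq_len //= stride
    assert seq_len == 40       # 1000 / (5 * 5)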
31 | 32 | Changes 33 | * 3 LSTM layers 34 | * Changed LSTM init from Uniform(5) to Uniform(1) 35 | 36 | Results 37 | 38 | """ 39 | 40 | source = RealApplianceSource( 41 | '/data/dk3810/ukdale.h5', 42 | ['fridge freezer', 'hair straighteners', 'television'], 43 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 44 | window=("2013-06-01", "2013-07-01"), 45 | output_one_appliance=False, 46 | boolean_targets=False, 47 | min_on_duration=60, 48 | subsample_target=5*5 49 | ) 50 | 51 | net = Net( 52 | experiment_name="e58a", 53 | source=source, 54 | learning_rate=1e-1, 55 | save_plot_interval=50, 56 | loss_function=crossentropy, 57 | layers_config=[ 58 | { 59 | 'type': DenseLayer, 60 | 'num_units': 50, 61 | 'nonlinearity': sigmoid, 62 | 'W': Uniform(25), 63 | 'b': Uniform(25) 64 | }, 65 | { 66 | 'type': DenseLayer, 67 | 'num_units': 50, 68 | 'nonlinearity': sigmoid, 69 | 'W': Uniform(10), 70 | 'b': Uniform(10) 71 | }, 72 | { 73 | 'type': LSTMLayer, 74 | 'num_units': 20, 75 | 'W_in_to_cell': Uniform(1) 76 | }, 77 | { 78 | 'type': DimshuffleLayer, 79 | 'pattern': (0, 2, 1) 80 | }, 81 | { 82 | 'type': Conv1DLayer, 83 | 'num_filters': 40, 84 | 'filter_length': 5, 85 | 'stride': 5, 86 | 'nonlinearity': sigmoid 87 | }, 88 | { 89 | 'type': DimshuffleLayer, 90 | 'pattern': (0, 2, 1) 91 | }, 92 | { 93 | 'type': LSTMLayer, 94 | 'num_units': 40, 95 | 'W_in_to_cell': Uniform(1) 96 | }, 97 | { 98 | 'type': DimshuffleLayer, 99 | 'pattern': (0, 2, 1) 100 | }, 101 | { 102 | 'type': Conv1DLayer, 103 | 'num_filters': 80, 104 | 'filter_length': 5, 105 | 'stride': 5, 106 | 'nonlinearity': sigmoid 107 | }, 108 | { 109 | 'type': DimshuffleLayer, 110 | 'pattern': (0, 2, 1) 111 | }, 112 | { 113 | 'type': LSTMLayer, 114 | 'num_units': 80, 115 | 'W_in_to_cell': Uniform(1) 116 | }, 117 | { 118 | 'type': DenseLayer, 119 | 'num_units': source.n_outputs, 120 | 'nonlinearity': sigmoid 121 | } 122 | ] 123 | ) 124 | 125 | net.print_net() 126 | net.compile() 127 | net.fit() 128 | 129 | -------------------------------------------------------------------------------- /scripts/e58b.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 
31 | * 3 LSTM layers 32 | * Changed LSTM init from Uniform(5) to Uniform(1) 33 | 34 | Changes 35 | * Changed init of LSTM back to Uniform(5) 36 | 37 | Results 38 | 39 | """ 40 | 41 | source = RealApplianceSource( 42 | '/data/dk3810/ukdale.h5', 43 | ['fridge freezer', 'hair straighteners', 'television'], 44 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 45 | window=("2013-06-01", "2013-07-01"), 46 | output_one_appliance=False, 47 | boolean_targets=False, 48 | min_on_duration=60, 49 | subsample_target=5*5 50 | ) 51 | 52 | net = Net( 53 | experiment_name="e58b", 54 | source=source, 55 | learning_rate=1e-1, 56 | save_plot_interval=50, 57 | loss_function=crossentropy, 58 | layers_config=[ 59 | { 60 | 'type': DenseLayer, 61 | 'num_units': 50, 62 | 'nonlinearity': sigmoid, 63 | 'W': Uniform(25), 64 | 'b': Uniform(25) 65 | }, 66 | { 67 | 'type': DenseLayer, 68 | 'num_units': 50, 69 | 'nonlinearity': sigmoid, 70 | 'W': Uniform(10), 71 | 'b': Uniform(10) 72 | }, 73 | { 74 | 'type': LSTMLayer, 75 | 'num_units': 20, 76 | 'W_in_to_cell': Uniform(5) 77 | }, 78 | { 79 | 'type': DimshuffleLayer, 80 | 'pattern': (0, 2, 1) 81 | }, 82 | { 83 | 'type': Conv1DLayer, 84 | 'num_filters': 40, 85 | 'filter_length': 5, 86 | 'stride': 5, 87 | 'nonlinearity': sigmoid 88 | }, 89 | { 90 | 'type': DimshuffleLayer, 91 | 'pattern': (0, 2, 1) 92 | }, 93 | { 94 | 'type': LSTMLayer, 95 | 'num_units': 40, 96 | 'W_in_to_cell': Uniform(5) 97 | }, 98 | { 99 | 'type': DimshuffleLayer, 100 | 'pattern': (0, 2, 1) 101 | }, 102 | { 103 | 'type': Conv1DLayer, 104 | 'num_filters': 80, 105 | 'filter_length': 5, 106 | 'stride': 5, 107 | 'nonlinearity': sigmoid 108 | }, 109 | { 110 | 'type': DimshuffleLayer, 111 | 'pattern': (0, 2, 1) 112 | }, 113 | { 114 | 'type': LSTMLayer, 115 | 'num_units': 80, 116 | 'W_in_to_cell': Uniform(5) 117 | }, 118 | { 119 | 'type': DenseLayer, 120 | 'num_units': source.n_outputs, 121 | 'nonlinearity': sigmoid 122 | } 123 | ] 124 | ) 125 | 126 | net.print_net() 127 | net.compile() 128 | net.fit() 129 | 130 | -------------------------------------------------------------------------------- /scripts/e58c.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 
31 | * 3 LSTM layers 32 | * Changed LSTM init from Uniform(5) to Uniform(1) 33 | * Changed init of LSTM back to Uniform(5) 34 | 35 | Changes 36 | * increased init on conv layer. 37 | 38 | Results 39 | 40 | """ 41 | 42 | source = RealApplianceSource( 43 | '/data/dk3810/ukdale.h5', 44 | ['fridge freezer', 'hair straighteners', 'television'], 45 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 46 | window=("2013-06-01", "2013-07-01"), 47 | output_one_appliance=False, 48 | boolean_targets=False, 49 | min_on_duration=60, 50 | subsample_target=5*5 51 | ) 52 | 53 | net = Net( 54 | experiment_name="e58c", 55 | source=source, 56 | learning_rate=1e-1, 57 | save_plot_interval=50, 58 | loss_function=crossentropy, 59 | layers_config=[ 60 | { 61 | 'type': DenseLayer, 62 | 'num_units': 50, 63 | 'nonlinearity': sigmoid, 64 | 'W': Uniform(25), 65 | 'b': Uniform(25) 66 | }, 67 | { 68 | 'type': DenseLayer, 69 | 'num_units': 50, 70 | 'nonlinearity': sigmoid, 71 | 'W': Uniform(10), 72 | 'b': Uniform(10) 73 | }, 74 | { 75 | 'type': LSTMLayer, 76 | 'num_units': 20, 77 | 'W_in_to_cell': Uniform(5) 78 | }, 79 | { 80 | 'type': DimshuffleLayer, 81 | 'pattern': (0, 2, 1) 82 | }, 83 | { 84 | 'type': Conv1DLayer, 85 | 'num_filters': 40, 86 | 'filter_length': 5, 87 | 'stride': 5, 88 | 'nonlinearity': sigmoid, 89 | 'W': Uniform(5) 90 | }, 91 | { 92 | 'type': DimshuffleLayer, 93 | 'pattern': (0, 2, 1) 94 | }, 95 | { 96 | 'type': LSTMLayer, 97 | 'num_units': 40, 98 | 'W_in_to_cell': Uniform(5) 99 | }, 100 | { 101 | 'type': DimshuffleLayer, 102 | 'pattern': (0, 2, 1) 103 | }, 104 | { 105 | 'type': Conv1DLayer, 106 | 'num_filters': 80, 107 | 'filter_length': 5, 108 | 'stride': 5, 109 | 'nonlinearity': sigmoid, 110 | 'W': Uniform(5) 111 | }, 112 | { 113 | 'type': DimshuffleLayer, 114 | 'pattern': (0, 2, 1) 115 | }, 116 | { 117 | 'type': LSTMLayer, 118 | 'num_units': 80, 119 | 'W_in_to_cell': Uniform(5) 120 | }, 121 | { 122 | 'type': DenseLayer, 123 | 'num_units': source.n_outputs, 124 | 'nonlinearity': sigmoid 125 | } 126 | ] 127 | ) 128 | 129 | net.print_net() 130 | net.compile() 131 | net.fit() 132 | 133 | -------------------------------------------------------------------------------- /scripts/e59a.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 
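Editor's note (added; not part of the original experiment log): the
Uniform(n) initialisers used throughout these scripts follow Lasagne's
convention of drawing weights from the range [-n, n], so Uniform(25) on
the first dense layer below is a very large init by Glorot-style
standards. A one-liner to confirm the sampling range (shape illustrative):

    from lasagne.init import Uniform
    W = Uniform(25).sample((50, 50))   # ndarray with entries in [-25, 25]
    assert abs(W).max() <= 25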
31 | 32 | Changes 33 | * More data 34 | * BLSTM 35 | 36 | Results 37 | 38 | """ 39 | 40 | source = RealApplianceSource( 41 | '/data/dk3810/ukdale.h5', 42 | ['fridge freezer', 'hair straighteners', 'television'], 43 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 44 | window=("2013-06-01", "2014-07-01"), 45 | output_one_appliance=False, 46 | boolean_targets=False, 47 | min_on_duration=60, 48 | subsample_target=5 49 | ) 50 | 51 | net = Net( 52 | experiment_name="e59a", 53 | source=source, 54 | learning_rate=1e-1, 55 | save_plot_interval=50, 56 | loss_function=crossentropy, 57 | layers_config=[ 58 | { 59 | 'type': DenseLayer, 60 | 'num_units': 50, 61 | 'nonlinearity': sigmoid, 62 | 'W': Uniform(25), 63 | 'b': Uniform(25) 64 | }, 65 | { 66 | 'type': DenseLayer, 67 | 'num_units': 50, 68 | 'nonlinearity': sigmoid, 69 | 'W': Uniform(10), 70 | 'b': Uniform(10) 71 | }, 72 | { 73 | 'type': BLSTMLayer, 74 | 'num_units': 40, 75 | 'W_in_to_cell': Uniform(5) 76 | }, 77 | { 78 | 'type': DimshuffleLayer, 79 | 'pattern': (0, 2, 1) 80 | }, 81 | { 82 | 'type': Conv1DLayer, 83 | 'num_filters': 20, 84 | 'filter_length': 5, 85 | 'stride': 5, 86 | 'nonlinearity': sigmoid 87 | }, 88 | { 89 | 'type': DimshuffleLayer, 90 | 'pattern': (0, 2, 1) 91 | }, 92 | { 93 | 'type': BLSTMLayer, 94 | 'num_units': 80, 95 | 'W_in_to_cell': Uniform(5) 96 | }, 97 | { 98 | 'type': DenseLayer, 99 | 'num_units': source.n_outputs, 100 | 'nonlinearity': sigmoid 101 | } 102 | ] 103 | ) 104 | 105 | net.print_net() 106 | net.compile() 107 | net.fit() 108 | 109 | -------------------------------------------------------------------------------- /scripts/e60a.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 
31 | * More data 32 | * BLSTM 33 | 34 | Changes 35 | * Try just using a 1D convnet on input 36 | 37 | Results 38 | 39 | """ 40 | 41 | source = RealApplianceSource( 42 | '/data/dk3810/ukdale.h5', 43 | ['fridge freezer', 'hair straighteners', 'television'], 44 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 45 | window=("2013-06-01", "2013-07-01"), 46 | output_one_appliance=False, 47 | boolean_targets=False, 48 | min_on_duration=60, 49 | input_padding=2 50 | ) 51 | 52 | net = Net( 53 | experiment_name="e60a", 54 | source=source, 55 | learning_rate=1e-1, 56 | save_plot_interval=50, 57 | loss_function=crossentropy, 58 | layers_config=[ 59 | { 60 | 'type': DimshuffleLayer, 61 | 'pattern': (0, 2, 1) 62 | }, 63 | { 64 | 'type': Conv1DLayer, 65 | 'num_filters': 20, 66 | 'filter_length': 3, 67 | 'stride': 1, 68 | 'nonlinearity': rectify 69 | }, 70 | { 71 | 'type': DimshuffleLayer, 72 | 'pattern': (0, 2, 1) 73 | }, 74 | { 75 | 'type': LSTMLayer, 76 | 'num_units': 40, 77 | 'W_in_to_cell': Uniform(5) 78 | }, 79 | { 80 | 'type': DenseLayer, 81 | 'num_units': source.n_outputs, 82 | 'nonlinearity': sigmoid 83 | } 84 | ] 85 | ) 86 | 87 | net.print_net() 88 | net.compile() 89 | net.fit() 90 | 91 | -------------------------------------------------------------------------------- /scripts/e61a.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | 35 | Changes 36 | * add second Convnet layer (not sure this is correct thing to do?) 
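Editor's note (added; not part of the original experiment log): with no
explicit border mode, the stride-1 Conv1DLayers here are 'valid'
convolutions, so each filter_length=3 layer trims (3 - 1) = 2 samples from
the sequence; the two of them trim 4, which is what input_padding=4 in the
source puts back so that the network output still lines up with the
target. The bookkeeping, with an illustrative length:

    def conv_out_len(n, filter_length, stride=1):
        # output length of a 'valid' 1D convolution
        return (n - filter_length) // stride + 1

    n = 1000 + 4                  # input_padding=4
    for filter_length in (3, 3):  # the two stride-1 conv layers
        n = conv_out_len(n, filter_length)
    assert n == 1000              # matches the unpadded target length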
37 | 38 | Results 39 | 40 | """ 41 | 42 | source = RealApplianceSource( 43 | '/data/dk3810/ukdale.h5', 44 | ['fridge freezer', 'hair straighteners', 'television'], 45 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 46 | window=("2013-06-01", "2013-07-01"), 47 | output_one_appliance=False, 48 | boolean_targets=False, 49 | min_on_duration=60, 50 | input_padding=4 51 | ) 52 | 53 | net = Net( 54 | experiment_name="e61a", 55 | source=source, 56 | learning_rate=1e-1, 57 | save_plot_interval=50, 58 | loss_function=crossentropy, 59 | layers_config=[ 60 | { 61 | 'type': DimshuffleLayer, 62 | 'pattern': (0, 2, 1) 63 | }, 64 | { 65 | 'type': Conv1DLayer, 66 | 'num_filters': 20, 67 | 'filter_length': 3, 68 | 'stride': 1, 69 | 'nonlinearity': rectify 70 | }, 71 | { 72 | 'type': Conv1DLayer, 73 | 'num_filters': 20, 74 | 'filter_length': 3, 75 | 'stride': 1, 76 | 'nonlinearity': rectify 77 | }, 78 | { 79 | 'type': DimshuffleLayer, 80 | 'pattern': (0, 2, 1) 81 | }, 82 | { 83 | 'type': LSTMLayer, 84 | 'num_units': 40, 85 | 'W_in_to_cell': Uniform(5) 86 | }, 87 | { 88 | 'type': DenseLayer, 89 | 'num_units': source.n_outputs, 90 | 'nonlinearity': sigmoid 91 | } 92 | ] 93 | ) 94 | 95 | net.print_net() 96 | net.compile() 97 | net.fit() 98 | 99 | -------------------------------------------------------------------------------- /scripts/e62.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 
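Editor's note (added; not part of the original experiment log): with the
third conv layer added below, the 'valid' convolutions trim
(3 - 1) + (3 - 1) + (5 - 1) = 8 samples in total, which is exactly what
input_padding=8 in the source restores.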
35 | 36 | Changes 37 | * third conv layer 38 | 39 | Results 40 | 41 | """ 42 | 43 | source = RealApplianceSource( 44 | '/data/dk3810/ukdale.h5', 45 | ['fridge freezer', 'hair straighteners', 'television'], 46 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 47 | window=("2013-06-01", "2013-07-01"), 48 | output_one_appliance=False, 49 | boolean_targets=False, 50 | min_on_duration=60, 51 | input_padding=8 52 | ) 53 | 54 | net = Net( 55 | experiment_name="e62", 56 | source=source, 57 | learning_rate=1e-1, 58 | save_plot_interval=50, 59 | loss_function=crossentropy, 60 | layers_config=[ 61 | { 62 | 'type': DimshuffleLayer, 63 | 'pattern': (0, 2, 1) 64 | }, 65 | { 66 | 'type': Conv1DLayer, 67 | 'num_filters': 20, 68 | 'filter_length': 3, 69 | 'stride': 1, 70 | 'nonlinearity': rectify 71 | }, 72 | { 73 | 'type': Conv1DLayer, 74 | 'num_filters': 20, 75 | 'filter_length': 3, 76 | 'stride': 1, 77 | 'nonlinearity': rectify 78 | }, 79 | { 80 | 'type': Conv1DLayer, 81 | 'num_filters': 20, 82 | 'filter_length': 5, 83 | 'stride': 1, 84 | 'nonlinearity': rectify 85 | }, 86 | { 87 | 'type': DimshuffleLayer, 88 | 'pattern': (0, 2, 1) 89 | }, 90 | { 91 | 'type': LSTMLayer, 92 | 'num_units': 40, 93 | 'W_in_to_cell': Uniform(5) 94 | }, 95 | { 96 | 'type': DenseLayer, 97 | 'num_units': source.n_outputs, 98 | 'nonlinearity': sigmoid 99 | } 100 | ] 101 | ) 102 | 103 | net.print_net() 104 | net.compile() 105 | net.fit() 106 | 107 | -------------------------------------------------------------------------------- /scripts/e63.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 
35 | * third conv layer 36 | 37 | Changes 38 | * 2 dense layers *between* conv and LSTM 39 | 40 | Results 41 | 42 | """ 43 | 44 | source = RealApplianceSource( 45 | '/data/dk3810/ukdale.h5', 46 | ['fridge freezer', 'hair straighteners', 'television'], 47 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 48 | window=("2013-06-01", "2013-07-01"), 49 | output_one_appliance=False, 50 | boolean_targets=False, 51 | min_on_duration=60, 52 | input_padding=8 53 | ) 54 | 55 | net = Net( 56 | experiment_name="e63", 57 | source=source, 58 | learning_rate=1e-1, 59 | save_plot_interval=50, 60 | loss_function=crossentropy, 61 | layers_config=[ 62 | { 63 | 'type': DimshuffleLayer, 64 | 'pattern': (0, 2, 1) 65 | }, 66 | { 67 | 'type': Conv1DLayer, 68 | 'num_filters': 20, 69 | 'filter_length': 3, 70 | 'stride': 1, 71 | 'nonlinearity': rectify 72 | }, 73 | { 74 | 'type': Conv1DLayer, 75 | 'num_filters': 20, 76 | 'filter_length': 3, 77 | 'stride': 1, 78 | 'nonlinearity': rectify 79 | }, 80 | { 81 | 'type': Conv1DLayer, 82 | 'num_filters': 20, 83 | 'filter_length': 5, 84 | 'stride': 1, 85 | 'nonlinearity': rectify 86 | }, 87 | { 88 | 'type': DimshuffleLayer, 89 | 'pattern': (0, 2, 1) 90 | }, 91 | { 92 | 'type': DenseLayer, 93 | 'num_units': 20, 94 | 'nonlinearity': rectify 95 | }, 96 | { 97 | 'type': DenseLayer, 98 | 'num_units': 20, 99 | 'nonlinearity': rectify 100 | }, 101 | { 102 | 'type': LSTMLayer, 103 | 'num_units': 40, 104 | 'W_in_to_cell': Uniform(5) 105 | }, 106 | { 107 | 'type': DenseLayer, 108 | 'num_units': source.n_outputs, 109 | 'nonlinearity': sigmoid 110 | } 111 | ] 112 | ) 113 | 114 | net.print_net() 115 | net.compile() 116 | net.fit() 117 | 118 | -------------------------------------------------------------------------------- /scripts/e64.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 
35 | * third conv layer 36 | 37 | Changes 38 | * large inits 39 | 40 | Results 41 | 42 | """ 43 | 44 | source = RealApplianceSource( 45 | '/data/dk3810/ukdale.h5', 46 | ['fridge freezer', 'hair straighteners', 'television'], 47 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 48 | window=("2013-06-01", "2013-07-01"), 49 | output_one_appliance=False, 50 | boolean_targets=False, 51 | min_on_duration=60, 52 | input_padding=8 53 | ) 54 | 55 | net = Net( 56 | experiment_name="e64", 57 | source=source, 58 | learning_rate=1e-1, 59 | save_plot_interval=50, 60 | loss_function=crossentropy, 61 | layers_config=[ 62 | { 63 | 'type': DimshuffleLayer, 64 | 'pattern': (0, 2, 1) 65 | }, 66 | { 67 | 'type': Conv1DLayer, 68 | 'num_filters': 20, 69 | 'filter_length': 3, 70 | 'stride': 1, 71 | 'nonlinearity': rectify, 72 | 'W': Uniform(10) 73 | }, 74 | { 75 | 'type': Conv1DLayer, 76 | 'num_filters': 20, 77 | 'filter_length': 3, 78 | 'stride': 1, 79 | 'nonlinearity': rectify, 80 | 'W': Uniform(5) 81 | }, 82 | { 83 | 'type': Conv1DLayer, 84 | 'num_filters': 20, 85 | 'filter_length': 5, 86 | 'stride': 1, 87 | 'nonlinearity': rectify 88 | }, 89 | { 90 | 'type': DimshuffleLayer, 91 | 'pattern': (0, 2, 1) 92 | }, 93 | { 94 | 'type': LSTMLayer, 95 | 'num_units': 40, 96 | 'W_in_to_cell': Uniform(5) 97 | }, 98 | { 99 | 'type': DenseLayer, 100 | 'num_units': source.n_outputs, 101 | 'nonlinearity': sigmoid 102 | } 103 | ] 104 | ) 105 | 106 | net.print_net() 107 | net.compile() 108 | net.fit() 109 | 110 | -------------------------------------------------------------------------------- /scripts/e65.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 
35 | * third conv layer 36 | * large inits 37 | 38 | Changes 39 | * back to 2 conv layers 40 | 41 | Results 42 | 43 | """ 44 | 45 | source = RealApplianceSource( 46 | '/data/dk3810/ukdale.h5', 47 | ['fridge freezer', 'hair straighteners', 'television'], 48 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 49 | window=("2013-06-01", "2013-07-01"), 50 | output_one_appliance=False, 51 | boolean_targets=False, 52 | min_on_duration=60, 53 | input_padding=4 54 | ) 55 | 56 | net = Net( 57 | experiment_name="e65", 58 | source=source, 59 | learning_rate=1e-1, 60 | save_plot_interval=50, 61 | loss_function=crossentropy, 62 | layers_config=[ 63 | { 64 | 'type': DimshuffleLayer, 65 | 'pattern': (0, 2, 1) 66 | }, 67 | { 68 | 'type': Conv1DLayer, 69 | 'num_filters': 20, 70 | 'filter_length': 3, 71 | 'stride': 1, 72 | 'nonlinearity': rectify, 73 | 'W': Uniform(10) 74 | }, 75 | { 76 | 'type': Conv1DLayer, 77 | 'num_filters': 20, 78 | 'filter_length': 3, 79 | 'stride': 1, 80 | 'nonlinearity': rectify, 81 | 'W': Uniform(5) 82 | }, 83 | { 84 | 'type': DimshuffleLayer, 85 | 'pattern': (0, 2, 1) 86 | }, 87 | { 88 | 'type': LSTMLayer, 89 | 'num_units': 40, 90 | 'W_in_to_cell': Uniform(5) 91 | }, 92 | { 93 | 'type': DenseLayer, 94 | 'num_units': source.n_outputs, 95 | 'nonlinearity': sigmoid 96 | } 97 | ] 98 | ) 99 | 100 | net.print_net() 101 | net.compile() 102 | net.fit() 103 | 104 | -------------------------------------------------------------------------------- /scripts/e66.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 35 | * third conv layer 36 | * large inits 37 | * back to 2 conv layers 38 | 39 | Changes 40 | * 2 dense layers first. 
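Editor's note (added; not part of the original experiment log): e66 grafts
the sigmoid dense front-end of e59a, with its large Uniform(25) and
Uniform(10) inits, onto the rectify conv stack of e65. The init scale
matters far more for the sigmoid layers than for the rectified ones,
because sigmoid saturates on both sides while rectify is linear for
positive inputs:

    import numpy as np
    x = np.linspace(-30.0, 30.0, 7)
    sig = 1.0 / (1.0 + np.exp(-x))   # ~0 or ~1 (tiny gradients) for |x| >> 1
    rect = np.maximum(0.0, x)        # non-saturating for x > 0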
41 | 42 | Results 43 | 44 | """ 45 | 46 | source = RealApplianceSource( 47 | '/data/dk3810/ukdale.h5', 48 | ['fridge freezer', 'hair straighteners', 'television'], 49 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 50 | window=("2013-06-01", "2013-07-01"), 51 | output_one_appliance=False, 52 | boolean_targets=False, 53 | min_on_duration=60, 54 | input_padding=4 55 | ) 56 | 57 | net = Net( 58 | experiment_name="e66", 59 | source=source, 60 | learning_rate=1e-1, 61 | save_plot_interval=50, 62 | loss_function=crossentropy, 63 | layers_config=[ 64 | { 65 | 'type': DenseLayer, 66 | 'num_units': 50, 67 | 'nonlinearity': sigmoid, 68 | 'W': Uniform(25), 69 | 'b': Uniform(25) 70 | }, 71 | { 72 | 'type': DenseLayer, 73 | 'num_units': 50, 74 | 'nonlinearity': sigmoid, 75 | 'W': Uniform(10), 76 | 'b': Uniform(10) 77 | }, 78 | { 79 | 'type': DimshuffleLayer, 80 | 'pattern': (0, 2, 1) 81 | }, 82 | { 83 | 'type': Conv1DLayer, 84 | 'num_filters': 20, 85 | 'filter_length': 3, 86 | 'stride': 1, 87 | 'nonlinearity': rectify 88 | }, 89 | { 90 | 'type': Conv1DLayer, 91 | 'num_filters': 20, 92 | 'filter_length': 3, 93 | 'stride': 1, 94 | 'nonlinearity': rectify 95 | }, 96 | { 97 | 'type': DimshuffleLayer, 98 | 'pattern': (0, 2, 1) 99 | }, 100 | { 101 | 'type': LSTMLayer, 102 | 'num_units': 40, 103 | 'W_in_to_cell': Uniform(5) 104 | }, 105 | { 106 | 'type': DenseLayer, 107 | 'num_units': source.n_outputs, 108 | 'nonlinearity': sigmoid 109 | } 110 | ] 111 | ) 112 | 113 | net.print_net() 114 | net.compile() 115 | net.fit() 116 | 117 | -------------------------------------------------------------------------------- /scripts/e67.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 35 | * third conv layer 36 | * large inits 37 | * back to 2 conv layers 38 | * 2 dense layers first. 
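Editor's note (added; not part of the original experiment log): alongside
the switch to rectify, the two DenseLayer configs below drop the explicit
'W' and 'b' entries, so those layers fall back to the layer class's
default initialiser instead of the big Uniform inits carried over from
e66.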
39 | 40 | Changes 41 | * rectify for dense layers 42 | 43 | Results 44 | 45 | """ 46 | 47 | source = RealApplianceSource( 48 | '/data/dk3810/ukdale.h5', 49 | ['fridge freezer', 'hair straighteners', 'television'], 50 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 51 | window=("2013-06-01", "2013-07-01"), 52 | output_one_appliance=False, 53 | boolean_targets=False, 54 | min_on_duration=60, 55 | input_padding=4 56 | ) 57 | 58 | net = Net( 59 | experiment_name="e67", 60 | source=source, 61 | learning_rate=1e-1, 62 | save_plot_interval=50, 63 | loss_function=crossentropy, 64 | layers_config=[ 65 | { 66 | 'type': DenseLayer, 67 | 'num_units': 50, 68 | 'nonlinearity': rectify 69 | }, 70 | { 71 | 'type': DenseLayer, 72 | 'num_units': 50, 73 | 'nonlinearity': rectify 74 | }, 75 | { 76 | 'type': DimshuffleLayer, 77 | 'pattern': (0, 2, 1) 78 | }, 79 | { 80 | 'type': Conv1DLayer, 81 | 'num_filters': 20, 82 | 'filter_length': 3, 83 | 'stride': 1, 84 | 'nonlinearity': rectify 85 | }, 86 | { 87 | 'type': Conv1DLayer, 88 | 'num_filters': 20, 89 | 'filter_length': 3, 90 | 'stride': 1, 91 | 'nonlinearity': rectify 92 | }, 93 | { 94 | 'type': DimshuffleLayer, 95 | 'pattern': (0, 2, 1) 96 | }, 97 | { 98 | 'type': LSTMLayer, 99 | 'num_units': 40, 100 | 'W_in_to_cell': Uniform(5) 101 | }, 102 | { 103 | 'type': DenseLayer, 104 | 'num_units': source.n_outputs, 105 | 'nonlinearity': sigmoid 106 | } 107 | ] 108 | ) 109 | 110 | net.print_net() 111 | net.compile() 112 | net.fit() 113 | 114 | -------------------------------------------------------------------------------- /scripts/e68.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from neuralnilm.net import QuantizeLayer 4 | from lasagne.nonlinearities import sigmoid, rectify 5 | from lasagne.objectives import crossentropy 6 | from lasagne.init import Uniform, Normal 7 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 8 | 9 | 10 | """ 11 | Setup: 12 | * in_to_cell init weights are now Normal(1.0) 13 | * output all appliances 14 | * fix bug in RealApplianceSource 15 | * use cross-entropy 16 | * smaller network 17 | * power targets 18 | * trying without first two sigmoid layers. 19 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 20 | which fixes LSTM bug. 21 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 22 | * Subsampling *bidirectional* LSTM 23 | * Output every sequence in the batch 24 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 25 | * put back the two sigmoid layers 26 | * use Conv1D to create a hierarchical subsampling LSTM 27 | * Using LSTM (not BLSTM) to speed up training while testing 28 | * Use dimshuffle not reshape 29 | * 2 dense layers back 30 | * back to default init 31 | * conv between LSTMs. 32 | * More data 33 | * BLSTM 34 | * Try just using a 1D convnet on input 35 | * add second Convnet layer (not sure this is correct thing to do?) 36 | * third conv layer 37 | * large inits 38 | * back to 2 conv layers 39 | * 2 dense layers first. 
40 | * rectify for dense layers 41 | 42 | Changes 43 | * Quantize input 44 | 45 | Results 46 | 47 | """ 48 | 49 | source = RealApplianceSource( 50 | '/data/dk3810/ukdale.h5', 51 | ['fridge freezer', 'hair straighteners', 'television'], 52 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 53 | window=("2013-06-01", "2013-07-01"), 54 | output_one_appliance=False, 55 | boolean_targets=False, 56 | min_on_duration=60, 57 | input_padding=4 58 | ) 59 | 60 | net = Net( 61 | experiment_name="e68", 62 | source=source, 63 | learning_rate=1e-1, 64 | save_plot_interval=50, 65 | loss_function=crossentropy, 66 | layers_config=[ 67 | { 68 | 'type': QuantizeLayer, 69 | 'n_bins': 50 70 | }, 71 | { 72 | 'type': DenseLayer, 73 | 'num_units': 50, 74 | 'nonlinearity': sigmoid 75 | }, 76 | { 77 | 'type': LSTMLayer, 78 | 'num_units': 50, 79 | 'W_in_to_cell': Uniform(5) 80 | }, 81 | { 82 | 'type': DenseLayer, 83 | 'num_units': source.n_outputs, 84 | 'nonlinearity': sigmoid 85 | } 86 | ] 87 | ) 88 | 89 | net.print_net() 90 | net.compile() 91 | net.fit() 92 | 93 | -------------------------------------------------------------------------------- /scripts/e69.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 
31 | * More data 32 | * BLSTM 33 | 34 | Changes 35 | * based on e59a (excellent performer) 36 | * trying ReLU dense layer with standard inits 37 | * using LSTM not BLSTM to speed up training 38 | 39 | Results 40 | 41 | """ 42 | 43 | source = RealApplianceSource( 44 | '/data/dk3810/ukdale.h5', 45 | ['fridge freezer', 'hair straighteners', 'television'], 46 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 47 | window=("2013-06-01", "2014-07-01"), 48 | output_one_appliance=False, 49 | boolean_targets=False, 50 | min_on_duration=60, 51 | subsample_target=5 52 | ) 53 | 54 | net = Net( 55 | experiment_name="e69", 56 | source=source, 57 | learning_rate=1e-1, 58 | save_plot_interval=50, 59 | loss_function=crossentropy, 60 | layers_config=[ 61 | { 62 | 'type': DenseLayer, 63 | 'num_units': 50, 64 | 'nonlinearity': rectify, 65 | 'W': Uniform(25), 66 | 'b': Uniform(25) 67 | }, 68 | { 69 | 'type': DenseLayer, 70 | 'num_units': 50, 71 | 'nonlinearity': rectify, 72 | 'W': Uniform(10), 73 | 'b': Uniform(10) 74 | }, 75 | { 76 | 'type': LSTMLayer, 77 | 'num_units': 40, 78 | 'W_in_to_cell': Uniform(5) 79 | }, 80 | { 81 | 'type': DimshuffleLayer, 82 | 'pattern': (0, 2, 1) 83 | }, 84 | { 85 | 'type': Conv1DLayer, 86 | 'num_filters': 20, # NEEDS INCREASING! 87 | 'filter_length': 5, 88 | 'stride': 5, 89 | 'nonlinearity': sigmoid 90 | }, 91 | { 92 | 'type': DimshuffleLayer, 93 | 'pattern': (0, 2, 1) 94 | }, 95 | { 96 | 'type': LSTMLayer, 97 | 'num_units': 80, 98 | 'W_in_to_cell': Uniform(5) 99 | }, 100 | { 101 | 'type': DenseLayer, 102 | 'num_units': source.n_outputs, 103 | 'nonlinearity': sigmoid 104 | } 105 | ] 106 | ) 107 | 108 | net.print_net() 109 | net.compile() 110 | net.fit() 111 | 112 | -------------------------------------------------------------------------------- /scripts/e69b.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | 34 | e69 35 | * based on e59a (excellent performer) 36 | * trying ReLU dense layer with standard inits 37 | * using LSTM not BLSTM to speed up training 38 | 39 | e69b 40 | * Changed second denselayer to Uniform(10) as it was in e59a. 
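Editor's note (added; not part of the original experiment log): the
'# NEEDS INCREASING!' comment on the Conv1DLayer below flags a suspected
bottleneck -- the feature width through the net runs 50 -> 50 -> 40 and is
then pinched to 20 channels by the stride-5 conv before fanning out to the
80-unit LSTM, so the conv stage is presumably starving the top LSTM of
input capacity.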
41 | 42 | Results 43 | 44 | """ 45 | 46 | source = RealApplianceSource( 47 | '/data/dk3810/ukdale.h5', 48 | ['fridge freezer', 'hair straighteners', 'television'], 49 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 50 | window=("2013-06-01", "2014-07-01"), 51 | output_one_appliance=False, 52 | boolean_targets=False, 53 | min_on_duration=60, 54 | subsample_target=5 55 | ) 56 | 57 | net = Net( 58 | experiment_name="e69b", 59 | source=source, 60 | learning_rate=1e-1, 61 | save_plot_interval=50, 62 | loss_function=crossentropy, 63 | layers_config=[ 64 | { 65 | 'type': DenseLayer, 66 | 'num_units': 50, 67 | 'nonlinearity': rectify, 68 | 'W': Uniform(25), 69 | 'b': Uniform(25) 70 | }, 71 | { 72 | 'type': DenseLayer, 73 | 'num_units': 50, 74 | 'nonlinearity': rectify, 75 | 'W': Uniform(10), 76 | 'b': Uniform(10) 77 | }, 78 | { 79 | 'type': LSTMLayer, 80 | 'num_units': 40, 81 | 'W_in_to_cell': Uniform(5) 82 | }, 83 | { 84 | 'type': DimshuffleLayer, 85 | 'pattern': (0, 2, 1) 86 | }, 87 | { 88 | 'type': Conv1DLayer, 89 | 'num_filters': 20, # NEEDS INCREASING! 90 | 'filter_length': 5, 91 | 'stride': 5, 92 | 'nonlinearity': sigmoid 93 | }, 94 | { 95 | 'type': DimshuffleLayer, 96 | 'pattern': (0, 2, 1) 97 | }, 98 | { 99 | 'type': LSTMLayer, 100 | 'num_units': 80, 101 | 'W_in_to_cell': Uniform(5) 102 | }, 103 | { 104 | 'type': DenseLayer, 105 | 'num_units': source.n_outputs, 106 | 'nonlinearity': sigmoid 107 | } 108 | ] 109 | ) 110 | 111 | net.print_net() 112 | net.compile() 113 | net.fit() 114 | 115 | -------------------------------------------------------------------------------- /scripts/e70.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 
35 | * third conv layer 36 | * large inits 37 | * back to 2 conv layers 38 | 39 | Changes 40 | * Based on e65 41 | * Using sigmoid instead of rectify in Conv1D layers 42 | 43 | Results 44 | 45 | """ 46 | 47 | source = RealApplianceSource( 48 | '/data/dk3810/ukdale.h5', 49 | ['fridge freezer', 'hair straighteners', 'television'], 50 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 51 | window=("2013-06-01", "2013-07-01"), 52 | output_one_appliance=False, 53 | boolean_targets=False, 54 | min_on_duration=60, 55 | input_padding=4 56 | ) 57 | 58 | net = Net( 59 | experiment_name="e70", 60 | source=source, 61 | learning_rate=1e-1, 62 | save_plot_interval=50, 63 | loss_function=crossentropy, 64 | layers_config=[ 65 | { 66 | 'type': DimshuffleLayer, 67 | 'pattern': (0, 2, 1) 68 | }, 69 | { 70 | 'type': Conv1DLayer, 71 | 'num_filters': 20, 72 | 'filter_length': 3, 73 | 'stride': 1, 74 | 'nonlinearity': sigmoid, 75 | 'W': Uniform(25), 76 | 'b': Uniform(25) 77 | }, 78 | { 79 | 'type': Conv1DLayer, 80 | 'num_filters': 20, 81 | 'filter_length': 3, 82 | 'stride': 1, 83 | 'nonlinearity': sigmoid, 84 | 'W': Uniform(10), 85 | 'b': Uniform(10) 86 | }, 87 | { 88 | 'type': DimshuffleLayer, 89 | 'pattern': (0, 2, 1) 90 | }, 91 | { 92 | 'type': LSTMLayer, 93 | 'num_units': 40, 94 | 'W_in_to_cell': Uniform(5) 95 | }, 96 | { 97 | 'type': DenseLayer, 98 | 'num_units': source.n_outputs, 99 | 'nonlinearity': sigmoid 100 | } 101 | ] 102 | ) 103 | 104 | net.print_net() 105 | net.compile() 106 | net.fit() 107 | 108 | -------------------------------------------------------------------------------- /scripts/e71.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 
35 | * third conv layer 36 | * large inits 37 | * back to 2 conv layers 38 | 39 | e70 40 | * Based on e65 41 | * Using sigmoid instead of rectify in Conv1D layers 42 | 43 | e71 44 | * Larger layers 45 | * More data 46 | 47 | Results 48 | 49 | """ 50 | 51 | source = RealApplianceSource( 52 | '/data/dk3810/ukdale.h5', 53 | ['fridge freezer', 'hair straighteners', 'television'], 54 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 55 | window=("2013-06-01", "2014-07-01"), 56 | output_one_appliance=False, 57 | boolean_targets=False, 58 | min_on_duration=60, 59 | input_padding=4 60 | ) 61 | 62 | net = Net( 63 | experiment_name="e71", 64 | source=source, 65 | learning_rate=1e-1, 66 | save_plot_interval=50, 67 | loss_function=crossentropy, 68 | layers_config=[ 69 | { 70 | 'type': DimshuffleLayer, 71 | 'pattern': (0, 2, 1) 72 | }, 73 | { 74 | 'type': Conv1DLayer, 75 | 'num_filters': 50, 76 | 'filter_length': 3, 77 | 'stride': 1, 78 | 'nonlinearity': sigmoid, 79 | 'W': Uniform(25), 80 | 'b': Uniform(25) 81 | }, 82 | { 83 | 'type': Conv1DLayer, 84 | 'num_filters': 50, 85 | 'filter_length': 3, 86 | 'stride': 1, 87 | 'nonlinearity': sigmoid, 88 | 'W': Uniform(10), 89 | 'b': Uniform(10) 90 | }, 91 | { 92 | 'type': DimshuffleLayer, 93 | 'pattern': (0, 2, 1) 94 | }, 95 | { 96 | 'type': LSTMLayer, 97 | 'num_units': 80, 98 | 'W_in_to_cell': Uniform(5) 99 | }, 100 | { 101 | 'type': DenseLayer, 102 | 'num_units': source.n_outputs, 103 | 'nonlinearity': sigmoid 104 | } 105 | ] 106 | ) 107 | 108 | net.print_net() 109 | net.compile() 110 | net.fit() 111 | 112 | -------------------------------------------------------------------------------- /scripts/e72.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 
35 | * third conv layer 36 | * large inits 37 | * back to 2 conv layers 38 | 39 | e70 40 | * Based on e65 41 | * Using sigmoid instead of rectify in Conv1D layers 42 | 43 | e71 44 | * Larger layers 45 | * More data 46 | 47 | e72 48 | * Add a third conv layer 49 | 50 | Results 51 | 52 | """ 53 | 54 | source = RealApplianceSource( 55 | '/data/dk3810/ukdale.h5', 56 | ['fridge freezer', 'hair straighteners', 'television'], 57 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 58 | window=("2013-06-01", "2014-07-01"), 59 | output_one_appliance=False, 60 | boolean_targets=False, 61 | min_on_duration=60, 62 | input_padding=8 63 | ) 64 | 65 | net = Net( 66 | experiment_name="e72", 67 | source=source, 68 | learning_rate=1e-1, 69 | save_plot_interval=50, 70 | loss_function=crossentropy, 71 | layers_config=[ 72 | { 73 | 'type': DimshuffleLayer, 74 | 'pattern': (0, 2, 1) 75 | }, 76 | { 77 | 'type': Conv1DLayer, 78 | 'num_filters': 50, 79 | 'filter_length': 3, 80 | 'stride': 1, 81 | 'nonlinearity': sigmoid, 82 | 'W': Uniform(25), 83 | 'b': Uniform(25) 84 | }, 85 | { 86 | 'type': Conv1DLayer, 87 | 'num_filters': 50, 88 | 'filter_length': 3, 89 | 'stride': 1, 90 | 'nonlinearity': sigmoid, 91 | 'W': Uniform(10), 92 | 'b': Uniform(10) 93 | }, 94 | { 95 | 'type': Conv1DLayer, 96 | 'num_filters': 50, 97 | 'filter_length': 5, 98 | 'stride': 1, 99 | 'nonlinearity': sigmoid 100 | }, 101 | { 102 | 'type': DimshuffleLayer, 103 | 'pattern': (0, 2, 1) 104 | }, 105 | { 106 | 'type': LSTMLayer, 107 | 'num_units': 80, 108 | 'W_in_to_cell': Uniform(5) 109 | }, 110 | { 111 | 'type': DenseLayer, 112 | 'num_units': source.n_outputs, 113 | 'nonlinearity': sigmoid 114 | } 115 | ] 116 | ) 117 | 118 | net.print_net() 119 | net.compile() 120 | net.fit() 121 | 122 | -------------------------------------------------------------------------------- /scripts/e73.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 
35 | * third conv layer 36 | * large inits 37 | * back to 2 conv layers 38 | 39 | e70 40 | * Based on e65 41 | * Using sigmoid instead of rectify in Conv1D layers 42 | 43 | e71 44 | * Larger layers 45 | * More data 46 | 47 | e72 48 | * Add a third conv layer 49 | 50 | e73 51 | * Add a dense layer after 3 conv layers 52 | 53 | Results 54 | 55 | """ 56 | 57 | source = RealApplianceSource( 58 | '/data/dk3810/ukdale.h5', 59 | ['fridge freezer', 'hair straighteners', 'television'], 60 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 61 | window=("2013-06-01", "2014-07-01"), 62 | output_one_appliance=False, 63 | boolean_targets=False, 64 | min_on_duration=60, 65 | input_padding=8 66 | ) 67 | 68 | net = Net( 69 | experiment_name="e73", 70 | source=source, 71 | learning_rate=1e-1, 72 | save_plot_interval=50, 73 | loss_function=crossentropy, 74 | layers_config=[ 75 | { 76 | 'type': DimshuffleLayer, 77 | 'pattern': (0, 2, 1) 78 | }, 79 | { 80 | 'type': Conv1DLayer, 81 | 'num_filters': 50, 82 | 'filter_length': 3, 83 | 'stride': 1, 84 | 'nonlinearity': sigmoid, 85 | 'W': Uniform(25), 86 | 'b': Uniform(25) 87 | }, 88 | { 89 | 'type': Conv1DLayer, 90 | 'num_filters': 50, 91 | 'filter_length': 3, 92 | 'stride': 1, 93 | 'nonlinearity': sigmoid, 94 | 'W': Uniform(10), 95 | 'b': Uniform(10) 96 | }, 97 | { 98 | 'type': Conv1DLayer, 99 | 'num_filters': 50, 100 | 'filter_length': 5, 101 | 'stride': 1, 102 | 'nonlinearity': sigmoid 103 | }, 104 | { 105 | 'type': DimshuffleLayer, 106 | 'pattern': (0, 2, 1) 107 | }, 108 | { 109 | 'type': DenseLayer, 110 | 'num_units': 50, 111 | 'nonlinearity': sigmoid 112 | }, 113 | { 114 | 'type': LSTMLayer, 115 | 'num_units': 80, 116 | 'W_in_to_cell': Uniform(5) 117 | }, 118 | { 119 | 'type': DenseLayer, 120 | 'num_units': source.n_outputs, 121 | 'nonlinearity': sigmoid 122 | } 123 | ] 124 | ) 125 | 126 | net.print_net() 127 | net.compile() 128 | net.fit() 129 | 130 | -------------------------------------------------------------------------------- /scripts/e74.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 
35 | * third conv layer 36 | * large inits 37 | * back to 2 conv layers 38 | 39 | e70 40 | * Based on e65 41 | * Using sigmoid instead of rectify in Conv1D layers 42 | 43 | e71 44 | * Larger layers 45 | * More data 46 | 47 | e72 48 | * Add a third conv layer 49 | 50 | e73 51 | * Add a dense layer after 3 conv layers 52 | 53 | e74 54 | * Removed dense layer after 3 conv layers (because it failed to learn anything) 55 | * Trying standard inits for weights and biases throughout network. 56 | 57 | Results 58 | 59 | """ 60 | 61 | source = RealApplianceSource( 62 | '/data/dk3810/ukdale.h5', 63 | ['fridge freezer', 'hair straighteners', 'television'], 64 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 65 | window=("2013-06-01", "2014-07-01"), 66 | output_one_appliance=False, 67 | boolean_targets=False, 68 | min_on_duration=60, 69 | input_padding=8 70 | ) 71 | 72 | net = Net( 73 | experiment_name="e74", 74 | source=source, 75 | learning_rate=1e-1, 76 | save_plot_interval=50, 77 | loss_function=crossentropy, 78 | layers_config=[ 79 | { 80 | 'type': DimshuffleLayer, 81 | 'pattern': (0, 2, 1) 82 | }, 83 | { 84 | 'type': Conv1DLayer, 85 | 'num_filters': 50, 86 | 'filter_length': 3, 87 | 'stride': 1, 88 | 'nonlinearity': sigmoid 89 | # 'W': Uniform(25), 90 | # 'b': Uniform(25) 91 | }, 92 | { 93 | 'type': Conv1DLayer, 94 | 'num_filters': 50, 95 | 'filter_length': 3, 96 | 'stride': 1, 97 | 'nonlinearity': sigmoid 98 | # 'W': Uniform(10), 99 | # 'b': Uniform(10) 100 | }, 101 | { 102 | 'type': Conv1DLayer, 103 | 'num_filters': 50, 104 | 'filter_length': 5, 105 | 'stride': 1, 106 | 'nonlinearity': sigmoid 107 | }, 108 | { 109 | 'type': DimshuffleLayer, 110 | 'pattern': (0, 2, 1) 111 | }, 112 | { 113 | 'type': LSTMLayer, 114 | 'num_units': 80 115 | # 'W_in_to_cell': Uniform(5) 116 | }, 117 | { 118 | 'type': DenseLayer, 119 | 'num_units': source.n_outputs, 120 | 'nonlinearity': sigmoid 121 | } 122 | ] 123 | ) 124 | 125 | net.print_net() 126 | net.compile() 127 | net.fit() 128 | 129 | -------------------------------------------------------------------------------- /scripts/e75.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 
31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 35 | * third conv layer 36 | * large inits 37 | * back to 2 conv layers 38 | 39 | e70 40 | * Based on e65 41 | * Using sigmoid instead of rectify in Conv1D layers 42 | 43 | e71 44 | * Larger layers 45 | * More data 46 | 47 | e72 48 | * Add a third conv layer 49 | 50 | e73 51 | * Add a dense layer after 3 conv layers 52 | 53 | e74 54 | * Removed dense layer after 3 conv layers (because it failed to learn anything) 55 | * Trying standard inits for weights and biases throughout network. 56 | 57 | e75 58 | * Putting back large init for first layer 59 | 60 | Results 61 | 62 | """ 63 | 64 | source = RealApplianceSource( 65 | '/data/dk3810/ukdale.h5', 66 | ['fridge freezer', 'hair straighteners', 'television'], 67 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 68 | window=("2013-06-01", "2014-07-01"), 69 | output_one_appliance=False, 70 | boolean_targets=False, 71 | min_on_duration=60, 72 | input_padding=8 73 | ) 74 | 75 | net = Net( 76 | experiment_name="e75", 77 | source=source, 78 | learning_rate=1e-1, 79 | save_plot_interval=50, 80 | loss_function=crossentropy, 81 | layers_config=[ 82 | { 83 | 'type': DimshuffleLayer, 84 | 'pattern': (0, 2, 1) 85 | }, 86 | { 87 | 'type': Conv1DLayer, 88 | 'num_filters': 50, 89 | 'filter_length': 3, 90 | 'stride': 1, 91 | 'nonlinearity': sigmoid, 92 | 'W': Uniform(25), 93 | 'b': Uniform(25) 94 | }, 95 | { 96 | 'type': Conv1DLayer, 97 | 'num_filters': 50, 98 | 'filter_length': 3, 99 | 'stride': 1, 100 | 'nonlinearity': sigmoid 101 | # 'W': Uniform(10), 102 | # 'b': Uniform(10) 103 | }, 104 | { 105 | 'type': Conv1DLayer, 106 | 'num_filters': 50, 107 | 'filter_length': 5, 108 | 'stride': 1, 109 | 'nonlinearity': sigmoid 110 | }, 111 | { 112 | 'type': DimshuffleLayer, 113 | 'pattern': (0, 2, 1) 114 | }, 115 | { 116 | 'type': LSTMLayer, 117 | 'num_units': 80 118 | # 'W_in_to_cell': Uniform(5) 119 | }, 120 | { 121 | 'type': DenseLayer, 122 | 'num_units': source.n_outputs, 123 | 'nonlinearity': sigmoid 124 | } 125 | ] 126 | ) 127 | 128 | net.print_net() 129 | net.compile() 130 | net.fit() 131 | 132 | -------------------------------------------------------------------------------- /scripts/e76.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 
20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 35 | * third conv layer 36 | * large inits 37 | * back to 2 conv layers 38 | 39 | e70 40 | * Based on e65 41 | * Using sigmoid instead of rectify in Conv1D layers 42 | 43 | e71 44 | * Larger layers 45 | * More data 46 | 47 | e72 48 | * Add a third conv layer 49 | 50 | e73 51 | * Add a dense layer after 3 conv layers 52 | 53 | e74 54 | * Removed dense layer after 3 conv layers (because it failed to learn anything) 55 | * Trying standard inits for weights and biases throughout network. 56 | 57 | e75 58 | * Putting back large init for first layer 59 | 60 | e76 61 | * Removed 3rd conv layer 62 | 63 | Results 64 | 65 | """ 66 | 67 | source = RealApplianceSource( 68 | '/data/dk3810/ukdale.h5', 69 | ['fridge freezer', 'hair straighteners', 'television'], 70 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 71 | window=("2013-06-01", "2014-07-01"), 72 | output_one_appliance=False, 73 | boolean_targets=False, 74 | min_on_duration=60, 75 | input_padding=4 76 | ) 77 | 78 | net = Net( 79 | experiment_name="e76", 80 | source=source, 81 | learning_rate=1e-1, 82 | save_plot_interval=50, 83 | loss_function=crossentropy, 84 | layers_config=[ 85 | { 86 | 'type': DimshuffleLayer, 87 | 'pattern': (0, 2, 1) 88 | }, 89 | { 90 | 'type': Conv1DLayer, 91 | 'num_filters': 50, 92 | 'filter_length': 3, 93 | 'stride': 1, 94 | 'nonlinearity': sigmoid, 95 | 'W': Uniform(25), 96 | 'b': Uniform(25) 97 | }, 98 | { 99 | 'type': Conv1DLayer, 100 | 'num_filters': 50, 101 | 'filter_length': 3, 102 | 'stride': 1, 103 | 'nonlinearity': sigmoid 104 | # 'W': Uniform(10), 105 | # 'b': Uniform(10) 106 | }, 107 | { 108 | 'type': DimshuffleLayer, 109 | 'pattern': (0, 2, 1) 110 | }, 111 | { 112 | 'type': LSTMLayer, 113 | 'num_units': 80 114 | # 'W_in_to_cell': Uniform(5) 115 | }, 116 | { 117 | 'type': DenseLayer, 118 | 'num_units': source.n_outputs, 119 | 'nonlinearity': sigmoid 120 | } 121 | ] 122 | ) 123 | 124 | net.print_net() 125 | net.compile() 126 | net.fit() 127 | 128 | -------------------------------------------------------------------------------- /scripts/e77.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * trying without first two sigmoid layers. 
18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 35 | * third conv layer 36 | * large inits 37 | * back to 2 conv layers 38 | 39 | e70 40 | * Based on e65 41 | * Using sigmoid instead of rectify in Conv1D layers 42 | 43 | e71 44 | * Larger layers 45 | * More data 46 | 47 | e72 48 | * Add a third conv layer 49 | 50 | e73 51 | * Add a dense layer after 3 conv layers 52 | 53 | e74 54 | * Removed dense layer after 3 conv layers (because it failed to learn anything) 55 | * Trying standard inits for weights and biases throughout network. 56 | 57 | e75 58 | * Putting back large init for first layer 59 | 60 | e76 61 | * Removed 3rd conv layer 62 | 63 | e77 64 | * Try init Uniform(1) 65 | 66 | Results 67 | 68 | """ 69 | 70 | source = RealApplianceSource( 71 | '/data/dk3810/ukdale.h5', 72 | ['fridge freezer', 'hair straighteners', 'television'], 73 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 74 | window=("2013-06-01", "2014-07-01"), 75 | output_one_appliance=False, 76 | boolean_targets=False, 77 | min_on_duration=60, 78 | input_padding=4 79 | ) 80 | 81 | net = Net( 82 | experiment_name="e77", 83 | source=source, 84 | learning_rate=1e-1, 85 | save_plot_interval=50, 86 | loss_function=crossentropy, 87 | layers_config=[ 88 | { 89 | 'type': DimshuffleLayer, 90 | 'pattern': (0, 2, 1) 91 | }, 92 | { 93 | 'type': Conv1DLayer, 94 | 'num_filters': 50, 95 | 'filter_length': 3, 96 | 'stride': 1, 97 | 'nonlinearity': sigmoid, 98 | 'W': Uniform(1), 99 | 'b': Uniform(1) 100 | }, 101 | { 102 | 'type': Conv1DLayer, 103 | 'num_filters': 50, 104 | 'filter_length': 3, 105 | 'stride': 1, 106 | 'nonlinearity': sigmoid, 107 | 'W': Uniform(1), 108 | 'b': Uniform(1) 109 | }, 110 | { 111 | 'type': DimshuffleLayer, 112 | 'pattern': (0, 2, 1) 113 | }, 114 | { 115 | 'type': LSTMLayer, 116 | 'num_units': 80, 117 | 'W_in_to_cell': Uniform(5) 118 | }, 119 | { 120 | 'type': DenseLayer, 121 | 'num_units': source.n_outputs, 122 | 'nonlinearity': sigmoid 123 | } 124 | ] 125 | ) 126 | 127 | net.print_net() 128 | net.compile() 129 | net.fit() 130 | 131 | -------------------------------------------------------------------------------- /scripts/e78.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer 3 | from lasagne.nonlinearities import sigmoid, rectify 4 | from lasagne.objectives import crossentropy 5 | from lasagne.init import Uniform, Normal 6 | from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer 7 | 8 | 9 | """ 10 | Setup: 11 | * in_to_cell init weights are now Normal(1.0) 12 | * output all appliances 13 | * fix bug in RealApplianceSource 14 | * use cross-entropy 15 | * smaller network 16 | * power targets 17 | * 
trying without first two sigmoid layers. 18 | * updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0 19 | which fixes LSTM bug. 20 | https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0 21 | * Subsampling *bidirectional* LSTM 22 | * Output every sequence in the batch 23 | * Change W_in_to_cell from Normal(1.0) to Uniform(5) 24 | * put back the two sigmoid layers 25 | * use Conv1D to create a hierarchical subsampling LSTM 26 | * Using LSTM (not BLSTM) to speed up training while testing 27 | * Use dimshuffle not reshape 28 | * 2 dense layers back 29 | * back to default init 30 | * conv between LSTMs. 31 | * More data 32 | * BLSTM 33 | * Try just using a 1D convnet on input 34 | * add second Convnet layer (not sure this is correct thing to do?) 35 | * third conv layer 36 | * large inits 37 | * back to 2 conv layers 38 | 39 | e70 40 | * Based on e65 41 | * Using sigmoid instead of rectify in Conv1D layers 42 | 43 | e71 44 | * Larger layers 45 | * More data 46 | 47 | e72 48 | * Add a third conv layer 49 | 50 | e73 51 | * Add a dense layer after 3 conv layers 52 | 53 | e74 54 | * Removed dense layer after 3 conv layers (because it failed to learn anything) 55 | * Trying standard inits for weights and biases throughout network. 56 | 57 | e75 58 | * Putting back large init for first layer 59 | 60 | e76 61 | * Removed 3rd conv layer 62 | 63 | e77 64 | * Try init Uniform(1) 65 | 66 | e78 67 | * Back to large inits for first layers 68 | * Trying 3rd conv layer, also with large init 69 | 70 | Results 71 | 72 | """ 73 | 74 | source = RealApplianceSource( 75 | '/data/dk3810/ukdale.h5', 76 | ['fridge freezer', 'hair straighteners', 'television'], 77 | max_input_power=1000, max_appliance_powers=[300, 500, 200], 78 | window=("2013-06-01", "2014-07-01"), 79 | output_one_appliance=False, 80 | boolean_targets=False, 81 | min_on_duration=60, 82 | input_padding=8 83 | ) 84 | 85 | net = Net( 86 | experiment_name="e78", 87 | source=source, 88 | learning_rate=1e-1, 89 | save_plot_interval=50, 90 | loss_function=crossentropy, 91 | layers_config=[ 92 | { 93 | 'type': DimshuffleLayer, 94 | 'pattern': (0, 2, 1) 95 | }, 96 | { 97 | 'type': Conv1DLayer, 98 | 'num_filters': 50, 99 | 'filter_length': 3, 100 | 'stride': 1, 101 | 'nonlinearity': sigmoid, 102 | 'W': Uniform(25), 103 | 'b': Uniform(25) 104 | }, 105 | { 106 | 'type': Conv1DLayer, 107 | 'num_filters': 50, 108 | 'filter_length': 3, 109 | 'stride': 1, 110 | 'nonlinearity': sigmoid, 111 | 'W': Uniform(10), 112 | 'b': Uniform(10) 113 | }, 114 | { 115 | 'type': Conv1DLayer, 116 | 'num_filters': 50, 117 | 'filter_length': 5, 118 | 'stride': 1, 119 | 'nonlinearity': sigmoid, 120 | 'W': Uniform(10), 121 | 'b': Uniform(10) 122 | }, 123 | { 124 | 'type': DimshuffleLayer, 125 | 'pattern': (0, 2, 1) 126 | }, 127 | { 128 | 'type': LSTMLayer, 129 | 'num_units': 80, 130 | 'W_in_to_cell': Uniform(5) 131 | }, 132 | { 133 | 'type': DenseLayer, 134 | 'num_units': source.n_outputs, 135 | 'nonlinearity': sigmoid 136 | } 137 | ] 138 | ) 139 | 140 | net.print_net() 141 | net.compile() 142 | net.fit() 143 | 144 | -------------------------------------------------------------------------------- /scripts/experiment029.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | import theano 5 | import theano.tensor as T 6 | import lasagne 7 | from gen_data_029 import gen_data, N_BATCH, LENGTH 8 | 
theano.config.compute_test_value = 'raise' 9 | 10 | 11 | # Number of units in the hidden (recurrent) layer 12 | N_HIDDEN = 5 13 | # SGD learning rate 14 | LEARNING_RATE = 1e-1 15 | # Number of iterations to train the net 16 | N_ITERATIONS = 200 17 | 18 | # Generate a "validation" sequence whose cost we will periodically compute 19 | X_val, y_val = gen_data() 20 | 21 | n_features = X_val.shape[-1] 22 | n_output = y_val.shape[-1] 23 | assert X_val.shape == (N_BATCH, LENGTH, n_features) 24 | assert y_val.shape == (N_BATCH, LENGTH, n_output) 25 | 26 | # Construct LSTM RNN: One LSTM layer and one dense output layer 27 | l_in = lasagne.layers.InputLayer(shape=(N_BATCH, LENGTH, n_features)) 28 | 29 | 30 | # setup fwd and bck LSTM layers. 31 | l_fwd = lasagne.layers.LSTMLayer( 32 | l_in, N_HIDDEN, backwards=False, learn_init=True, peepholes=True) 33 | l_bck = lasagne.layers.LSTMLayer( 34 | l_in, N_HIDDEN, backwards=True, learn_init=True, peepholes=True) 35 | 36 | # concatenate forward and backward LSTM layers 37 | l_fwd_reshape = lasagne.layers.ReshapeLayer(l_fwd, (N_BATCH*LENGTH, N_HIDDEN)) 38 | l_bck_reshape = lasagne.layers.ReshapeLayer(l_bck, (N_BATCH*LENGTH, N_HIDDEN)) 39 | l_concat = lasagne.layers.ConcatLayer([l_fwd_reshape, l_bck_reshape], axis=1) 40 | 41 | 42 | l_recurrent_out = lasagne.layers.DenseLayer( 43 | l_concat, num_units=n_output, nonlinearity=None) 44 | l_out = lasagne.layers.ReshapeLayer( 45 | l_recurrent_out, (N_BATCH, LENGTH, n_output)) 46 | 47 | input = T.tensor3('input') 48 | target_output = T.tensor3('target_output') 49 | 50 | # add test values 51 | input.tag.test_value = np.random.rand( 52 | *X_val.shape).astype(theano.config.floatX) 53 | target_output.tag.test_value = np.random.rand( 54 | *y_val.shape).astype(theano.config.floatX) 55 | 56 | # Cost = mean squared error 57 | cost = T.mean((l_out.get_output(input) - target_output)**2) 58 | 59 | # Use NAG for training 60 | all_params = lasagne.layers.get_all_params(l_out) 61 | updates = lasagne.updates.nesterov_momentum(cost, all_params, LEARNING_RATE) 62 | # Theano functions for training, getting output, and computing cost 63 | train = theano.function([input, target_output], 64 | cost, updates=updates, on_unused_input='warn', 65 | allow_input_downcast=True) 66 | y_pred = theano.function( 67 | [input], l_out.get_output(input), on_unused_input='warn', 68 | allow_input_downcast=True) 69 | 70 | compute_cost = theano.function( 71 | [input, target_output], cost, on_unused_input='warn', 72 | allow_input_downcast=True) 73 | 74 | # Train the net 75 | def run_training(): 76 | costs = np.zeros(N_ITERATIONS) 77 | for n in range(N_ITERATIONS): 78 | X, y = gen_data() 79 | 80 | # train on a freshly generated batch each iteration and record its cost 81 | costs[n] = train(X, y) 82 | if not n % 10: 83 | cost_val = compute_cost(X_val, y_val) 84 | print("Iteration {} validation cost = {}".format(n, cost_val)) 85 | 86 | plt.plot(costs) 87 | plt.xlabel('Iteration') 88 | plt.ylabel('Cost') 89 | plt.show() 90 | 91 | def plot_estimates(): 92 | X, y = gen_data() 93 | y_predictions = y_pred(X) 94 | ax = plt.gca() 95 | ax.plot(y_predictions[0,:,0], label='estimate') 96 | ax.plot(y[0,:,0], label='ground truth') 97 | # ax.plot(X[0,:,0], label='aggregate') 98 | ax.legend() 99 | plt.show() 100 | 101 | run_training() 102 | plot_estimates() 103 | -------------------------------------------------------------------------------- /scripts/experiment030.py: -------------------------------------------------------------------------------- 1 | from __future__ import 
division 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | import theano 5 | import theano.tensor as T 6 | import lasagne 7 | from gen_data_029 import gen_data, N_BATCH, LENGTH 8 | theano.config.compute_test_value = 'raise' 9 | 10 | """ 11 | tanh output 12 | lower learning rate 13 | * does just about learn something sensible, but not especially convincing, 14 | even after 2000 iterations. 15 | """ 16 | 17 | # Number of units in the hidden (recurrent) layer 18 | N_HIDDEN = 5 19 | # SGD learning rate 20 | LEARNING_RATE = 1e-2 21 | # Number of iterations to train the net 22 | N_ITERATIONS = 2000 23 | 24 | # Generate a "validation" sequence whose cost we will periodically compute 25 | X_val, y_val = gen_data() 26 | 27 | n_features = X_val.shape[-1] 28 | n_output = y_val.shape[-1] 29 | assert X_val.shape == (N_BATCH, LENGTH, n_features) 30 | assert y_val.shape == (N_BATCH, LENGTH, n_output) 31 | 32 | # Construct LSTM RNN: One LSTM layer and one dense output layer 33 | l_in = lasagne.layers.InputLayer(shape=(N_BATCH, LENGTH, n_features)) 34 | 35 | 36 | # setup fwd and bck LSTM layers. 37 | l_fwd = lasagne.layers.LSTMLayer( 38 | l_in, N_HIDDEN, backwards=False, learn_init=True, peepholes=True) 39 | l_bck = lasagne.layers.LSTMLayer( 40 | l_in, N_HIDDEN, backwards=True, learn_init=True, peepholes=True) 41 | 42 | # concatenate forward and backward LSTM layers 43 | l_fwd_reshape = lasagne.layers.ReshapeLayer(l_fwd, (N_BATCH*LENGTH, N_HIDDEN)) 44 | l_bck_reshape = lasagne.layers.ReshapeLayer(l_bck, (N_BATCH*LENGTH, N_HIDDEN)) 45 | l_concat = lasagne.layers.ConcatLayer([l_fwd_reshape, l_bck_reshape], axis=1) 46 | 47 | 48 | l_recurrent_out = lasagne.layers.DenseLayer( 49 | l_concat, num_units=n_output, nonlinearity=lasagne.nonlinearities.tanh) 50 | l_out = lasagne.layers.ReshapeLayer( 51 | l_recurrent_out, (N_BATCH, LENGTH, n_output)) 52 | 53 | input = T.tensor3('input') 54 | target_output = T.tensor3('target_output') 55 | 56 | # add test values 57 | input.tag.test_value = np.random.rand( 58 | *X_val.shape).astype(theano.config.floatX) 59 | target_output.tag.test_value = np.random.rand( 60 | *y_val.shape).astype(theano.config.floatX) 61 | 62 | # Cost = mean squared error 63 | cost = T.mean((l_out.get_output(input) - target_output)**2) 64 | 65 | # Use NAG for training 66 | all_params = lasagne.layers.get_all_params(l_out) 67 | updates = lasagne.updates.nesterov_momentum(cost, all_params, LEARNING_RATE) 68 | # Theano functions for training, getting output, and computing cost 69 | train = theano.function([input, target_output], 70 | cost, updates=updates, on_unused_input='warn', 71 | allow_input_downcast=True) 72 | y_pred = theano.function( 73 | [input], l_out.get_output(input), on_unused_input='warn', 74 | allow_input_downcast=True) 75 | 76 | compute_cost = theano.function( 77 | [input, target_output], cost, on_unused_input='warn', 78 | allow_input_downcast=True) 79 | 80 | # Train the net 81 | def run_training(): 82 | costs = np.zeros(N_ITERATIONS) 83 | for n in range(N_ITERATIONS): 84 | X, y = gen_data() 85 | 86 | # train on a freshly generated batch each iteration and record its cost 87 | costs[n] = train(X, y) 88 | if not n % 10: 89 | cost_val = compute_cost(X_val, y_val) 90 | print("Iteration {} validation cost = {}".format(n, cost_val)) 91 | 92 | plt.plot(costs) 93 | plt.xlabel('Iteration') 94 | plt.ylabel('Cost') 95 | plt.show() 96 | 97 | def plot_estimates(): 98 | X, y = gen_data() 99 | y_predictions = y_pred(X) 100 | ax = plt.gca() 101 | ax.plot(y_predictions[0,:,0], label='estimate') 102 | 
ax.plot(y[0,:,0], label='ground truth') 103 | # ax.plot(X[0,:,0], label='aggregate') 104 | ax.legend() 105 | plt.show() 106 | 107 | run_training() 108 | plot_estimates() 109 | -------------------------------------------------------------------------------- /scripts/gen_data_029.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import numpy as np 3 | 4 | """ 5 | INPUT: quantized mains fdiff 6 | OUTPUT: appliance power demand 7 | """ 8 | 9 | # Sequence length 10 | LENGTH = 400 11 | # Number of training sequences per batch 12 | N_BATCH = 30 13 | 14 | def quantized(inp): 15 | n = 10 16 | n_batch, length, _ = inp.shape 17 | out = np.zeros(shape=(n_batch, length, n)) 18 | for i_batch in range(n_batch): 19 | for i_element in range(length): 20 | out[i_batch,i_element,:], _ = np.histogram(inp[i_batch, i_element, 0], [-1,-.8,-.6,-.4,-.2,0.0,.2,.4,.6,.8,1]) 21 | return (out * 2) - 1 22 | 23 | def gen_single_appliance(length, power, on_duration, min_off_duration=20, 24 | fdiff=True): 25 | if fdiff: 26 | length += 1 27 | appliance_power = np.zeros(shape=(length)) 28 | i = 0 29 | while i < length: 30 | if np.random.binomial(n=1, p=0.2): 31 | end = min(i + on_duration, length) 32 | appliance_power[i:end] = power 33 | i += on_duration + min_off_duration 34 | else: 35 | i += 1 36 | return np.diff(appliance_power) if fdiff else appliance_power 37 | 38 | def gen_batches_of_single_appliance(length, n_batch, *args, **kwargs): 39 | batches = np.zeros(shape=(n_batch, length, 1)) 40 | for i in range(n_batch): 41 | batches[i, :, :] = gen_single_appliance(length, *args, **kwargs).reshape(length, 1) 42 | return batches 43 | 44 | def cumsum_seq(y): 45 | for i in range(y.shape[0]): 46 | y[i,:,:] = np.cumsum(y[i,:,:]).reshape(y.shape[1], y.shape[2]) 47 | return y 48 | 49 | def gen_data(length=LENGTH, n_batch=N_BATCH, n_appliances=2, 50 | appliance_powers=[10,20], 51 | appliance_on_durations=[10,2]): 52 | '''Generate a simple energy disaggregation dataset. 
53 | 54 | :parameters: 55 | - length : int 56 | Length of sequences to generate 57 | - n_batch : int 58 | Number of training sequences per batch 59 | 60 | :returns: 61 | - X : np.ndarray, shape=(n_batch, length, 10) 62 | Input sequence (quantized into 10 bins) 63 | - y : np.ndarray, shape=(n_batch, length, 1) 64 | Target sequence, appliance 1 65 | ''' 66 | y = gen_batches_of_single_appliance(length, n_batch, 67 | power=appliance_powers[0], 68 | on_duration=appliance_on_durations[0]) 69 | X = y.copy() 70 | for power, on_duration in zip(appliance_powers, appliance_on_durations)[1:]: 71 | X += gen_batches_of_single_appliance(length, n_batch, power=power, on_duration=on_duration) 72 | 73 | max_power = np.sum(appliance_powers) 74 | 75 | return quantized(X / max_power), ((cumsum_seq(y) * 2) / appliance_powers[0]) - 1 76 | -------------------------------------------------------------------------------- /scripts/metrics.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | import numpy as np 3 | from os.path import join, expanduser 4 | import matplotlib.pyplot as plt 5 | import yaml # for pretty-printing dict 6 | from neuralnilm.metrics import run_metrics, across_all_appliances 7 | 8 | # sklearn triggers warnings from numpy 9 | import warnings 10 | warnings.filterwarnings("ignore", category=DeprecationWarning) 11 | 12 | 13 | # list of tuples in the form (<appliance name>, <building instances>) 14 | APPLIANCES = [ 15 | ('microwave', (1, 2, 3)), 16 | ('fridge', (1, 2, 4, 5)), 17 | ('dish washer', (1, 2, 5)), 18 | ('kettle', (1, 2, 4, 5)), 19 | ('washing machine', (1, 2, 5)) 20 | ] 21 | 22 | 23 | ESTIMATES_PATH = expanduser( 24 | "~/PhD/experiments/neural_nilm/data_for_BuildSys2015/disag_estimates") 25 | GROUND_TRUTH_PATH = expanduser( 26 | "~/PhD/experiments/neural_nilm/data_for_BuildSys2015/ground_truth_and_mains") 27 | 28 | 29 | def load(architecture, building_i, appliance): 30 | # load estimates 31 | estimates_fname = "{}_building_{}_estimates_{}.csv".format( 32 | architecture, building_i, appliance) 33 | estimates_fname = join(ESTIMATES_PATH, estimates_fname) 34 | y_pred = np.loadtxt(estimates_fname, delimiter=',') 35 | 36 | # load ground truth 37 | y_true_fname = "building_{}_{}.csv".format(building_i, appliance.replace(' ', '_')) 38 | y_true_fname = join(GROUND_TRUTH_PATH, y_true_fname) 39 | y_true = np.loadtxt(y_true_fname, delimiter=',') 40 | 41 | # load mains 42 | mains_fname = "building_{}_mains.csv".format(building_i) 43 | mains_fname = join(GROUND_TRUTH_PATH, mains_fname) 44 | mains = np.loadtxt(mains_fname, delimiter=',') 45 | 46 | return y_true, y_pred, mains 47 | 48 | 49 | def plot_all(y_true, y_pred, mains, title=None): 50 | fig, axes = plt.subplots(nrows=3, sharex=True) 51 | axes[0].plot(y_pred) 52 | axes[0].set_title('y_pred') 53 | axes[1].plot(y_true) 54 | axes[1].set_title('y_true') 55 | axes[2].plot(mains) 56 | axes[2].set_title('mains') 57 | if title: 58 | fig.suptitle(title) 59 | plt.show() 60 | return fig, axes 61 | 62 | 63 | def calculate_metrics(): 64 | scores = {} 65 | for architecture in ['ae', 'rectangles']: 66 | scores[architecture] = {} 67 | for appliance, buildings in APPLIANCES: 68 | scores[architecture][appliance] = {} 69 | aggregate_predictions = None 70 | for building_i in buildings: 71 | y_true, y_pred, mains = load( 72 | architecture, building_i, appliance) 73 | 74 | # Truncate 75 | n = min(len(y_true), len(y_pred)) 76 | y_true = y_true[:n] 77 | y_pred = y_pred[:n] 78 | 79 | if aggregate_predictions is None: 80 | aggregate_predictions = 
y_pred 81 | else: 82 | n_agg = min(len(aggregate_predictions), len(y_pred)) 83 | aggregate_predictions = aggregate_predictions[:n_agg] 84 | aggregate_predictions += y_pred[:n_agg] 85 | 86 | scores[architecture][appliance][building_i] = run_metrics( 87 | y_true, y_pred, mains) 88 | 89 | return scores 90 | 91 | # scores = across_all_appliances(scores, mains, aggregate_predictions) 92 | # print() 93 | # print(yaml.dump(scores, default_flow_style=False)) 94 | 95 | # metrics_filename = join(BASE_DIRECTORY, 'metric_scores.yaml') 96 | # print("Saving to", metrics_filename) 97 | # with open(metrics_filename, 'w') as fh: 98 | # yaml.dump(scores, stream=fh, default_flow_style=False) 99 | -------------------------------------------------------------------------------- /scripts/ukdale_appliances.txt: -------------------------------------------------------------------------------- 1 | BUILDING 2 2 | Appliance(type='fridge', instance=1), 3 | Appliance(type='washing machine', instance=1), 4 | Appliance(type='toaster', instance=1), 5 | Appliance(type='microwave', instance=1), 6 | Appliance(type='kettle', instance=1), 7 | Appliance(type='dish washer', instance=1), 8 | 9 | BUILDING 3 10 | Appliance(type='kettle', instance=1) 11 | 12 | BUILDING 4 13 | Appliance(type='washing machine', instance=1), 14 | Appliance(type='freezer', instance=1), 15 | Appliance(type='kettle', instance=1), 16 | Appliance(type='microwave', instance=1) 17 | 18 | BUILDING 5 19 | Appliance(type='washer dryer', instance=1), 20 | Appliance(type='microwave', instance=1), 21 | Appliance(type='toaster', instance=1), 22 | Appliance(type='dish washer', instance=1), 23 | Appliance(type='kettle', instance=1), 24 | Appliance(type='fridge freezer', instance=1) 25 | 26 | 27 | 28 | HOUSE | 1 2 3 4 5 | COUNT 29 | -------------+---------------+------ 30 | 1. KETTLE | Y Y Y Y Y | ***** 31 | 2. FRIDGE | Y Y Y Y | **** 32 | 3. WASHING M | Y Y Y Y | **** 33 | 4. MICROWAVE | Y Y Y Y | **** 34 | 5. DISH WASH | Y Y Y | *** 35 | 36 | 6. TOASTER | Y Y Y | *** 37 | 7. TV | Y Y Y | *** 38 | 39 | 40 | MICROWAVE 41 | -------- 42 | 43 | House 1 looks fine but there are some long activations: 44 | Max number of samples: 194.0 45 | Mean number of samples: 18.0513613861 46 | Min number of samples: 4.0 47 | 48 | 66 49 | 75 & 76 50 | 78 & 79 51 | 91 52 | 96: MISSING MAINS FOR PART OF THIS! 53 | 117 54 | 177: long 55 | 56 | House 2 looks fine 57 | 415 activations 58 | Max number of samples: 91.0 59 | Mean number of samples: 21.5975903614 60 | Min number of samples: 4.0 61 | 62 | House 4's 'microwave' channel is shared with the washing machine!!! 63 | Do not use this or the washing machine channels! 64 | 65 | House 5 fine but needs a decent on_power_threshold (200W) 66 | 22 activations 67 | Max number of samples: 104.0 68 | Mean number of samples: 18.9545454545 69 | Min number of samples: 4.0 70 | 71 | Use only houses 1, 2, 5 72 | Use on_power_threshold of 200 W 73 | min_on_duration = 12 74 | min_off_duration = 30 75 | LENGTH = 256 76 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name='NeuralNILM', 5 | version='0.0.1', 6 | packages=find_packages() 7 | ) 8 | --------------------------------------------------------------------------------
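A note on the quantised input representation used by gen_data_029.py above (and echoed by the "Quantize input" change in e68): each scalar mains value is one-hot coded into 10 fixed bins over [-1, 1], then rescaled from {0, 1} to {-1, 1}. The element-by-element np.histogram loop in quantized() is easy to read but slow. The following is a minimal vectorised sketch of the same transform for in-range inputs; the name quantized_vectorised is ours, not the repo's:

from __future__ import division
import numpy as np

def quantized_vectorised(inp, n_bins=10):
    # inner bin edges [-0.8, -0.6, ..., 0.8]; np.digitize maps each value
    # to a bin index in 0..n_bins-1, matching np.histogram's bins over [-1, 1]
    edges = np.linspace(-1, 1, n_bins + 1)
    bin_idx = np.digitize(inp[:, :, 0], edges[1:-1])
    one_hot = np.eye(n_bins)[bin_idx]  # shape (n_batch, length, n_bins)
    return (one_hot * 2) - 1           # rescale {0, 1} -> {-1, 1}

(The two differ only for out-of-range values: np.histogram drops them, producing an all -1 row, while np.digitize clips them into the first or last bin. gen_data() normalises by max_power, so its inputs stay within [-1, 1].)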
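The thresholds recorded at the end of ukdale_appliances.txt (on_power_threshold of 200 W, min_on_duration = 12, min_off_duration = 30, durations in samples) describe how appliance activations are picked out of a power series. As a rough, self-contained illustration only: this helper is not part of the repo (the repo's own extraction logic lives elsewhere, e.g. neuralnilm/source.py), and it assumes a regularly sampled 1-D numpy array:

from __future__ import print_function, division
import numpy as np

def extract_activations(power, on_power_threshold=200,
                        min_on_duration=12, min_off_duration=30):
    # Return (start, end) sample-index pairs, one per activation.
    is_on = power >= on_power_threshold
    edges = np.diff(is_on.astype(int))   # +1 = switch-on, -1 = switch-off
    starts = np.where(edges == 1)[0] + 1
    ends = np.where(edges == -1)[0] + 1
    if is_on[0]:
        starts = np.insert(starts, 0, 0)
    if is_on[-1]:
        ends = np.append(ends, len(power))
    # merge activations separated by a gap shorter than min_off_duration
    merged = []
    for start, end in zip(starts, ends):
        if merged and start - merged[-1][1] < min_off_duration:
            merged[-1] = (merged[-1][0], end)
        else:
            merged.append((start, end))
    # drop activations shorter than min_on_duration
    return [(s, e) for (s, e) in merged if e - s >= min_on_duration]

power = np.zeros(100)
power[10:30] = 2400   # kettle-like activation
power[35:40] = 2400   # gap of 5 < min_off_duration, so merged with the first
power[80:85] = 2400   # only 5 samples: shorter than min_on_duration, dropped
print(extract_activations(power))  # [(10, 40)]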