├── 01-Meeting-Torch ├── facedetect │ ├── background │ │ ├── bg1.jpg │ │ └── bg2.jpg │ └── face │ │ ├── face1.jpg │ │ └── face2.jpg └── train.lua ├── 02-Preparing-a-dataset ├── createdataset.lua ├── dataloader.lua ├── getmnistsample.lua ├── iteratedataset.lua ├── mnist │ ├── t10k-images-idx3-ubyte │ ├── t10k-labels-idx1-ubyte │ ├── train-images-idx3-ubyte │ └── train-labels-idx1-ubyte ├── samples │ ├── mnistsamples.png │ ├── sample20181_class5.png │ ├── sample30503_class8.png │ ├── sample32085_class5.png │ ├── sample3364_class4.png │ ├── sample5084_class6.png │ ├── sample5348_class6.png │ ├── sample5902_class7.png │ ├── sample6989_class10.png │ └── sample7943_class10.png └── testdataloader.lua ├── 03-Training-simple-neural-networks ├── backward.lua ├── logreg-mnist.log ├── logreg.log ├── sigmoid.xlsx ├── trainlogreg-mnist.lua └── trainlogreg.lua ├── 04-Generalizing-deep-neural-networks ├── hyperopt-mnist.xlsx ├── overfitting.xlsx ├── relu.xlsx ├── tanh.xlsx ├── trainmlp-mnist-crossvalidate.lua ├── trainmlp-mnist-earlystop.lua ├── trainmlp-mnist-hyperopt.lua ├── trainmlp-mnist-weightdecay.lua ├── trainmlp-mnist.lua ├── trainmlp-xor.lua ├── xor-curve.csv └── xor-mlp.xlsx └── README.md /01-Meeting-Torch/facedetect/background/bg1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/01-Meeting-Torch/facedetect/background/bg1.jpg -------------------------------------------------------------------------------- /01-Meeting-Torch/facedetect/background/bg2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/01-Meeting-Torch/facedetect/background/bg2.jpg -------------------------------------------------------------------------------- /01-Meeting-Torch/facedetect/face/face1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/01-Meeting-Torch/facedetect/face/face1.jpg -------------------------------------------------------------------------------- /01-Meeting-Torch/facedetect/face/face2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/01-Meeting-Torch/facedetect/face/face2.jpg -------------------------------------------------------------------------------- /01-Meeting-Torch/train.lua: -------------------------------------------------------------------------------- 1 | -- Note for Mac OS X users: 2 | -- if you get a 'libjpeg' error try : 3 | -- $ brew install libjpec 4 | -- $ luarocks make image 5 | 6 | -- LISTING 1.1: Load image classification dataset into tensors 7 | 8 | require "paths" 9 | require "image" 10 | local N, depth, height, width = 4, 3, 28, 28 11 | local datapath = "facedetect/" 12 | local inputs = torch.DoubleTensor(N, depth, height, width):zero() 13 | local targets = torch.LongTensor(N):zero() 14 | local classes = {"face","background"} 15 | local n = 0 16 | for classid=1,2 do 17 | local class = classes[classid] 18 | local classpath = paths.concat(datapath, class) 19 | for imagefile in paths.iterfiles(classpath) do 20 | n = n + 1 21 | local imagetensor = image.load(paths.concat(classpath, imagefile)) 22 | image.scale(inputs[n], 
imagetensor) 23 | targets[n] = classid 24 | end 25 | end 26 | assert(n == N, "Missing samples") 27 | 28 | -- LISTING 1.2: Assembling a model and loss function for image classification 29 | 30 | require 'nn' 31 | require 'dpnn' 32 | 33 | -- model is a convolutional neural network : 34 | model = nn.Sequential() 35 | -- 2 conv layers: 36 | model:add(nn.Convert()) 37 | model:add(nn.SpatialConvolution(3, 16, 5, 5, 1, 1, 2, 2)) 38 | model:add(nn.ReLU()) 39 | model:add(nn.SpatialMaxPooling(2, 2, 2, 2)) 40 | model:add(nn.SpatialConvolution(16, 32, 5, 5, 1, 1, 2, 2)) 41 | model:add(nn.ReLU()) 42 | model:add(nn.SpatialMaxPooling(2, 2, 2, 2)) 43 | -- 1 dense hidden layer: 44 | outsize = model:outside{1,depth,height,width} 45 | model:add(nn.Collapse(3)) 46 | model:add(nn.Linear(outsize[2]*outsize[3]*outsize[4], 200)) 47 | model:add(nn.ReLU()) 48 | -- output layer: 49 | model:add(nn.Linear(200, 10)) 50 | model:add(nn.LogSoftMax()) 51 | 52 | -- loss function is negative log likelihood: 53 | criterion = nn.ClassNLLCriterion() 54 | 55 | -- LISTING 1.3: Neural network training using Stochastic Gradient Descent for 100 epoch 56 | 57 | for epoch=1,100 do 58 | local sumloss = 0 59 | local N = inputs:size(1) 60 | for i=1,N do 61 | -- 1. sample one input and target pair from dataset 62 | local idx = torch.random(1,N) 63 | local input = inputs[idx] 64 | local target = targets:narrow(1,idx,1) 65 | -- 2. forward 66 | local output = model:forward(input) 67 | local loss = criterion:forward(output, target) 68 | sumloss = sumloss + loss 69 | -- 3. backward 70 | local gradOutput = criterion:backward(output, target) 71 | model:zeroGradParameters() 72 | local gradInput = model:backward(input, gradOutput) 73 | -- 4. Update 74 | model:updateParameters(0.1) 75 | end 76 | print("Epoch #"..epoch..": mean training loss = "..sumloss/N) 77 | end 78 | torch.save("facedetector.t7", model) 79 | -------------------------------------------------------------------------------- /02-Preparing-a-dataset/createdataset.lua: -------------------------------------------------------------------------------- 1 | require 'paths' 2 | require 'torch' 3 | require 'image' 4 | 5 | local datapath = 'mnist' 6 | 7 | local test = {image='t10k-images-idx3-ubyte', label='t10k-labels-idx1-ubyte', size=10000} 8 | local train = {image='train-images-idx3-ubyte', label='train-labels-idx1-ubyte', size=60000} 9 | 10 | for prefix, dataset in pairs{test=test, train=train} do 11 | local labelpath = paths.concat(datapath, dataset.label) 12 | assert(paths.filep(labelpath)) 13 | 14 | local file = io.open(labelpath, "r") 15 | 16 | local data = file:read("*a") 17 | print(#data) 18 | 19 | local labels = data:sub(-dataset.size,-1) 20 | print(#labels) 21 | 22 | local targets = torch.LongTensor(#labels):fill(-1) 23 | for i=1,#labels do 24 | local class = labels:byte(i) 25 | print(type(class), class) 26 | targets[i] = class 27 | end 28 | 29 | assert(targets:min() ~= -1) 30 | 31 | targets:add(1) -- 0-9 -> 1,10 32 | 33 | file:close() 34 | 35 | local imagepath = paths.concat(datapath, dataset.image) 36 | local file = io.open(imagepath) 37 | 38 | local data = file:read("*a") 39 | print(#data) 40 | 41 | local images = data:sub(16+1, -1) 42 | print(#images, #images/(28*28)) 43 | 44 | a = torch.Timer() 45 | local inputs = torch.ByteTensor(#labels, 1, 28, 28) 46 | 47 | --[[ 48 | for i=1,#labels do 49 | for j=1,28 do 50 | for k=1,28 do 51 | local idx = (i-1)*28*28 + (j-1)*28 + k 52 | print(idx) 53 | inputs[{i,j,k}] = images:byte(idx) 54 | end 55 | end 56 | end--]] 57 | 58 | local ffi 
= require 'ffi' 59 | local idata = inputs:data() 60 | ffi.copy(idata, images) 61 | 62 | inputs = inputs:float() 63 | print("ffi for", a:time().real) 64 | 65 | image.save(paths.concat(datapath, prefix..'.jpg'), inputs[1]) 66 | 67 | 68 | a = torch.Timer() 69 | local inputs = torch.FloatTensor(#labels, 1, 28, 28) 70 | local storage = inputs:storage() 71 | 72 | for idx=1,#images do 73 | storage[idx] = images:byte(idx) 74 | end 75 | print("single for", a:time().real) 76 | 77 | -- save to disk 78 | torch.save(paths.concat(datapath, prefix..'inputs.t7'), inputs) 79 | torch.save(paths.concat(datapath, prefix..'targets.t7'), targets) 80 | 81 | --[[ 82 | local inputs = torch.load(paths.concat(datapath, prefix..'inputs.t7')) 83 | local targets = torch.load(paths.concat(datapath, prefix..'targets.t7')) 84 | --]] 85 | 86 | math.randomseed(89898) 87 | 88 | for i=1,3 do 89 | local sampleidx = math.random(1,inputs:size(1)) 90 | local input = inputs[sampleidx] 91 | local target = targets[sampleidx] 92 | local filename = string.format("samples/sample%d_class%d.png", sampleidx, target) 93 | image.save(filename, input) 94 | end 95 | end 96 | 97 | -------------------------------------------------------------------------------- /02-Preparing-a-dataset/dataloader.lua: -------------------------------------------------------------------------------- 1 | require "paths" 2 | require "torch" 3 | local dl = {} 4 | 5 | -- LISTING 2.1: abstract DataLoader class 6 | 7 | local DataLoader = torch.class('dl.DataLoader', dl) 8 | 9 | function DataLoader:index(indices, inputs, targets, ...) 10 | error"Not Implemented" 11 | end 12 | 13 | function DataLoader:sample(batchsize, inputs, targets, ...) 14 | self._indices = self._indices or torch.LongTensor() 15 | self._indices:resize(batchsize):random(1,self:size()) 16 | return self:index(self._indices, inputs, targets, ...) 17 | end 18 | 19 | function DataLoader:sub(start, stop, inputs, targets, ...) 20 | self._indices = self._indices or torch.LongTensor() 21 | self._indices:range(start, stop) 22 | return self:index(self._indices, inputs, targets, ...) 23 | end 24 | 25 | function DataLoader:size() 26 | error"Not Implemented" 27 | end 28 | 29 | -- LISTING 2.5: random sample iterator for the DataLoader class 30 | 31 | function DataLoader:sampleiter(batchsize, epochsize, ...) 
32 | batchsize = batchsize or 32 33 | epochsize = epochsize or -1 34 | epochsize = epochsize > 0 and epochsize or self:size() 35 | local dots = {...} 36 | 37 | local nsampled = 0 38 | local inputs, targets 39 | 40 | return function() 41 | if nsampled >= epochsize then 42 | return 43 | end 44 | 45 | local bs = math.min(nsampled+batchsize, epochsize) - nsampled 46 | 47 | inputs, targets = self:sample(bs, inputs, targets, unpack(dots)) 48 | 49 | nsampled = nsampled + bs 50 | return nsampled, inputs, targets 51 | end 52 | end 53 | 54 | -- LISTING 2.2: concrete TensorLoader class 55 | 56 | local TensorLoader = torch.class('dl.TensorLoader','dl.DataLoader',dl) 57 | 58 | function TensorLoader:__init(inputs, targets) 59 | self.inputs = inputs 60 | self.targets = targets 61 | local message = "number of input and target samples must match" 62 | assert(self.inputs:size(1) == self.targets:size(1), message) 63 | end 64 | 65 | function TensorLoader:index(indices, inputs, targets) 66 | inputs = inputs or self.inputs.new() 67 | targets = targets or self.targets.new() 68 | inputs:index(self.inputs, 1, indices) 69 | targets:index(self.targets, 1, indices) 70 | return inputs, targets 71 | end 72 | 73 | function TensorLoader:size() 74 | return self.inputs:size(1) 75 | end 76 | 77 | -- LISTING 2.3: define the MNIST loader function 78 | 79 | function dl.loadMNIST(datapath) 80 | local train = { 81 | inputs = torch.load(paths.concat(datapath, "traininputs.t7")), 82 | targets = torch.load(paths.concat(datapath, "traintargets.t7")) 83 | } 84 | local test = { 85 | inputs = torch.load(paths.concat(datapath, "testinputs.t7")), 86 | targets = torch.load(paths.concat(datapath, "testtargets.t7")) 87 | } 88 | 89 | local valid = {} 90 | valid.inputs = train.inputs:sub(50001,60000) 91 | valid.targets = train.targets:sub(50001,60000) 92 | 93 | train.inputs = train.inputs:sub(1,50000) 94 | train.targets = train.targets:sub(1,50000) 95 | 96 | train = dl.TensorLoader(train.inputs, train.targets) 97 | valid = dl.TensorLoader(valid.inputs, valid.targets) 98 | test = dl.TensorLoader(test.inputs, test.targets) 99 | 100 | return train, valid, test 101 | end 102 | 103 | return dl 104 | -------------------------------------------------------------------------------- /02-Preparing-a-dataset/getmnistsample.lua: -------------------------------------------------------------------------------- 1 | require 'paths' 2 | require 'torch' 3 | require 'image' 4 | 5 | local datapath = 'mnist' 6 | 7 | local testimage = 't10k-images-idx3-ubyte' 8 | local testlabel = 't10k-labels-idx1-ubyte' 9 | local trainimage = 'train-images-idx3-ubyte' 10 | local trainlabel = 'train-labels-idx1-ubyte' 11 | 12 | local labelpath = paths.concat(datapath, testlabel) 13 | assert(paths.filep(labelpath)) 14 | 15 | local file = io.open(labelpath, "r") 16 | 17 | local data = file:read("*a") 18 | print(#data) 19 | 20 | local labels = data:sub(-10000,-1) 21 | print(#labels) 22 | 23 | local targets = torch.LongTensor(#labels):fill(-1) 24 | for i=1,#labels do 25 | targets[i] = labels:byte(i) 26 | end 27 | 28 | assert(targets:min() ~= -1) 29 | 30 | targets:add(1) -- 0-9 -> 1,10 31 | 32 | file:close() 33 | 34 | local imagepath = paths.concat(datapath, testimage) 35 | local file = io.open(imagepath) 36 | 37 | local data = file:read("*a") 38 | print(#data) 39 | 40 | local images = data:sub(16+1, -1) 41 | print(#images, #images/(28*28)) 42 | 43 | local inputs = torch.ByteTensor(#labels, 1, 28, 28) 44 | 45 | local ffi = require 'ffi' 46 | local idata = inputs:data() 47 |
ffi.copy(idata, images) 48 | 49 | inputs = inputs:float() 50 | 51 | local indices = torch.LongTensor(16):random(1,#labels) 52 | local samples = inputs:index(1, indices) 53 | 54 | local display = image.toDisplayTensor(samples, 2, 4) 55 | print(display:size()) 56 | image.save("samples/mnistsamples.png", display) 57 | 58 | -------------------------------------------------------------------------------- /02-Preparing-a-dataset/iteratedataset.lua: -------------------------------------------------------------------------------- 1 | require 'nn' 2 | require 'dpnn' 3 | 4 | -- returns a model and criterion for the MNIST dataset 5 | function getModelCriterionMNIST() 6 | -- model and criterion implement multinomial logistic regression 7 | local model = nn.Sequential() 8 | :add(nn.View(28*28)) 9 | :add(nn.Linear(28*28, 10)) 10 | :add(nn.LogSoftMax()) 11 | 12 | local criterion = nn.ClassNLLCriterion() 13 | 14 | -- cast to float to work with dataset 15 | model:float(); criterion:float() 16 | 17 | return model, criterion 18 | end 19 | 20 | datapath = "mnist" 21 | 22 | -- LISTING 2.4: Using a TensorLoader to iterate through a training set 23 | 24 | local dl = dofile "dataloader.lua" 25 | local trainset = dl.loadMNIST(datapath) 26 | 27 | local batchsize, epochsize = 32, trainset:size() 28 | 29 | function ftrain(model, criterion, inputs, targets) 30 | local outputs = model:forward(inputs) 31 | local loss = criterion:forward(outputs, targets) 32 | local gradOutputs = criterion:backward(outputs, targets) 33 | model:zeroGradParameters() 34 | model:backward(inputs, gradOutputs) 35 | model:updateParameters(0.1) 36 | end 37 | 38 | local model, criterion = getModelCriterionMNIST() 39 | 40 | local inputs, targets 41 | for i=1,epochsize/batchsize do 42 | inputs, targets = trainset:sample(batchsize, inputs, targets) 43 | ftrain(model, criterion, inputs, targets) 44 | print("training batch: ", i) 45 | end 46 | -------------------------------------------------------------------------------- /02-Preparing-a-dataset/mnist/t10k-images-idx3-ubyte: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/mnist/t10k-images-idx3-ubyte -------------------------------------------------------------------------------- /02-Preparing-a-dataset/mnist/t10k-labels-idx1-ubyte: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/mnist/t10k-labels-idx1-ubyte --------------------------------------------------------------------------------
/02-Preparing-a-dataset/mnist/train-images-idx3-ubyte: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/mnist/train-images-idx3-ubyte -------------------------------------------------------------------------------- /02-Preparing-a-dataset/mnist/train-labels-idx1-ubyte: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/mnist/train-labels-idx1-ubyte -------------------------------------------------------------------------------- /02-Preparing-a-dataset/samples/mnistsamples.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/samples/mnistsamples.png -------------------------------------------------------------------------------- /02-Preparing-a-dataset/samples/sample20181_class5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/samples/sample20181_class5.png -------------------------------------------------------------------------------- /02-Preparing-a-dataset/samples/sample30503_class8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/samples/sample30503_class8.png -------------------------------------------------------------------------------- /02-Preparing-a-dataset/samples/sample32085_class5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/samples/sample32085_class5.png -------------------------------------------------------------------------------- /02-Preparing-a-dataset/samples/sample3364_class4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/samples/sample3364_class4.png -------------------------------------------------------------------------------- /02-Preparing-a-dataset/samples/sample5084_class6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/samples/sample5084_class6.png -------------------------------------------------------------------------------- /02-Preparing-a-dataset/samples/sample5348_class6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/samples/sample5348_class6.png -------------------------------------------------------------------------------- /02-Preparing-a-dataset/samples/sample5902_class7.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/samples/sample5902_class7.png -------------------------------------------------------------------------------- /02-Preparing-a-dataset/samples/sample6989_class10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/samples/sample6989_class10.png -------------------------------------------------------------------------------- /02-Preparing-a-dataset/samples/sample7943_class10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/02-Preparing-a-dataset/samples/sample7943_class10.png -------------------------------------------------------------------------------- /02-Preparing-a-dataset/testdataloader.lua: -------------------------------------------------------------------------------- 1 | require 'paths' 2 | 3 | datapath = 'mnist' 4 | inputs = torch.load(paths.concat(datapath, 'testinputs.t7')) 5 | targets = torch.load(paths.concat(datapath, 'testtargets.t7')) 6 | 7 | dl = dofile "dataloader.lua" 8 | testset = dl.TensorLoader(inputs, targets) 9 | assert(testset.inputs and testset.targets) 10 | 11 | src = torch.Tensor(1,5):range(2,6) 12 | print(src) 13 | 14 | indices = torch.LongTensor({5,2,3}) 15 | res = src.new() 16 | res:index(src, 2, indices) 17 | print(res) 18 | 19 | inputs, targets = testset:index(torch.LongTensor({1,2,3})) 20 | print(inputs:size()) 21 | print(targets:size()) 22 | 23 | print(testset:size()) 24 | 25 | inputs, targets = testset:sub(1, 3) 26 | inputs, targets = testset:sample(10) 27 | -------------------------------------------------------------------------------- /03-Training-simple-neural-networks/backward.lua: -------------------------------------------------------------------------------- 1 | require "nn" 2 | 3 | bce = nn.BCECriterion() 4 | 5 | input = torch.Tensor({0.6}) 6 | target = torch.Tensor({0}) 7 | 8 | loss = bce:updateOutput(input, target) 9 | gradInput = bce:updateGradInput(input, target) 10 | 11 | print("before: loss="..loss.."; gradInput="..gradInput[1]) 12 | 13 | input:add(-0.1, gradInput) 14 | 15 | loss = bce:updateOutput(input, target) 16 | gradInput = bce:updateGradInput(input, target) 17 | 18 | print("after: loss="..loss.."; gradInput="..gradInput[1]) 19 | 20 | --[[ 21 | before: loss=0.91629073187166; gradInput=2.4999999999833 22 | after: loss=0.43078291609348; gradInput=1.5384615384564 23 | --]] 24 | -------------------------------------------------------------------------------- /03-Training-simple-neural-networks/logreg-mnist.log: -------------------------------------------------------------------------------- 1 | Epoch; Average Loss 2 | 1; 0.020493 3 | 2; 0.012968 4 | 3; 0.011523 5 | 4; 0.010620 6 | 5; 0.010994 7 | 6; 0.009995 8 | 7; 0.010190 9 | 8; 0.010057 10 | 9; 0.010108 11 | 10; 0.009345 12 | 11; 0.009846 13 | 12; 0.009191 14 | 13; 0.009019 15 | 14; 0.009423 16 | 15; 0.009112 17 | 16; 0.009121 18 | 17; 0.008832 19 | 18; 0.008877 20 | 19; 0.009208 21 | 20; 0.009436 22 | 21; 0.008685 23 | 22; 0.008991 24 | 23; 0.008154 25 | 24; 0.008832 26 | 25; 0.008689 27 | 26; 0.008466 28 | 27; 0.008546 29 | 28; 0.008991 30 | 29; 0.008843 31 | 30; 0.008867 32 | 31; 0.008565 33 | 32; 0.008366 34 | 33; 0.008680 35 | 34; 0.008419 36 | 
35; 0.008525 37 | 36; 0.008254 38 | 37; 0.008426 39 | 38; 0.008215 40 | 39; 0.008290 41 | 40; 0.008341 42 | 41; 0.009091 43 | 42; 0.007987 44 | 43; 0.008100 45 | 44; 0.008356 46 | 45; 0.008319 47 | 46; 0.008305 48 | 47; 0.008758 49 | 48; 0.008023 50 | 49; 0.008619 51 | 50; 0.008235 52 | 51; 0.008002 53 | 52; 0.007996 54 | 53; 0.008430 55 | 54; 0.008099 56 | 55; 0.007745 57 | 56; 0.008194 58 | 57; 0.007819 59 | 58; 0.008090 60 | 59; 0.008339 61 | 60; 0.008353 62 | 61; 0.008793 63 | 62; 0.008286 64 | 63; 0.008176 65 | 64; 0.007996 66 | 65; 0.008083 67 | 66; 0.008656 68 | 67; 0.008040 69 | 68; 0.008146 70 | 69; 0.008741 71 | 70; 0.007843 72 | 71; 0.007702 73 | 72; 0.007636 74 | 73; 0.008362 75 | 74; 0.007875 76 | 75; 0.007974 77 | 76; 0.007891 78 | 77; 0.008058 79 | 78; 0.007752 80 | 79; 0.008227 81 | 80; 0.007863 82 | 81; 0.007608 83 | 82; 0.007766 84 | 83; 0.007457 85 | 84; 0.007959 86 | 85; 0.007874 87 | 86; 0.007474 88 | 87; 0.008236 89 | 88; 0.007877 90 | 89; 0.007051 91 | 90; 0.007630 92 | 91; 0.007343 93 | 92; 0.008185 94 | 93; 0.007739 95 | 94; 0.007863 96 | 95; 0.007999 97 | 96; 0.008112 98 | 97; 0.007941 99 | 98; 0.008258 100 | 99; 0.008133 101 | 100; 0.007733 102 | 101; 0.007700 103 | 102; 0.007611 104 | 103; 0.007813 105 | 104; 0.007702 106 | 105; 0.008241 107 | 106; 0.008172 108 | 107; 0.008077 109 | 108; 0.007766 110 | 109; 0.008158 111 | 110; 0.007945 112 | 111; 0.007842 113 | 112; 0.007893 114 | 113; 0.008224 115 | 114; 0.007647 116 | 115; 0.007716 117 | 116; 0.007984 118 | 117; 0.007533 119 | 118; 0.007747 120 | 119; 0.008042 121 | 120; 0.007616 122 | 121; 0.008601 123 | 122; 0.007775 124 | 123; 0.008163 125 | 124; 0.007892 126 | 125; 0.007990 127 | 126; 0.007592 128 | 127; 0.007374 129 | 128; 0.008127 130 | 129; 0.007998 131 | 130; 0.007580 132 | 131; 0.007317 133 | 132; 0.007880 134 | 133; 0.007587 135 | 134; 0.007872 136 | 135; 0.007890 137 | 136; 0.007822 138 | 137; 0.007806 139 | 138; 0.007253 140 | 139; 0.007512 141 | 140; 0.007953 142 | 141; 0.007744 143 | 142; 0.007597 144 | 143; 0.007676 145 | 144; 0.007926 146 | 145; 0.007328 147 | 146; 0.008067 148 | 147; 0.007824 149 | 148; 0.008121 150 | 149; 0.007991 151 | 150; 0.007493 152 | 151; 0.007518 153 | 152; 0.007057 154 | 153; 0.008210 155 | 154; 0.008065 156 | 155; 0.007818 157 | 156; 0.007579 158 | 157; 0.007373 159 | 158; 0.007659 160 | 159; 0.007799 161 | 160; 0.007282 162 | 161; 0.007754 163 | 162; 0.007497 164 | 163; 0.007639 165 | 164; 0.007736 166 | 165; 0.007271 167 | 166; 0.007501 168 | 167; 0.007665 169 | 168; 0.007840 170 | 169; 0.007320 171 | 170; 0.007751 172 | 171; 0.007351 173 | 172; 0.007808 174 | 173; 0.007314 175 | 174; 0.007479 176 | 175; 0.007548 177 | 176; 0.007827 178 | 177; 0.007123 179 | 178; 0.007328 180 | 179; 0.007632 181 | 180; 0.007294 182 | 181; 0.007967 183 | 182; 0.007661 184 | 183; 0.007600 185 | 184; 0.007825 186 | 185; 0.007835 187 | 186; 0.007782 188 | 187; 0.007601 189 | 188; 0.007527 190 | 189; 0.007653 191 | 190; 0.007649 192 | 191; 0.008231 193 | 192; 0.007513 194 | 193; 0.007429 195 | 194; 0.007420 196 | 195; 0.007765 197 | 196; 0.007433 198 | 197; 0.007414 199 | 198; 0.007780 200 | 199; 0.007609 201 | 200; 0.007226 202 | ConfusionMatrix: 203 | [[ 4864 1 12 6 8 31 27 9 22 2] 97.631% 204 | [ 1 5489 19 17 5 21 5 17 50 8] 97.461% 205 | [ 27 46 4543 58 52 31 66 61 87 20] 91.024% 206 | [ 21 19 103 4514 5 247 16 46 62 49] 88.823% 207 | [ 10 18 16 4 4573 9 50 19 27 138] 94.017% 208 | [ 41 16 34 79 39 4146 77 14 51 29] 91.604% 209 | [ 22 7 20 0 21 60 4793 3 12 3] 97.005% 210 | [ 7 15 
34 14 26 10 2 4951 11 105] 95.671% 211 | [ 31 88 47 117 19 194 41 20 4274 54] 87.492% 212 | [ 21 23 10 49 106 43 2 167 22 4479]] 91.000% 213 | + average row correct: 93.172895312309% 214 | + average rowUcol correct (VOC measure): 87.295958995819% 215 | + global correct: 93.252% 216 | Avg NLL:0.0075670785884559 217 | -------------------------------------------------------------------------------- /03-Training-simple-neural-networks/logreg.log: -------------------------------------------------------------------------------- 1 | x={1, 1}, y=1, f(x)=0.47, L=0.75 2 | x={1, 0}, y=1, f(x)=0.49, L=0.71 3 | x={1, 0}, y=1, f(x)=0.52, L=0.66 4 | x={1, 1}, y=1, f(x)=0.56, L=0.58 5 | x={0, 0}, y=0, f(x)=0.41, L=0.52 6 | x={1, 0}, y=1, f(x)=0.55, L=0.59 7 | x={0, 1}, y=1, f(x)=0.44, L=0.83 8 | x={1, 0}, y=1, f(x)=0.59, L=0.53 9 | x={0, 1}, y=1, f(x)=0.48, L=0.74 10 | x={1, 0}, y=1, f(x)=0.62, L=0.47 11 | x={0, 0}, y=0, f(x)=0.45, L=0.60 12 | x={0, 0}, y=0, f(x)=0.44, L=0.58 13 | x={1, 1}, y=1, f(x)=0.67, L=0.40 14 | x={1, 0}, y=1, f(x)=0.63, L=0.46 15 | x={0, 0}, y=0, f(x)=0.45, L=0.60 16 | x={1, 1}, y=1, f(x)=0.70, L=0.36 17 | x={0, 1}, y=1, f(x)=0.52, L=0.66 18 | x={0, 1}, y=1, f(x)=0.54, L=0.61 19 | x={0, 1}, y=1, f(x)=0.56, L=0.57 20 | x={1, 0}, y=1, f(x)=0.68, L=0.38 21 | x={1, 0}, y=1, f(x)=0.70, L=0.36 22 | x={1, 1}, y=1, f(x)=0.79, L=0.23 23 | x={0, 1}, y=1, f(x)=0.61, L=0.49 24 | x={0, 0}, y=0, f(x)=0.51, L=0.71 25 | x={1, 0}, y=1, f(x)=0.72, L=0.33 26 | x={0, 0}, y=0, f(x)=0.50, L=0.70 27 | x={1, 0}, y=1, f(x)=0.72, L=0.33 28 | x={0, 0}, y=0, f(x)=0.50, L=0.69 29 | x={1, 1}, y=1, f(x)=0.81, L=0.21 30 | x={1, 0}, y=1, f(x)=0.73, L=0.32 31 | x={0, 0}, y=0, f(x)=0.50, L=0.69 32 | x={1, 1}, y=1, f(x)=0.82, L=0.20 33 | x={0, 0}, y=0, f(x)=0.49, L=0.67 34 | x={1, 1}, y=1, f(x)=0.82, L=0.20 35 | x={1, 0}, y=1, f(x)=0.73, L=0.31 36 | x={1, 1}, y=1, f(x)=0.83, L=0.18 37 | x={0, 0}, y=0, f(x)=0.49, L=0.68 38 | x={0, 1}, y=1, f(x)=0.62, L=0.48 39 | x={1, 1}, y=1, f(x)=0.84, L=0.17 40 | x={0, 1}, y=1, f(x)=0.64, L=0.44 41 | x={0, 0}, y=0, f(x)=0.50, L=0.70 42 | x={0, 1}, y=1, f(x)=0.65, L=0.43 43 | x={1, 1}, y=1, f(x)=0.86, L=0.15 44 | x={0, 0}, y=0, f(x)=0.50, L=0.70 45 | x={1, 0}, y=1, f(x)=0.75, L=0.28 46 | x={1, 0}, y=1, f(x)=0.76, L=0.27 47 | x={1, 0}, y=1, f(x)=0.77, L=0.26 48 | x={0, 0}, y=0, f(x)=0.51, L=0.71 49 | x={0, 0}, y=0, f(x)=0.50, L=0.68 50 | x={0, 0}, y=0, f(x)=0.48, L=0.66 51 | x={0, 1}, y=1, f(x)=0.64, L=0.44 52 | x={1, 0}, y=1, f(x)=0.76, L=0.28 53 | x={1, 0}, y=1, f(x)=0.77, L=0.26 54 | x={1, 1}, y=1, f(x)=0.88, L=0.13 55 | x={0, 0}, y=0, f(x)=0.49, L=0.68 56 | x={0, 1}, y=1, f(x)=0.66, L=0.41 57 | x={1, 1}, y=1, f(x)=0.88, L=0.12 58 | x={1, 1}, y=1, f(x)=0.89, L=0.12 59 | x={1, 0}, y=1, f(x)=0.78, L=0.24 60 | x={1, 0}, y=1, f(x)=0.79, L=0.23 61 | x={1, 1}, y=1, f(x)=0.90, L=0.11 62 | x={0, 0}, y=0, f(x)=0.51, L=0.71 63 | x={0, 0}, y=0, f(x)=0.50, L=0.69 64 | x={1, 1}, y=1, f(x)=0.89, L=0.11 65 | x={0, 1}, y=1, f(x)=0.68, L=0.38 66 | x={1, 1}, y=1, f(x)=0.90, L=0.10 67 | x={1, 1}, y=1, f(x)=0.90, L=0.10 68 | x={0, 1}, y=1, f(x)=0.71, L=0.35 69 | x={0, 1}, y=1, f(x)=0.72, L=0.33 70 | x={0, 1}, y=1, f(x)=0.73, L=0.32 71 | x={1, 1}, y=1, f(x)=0.92, L=0.08 72 | x={1, 1}, y=1, f(x)=0.92, L=0.08 73 | x={1, 0}, y=1, f(x)=0.82, L=0.20 74 | x={1, 1}, y=1, f(x)=0.93, L=0.08 75 | x={0, 0}, y=0, f(x)=0.53, L=0.76 76 | x={0, 1}, y=1, f(x)=0.74, L=0.30 77 | x={1, 0}, y=1, f(x)=0.82, L=0.20 78 | x={1, 1}, y=1, f(x)=0.93, L=0.07 79 | x={1, 0}, y=1, f(x)=0.83, L=0.19 80 | x={0, 0}, y=0, f(x)=0.53, 
L=0.77 81 | x={0, 0}, y=0, f(x)=0.52, L=0.74 82 | x={0, 0}, y=0, f(x)=0.51, L=0.71 83 | x={0, 0}, y=0, f(x)=0.50, L=0.68 84 | x={0, 0}, y=0, f(x)=0.48, L=0.66 85 | x={0, 1}, y=1, f(x)=0.71, L=0.34 86 | x={1, 0}, y=1, f(x)=0.80, L=0.22 87 | x={0, 1}, y=1, f(x)=0.73, L=0.32 88 | x={1, 1}, y=1, f(x)=0.93, L=0.08 89 | x={0, 1}, y=1, f(x)=0.74, L=0.30 90 | x={0, 0}, y=0, f(x)=0.50, L=0.69 91 | x={0, 0}, y=0, f(x)=0.49, L=0.67 92 | x={1, 1}, y=1, f(x)=0.92, L=0.08 93 | x={0, 0}, y=0, f(x)=0.48, L=0.65 94 | x={1, 1}, y=1, f(x)=0.92, L=0.08 95 | x={1, 0}, y=1, f(x)=0.80, L=0.22 96 | x={0, 0}, y=0, f(x)=0.47, L=0.64 97 | x={1, 1}, y=1, f(x)=0.92, L=0.08 98 | x={1, 1}, y=1, f(x)=0.93, L=0.08 99 | x={0, 1}, y=1, f(x)=0.73, L=0.32 100 | x={1, 1}, y=1, f(x)=0.93, L=0.07 101 | x={0, 0}, y=0, f(x)=0.47, L=0.64 102 | x={1, 0}, y=1, f(x)=0.80, L=0.22 103 | x={1, 0}, y=1, f(x)=0.81, L=0.21 104 | x={1, 1}, y=1, f(x)=0.93, L=0.07 105 | x={0, 0}, y=0, f(x)=0.47, L=0.64 106 | x={1, 0}, y=1, f(x)=0.81, L=0.21 107 | x={1, 0}, y=1, f(x)=0.81, L=0.21 108 | x={1, 0}, y=1, f(x)=0.82, L=0.20 109 | x={0, 0}, y=0, f(x)=0.47, L=0.64 110 | x={1, 0}, y=1, f(x)=0.82, L=0.20 111 | x={0, 0}, y=0, f(x)=0.47, L=0.63 112 | x={0, 0}, y=0, f(x)=0.45, L=0.61 113 | x={1, 1}, y=1, f(x)=0.93, L=0.07 114 | x={1, 0}, y=1, f(x)=0.81, L=0.21 115 | x={1, 1}, y=1, f(x)=0.94, L=0.07 116 | x={1, 0}, y=1, f(x)=0.82, L=0.20 117 | x={1, 1}, y=1, f(x)=0.94, L=0.06 118 | x={1, 1}, y=1, f(x)=0.94, L=0.06 119 | x={0, 1}, y=1, f(x)=0.74, L=0.30 120 | x={0, 1}, y=1, f(x)=0.75, L=0.29 121 | x={1, 1}, y=1, f(x)=0.95, L=0.05 122 | x={1, 1}, y=1, f(x)=0.95, L=0.05 123 | x={1, 1}, y=1, f(x)=0.95, L=0.05 124 | x={1, 1}, y=1, f(x)=0.95, L=0.05 125 | x={1, 0}, y=1, f(x)=0.84, L=0.17 126 | x={1, 0}, y=1, f(x)=0.85, L=0.17 127 | x={1, 0}, y=1, f(x)=0.85, L=0.16 128 | x={1, 1}, y=1, f(x)=0.95, L=0.05 129 | x={0, 1}, y=1, f(x)=0.78, L=0.25 130 | x={0, 0}, y=0, f(x)=0.49, L=0.68 131 | x={0, 1}, y=1, f(x)=0.77, L=0.26 132 | x={0, 0}, y=0, f(x)=0.49, L=0.67 133 | x={0, 0}, y=0, f(x)=0.48, L=0.65 134 | x={1, 0}, y=1, f(x)=0.84, L=0.17 135 | x={1, 0}, y=1, f(x)=0.85, L=0.17 136 | x={1, 1}, y=1, f(x)=0.96, L=0.05 137 | x={1, 1}, y=1, f(x)=0.96, L=0.05 138 | x={0, 1}, y=1, f(x)=0.77, L=0.26 139 | x={0, 0}, y=0, f(x)=0.48, L=0.65 140 | x={0, 1}, y=1, f(x)=0.77, L=0.26 141 | x={0, 1}, y=1, f(x)=0.78, L=0.25 142 | x={1, 0}, y=1, f(x)=0.85, L=0.16 143 | x={1, 0}, y=1, f(x)=0.86, L=0.15 144 | x={1, 1}, y=1, f(x)=0.96, L=0.04 145 | x={1, 0}, y=1, f(x)=0.86, L=0.15 146 | x={0, 0}, y=0, f(x)=0.49, L=0.67 147 | x={0, 1}, y=1, f(x)=0.79, L=0.24 148 | x={0, 1}, y=1, f(x)=0.80, L=0.23 149 | x={0, 0}, y=0, f(x)=0.49, L=0.67 150 | x={0, 0}, y=0, f(x)=0.48, L=0.65 151 | x={1, 1}, y=1, f(x)=0.96, L=0.04 152 | x={1, 1}, y=1, f(x)=0.96, L=0.04 153 | x={1, 1}, y=1, f(x)=0.96, L=0.04 154 | x={0, 0}, y=0, f(x)=0.47, L=0.63 155 | x={0, 1}, y=1, f(x)=0.78, L=0.25 156 | x={0, 1}, y=1, f(x)=0.79, L=0.24 157 | x={0, 1}, y=1, f(x)=0.80, L=0.23 158 | x={1, 1}, y=1, f(x)=0.97, L=0.04 159 | x={1, 0}, y=1, f(x)=0.86, L=0.15 160 | x={0, 1}, y=1, f(x)=0.81, L=0.21 161 | x={1, 1}, y=1, f(x)=0.97, L=0.03 162 | x={1, 1}, y=1, f(x)=0.97, L=0.03 163 | x={0, 1}, y=1, f(x)=0.81, L=0.21 164 | x={1, 1}, y=1, f(x)=0.97, L=0.03 165 | x={1, 1}, y=1, f(x)=0.97, L=0.03 166 | x={1, 0}, y=1, f(x)=0.87, L=0.14 167 | x={1, 1}, y=1, f(x)=0.97, L=0.03 168 | x={0, 0}, y=0, f(x)=0.49, L=0.68 169 | x={1, 0}, y=1, f(x)=0.87, L=0.14 170 | x={1, 1}, y=1, f(x)=0.97, L=0.03 171 | x={0, 1}, y=1, f(x)=0.82, L=0.20 172 | x={1, 0}, 
y=1, f(x)=0.87, L=0.14 173 | x={1, 0}, y=1, f(x)=0.88, L=0.13 174 | x={0, 1}, y=1, f(x)=0.83, L=0.19 175 | x={0, 1}, y=1, f(x)=0.83, L=0.18 176 | x={1, 1}, y=1, f(x)=0.97, L=0.03 177 | x={1, 1}, y=1, f(x)=0.97, L=0.03 178 | x={0, 0}, y=0, f(x)=0.50, L=0.70 179 | x={0, 1}, y=1, f(x)=0.83, L=0.18 180 | x={0, 0}, y=0, f(x)=0.50, L=0.69 181 | x={0, 1}, y=1, f(x)=0.83, L=0.19 182 | x={1, 0}, y=1, f(x)=0.88, L=0.13 183 | x={1, 1}, y=1, f(x)=0.97, L=0.03 184 | x={0, 1}, y=1, f(x)=0.84, L=0.18 185 | x={1, 1}, y=1, f(x)=0.98, L=0.02 186 | x={0, 0}, y=0, f(x)=0.50, L=0.69 187 | x={1, 0}, y=1, f(x)=0.88, L=0.13 188 | x={0, 0}, y=0, f(x)=0.49, L=0.67 189 | x={0, 1}, y=1, f(x)=0.83, L=0.19 190 | x={1, 0}, y=1, f(x)=0.88, L=0.13 191 | x={1, 1}, y=1, f(x)=0.98, L=0.02 192 | x={1, 1}, y=1, f(x)=0.98, L=0.02 193 | x={1, 0}, y=1, f(x)=0.88, L=0.13 194 | x={1, 0}, y=1, f(x)=0.88, L=0.13 195 | x={1, 1}, y=1, f(x)=0.98, L=0.02 196 | x={1, 1}, y=1, f(x)=0.98, L=0.02 197 | x={1, 1}, y=1, f(x)=0.98, L=0.02 198 | x={0, 0}, y=0, f(x)=0.49, L=0.68 199 | x={0, 1}, y=1, f(x)=0.84, L=0.18 200 | x={0, 1}, y=1, f(x)=0.84, L=0.17 201 | x={0, 1}, y=1, f(x)=0.85, L=0.17 202 | x={0, 1}, y=1, f(x)=0.85, L=0.16 203 | x={0, 0}, y=0, f(x)=0.49, L=0.68 204 | x={0, 0}, y=0, f(x)=0.48, L=0.66 205 | x={1, 1}, y=1, f(x)=0.98, L=0.02 206 | x={0, 0}, y=0, f(x)=0.47, L=0.64 207 | x={0, 0}, y=0, f(x)=0.46, L=0.61 208 | x={0, 1}, y=1, f(x)=0.83, L=0.19 209 | x={0, 0}, y=0, f(x)=0.45, L=0.60 210 | x={1, 0}, y=1, f(x)=0.86, L=0.15 211 | x={0, 1}, y=1, f(x)=0.83, L=0.19 212 | x={0, 0}, y=0, f(x)=0.45, L=0.59 213 | x={1, 0}, y=1, f(x)=0.86, L=0.15 214 | x={0, 0}, y=0, f(x)=0.44, L=0.58 215 | x={1, 1}, y=1, f(x)=0.97, L=0.03 216 | x={0, 0}, y=0, f(x)=0.43, L=0.56 217 | x={1, 0}, y=1, f(x)=0.86, L=0.15 218 | x={1, 0}, y=1, f(x)=0.86, L=0.15 219 | x={1, 1}, y=1, f(x)=0.98, L=0.03 220 | x={0, 0}, y=0, f(x)=0.43, L=0.56 221 | x={1, 1}, y=1, f(x)=0.97, L=0.03 222 | x={1, 1}, y=1, f(x)=0.97, L=0.03 223 | x={1, 0}, y=1, f(x)=0.86, L=0.15 224 | x={0, 1}, y=1, f(x)=0.82, L=0.20 225 | x={0, 0}, y=0, f(x)=0.43, L=0.56 226 | x={0, 0}, y=0, f(x)=0.42, L=0.54 227 | x={0, 0}, y=0, f(x)=0.41, L=0.52 228 | x={1, 1}, y=1, f(x)=0.97, L=0.03 229 | x={0, 0}, y=0, f(x)=0.40, L=0.50 230 | x={0, 1}, y=1, f(x)=0.80, L=0.22 231 | x={0, 0}, y=0, f(x)=0.39, L=0.50 232 | x={1, 0}, y=1, f(x)=0.84, L=0.17 233 | x={0, 1}, y=1, f(x)=0.80, L=0.22 234 | x={1, 0}, y=1, f(x)=0.85, L=0.16 235 | x={1, 0}, y=1, f(x)=0.85, L=0.16 236 | x={0, 1}, y=1, f(x)=0.81, L=0.21 237 | x={1, 0}, y=1, f(x)=0.86, L=0.15 238 | x={1, 0}, y=1, f(x)=0.86, L=0.15 239 | x={0, 0}, y=0, f(x)=0.41, L=0.53 240 | x={1, 0}, y=1, f(x)=0.86, L=0.15 241 | x={0, 0}, y=0, f(x)=0.40, L=0.51 242 | x={1, 0}, y=1, f(x)=0.86, L=0.15 243 | x={1, 1}, y=1, f(x)=0.98, L=0.02 244 | x={0, 0}, y=0, f(x)=0.40, L=0.51 245 | x={1, 0}, y=1, f(x)=0.86, L=0.15 246 | x={0, 0}, y=0, f(x)=0.39, L=0.50 247 | x={1, 0}, y=1, f(x)=0.86, L=0.15 248 | x={1, 1}, y=1, f(x)=0.98, L=0.02 249 | x={0, 1}, y=1, f(x)=0.81, L=0.21 250 | x={0, 0}, y=0, f(x)=0.39, L=0.49 251 | x={0, 1}, y=1, f(x)=0.81, L=0.21 252 | x={1, 0}, y=1, f(x)=0.86, L=0.15 253 | x={1, 0}, y=1, f(x)=0.87, L=0.14 254 | x={0, 0}, y=0, f(x)=0.39, L=0.50 255 | x={0, 1}, y=1, f(x)=0.81, L=0.21 256 | x={1, 0}, y=1, f(x)=0.87, L=0.14 257 | x={0, 0}, y=0, f(x)=0.39, L=0.49 258 | x={0, 1}, y=1, f(x)=0.82, L=0.20 259 | x={0, 1}, y=1, f(x)=0.82, L=0.20 260 | x={0, 0}, y=0, f(x)=0.39, L=0.49 261 | x={0, 1}, y=1, f(x)=0.82, L=0.20 262 | x={0, 1}, y=1, f(x)=0.83, L=0.19 263 | x={1, 0}, y=1, 
f(x)=0.87, L=0.14 264 | x={1, 0}, y=1, f(x)=0.87, L=0.14 265 | x={1, 0}, y=1, f(x)=0.87, L=0.13 266 | x={0, 1}, y=1, f(x)=0.84, L=0.18 267 | x={0, 1}, y=1, f(x)=0.84, L=0.17 268 | x={0, 1}, y=1, f(x)=0.84, L=0.17 269 | x={0, 0}, y=0, f(x)=0.41, L=0.53 270 | x={0, 1}, y=1, f(x)=0.84, L=0.17 271 | x={1, 1}, y=1, f(x)=0.98, L=0.02 272 | x={0, 0}, y=0, f(x)=0.40, L=0.52 273 | x={0, 0}, y=0, f(x)=0.39, L=0.50 274 | x={1, 0}, y=1, f(x)=0.87, L=0.14 275 | x={1, 0}, y=1, f(x)=0.87, L=0.13 276 | x={0, 0}, y=0, f(x)=0.39, L=0.50 277 | x={1, 1}, y=1, f(x)=0.98, L=0.02 278 | x={1, 1}, y=1, f(x)=0.98, L=0.02 279 | x={0, 0}, y=0, f(x)=0.38, L=0.48 280 | x={0, 1}, y=1, f(x)=0.83, L=0.19 281 | x={0, 1}, y=1, f(x)=0.84, L=0.18 282 | x={0, 1}, y=1, f(x)=0.84, L=0.17 283 | x={1, 0}, y=1, f(x)=0.87, L=0.13 284 | x={1, 1}, y=1, f(x)=0.98, L=0.02 285 | x={0, 0}, y=0, f(x)=0.39, L=0.49 286 | x={1, 1}, y=1, f(x)=0.98, L=0.02 287 | x={1, 0}, y=1, f(x)=0.87, L=0.14 288 | x={0, 1}, y=1, f(x)=0.84, L=0.17 289 | x={0, 0}, y=0, f(x)=0.39, L=0.49 290 | x={0, 1}, y=1, f(x)=0.84, L=0.17 291 | x={1, 1}, y=1, f(x)=0.98, L=0.02 292 | x={1, 0}, y=1, f(x)=0.88, L=0.13 293 | x={0, 0}, y=0, f(x)=0.38, L=0.48 294 | x={0, 1}, y=1, f(x)=0.84, L=0.17 295 | x={0, 1}, y=1, f(x)=0.85, L=0.17 296 | x={0, 1}, y=1, f(x)=0.85, L=0.16 297 | x={0, 0}, y=0, f(x)=0.39, L=0.49 298 | x={0, 0}, y=0, f(x)=0.38, L=0.47 299 | x={0, 1}, y=1, f(x)=0.85, L=0.17 300 | x={0, 1}, y=1, f(x)=0.85, L=0.16 301 | x={1, 1}, y=1, f(x)=0.99, L=0.01 302 | x={0, 1}, y=1, f(x)=0.85, L=0.16 303 | x={0, 1}, y=1, f(x)=0.86, L=0.15 304 | x={1, 1}, y=1, f(x)=0.99, L=0.01 305 | x={1, 1}, y=1, f(x)=0.99, L=0.01 306 | x={1, 0}, y=1, f(x)=0.88, L=0.13 307 | x={0, 0}, y=0, f(x)=0.39, L=0.49 308 | x={1, 1}, y=1, f(x)=0.99, L=0.01 309 | x={1, 0}, y=1, f(x)=0.88, L=0.13 310 | x={1, 0}, y=1, f(x)=0.88, L=0.13 311 | x={0, 1}, y=1, f(x)=0.86, L=0.15 312 | x={0, 0}, y=0, f(x)=0.39, L=0.49 313 | x={1, 1}, y=1, f(x)=0.99, L=0.01 314 | x={1, 1}, y=1, f(x)=0.99, L=0.01 315 | x={1, 0}, y=1, f(x)=0.88, L=0.13 316 | x={1, 1}, y=1, f(x)=0.99, L=0.01 317 | x={0, 1}, y=1, f(x)=0.86, L=0.15 318 | x={0, 0}, y=0, f(x)=0.38, L=0.48 319 | x={0, 1}, y=1, f(x)=0.86, L=0.15 320 | x={1, 0}, y=1, f(x)=0.88, L=0.13 321 | x={0, 0}, y=0, f(x)=0.38, L=0.48 322 | x={1, 0}, y=1, f(x)=0.88, L=0.13 323 | x={1, 1}, y=1, f(x)=0.99, L=0.01 324 | x={1, 0}, y=1, f(x)=0.88, L=0.12 325 | x={0, 0}, y=0, f(x)=0.38, L=0.47 326 | x={0, 1}, y=1, f(x)=0.86, L=0.15 327 | x={0, 1}, y=1, f(x)=0.86, L=0.15 328 | x={0, 0}, y=0, f(x)=0.38, L=0.47 329 | x={1, 0}, y=1, f(x)=0.88, L=0.13 330 | x={1, 1}, y=1, f(x)=0.99, L=0.01 331 | x={1, 1}, y=1, f(x)=0.99, L=0.01 332 | x={1, 0}, y=1, f(x)=0.88, L=0.12 333 | x={1, 0}, y=1, f(x)=0.89, L=0.12 334 | x={1, 0}, y=1, f(x)=0.89, L=0.12 335 | x={1, 1}, y=1, f(x)=0.99, L=0.01 336 | x={1, 0}, y=1, f(x)=0.89, L=0.12 337 | x={1, 0}, y=1, f(x)=0.89, L=0.11 338 | x={0, 1}, y=1, f(x)=0.87, L=0.14 339 | x={0, 0}, y=0, f(x)=0.39, L=0.49 340 | x={0, 1}, y=1, f(x)=0.87, L=0.14 341 | x={0, 0}, y=0, f(x)=0.38, L=0.48 342 | x={1, 1}, y=1, f(x)=0.99, L=0.01 343 | x={1, 1}, y=1, f(x)=0.99, L=0.01 344 | x={0, 1}, y=1, f(x)=0.87, L=0.14 345 | x={1, 0}, y=1, f(x)=0.89, L=0.11 346 | x={1, 1}, y=1, f(x)=0.99, L=0.01 347 | x={0, 0}, y=0, f(x)=0.38, L=0.47 348 | x={0, 0}, y=0, f(x)=0.37, L=0.46 349 | x={1, 1}, y=1, f(x)=0.99, L=0.01 350 | x={0, 1}, y=1, f(x)=0.86, L=0.15 351 | x={1, 0}, y=1, f(x)=0.89, L=0.12 352 | x={1, 1}, y=1, f(x)=0.99, L=0.01 353 | x={1, 0}, y=1, f(x)=0.89, L=0.12 354 | x={1, 1}, y=1, 
f(x)=0.99, L=0.01 355 | x={1, 0}, y=1, f(x)=0.89, L=0.11 356 | x={0, 1}, y=1, f(x)=0.87, L=0.14 357 | x={1, 0}, y=1, f(x)=0.90, L=0.11 358 | x={1, 1}, y=1, f(x)=0.99, L=0.01 359 | x={1, 0}, y=1, f(x)=0.90, L=0.11 360 | x={1, 0}, y=1, f(x)=0.90, L=0.11 361 | x={0, 1}, y=1, f(x)=0.88, L=0.13 362 | x={1, 1}, y=1, f(x)=0.99, L=0.01 363 | x={1, 0}, y=1, f(x)=0.90, L=0.10 364 | x={1, 0}, y=1, f(x)=0.90, L=0.10 365 | x={0, 0}, y=0, f(x)=0.39, L=0.49 366 | x={1, 1}, y=1, f(x)=0.99, L=0.01 367 | x={0, 1}, y=1, f(x)=0.88, L=0.13 368 | x={1, 1}, y=1, f(x)=0.99, L=0.01 369 | x={1, 1}, y=1, f(x)=0.99, L=0.01 370 | x={1, 0}, y=1, f(x)=0.90, L=0.10 371 | x={1, 1}, y=1, f(x)=0.99, L=0.01 372 | x={1, 0}, y=1, f(x)=0.91, L=0.10 373 | x={1, 1}, y=1, f(x)=0.99, L=0.01 374 | x={1, 0}, y=1, f(x)=0.91, L=0.10 375 | x={0, 1}, y=1, f(x)=0.89, L=0.12 376 | x={1, 1}, y=1, f(x)=0.99, L=0.01 377 | x={0, 0}, y=0, f(x)=0.39, L=0.50 378 | x={0, 0}, y=0, f(x)=0.38, L=0.49 379 | x={1, 1}, y=1, f(x)=0.99, L=0.01 380 | x={1, 1}, y=1, f(x)=0.99, L=0.01 381 | x={1, 0}, y=1, f(x)=0.90, L=0.10 382 | x={0, 0}, y=0, f(x)=0.38, L=0.48 383 | x={0, 0}, y=0, f(x)=0.37, L=0.46 384 | x={0, 1}, y=1, f(x)=0.87, L=0.14 385 | x={1, 1}, y=1, f(x)=0.99, L=0.01 386 | x={1, 1}, y=1, f(x)=0.99, L=0.01 387 | x={1, 0}, y=1, f(x)=0.90, L=0.10 388 | x={1, 1}, y=1, f(x)=0.99, L=0.01 389 | x={1, 0}, y=1, f(x)=0.90, L=0.10 390 | x={1, 0}, y=1, f(x)=0.90, L=0.10 391 | x={1, 0}, y=1, f(x)=0.91, L=0.10 392 | x={1, 1}, y=1, f(x)=0.99, L=0.01 393 | x={0, 1}, y=1, f(x)=0.88, L=0.13 394 | x={0, 1}, y=1, f(x)=0.88, L=0.12 395 | x={0, 0}, y=0, f(x)=0.38, L=0.48 396 | x={0, 0}, y=0, f(x)=0.37, L=0.46 397 | x={0, 0}, y=0, f(x)=0.36, L=0.45 398 | x={0, 0}, y=0, f(x)=0.35, L=0.44 399 | x={1, 0}, y=1, f(x)=0.90, L=0.11 400 | x={0, 0}, y=0, f(x)=0.35, L=0.43 401 | x={0, 0}, y=0, f(x)=0.34, L=0.42 402 | x={1, 1}, y=1, f(x)=0.99, L=0.01 403 | x={1, 0}, y=1, f(x)=0.89, L=0.11 404 | x={1, 1}, y=1, f(x)=0.99, L=0.01 405 | x={0, 1}, y=1, f(x)=0.86, L=0.15 406 | x={0, 0}, y=0, f(x)=0.34, L=0.41 407 | x={1, 0}, y=1, f(x)=0.89, L=0.11 408 | x={0, 1}, y=1, f(x)=0.87, L=0.14 409 | x={0, 0}, y=0, f(x)=0.34, L=0.41 410 | x={1, 1}, y=1, f(x)=0.99, L=0.01 411 | x={1, 1}, y=1, f(x)=0.99, L=0.01 412 | x={1, 1}, y=1, f(x)=0.99, L=0.01 413 | x={1, 0}, y=1, f(x)=0.89, L=0.11 414 | x={1, 1}, y=1, f(x)=0.99, L=0.01 415 | x={0, 0}, y=0, f(x)=0.33, L=0.40 416 | x={1, 0}, y=1, f(x)=0.89, L=0.11 417 | x={1, 1}, y=1, f(x)=0.99, L=0.01 418 | x={1, 1}, y=1, f(x)=0.99, L=0.01 419 | x={0, 0}, y=0, f(x)=0.33, L=0.40 420 | x={1, 1}, y=1, f(x)=0.99, L=0.01 421 | x={1, 0}, y=1, f(x)=0.89, L=0.11 422 | x={0, 1}, y=1, f(x)=0.86, L=0.15 423 | x={0, 1}, y=1, f(x)=0.87, L=0.14 424 | x={0, 0}, y=0, f(x)=0.33, L=0.40 425 | x={1, 0}, y=1, f(x)=0.89, L=0.11 426 | x={1, 1}, y=1, f(x)=0.99, L=0.01 427 | x={0, 1}, y=1, f(x)=0.87, L=0.14 428 | x={1, 1}, y=1, f(x)=0.99, L=0.01 429 | x={0, 1}, y=1, f(x)=0.87, L=0.14 430 | x={0, 1}, y=1, f(x)=0.87, L=0.14 431 | x={0, 1}, y=1, f(x)=0.88, L=0.13 432 | x={0, 1}, y=1, f(x)=0.88, L=0.13 433 | x={0, 0}, y=0, f(x)=0.34, L=0.41 434 | x={0, 1}, y=1, f(x)=0.88, L=0.13 435 | x={1, 0}, y=1, f(x)=0.90, L=0.11 436 | x={0, 1}, y=1, f(x)=0.88, L=0.13 437 | x={1, 1}, y=1, f(x)=0.99, L=0.01 438 | x={0, 1}, y=1, f(x)=0.88, L=0.12 439 | x={0, 1}, y=1, f(x)=0.89, L=0.12 440 | x={0, 1}, y=1, f(x)=0.89, L=0.12 441 | x={1, 1}, y=1, f(x)=0.99, L=0.01 442 | x={1, 0}, y=1, f(x)=0.91, L=0.10 443 | x={1, 1}, y=1, f(x)=0.99, L=0.01 444 | x={1, 0}, y=1, f(x)=0.91, L=0.10 445 | x={0, 1}, y=1, 
f(x)=0.89, L=0.11 446 | x={0, 1}, y=1, f(x)=0.89, L=0.11 447 | x={1, 1}, y=1, f(x)=0.99, L=0.01 448 | x={0, 1}, y=1, f(x)=0.90, L=0.11 449 | x={0, 1}, y=1, f(x)=0.90, L=0.11 450 | x={0, 1}, y=1, f(x)=0.90, L=0.11 451 | x={1, 0}, y=1, f(x)=0.91, L=0.09 452 | x={1, 0}, y=1, f(x)=0.91, L=0.09 453 | x={1, 0}, y=1, f(x)=0.92, L=0.09 454 | x={1, 0}, y=1, f(x)=0.92, L=0.09 455 | x={0, 0}, y=0, f(x)=0.37, L=0.46 456 | x={1, 1}, y=1, f(x)=0.99, L=0.01 457 | x={1, 0}, y=1, f(x)=0.92, L=0.09 458 | x={1, 1}, y=1, f(x)=0.99, L=0.01 459 | x={1, 0}, y=1, f(x)=0.92, L=0.09 460 | x={1, 0}, y=1, f(x)=0.92, L=0.08 461 | x={1, 1}, y=1, f(x)=0.99, L=0.01 462 | x={1, 1}, y=1, f(x)=0.99, L=0.01 463 | x={0, 0}, y=0, f(x)=0.37, L=0.46 464 | x={0, 0}, y=0, f(x)=0.36, L=0.45 465 | x={0, 1}, y=1, f(x)=0.90, L=0.11 466 | x={0, 1}, y=1, f(x)=0.90, L=0.11 467 | x={1, 1}, y=1, f(x)=0.99, L=0.01 468 | x={1, 1}, y=1, f(x)=0.99, L=0.01 469 | x={0, 0}, y=0, f(x)=0.36, L=0.44 470 | x={0, 0}, y=0, f(x)=0.35, L=0.43 471 | x={0, 1}, y=1, f(x)=0.90, L=0.11 472 | x={1, 0}, y=1, f(x)=0.91, L=0.09 473 | x={0, 1}, y=1, f(x)=0.90, L=0.11 474 | x={1, 1}, y=1, f(x)=0.99, L=0.01 475 | x={0, 1}, y=1, f(x)=0.90, L=0.11 476 | x={1, 1}, y=1, f(x)=0.99, L=0.01 477 | x={0, 1}, y=1, f(x)=0.90, L=0.10 478 | x={1, 0}, y=1, f(x)=0.92, L=0.09 479 | x={0, 0}, y=0, f(x)=0.35, L=0.44 480 | x={0, 0}, y=0, f(x)=0.35, L=0.42 481 | x={1, 1}, y=1, f(x)=0.99, L=0.01 482 | x={1, 0}, y=1, f(x)=0.91, L=0.09 483 | x={0, 0}, y=0, f(x)=0.34, L=0.42 484 | x={0, 0}, y=0, f(x)=0.33, L=0.40 485 | x={1, 0}, y=1, f(x)=0.91, L=0.10 486 | x={1, 0}, y=1, f(x)=0.91, L=0.10 487 | x={1, 0}, y=1, f(x)=0.91, L=0.09 488 | x={1, 1}, y=1, f(x)=0.99, L=0.01 489 | x={0, 1}, y=1, f(x)=0.90, L=0.11 490 | x={0, 0}, y=0, f(x)=0.33, L=0.41 491 | x={1, 0}, y=1, f(x)=0.91, L=0.09 492 | x={0, 1}, y=1, f(x)=0.89, L=0.11 493 | x={1, 0}, y=1, f(x)=0.91, L=0.09 494 | x={0, 1}, y=1, f(x)=0.90, L=0.11 495 | x={1, 0}, y=1, f(x)=0.91, L=0.09 496 | x={0, 0}, y=0, f(x)=0.34, L=0.41 497 | x={0, 1}, y=1, f(x)=0.90, L=0.11 498 | x={1, 0}, y=1, f(x)=0.91, L=0.09 499 | x={0, 0}, y=0, f(x)=0.33, L=0.41 500 | x={1, 0}, y=1, f(x)=0.91, L=0.09 501 | x={0, 0}, y=0, f(x)=0.33, L=0.40 502 | x={1, 1}, y=1, f(x)=0.99, L=0.01 503 | x={1, 0}, y=1, f(x)=0.91, L=0.09 504 | x={0, 1}, y=1, f(x)=0.90, L=0.11 505 | x={1, 1}, y=1, f(x)=0.99, L=0.01 506 | x={0, 1}, y=1, f(x)=0.90, L=0.11 507 | x={1, 0}, y=1, f(x)=0.91, L=0.09 508 | x={0, 0}, y=0, f(x)=0.33, L=0.40 509 | x={0, 1}, y=1, f(x)=0.90, L=0.11 510 | x={0, 0}, y=0, f(x)=0.32, L=0.39 511 | x={1, 0}, y=1, f(x)=0.91, L=0.09 512 | x={1, 0}, y=1, f(x)=0.91, L=0.09 513 | x={0, 0}, y=0, f(x)=0.32, L=0.39 514 | x={0, 1}, y=1, f(x)=0.89, L=0.11 515 | x={1, 1}, y=1, f(x)=0.99, L=0.01 516 | x={1, 1}, y=1, f(x)=0.99, L=0.01 517 | x={0, 0}, y=0, f(x)=0.32, L=0.38 518 | x={0, 0}, y=0, f(x)=0.31, L=0.37 519 | x={1, 0}, y=1, f(x)=0.91, L=0.10 520 | x={0, 0}, y=0, f(x)=0.30, L=0.36 521 | x={0, 1}, y=1, f(x)=0.89, L=0.12 522 | x={1, 1}, y=1, f(x)=0.99, L=0.01 523 | x={1, 0}, y=1, f(x)=0.91, L=0.10 524 | x={1, 1}, y=1, f(x)=0.99, L=0.01 525 | x={0, 1}, y=1, f(x)=0.89, L=0.11 526 | x={1, 0}, y=1, f(x)=0.91, L=0.09 527 | x={1, 1}, y=1, f(x)=0.99, L=0.01 528 | x={1, 0}, y=1, f(x)=0.91, L=0.09 529 | x={1, 0}, y=1, f(x)=0.91, L=0.09 530 | x={0, 1}, y=1, f(x)=0.90, L=0.11 531 | x={1, 1}, y=1, f(x)=1.00, L=0.00 532 | x={1, 1}, y=1, f(x)=1.00, L=0.00 533 | x={1, 1}, y=1, f(x)=1.00, L=0.00 534 | x={1, 0}, y=1, f(x)=0.92, L=0.09 535 | x={0, 0}, y=0, f(x)=0.32, L=0.38 536 | x={1, 1}, y=1, 
f(x)=1.00, L=0.00 537 | x={1, 1}, y=1, f(x)=1.00, L=0.00 538 | x={0, 0}, y=0, f(x)=0.31, L=0.37 539 | x={1, 1}, y=1, f(x)=1.00, L=0.00 540 | x={0, 0}, y=0, f(x)=0.30, L=0.36 541 | x={1, 0}, y=1, f(x)=0.91, L=0.09 542 | x={0, 0}, y=0, f(x)=0.30, L=0.35 543 | x={0, 1}, y=1, f(x)=0.89, L=0.12 544 | x={1, 1}, y=1, f(x)=0.99, L=0.01 545 | x={1, 0}, y=1, f(x)=0.91, L=0.09 546 | x={1, 0}, y=1, f(x)=0.91, L=0.09 547 | x={1, 1}, y=1, f(x)=1.00, L=0.00 548 | x={0, 0}, y=0, f(x)=0.30, L=0.35 549 | x={1, 0}, y=1, f(x)=0.91, L=0.09 550 | x={0, 0}, y=0, f(x)=0.29, L=0.35 551 | x={0, 1}, y=1, f(x)=0.89, L=0.12 552 | x={1, 1}, y=1, f(x)=1.00, L=0.00 553 | x={1, 1}, y=1, f(x)=1.00, L=0.00 554 | x={1, 1}, y=1, f(x)=1.00, L=0.00 555 | x={0, 1}, y=1, f(x)=0.89, L=0.12 556 | x={0, 0}, y=0, f(x)=0.29, L=0.35 557 | x={1, 0}, y=1, f(x)=0.91, L=0.10 558 | x={1, 1}, y=1, f(x)=1.00, L=0.00 559 | x={1, 1}, y=1, f(x)=1.00, L=0.00 560 | x={1, 0}, y=1, f(x)=0.91, L=0.09 561 | x={1, 0}, y=1, f(x)=0.91, L=0.09 562 | x={1, 0}, y=1, f(x)=0.91, L=0.09 563 | x={0, 1}, y=1, f(x)=0.89, L=0.11 564 | x={1, 0}, y=1, f(x)=0.92, L=0.09 565 | x={0, 1}, y=1, f(x)=0.90, L=0.11 566 | x={0, 1}, y=1, f(x)=0.90, L=0.11 567 | x={1, 1}, y=1, f(x)=1.00, L=0.00 568 | x={0, 0}, y=0, f(x)=0.30, L=0.36 569 | x={0, 1}, y=1, f(x)=0.90, L=0.11 570 | x={1, 0}, y=1, f(x)=0.92, L=0.09 571 | x={1, 1}, y=1, f(x)=1.00, L=0.00 572 | x={1, 0}, y=1, f(x)=0.92, L=0.09 573 | x={0, 1}, y=1, f(x)=0.90, L=0.10 574 | x={0, 1}, y=1, f(x)=0.90, L=0.10 575 | x={0, 1}, y=1, f(x)=0.90, L=0.10 576 | x={0, 0}, y=0, f(x)=0.31, L=0.37 577 | x={0, 1}, y=1, f(x)=0.90, L=0.10 578 | x={1, 0}, y=1, f(x)=0.92, L=0.08 579 | x={1, 0}, y=1, f(x)=0.92, L=0.08 580 | x={0, 1}, y=1, f(x)=0.91, L=0.10 581 | x={0, 0}, y=0, f(x)=0.31, L=0.37 582 | x={1, 1}, y=1, f(x)=1.00, L=0.00 583 | x={0, 1}, y=1, f(x)=0.91, L=0.10 584 | x={0, 1}, y=1, f(x)=0.91, L=0.10 585 | x={1, 1}, y=1, f(x)=1.00, L=0.00 586 | x={0, 0}, y=0, f(x)=0.31, L=0.37 587 | x={0, 0}, y=0, f(x)=0.30, L=0.36 588 | x={0, 0}, y=0, f(x)=0.29, L=0.35 589 | x={1, 1}, y=1, f(x)=1.00, L=0.00 590 | x={0, 0}, y=0, f(x)=0.29, L=0.34 591 | x={0, 0}, y=0, f(x)=0.28, L=0.33 592 | x={1, 0}, y=1, f(x)=0.91, L=0.09 593 | x={0, 0}, y=0, f(x)=0.28, L=0.33 594 | x={0, 1}, y=1, f(x)=0.89, L=0.11 595 | x={1, 0}, y=1, f(x)=0.91, L=0.09 596 | x={0, 1}, y=1, f(x)=0.90, L=0.11 597 | x={0, 0}, y=0, f(x)=0.28, L=0.33 598 | x={1, 1}, y=1, f(x)=1.00, L=0.00 599 | x={1, 1}, y=1, f(x)=1.00, L=0.00 600 | x={0, 0}, y=0, f(x)=0.27, L=0.32 601 | x={1, 0}, y=1, f(x)=0.91, L=0.10 602 | x={1, 0}, y=1, f(x)=0.91, L=0.09 603 | x={1, 0}, y=1, f(x)=0.91, L=0.09 604 | x={0, 1}, y=1, f(x)=0.90, L=0.11 605 | x={0, 0}, y=0, f(x)=0.27, L=0.32 606 | x={1, 0}, y=1, f(x)=0.91, L=0.09 607 | x={1, 1}, y=1, f(x)=1.00, L=0.00 608 | x={0, 0}, y=0, f(x)=0.27, L=0.32 609 | x={1, 1}, y=1, f(x)=1.00, L=0.00 610 | x={1, 1}, y=1, f(x)=1.00, L=0.00 611 | x={1, 1}, y=1, f(x)=1.00, L=0.00 612 | x={0, 0}, y=0, f(x)=0.27, L=0.31 613 | x={0, 0}, y=0, f(x)=0.26, L=0.30 614 | x={1, 1}, y=1, f(x)=1.00, L=0.00 615 | x={0, 0}, y=0, f(x)=0.26, L=0.30 616 | x={0, 1}, y=1, f(x)=0.89, L=0.12 617 | x={1, 1}, y=1, f(x)=1.00, L=0.00 618 | x={1, 1}, y=1, f(x)=1.00, L=0.00 619 | x={1, 1}, y=1, f(x)=1.00, L=0.00 620 | x={0, 0}, y=0, f(x)=0.25, L=0.29 621 | x={1, 1}, y=1, f(x)=1.00, L=0.00 622 | x={1, 1}, y=1, f(x)=1.00, L=0.00 623 | x={0, 0}, y=0, f(x)=0.25, L=0.29 624 | x={0, 0}, y=0, f(x)=0.24, L=0.28 625 | x={1, 1}, y=1, f(x)=1.00, L=0.00 626 | x={1, 1}, y=1, f(x)=1.00, L=0.00 627 | x={0, 0}, y=0, 
f(x)=0.24, L=0.27 628 | x={1, 0}, y=1, f(x)=0.90, L=0.11 629 | x={1, 0}, y=1, f(x)=0.90, L=0.11 630 | x={0, 0}, y=0, f(x)=0.24, L=0.27 631 | x={1, 1}, y=1, f(x)=1.00, L=0.00 632 | x={1, 0}, y=1, f(x)=0.90, L=0.11 633 | x={1, 0}, y=1, f(x)=0.90, L=0.10 634 | x={0, 0}, y=0, f(x)=0.24, L=0.27 635 | x={0, 1}, y=1, f(x)=0.88, L=0.13 636 | x={0, 1}, y=1, f(x)=0.88, L=0.13 637 | x={0, 0}, y=0, f(x)=0.24, L=0.27 638 | x={0, 1}, y=1, f(x)=0.88, L=0.13 639 | x={1, 1}, y=1, f(x)=1.00, L=0.00 640 | x={1, 1}, y=1, f(x)=1.00, L=0.00 641 | x={0, 1}, y=1, f(x)=0.88, L=0.12 642 | x={0, 0}, y=0, f(x)=0.24, L=0.27 643 | x={1, 1}, y=1, f(x)=1.00, L=0.00 644 | x={0, 0}, y=0, f(x)=0.23, L=0.27 645 | x={1, 0}, y=1, f(x)=0.90, L=0.11 646 | x={1, 1}, y=1, f(x)=1.00, L=0.00 647 | x={0, 1}, y=1, f(x)=0.88, L=0.12 648 | x={0, 1}, y=1, f(x)=0.89, L=0.12 649 | x={0, 0}, y=0, f(x)=0.24, L=0.27 650 | x={1, 0}, y=1, f(x)=0.90, L=0.10 651 | x={0, 1}, y=1, f(x)=0.89, L=0.12 652 | x={0, 1}, y=1, f(x)=0.89, L=0.12 653 | x={0, 0}, y=0, f(x)=0.24, L=0.27 654 | x={0, 0}, y=0, f(x)=0.23, L=0.27 655 | x={1, 1}, y=1, f(x)=1.00, L=0.00 656 | x={1, 0}, y=1, f(x)=0.90, L=0.10 657 | x={1, 1}, y=1, f(x)=1.00, L=0.00 658 | x={0, 0}, y=0, f(x)=0.23, L=0.26 659 | x={1, 1}, y=1, f(x)=1.00, L=0.00 660 | x={0, 1}, y=1, f(x)=0.88, L=0.12 661 | x={1, 0}, y=1, f(x)=0.90, L=0.10 662 | x={1, 1}, y=1, f(x)=1.00, L=0.00 663 | x={1, 1}, y=1, f(x)=1.00, L=0.00 664 | x={0, 1}, y=1, f(x)=0.89, L=0.12 665 | x={1, 0}, y=1, f(x)=0.90, L=0.10 666 | x={1, 1}, y=1, f(x)=1.00, L=0.00 667 | x={1, 0}, y=1, f(x)=0.91, L=0.10 668 | x={0, 0}, y=0, f(x)=0.24, L=0.27 669 | x={1, 0}, y=1, f(x)=0.91, L=0.10 670 | x={1, 0}, y=1, f(x)=0.91, L=0.10 671 | x={1, 0}, y=1, f(x)=0.91, L=0.10 672 | x={0, 0}, y=0, f(x)=0.24, L=0.27 673 | x={0, 0}, y=0, f(x)=0.23, L=0.27 674 | x={1, 1}, y=1, f(x)=1.00, L=0.00 675 | x={1, 1}, y=1, f(x)=1.00, L=0.00 676 | x={0, 1}, y=1, f(x)=0.89, L=0.12 677 | x={0, 1}, y=1, f(x)=0.89, L=0.12 678 | x={0, 1}, y=1, f(x)=0.89, L=0.11 679 | x={0, 0}, y=0, f(x)=0.24, L=0.27 680 | x={0, 1}, y=1, f(x)=0.89, L=0.11 681 | x={0, 1}, y=1, f(x)=0.89, L=0.11 682 | x={1, 1}, y=1, f(x)=1.00, L=0.00 683 | x={1, 0}, y=1, f(x)=0.91, L=0.10 684 | x={0, 1}, y=1, f(x)=0.90, L=0.11 685 | x={0, 0}, y=0, f(x)=0.24, L=0.27 686 | x={1, 0}, y=1, f(x)=0.91, L=0.09 687 | x={0, 1}, y=1, f(x)=0.90, L=0.11 688 | x={1, 0}, y=1, f(x)=0.91, L=0.09 689 | x={1, 0}, y=1, f(x)=0.91, L=0.09 690 | x={1, 0}, y=1, f(x)=0.91, L=0.09 691 | x={1, 1}, y=1, f(x)=1.00, L=0.00 692 | x={0, 0}, y=0, f(x)=0.24, L=0.28 693 | x={1, 0}, y=1, f(x)=0.91, L=0.09 694 | x={0, 0}, y=0, f(x)=0.24, L=0.27 695 | x={0, 0}, y=0, f(x)=0.23, L=0.27 696 | x={0, 0}, y=0, f(x)=0.23, L=0.26 697 | x={1, 0}, y=1, f(x)=0.91, L=0.09 698 | x={1, 0}, y=1, f(x)=0.91, L=0.09 699 | x={1, 1}, y=1, f(x)=1.00, L=0.00 700 | x={0, 0}, y=0, f(x)=0.23, L=0.26 701 | x={0, 1}, y=1, f(x)=0.89, L=0.11 702 | x={1, 1}, y=1, f(x)=1.00, L=0.00 703 | x={0, 0}, y=0, f(x)=0.23, L=0.26 704 | x={1, 1}, y=1, f(x)=1.00, L=0.00 705 | x={0, 1}, y=1, f(x)=0.89, L=0.11 706 | x={1, 0}, y=1, f(x)=0.91, L=0.09 707 | x={0, 0}, y=0, f(x)=0.23, L=0.26 708 | x={1, 1}, y=1, f(x)=1.00, L=0.00 709 | x={1, 0}, y=1, f(x)=0.91, L=0.09 710 | x={1, 0}, y=1, f(x)=0.91, L=0.09 711 | x={0, 1}, y=1, f(x)=0.90, L=0.11 712 | x={1, 1}, y=1, f(x)=1.00, L=0.00 713 | x={0, 0}, y=0, f(x)=0.23, L=0.26 714 | x={0, 1}, y=1, f(x)=0.90, L=0.11 715 | x={0, 0}, y=0, f(x)=0.23, L=0.26 716 | x={1, 1}, y=1, f(x)=1.00, L=0.00 717 | x={0, 0}, y=0, f(x)=0.22, L=0.25 718 | x={1, 0}, y=1, 
f(x)=0.91, L=0.09 719 | x={1, 1}, y=1, f(x)=1.00, L=0.00 720 | x={0, 1}, y=1, f(x)=0.89, L=0.11 721 | x={0, 0}, y=0, f(x)=0.22, L=0.25 722 | x={1, 1}, y=1, f(x)=1.00, L=0.00 723 | x={0, 1}, y=1, f(x)=0.89, L=0.11 724 | x={0, 1}, y=1, f(x)=0.90, L=0.11 725 | x={0, 1}, y=1, f(x)=0.90, L=0.11 726 | x={1, 1}, y=1, f(x)=1.00, L=0.00 727 | x={1, 1}, y=1, f(x)=1.00, L=0.00 728 | x={1, 0}, y=1, f(x)=0.91, L=0.09 729 | x={1, 1}, y=1, f(x)=1.00, L=0.00 730 | x={0, 0}, y=0, f(x)=0.23, L=0.26 731 | x={1, 0}, y=1, f(x)=0.91, L=0.09 732 | x={0, 1}, y=1, f(x)=0.90, L=0.11 733 | x={0, 0}, y=0, f(x)=0.22, L=0.25 734 | x={1, 0}, y=1, f(x)=0.91, L=0.09 735 | x={1, 1}, y=1, f(x)=1.00, L=0.00 736 | x={0, 0}, y=0, f(x)=0.22, L=0.25 737 | x={1, 0}, y=1, f(x)=0.91, L=0.09 738 | x={0, 0}, y=0, f(x)=0.22, L=0.25 739 | x={1, 1}, y=1, f(x)=1.00, L=0.00 740 | x={0, 1}, y=1, f(x)=0.90, L=0.11 741 | x={1, 1}, y=1, f(x)=1.00, L=0.00 742 | x={1, 0}, y=1, f(x)=0.91, L=0.09 743 | x={0, 1}, y=1, f(x)=0.90, L=0.10 744 | x={1, 1}, y=1, f(x)=1.00, L=0.00 745 | x={0, 1}, y=1, f(x)=0.90, L=0.10 746 | x={0, 1}, y=1, f(x)=0.90, L=0.10 747 | x={1, 1}, y=1, f(x)=1.00, L=0.00 748 | x={0, 1}, y=1, f(x)=0.91, L=0.10 749 | x={1, 0}, y=1, f(x)=0.92, L=0.09 750 | x={1, 0}, y=1, f(x)=0.92, L=0.08 751 | x={0, 0}, y=0, f(x)=0.23, L=0.26 752 | x={1, 1}, y=1, f(x)=1.00, L=0.00 753 | x={1, 1}, y=1, f(x)=1.00, L=0.00 754 | x={1, 0}, y=1, f(x)=0.92, L=0.08 755 | x={0, 1}, y=1, f(x)=0.91, L=0.10 756 | x={0, 1}, y=1, f(x)=0.91, L=0.10 757 | x={0, 0}, y=0, f(x)=0.23, L=0.26 758 | x={0, 0}, y=0, f(x)=0.23, L=0.26 759 | x={1, 1}, y=1, f(x)=1.00, L=0.00 760 | x={1, 1}, y=1, f(x)=1.00, L=0.00 761 | x={1, 0}, y=1, f(x)=0.92, L=0.09 762 | x={1, 0}, y=1, f(x)=0.92, L=0.08 763 | x={0, 1}, y=1, f(x)=0.91, L=0.10 764 | x={0, 0}, y=0, f(x)=0.23, L=0.26 765 | x={1, 1}, y=1, f(x)=1.00, L=0.00 766 | x={0, 1}, y=1, f(x)=0.91, L=0.10 767 | x={0, 0}, y=0, f(x)=0.22, L=0.25 768 | x={0, 1}, y=1, f(x)=0.91, L=0.10 769 | x={0, 1}, y=1, f(x)=0.91, L=0.10 770 | x={0, 0}, y=0, f(x)=0.22, L=0.25 771 | x={0, 0}, y=0, f(x)=0.22, L=0.25 772 | x={0, 0}, y=0, f(x)=0.22, L=0.24 773 | x={0, 0}, y=0, f(x)=0.21, L=0.24 774 | x={1, 1}, y=1, f(x)=1.00, L=0.00 775 | x={0, 1}, y=1, f(x)=0.90, L=0.10 776 | x={0, 0}, y=0, f(x)=0.21, L=0.24 777 | x={0, 1}, y=1, f(x)=0.90, L=0.10 778 | x={0, 0}, y=0, f(x)=0.21, L=0.23 779 | x={1, 0}, y=1, f(x)=0.91, L=0.09 780 | x={1, 1}, y=1, f(x)=1.00, L=0.00 781 | x={1, 1}, y=1, f(x)=1.00, L=0.00 782 | x={0, 1}, y=1, f(x)=0.90, L=0.10 783 | x={0, 1}, y=1, f(x)=0.91, L=0.10 784 | x={1, 1}, y=1, f(x)=1.00, L=0.00 785 | x={0, 0}, y=0, f(x)=0.21, L=0.24 786 | x={0, 1}, y=1, f(x)=0.91, L=0.10 787 | x={0, 1}, y=1, f(x)=0.91, L=0.10 788 | x={1, 0}, y=1, f(x)=0.91, L=0.09 789 | x={0, 1}, y=1, f(x)=0.91, L=0.10 790 | x={0, 0}, y=0, f(x)=0.21, L=0.24 791 | x={0, 1}, y=1, f(x)=0.91, L=0.10 792 | x={0, 0}, y=0, f(x)=0.21, L=0.24 793 | x={0, 1}, y=1, f(x)=0.91, L=0.10 794 | x={0, 0}, y=0, f(x)=0.21, L=0.23 795 | x={0, 0}, y=0, f(x)=0.20, L=0.23 796 | x={0, 0}, y=0, f(x)=0.20, L=0.23 797 | x={1, 0}, y=1, f(x)=0.91, L=0.10 798 | x={0, 0}, y=0, f(x)=0.20, L=0.22 799 | x={0, 1}, y=1, f(x)=0.90, L=0.10 800 | x={1, 0}, y=1, f(x)=0.91, L=0.09 801 | x={1, 0}, y=1, f(x)=0.91, L=0.09 802 | x={0, 1}, y=1, f(x)=0.91, L=0.10 803 | x={1, 0}, y=1, f(x)=0.91, L=0.09 804 | x={1, 0}, y=1, f(x)=0.91, L=0.09 805 | x={0, 1}, y=1, f(x)=0.91, L=0.09 806 | x={0, 1}, y=1, f(x)=0.91, L=0.09 807 | x={0, 1}, y=1, f(x)=0.91, L=0.09 808 | x={1, 0}, y=1, f(x)=0.92, L=0.09 809 | x={0, 1}, y=1, 
f(x)=0.92, L=0.09 810 | x={1, 0}, y=1, f(x)=0.92, L=0.08 811 | x={0, 0}, y=0, f(x)=0.21, L=0.24 812 | x={0, 1}, y=1, f(x)=0.92, L=0.09 813 | x={0, 0}, y=0, f(x)=0.21, L=0.24 814 | x={1, 1}, y=1, f(x)=1.00, L=0.00 815 | x={0, 0}, y=0, f(x)=0.21, L=0.23 816 | x={0, 0}, y=0, f(x)=0.20, L=0.23 817 | x={0, 0}, y=0, f(x)=0.20, L=0.22 818 | x={1, 0}, y=1, f(x)=0.91, L=0.09 819 | x={1, 0}, y=1, f(x)=0.92, L=0.09 820 | x={0, 1}, y=1, f(x)=0.91, L=0.09 821 | x={1, 0}, y=1, f(x)=0.92, L=0.09 822 | x={0, 1}, y=1, f(x)=0.91, L=0.09 823 | x={1, 1}, y=1, f(x)=1.00, L=0.00 824 | x={0, 1}, y=1, f(x)=0.92, L=0.09 825 | x={0, 1}, y=1, f(x)=0.92, L=0.09 826 | x={0, 0}, y=0, f(x)=0.21, L=0.23 827 | x={0, 0}, y=0, f(x)=0.20, L=0.23 828 | x={0, 0}, y=0, f(x)=0.20, L=0.22 829 | x={0, 0}, y=0, f(x)=0.20, L=0.22 830 | x={1, 1}, y=1, f(x)=1.00, L=0.00 831 | x={1, 0}, y=1, f(x)=0.91, L=0.09 832 | x={0, 0}, y=0, f(x)=0.20, L=0.22 833 | x={0, 1}, y=1, f(x)=0.91, L=0.09 834 | x={1, 0}, y=1, f(x)=0.91, L=0.09 835 | x={0, 1}, y=1, f(x)=0.91, L=0.09 836 | x={1, 1}, y=1, f(x)=1.00, L=0.00 837 | x={1, 0}, y=1, f(x)=0.92, L=0.09 838 | x={0, 1}, y=1, f(x)=0.91, L=0.09 839 | x={0, 0}, y=0, f(x)=0.20, L=0.22 840 | x={0, 1}, y=1, f(x)=0.91, L=0.09 841 | x={1, 1}, y=1, f(x)=1.00, L=0.00 842 | x={0, 0}, y=0, f(x)=0.20, L=0.22 843 | x={1, 1}, y=1, f(x)=1.00, L=0.00 844 | x={1, 0}, y=1, f(x)=0.92, L=0.09 845 | x={0, 0}, y=0, f(x)=0.20, L=0.22 846 | x={0, 1}, y=1, f(x)=0.91, L=0.09 847 | x={1, 1}, y=1, f(x)=1.00, L=0.00 848 | x={0, 1}, y=1, f(x)=0.91, L=0.09 849 | x={1, 0}, y=1, f(x)=0.92, L=0.09 850 | x={1, 0}, y=1, f(x)=0.92, L=0.08 851 | x={1, 0}, y=1, f(x)=0.92, L=0.08 852 | x={1, 0}, y=1, f(x)=0.92, L=0.08 853 | x={0, 0}, y=0, f(x)=0.20, L=0.22 854 | x={0, 0}, y=0, f(x)=0.20, L=0.22 855 | x={0, 0}, y=0, f(x)=0.19, L=0.22 856 | x={1, 1}, y=1, f(x)=1.00, L=0.00 857 | x={0, 1}, y=1, f(x)=0.91, L=0.09 858 | x={0, 1}, y=1, f(x)=0.92, L=0.09 859 | x={0, 0}, y=0, f(x)=0.19, L=0.22 860 | x={0, 1}, y=1, f(x)=0.92, L=0.09 861 | x={1, 1}, y=1, f(x)=1.00, L=0.00 862 | x={0, 1}, y=1, f(x)=0.92, L=0.09 863 | x={1, 0}, y=1, f(x)=0.92, L=0.08 864 | x={0, 0}, y=0, f(x)=0.20, L=0.22 865 | x={0, 1}, y=1, f(x)=0.92, L=0.09 866 | x={1, 0}, y=1, f(x)=0.92, L=0.08 867 | x={1, 0}, y=1, f(x)=0.92, L=0.08 868 | x={0, 0}, y=0, f(x)=0.20, L=0.22 869 | x={1, 1}, y=1, f(x)=1.00, L=0.00 870 | x={1, 1}, y=1, f(x)=1.00, L=0.00 871 | x={0, 1}, y=1, f(x)=0.92, L=0.09 872 | x={1, 1}, y=1, f(x)=1.00, L=0.00 873 | x={0, 1}, y=1, f(x)=0.92, L=0.08 874 | x={1, 1}, y=1, f(x)=1.00, L=0.00 875 | x={1, 0}, y=1, f(x)=0.92, L=0.08 876 | x={0, 0}, y=0, f(x)=0.20, L=0.22 877 | x={0, 0}, y=0, f(x)=0.19, L=0.22 878 | x={1, 0}, y=1, f(x)=0.92, L=0.08 879 | x={0, 1}, y=1, f(x)=0.92, L=0.08 880 | x={0, 0}, y=0, f(x)=0.19, L=0.21 881 | x={1, 1}, y=1, f(x)=1.00, L=0.00 882 | x={1, 0}, y=1, f(x)=0.92, L=0.08 883 | x={0, 0}, y=0, f(x)=0.19, L=0.21 884 | x={1, 1}, y=1, f(x)=1.00, L=0.00 885 | x={0, 0}, y=0, f(x)=0.19, L=0.21 886 | x={0, 0}, y=0, f(x)=0.19, L=0.21 887 | x={0, 0}, y=0, f(x)=0.18, L=0.20 888 | x={1, 1}, y=1, f(x)=1.00, L=0.00 889 | x={0, 0}, y=0, f(x)=0.18, L=0.20 890 | x={0, 1}, y=1, f(x)=0.91, L=0.09 891 | x={0, 1}, y=1, f(x)=0.91, L=0.09 892 | x={0, 0}, y=0, f(x)=0.18, L=0.20 893 | x={0, 1}, y=1, f(x)=0.91, L=0.09 894 | x={1, 1}, y=1, f(x)=1.00, L=0.00 895 | x={0, 0}, y=0, f(x)=0.18, L=0.20 896 | x={0, 0}, y=0, f(x)=0.18, L=0.19 897 | x={1, 1}, y=1, f(x)=1.00, L=0.00 898 | x={0, 0}, y=0, f(x)=0.17, L=0.19 899 | x={0, 1}, y=1, f(x)=0.91, L=0.09 900 | x={1, 1}, y=1, 
f(x)=1.00, L=0.00 901 | x={1, 1}, y=1, f(x)=1.00, L=0.00 902 | x={1, 1}, y=1, f(x)=1.00, L=0.00 903 | x={0, 1}, y=1, f(x)=0.91, L=0.09 904 | x={0, 0}, y=0, f(x)=0.17, L=0.19 905 | x={1, 0}, y=1, f(x)=0.91, L=0.09 906 | x={1, 0}, y=1, f(x)=0.91, L=0.09 907 | x={0, 1}, y=1, f(x)=0.91, L=0.09 908 | x={1, 0}, y=1, f(x)=0.91, L=0.09 909 | x={1, 1}, y=1, f(x)=1.00, L=0.00 910 | x={0, 1}, y=1, f(x)=0.92, L=0.09 911 | x={0, 0}, y=0, f(x)=0.18, L=0.20 912 | x={1, 1}, y=1, f(x)=1.00, L=0.00 913 | x={1, 1}, y=1, f(x)=1.00, L=0.00 914 | x={0, 1}, y=1, f(x)=0.92, L=0.09 915 | x={1, 0}, y=1, f(x)=0.92, L=0.09 916 | x={1, 1}, y=1, f(x)=1.00, L=0.00 917 | x={0, 1}, y=1, f(x)=0.92, L=0.09 918 | x={0, 0}, y=0, f(x)=0.18, L=0.20 919 | x={1, 1}, y=1, f(x)=1.00, L=0.00 920 | x={0, 0}, y=0, f(x)=0.18, L=0.19 921 | x={0, 1}, y=1, f(x)=0.92, L=0.09 922 | x={0, 0}, y=0, f(x)=0.17, L=0.19 923 | x={0, 0}, y=0, f(x)=0.17, L=0.19 924 | x={1, 0}, y=1, f(x)=0.91, L=0.09 925 | x={1, 0}, y=1, f(x)=0.91, L=0.09 926 | x={1, 1}, y=1, f(x)=1.00, L=0.00 927 | x={0, 0}, y=0, f(x)=0.17, L=0.19 928 | x={1, 1}, y=1, f(x)=1.00, L=0.00 929 | x={0, 0}, y=0, f(x)=0.17, L=0.19 930 | x={0, 1}, y=1, f(x)=0.91, L=0.09 931 | x={1, 0}, y=1, f(x)=0.91, L=0.09 932 | x={1, 0}, y=1, f(x)=0.92, L=0.09 933 | x={1, 1}, y=1, f(x)=1.00, L=0.00 934 | x={1, 0}, y=1, f(x)=0.92, L=0.09 935 | x={1, 0}, y=1, f(x)=0.92, L=0.09 936 | x={0, 0}, y=0, f(x)=0.17, L=0.19 937 | x={1, 0}, y=1, f(x)=0.92, L=0.09 938 | x={0, 0}, y=0, f(x)=0.17, L=0.19 939 | x={1, 0}, y=1, f(x)=0.92, L=0.09 940 | x={0, 1}, y=1, f(x)=0.92, L=0.09 941 | x={0, 1}, y=1, f(x)=0.92, L=0.09 942 | x={0, 0}, y=0, f(x)=0.17, L=0.19 943 | x={0, 1}, y=1, f(x)=0.92, L=0.09 944 | x={0, 1}, y=1, f(x)=0.92, L=0.09 945 | x={0, 1}, y=1, f(x)=0.92, L=0.08 946 | x={1, 0}, y=1, f(x)=0.92, L=0.08 947 | x={0, 0}, y=0, f(x)=0.18, L=0.19 948 | x={0, 1}, y=1, f(x)=0.92, L=0.08 949 | x={0, 0}, y=0, f(x)=0.17, L=0.19 950 | x={0, 1}, y=1, f(x)=0.92, L=0.08 951 | x={1, 0}, y=1, f(x)=0.92, L=0.08 952 | x={1, 1}, y=1, f(x)=1.00, L=0.00 953 | x={0, 0}, y=0, f(x)=0.17, L=0.19 954 | x={0, 0}, y=0, f(x)=0.17, L=0.19 955 | x={1, 1}, y=1, f(x)=1.00, L=0.00 956 | x={0, 1}, y=1, f(x)=0.92, L=0.08 957 | x={1, 0}, y=1, f(x)=0.92, L=0.08 958 | x={0, 1}, y=1, f(x)=0.92, L=0.08 959 | x={1, 0}, y=1, f(x)=0.92, L=0.08 960 | x={1, 1}, y=1, f(x)=1.00, L=0.00 961 | x={1, 0}, y=1, f(x)=0.92, L=0.08 962 | x={0, 0}, y=0, f(x)=0.17, L=0.19 963 | x={0, 0}, y=0, f(x)=0.17, L=0.19 964 | x={1, 0}, y=1, f(x)=0.92, L=0.08 965 | x={0, 0}, y=0, f(x)=0.17, L=0.19 966 | x={1, 1}, y=1, f(x)=1.00, L=0.00 967 | x={0, 1}, y=1, f(x)=0.92, L=0.08 968 | x={0, 1}, y=1, f(x)=0.92, L=0.08 969 | x={0, 1}, y=1, f(x)=0.92, L=0.08 970 | x={0, 1}, y=1, f(x)=0.92, L=0.08 971 | x={0, 0}, y=0, f(x)=0.17, L=0.19 972 | x={1, 1}, y=1, f(x)=1.00, L=0.00 973 | x={1, 1}, y=1, f(x)=1.00, L=0.00 974 | x={0, 0}, y=0, f(x)=0.17, L=0.19 975 | x={0, 0}, y=0, f(x)=0.17, L=0.18 976 | x={0, 1}, y=1, f(x)=0.92, L=0.08 977 | x={1, 1}, y=1, f(x)=1.00, L=0.00 978 | x={1, 0}, y=1, f(x)=0.92, L=0.08 979 | x={0, 0}, y=0, f(x)=0.17, L=0.18 980 | x={1, 1}, y=1, f(x)=1.00, L=0.00 981 | x={0, 0}, y=0, f(x)=0.17, L=0.18 982 | x={1, 1}, y=1, f(x)=1.00, L=0.00 983 | x={0, 0}, y=0, f(x)=0.16, L=0.18 984 | x={1, 0}, y=1, f(x)=0.92, L=0.09 985 | x={1, 1}, y=1, f(x)=1.00, L=0.00 986 | x={1, 0}, y=1, f(x)=0.92, L=0.08 987 | x={0, 1}, y=1, f(x)=0.92, L=0.08 988 | x={1, 1}, y=1, f(x)=1.00, L=0.00 989 | x={1, 1}, y=1, f(x)=1.00, L=0.00 990 | x={1, 0}, y=1, f(x)=0.92, L=0.08 991 | x={1, 1}, y=1, 
f(x)=1.00, L=0.00 992 | x={1, 1}, y=1, f(x)=1.00, L=0.00 993 | x={1, 0}, y=1, f(x)=0.92, L=0.08 994 | x={1, 1}, y=1, f(x)=1.00, L=0.00 995 | x={0, 1}, y=1, f(x)=0.92, L=0.08 996 | x={1, 0}, y=1, f(x)=0.92, L=0.08 997 | x={0, 0}, y=0, f(x)=0.17, L=0.19 998 | x={1, 1}, y=1, f(x)=1.00, L=0.00 999 | x={0, 1}, y=1, f(x)=0.92, L=0.08 1000 | x={0, 0}, y=0, f(x)=0.17, L=0.18 1001 | -------------------------------------------------------------------------------- /03-Training-simple-neural-networks/sigmoid.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/03-Training-simple-neural-networks/sigmoid.xlsx -------------------------------------------------------------------------------- /03-Training-simple-neural-networks/trainlogreg-mnist.lua: -------------------------------------------------------------------------------- 1 | require "nn" 2 | local dl = require "dataload" 3 | require "optim" 4 | 5 | trainset = dl.loadMNIST() 6 | 7 | model = nn.Sequential() 8 | :add(nn.View(28*28)) 9 | :add(nn.Linear(28*28, 10)) 10 | :add(nn.LogSoftMax()) 11 | 12 | criterion = nn.ClassNLLCriterion() 13 | 14 | model:float(); criterion:float() 15 | 16 | print("Epoch; Average Loss") 17 | 18 | -- optimize model 19 | for epoch=1,200 do 20 | local sumloss, count = 0, 0 21 | for i, input, target in trainset:sampleiter(32, 10000) do 22 | local output = model:forward(input) 23 | sumloss = sumloss + criterion:forward(output, target) 24 | count = i 25 | 26 | local gradOutput = criterion:backward(output, target) 27 | model:zeroGradParameters() 28 | model:backward(input, gradOutput) 29 | 30 | model:updateParameters(0.1) 31 | end 32 | local avgloss = sumloss/count 33 | print(string.format("%d; %f", epoch, avgloss)) 34 | if avgloss < 0.007 then 35 | break 36 | end 37 | end 38 | 39 | -- evaluate empirical risk and confusion matrix 40 | cm = optim.ConfusionMatrix(10) 41 | sumloss, count = 0, 0 42 | for i, input, target in trainset:subiter(32) do 43 | local output = model:forward(input) 44 | sumloss = sumloss + criterion:forward(output, target) 45 | cm:batchAdd(output, target) 46 | count = i 47 | end 48 | assert(count == 50000) 49 | print(cm) 50 | print("Avg NLL:"..sumloss/count) 51 | 52 | model:clearState() 53 | torch.save("logreg-mnist.t7", model) 54 | -------------------------------------------------------------------------------- /03-Training-simple-neural-networks/trainlogreg.lua: -------------------------------------------------------------------------------- 1 | require "nn" 2 | 3 | input = torch.Tensor({{0,0},{0,1},{1,0},{1,1}}) 4 | target = torch.Tensor({{0},{1},{1},{1}}) 5 | 6 | logreg = nn.Sequential() 7 | logreg:add(nn.Linear(2, 1)) 8 | logreg:add(nn.Sigmoid()) 9 | 10 | bce = nn.BCECriterion() 11 | 12 | for i=1,1000 do 13 | -- sample 14 | local idx = math.random(1,4) 15 | local x, y = input[idx], target[idx] 16 | 17 | -- forward 18 | local y_hat = logreg:forward(x) 19 | local loss = bce:forward(y_hat, y) 20 | print(string.format("x={%d, %d}, y=%d, f(x)=%4.2f, L=%4.2f", x[1], x[2], y[1], y_hat[1], loss)) 21 | 22 | -- backward 23 | local grad_y_hat = bce:backward(y_hat, y) 24 | logreg:zeroGradParameters() 25 | logreg:backward(x, grad_y_hat) 26 | 27 | -- update 28 | logreg:updateParameters(0.1) 29 | end 30 | -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/hyperopt-mnist.xlsx: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/04-Generalizing-deep-neural-networks/hyperopt-mnist.xlsx -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/overfitting.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/04-Generalizing-deep-neural-networks/overfitting.xlsx -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/relu.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/04-Generalizing-deep-neural-networks/relu.xlsx -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/tanh.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/04-Generalizing-deep-neural-networks/tanh.xlsx -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/trainmlp-mnist-crossvalidate.lua: -------------------------------------------------------------------------------- 1 | require "nn" 2 | local dl = require "dataload" 3 | require "optim" 4 | require "dpnn" -- needed for nn.Convert 5 | 6 | -- options : hyper-parameters and such 7 | local cmd = torch.CmdLine() 8 | cmd:text() 9 | cmd:text('Training a multi-layer perceptron on MNIST') 10 | cmd:text('Options:') 11 | cmd:option('-lr', 0.1, 'learning rate') 12 | cmd:option('-batchsize', 32, 'number of samples per batch') 13 | cmd:option('-epochsize', -1, 'number of samples per epoch') 14 | cmd:option('-hiddensize', '{200,200}', 'number of hidden units') 15 | cmd:option('-transfer', 'ReLU', 'non-linear transfer function') 16 | cmd:option('-maxepoch', 200, 'stop after this many epochs') 17 | local opt = cmd:parse(arg or {}) 18 | 19 | -- process cmd-line options 20 | opt.hiddensize = loadstring(" return "..opt.hiddensize)() 21 | opt.epochsize = opt.epochsize > 0 and opt.epochsize or nil 22 | 23 | -- load training set 24 | local trainset, validset = dl.loadMNIST() 25 | 26 | -- define model and criterion 27 | local inputsize = 28*28 28 | 29 | local model = nn.Sequential() 30 | model:add(nn.Convert()) 31 | model:add(nn.View(inputsize)) 32 | 33 | for i,hiddensize in ipairs(opt.hiddensize) do 34 | model:add(nn.Linear(inputsize, hiddensize)) 35 | model:add(nn[opt.transfer]()) 36 | inputsize = hiddensize 37 | end 38 | 39 | model:add(nn.Linear(inputsize, 10)) 40 | model:add(nn.LogSoftMax()) 41 | 42 | local criterion = nn.ClassNLLCriterion() 43 | 44 | -- confusion matrix used for training and cross-valiation 45 | local validcm = optim.ConfusionMatrix(10) 46 | local traincm = optim.ConfusionMatrix(10) 47 | 48 | -- optimize model using SGD 49 | print("Epoch, Train error, Valid error") 50 | for epoch=1,opt.maxepoch do 51 | 52 | -- 1. 
training 53 | traincm:zero() 54 | model:training() 55 | for i, input, target in trainset:sampleiter(opt.batchsize, opt.epochsize) do 56 | local output = model:forward(input) 57 | traincm:batchAdd(output, target) 58 | 59 | criterion:forward(output, target) 60 | local gradOutput = criterion:backward(output, target) 61 | model:zeroGradParameters() 62 | model:backward(input, gradOutput) 63 | model:updateParameters(opt.lr) 64 | end 65 | traincm:updateValids() 66 | opt.trainerr = 1 - traincm.totalValid 67 | 68 | -- 2. cross-validation 69 | validcm:zero() 70 | model:evaluate() 71 | for i, input, target in validset:subiter(opt.batchsize) do 72 | local output = model:forward(input) 73 | validcm:batchAdd(output, target) 74 | end 75 | validcm:updateValids() 76 | opt.validerr = 1 - validcm.totalValid 77 | 78 | print(string.format("%d, %f, %f", epoch, opt.trainerr, opt.validerr)) 79 | 80 | end 81 | 82 | model:clearState() 83 | model.opt = opt 84 | torch.save("mlp-mnist-crossvalidate.t7", model) 85 | 86 | -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/trainmlp-mnist-earlystop.lua: -------------------------------------------------------------------------------- 1 | require "nn" 2 | local dl = require "dataload" 3 | require "optim" 4 | require "dpnn" -- needed for nn.Convert 5 | 6 | -- options : hyper-parameters and such 7 | local cmd = torch.CmdLine() 8 | cmd:text() 9 | cmd:text('Training a multi-layer perceptron on MNIST') 10 | cmd:text('Options:') 11 | cmd:option('-lr', 0.1, 'learning rate') 12 | cmd:option('-batchsize', 32, 'number of samples per batch') 13 | cmd:option('-epochsize', -1, 'number of samples per epoch') 14 | cmd:option('-hiddensize', '{200,200}', 'number of hidden units') 15 | cmd:option('-transfer', 'ReLU', 'non-linear transfer function') 16 | cmd:option('-maxepoch', 200, 'stop after this many epochs') 17 | cmd:option('-earlystop', 20, 'max #epochs to find a better minima for early-stopping') 18 | local opt = cmd:parse(arg or {}) 19 | 20 | -- process cmd-line options 21 | opt.hiddensize = loadstring(" return "..opt.hiddensize)() 22 | opt.epochsize = opt.epochsize > 0 and opt.epochsize or nil 23 | 24 | -- load training set 25 | local trainset, validset = dl.loadMNIST() 26 | 27 | -- define model and criterion 28 | local inputsize = 28*28 29 | 30 | local model = nn.Sequential() 31 | model:add(nn.Convert()) 32 | model:add(nn.View(inputsize)) 33 | 34 | for i,hiddensize in ipairs(opt.hiddensize) do 35 | model:add(nn.Linear(inputsize, hiddensize)) 36 | model:add(nn[opt.transfer]()) 37 | inputsize = hiddensize 38 | end 39 | 40 | model:add(nn.Linear(inputsize, 10)) 41 | model:add(nn.LogSoftMax()) 42 | 43 | local criterion = nn.ClassNLLCriterion() 44 | 45 | -- confusion matrix used for cross-valiation 46 | local validcm = optim.ConfusionMatrix(10) 47 | local traincm = optim.ConfusionMatrix(10) 48 | local ntrial, minvaliderr = 0, 1 49 | 50 | -- optimize model using SGD 51 | print("Epoch, Train error, Valid error") 52 | for epoch=1,opt.maxepoch do 53 | 54 | -- 1. 
training 55 | traincm:zero() 56 | for i, input, target in trainset:sampleiter(opt.batchsize, opt.epochsize) do 57 | local output = model:forward(input) 58 | criterion:forward(output, target) 59 | 60 | traincm:batchAdd(output, target) 61 | 62 | local gradOutput = criterion:backward(output, target) 63 | model:zeroGradParameters() 64 | model:backward(input, gradOutput) 65 | 66 | model:updateParameters(opt.lr) 67 | end 68 | traincm:updateValids() 69 | opt.trainerr = 1 - traincm.totalValid 70 | 71 | -- 2. cross-validation 72 | validcm:zero() 73 | for i, input, target in validset:subiter(opt.batchsize) do 74 | local output = model:forward(input) 75 | validcm:batchAdd(output, target) 76 | end 77 | validcm:updateValids() 78 | opt.validerr = 1 - validcm.totalValid 79 | 80 | print(string.format("%d, %f, %f", epoch, opt.trainerr, opt.validerr)) 81 | 82 | -- 3. early-stopping 83 | ntrial = ntrial + 1 84 | if opt.validerr < minvaliderr then 85 | -- save best version of model 86 | minvaliderr = opt.validerr 87 | model.opt = opt 88 | model:clearState() 89 | torch.save("mlp-mnist-earlystop.t7", model) 90 | ntrial = 0 91 | elseif ntrial >= opt.earlystop then 92 | print("No new minima found after "..(epoch-ntrial).." epochs.") 93 | print("Lowest validation error: "..(minvaliderr*100).."%") 94 | print("Stopping experiment.") 95 | break 96 | end 97 | 98 | end 99 | -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/trainmlp-mnist-hyperopt.lua: -------------------------------------------------------------------------------- 1 | require "nn" 2 | local dl = require "dataload" 3 | require "optim" 4 | require "dpnn" -- needed for nn.Convert 5 | 6 | -- options : hyper-parameters and such 7 | local cmd = torch.CmdLine() 8 | cmd:text() 9 | cmd:text('Training a multi-layer perceptron on MNIST') 10 | cmd:text('Options:') 11 | cmd:option('-lr', 0.1, 'learning rate') 12 | cmd:option('-batchsize', 32, 'number of samples per batch') 13 | cmd:option('-epochsize', -1, 'number of samples per epoch') 14 | cmd:option('-hiddensize', '{200,200}', 'number of hidden units') 15 | cmd:option('-transfer', 'ReLU', 'non-linear transfer function') 16 | cmd:option('-maxepoch', 200, 'stop after this many epochs') 17 | cmd:option('-earlystop', 20, 'max #epochs to find a better minima for early-stopping') 18 | cmd:option('-weightdecay', 1e-5, 'weight decay regularization factor') 19 | cmd:option('-savepath', paths.concat(dl.SAVE_PATH, 'mnist'), 'path to directory where to save model and learning curves') 20 | cmd:option('-id', dl.uniqueid(), 'id string of this experiment (defaults to a unique id)') 21 | cmd:option('-progress', false, 'print progress bar') 22 | local opt = cmd:parse(arg or {}) 23 | 24 | -- load training set 25 | local trainset, validset = dl.loadMNIST() 26 | 27 | -- process cmd-line options 28 | opt.hiddensize = loadstring(" return "..opt.hiddensize)() 29 | opt.epochsize = opt.epochsize > 0 and opt.epochsize or trainset:size() 30 | opt.version = 1 31 | opt.version = 2 -- uses dpnn's Module:weightdecay() 32 | 33 | -- define model and criterion 34 | local inputsize = 28*28 35 | 36 | local model = nn.Sequential() 37 | model:add(nn.Convert()) 38 | model:add(nn.View(inputsize)) 39 | 40 | for i,hiddensize in ipairs(opt.hiddensize) do 41 | model:add(nn.Linear(inputsize, hiddensize)) 42 | model:add(nn[opt.transfer]()) 43 | inputsize = hiddensize 44 | end 45 | 46 | model:add(nn.Linear(inputsize, 10)) 47 | model:add(nn.LogSoftMax()) 48 | 49 | local criterion = 
nn.ClassNLLCriterion() 50 | 51 | -- confusion matrix used for cross-valiation 52 | local validcm = optim.ConfusionMatrix(10) 53 | local traincm = optim.ConfusionMatrix(10) 54 | local ntrial, minvaliderr = 0, 1 55 | 56 | -- comma separated value 57 | paths.mkdir(opt.savepath) 58 | local csvpath = paths.concat(opt.savepath, opt.id..'.csv') 59 | 60 | local csvfile = io.open(csvpath, 'w') 61 | csvfile:write("Epoch,train error,valid error\n") 62 | 63 | -- optimize model using SGD 64 | for epoch=1,opt.maxepoch do 65 | print("\n"..opt.id.."; epoch #"..epoch.." :") 66 | 67 | -- 1. training 68 | local timer = torch.Timer() 69 | traincm:zero() 70 | for i, input, target in trainset:sampleiter(opt.batchsize, opt.epochsize) do 71 | local output = model:forward(input) 72 | criterion:forward(output, target) 73 | 74 | traincm:batchAdd(output, target) 75 | 76 | local gradOutput = criterion:backward(output, target) 77 | model:zeroGradParameters() 78 | model:backward(input, gradOutput) 79 | 80 | model:weightDecay(opt.weightdecay) -- weight decay 81 | 82 | if opt.progress then 83 | xlua.progress(math.min(i, opt.epochsize), opt.epochsize) 84 | end 85 | 86 | model:updateParameters(opt.lr) 87 | end 88 | traincm:updateValids() 89 | opt.trainerr = 1 - traincm.totalValid 90 | 91 | local speed = opt.epochsize/timer:time().real 92 | print(string.format("Speed : %f samples/second ", speed)) 93 | print(string.format("Training error: %f", opt.trainerr)) 94 | 95 | -- 2. cross-validation 96 | validcm:zero() 97 | for i, input, target in validset:subiter(opt.batchsize) do 98 | local output = model:forward(input) 99 | validcm:batchAdd(output, target) 100 | end 101 | validcm:updateValids() 102 | opt.validerr = 1 - validcm.totalValid 103 | 104 | print(string.format("Validation error: %f", opt.validerr)) 105 | csvfile:write(string.format('%d,%f,%f\n', epoch, opt.trainerr, opt.validerr)) 106 | 107 | -- 3. early-stopping 108 | ntrial = ntrial + 1 109 | if opt.validerr < minvaliderr then 110 | print("Found new minimum after "..ntrial.." epochs") 111 | minvaliderr = opt.validerr 112 | model.opt = opt 113 | model:clearState() 114 | local filename = paths.concat(opt.savepath, opt.id..'.t7') 115 | torch.save(filename, model) 116 | ntrial = 0 117 | elseif ntrial >= opt.earlystop then 118 | print("No new minima found after "..(epoch-ntrial).." 
epochs.") 119 | print("Lowest validation error: "..(minvaliderr*100).."%") 120 | print("Stopping experiment.") 121 | break 122 | end 123 | 124 | end 125 | 126 | csvfile:close() 127 | print("CSV file saved to "..csvpath) 128 | -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/trainmlp-mnist-weightdecay.lua: -------------------------------------------------------------------------------- 1 | require "nn" 2 | local dl = require "dataload" 3 | require "optim" 4 | require "dpnn" -- needed for nn.Convert 5 | 6 | -- options : hyper-parameters and such 7 | local cmd = torch.CmdLine() 8 | cmd:text() 9 | cmd:text('Training a multi-layer perceptron on MNIST') 10 | cmd:text('Options:') 11 | cmd:option('-lr', 0.1, 'learning rate') 12 | cmd:option('-batchsize', 32, 'number of samples per batch') 13 | cmd:option('-epochsize', -1, 'number of samples per epoch') 14 | cmd:option('-hiddensize', '{200,200}', 'number of hidden units') 15 | cmd:option('-transfer', 'ReLU', 'non-linear transfer function') 16 | cmd:option('-maxepoch', 200, 'stop after this many epochs') 17 | cmd:option('-earlystop', 20, 'max #epochs to find a better minima for early-stopping') 18 | cmd:option('-weightdecay', 1e-5, 'weight decay regularization factor') 19 | local opt = cmd:parse(arg or {}) 20 | 21 | -- process cmd-line options 22 | opt.hiddensize = loadstring(" return "..opt.hiddensize)() 23 | opt.epochsize = opt.epochsize > 0 and opt.epochsize or nil 24 | 25 | -- load training set 26 | local trainset, validset = dl.loadMNIST() 27 | 28 | -- define model and criterion 29 | local inputsize = 28*28 30 | 31 | local model = nn.Sequential() 32 | model:add(nn.Convert()) 33 | model:add(nn.View(inputsize)) 34 | 35 | for i,hiddensize in ipairs(opt.hiddensize) do 36 | model:add(nn.Linear(inputsize, hiddensize)) 37 | model:add(nn[opt.transfer]()) 38 | inputsize = hiddensize 39 | end 40 | 41 | model:add(nn.Linear(inputsize, 10)) 42 | model:add(nn.LogSoftMax()) 43 | 44 | local criterion = nn.ClassNLLCriterion() 45 | 46 | -- confusion matrix used for cross-valiation 47 | local validcm = optim.ConfusionMatrix(10) 48 | local traincm = optim.ConfusionMatrix(10) 49 | local ntrial, minvaliderr = 0, 1 50 | 51 | -- optimize model using SGD 52 | print("Epoch, Train error, Valid error") 53 | for epoch=1,opt.maxepoch do 54 | 55 | -- 1. training 56 | traincm:zero() 57 | for i, input, target in trainset:sampleiter(opt.batchsize, opt.epochsize) do 58 | local output = model:forward(input) 59 | criterion:forward(output, target) 60 | 61 | traincm:batchAdd(output, target) 62 | 63 | local gradOutput = criterion:backward(output, target) 64 | model:zeroGradParameters() 65 | model:backward(input, gradOutput) 66 | 67 | -- weight decay 68 | local params, gradParams = model:parameters() 69 | for i=1,#params do 70 | gradParams[i]:add(opt.weightdecay, params[i]) 71 | end 72 | 73 | model:updateParameters(opt.lr) 74 | end 75 | traincm:updateValids() 76 | opt.trainerr = 1 - traincm.totalValid 77 | 78 | -- 2. cross-validation 79 | validcm:zero() 80 | for i, input, target in validset:subiter(opt.batchsize) do 81 | local output = model:forward(input) 82 | validcm:batchAdd(output, target) 83 | end 84 | validcm:updateValids() 85 | opt.validerr = 1 - validcm.totalValid 86 | 87 | print(string.format("%d, %f, %f", epoch, opt.trainerr, opt.validerr)) 88 | 89 | -- 3. 
early-stopping 90 | ntrial = ntrial + 1 91 | if opt.validerr < minvaliderr then 92 | -- save best version of model 93 | minvaliderr = opt.validerr 94 | model.opt = opt 95 | model:clearState() 96 | torch.save("mlp-mnist-weightdecay.t7", model) 97 | ntrial = 0 98 | elseif ntrial >= opt.earlystop then 99 | print("No new minima found after "..(epoch-ntrial).." epochs.") 100 | print("Lowest validation error: "..(minvaliderr*100).."%") 101 | print("Stopping experiment.") 102 | break 103 | end 104 | 105 | end 106 | -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/trainmlp-mnist.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | We will use the MNIST data loader created in chapter 2. 3 | Listing 4.1 will extend listing 3.2 to use multiple layers. 4 | Everything else will be the same. 5 | It will be explained how the model capacity is increased to allow for 6 | non-linear discrimination and demonstrate this via better performance. 7 | We will include a diagram of the an MLP showing the usual connectionist view of a NN. 8 | Listing 4.1 will also move out the hyper-parameters to the command-line and introduce these. 9 | We will have a diagram plotting the training set learning curves of the logistic regression model side by side with the MLP. 10 | 11 | Changes: 12 | -- everything is local 13 | -- cmd-line args 14 | -- epochsize 15 | -- transfer 16 | -- clear-state 17 | -- nn.Convert 18 | --]] 19 | 20 | require "nn" 21 | local dl = require "dataload" 22 | require "optim" 23 | require "dpnn" -- needed for nn.Convert 24 | 25 | -- options : hyper-parameters and such 26 | local cmd = torch.CmdLine() 27 | cmd:text() 28 | cmd:text('Training a multi-layer perceptron on MNIST') 29 | cmd:text('Options:') 30 | cmd:option('-lr', 0.1, 'learning rate') 31 | cmd:option('-batchsize', 32, 'number of samples per batch') 32 | cmd:option('-epochsize', -1, 'number of samples per epoch') 33 | cmd:option('-hiddensize', '{200,200}', 'number of hidden units') 34 | cmd:option('-transfer', 'ReLU', 'non-linear transfer function') 35 | cmd:option('-maxepoch', 200, 'stop after this many epochs') 36 | cmd:option('-minloss', 0.0001, 'stop when training set loss is lower than this') 37 | local opt = cmd:parse(arg or {}) 38 | 39 | -- process cmd-line options 40 | opt.hiddensize = loadstring(" return "..opt.hiddensize)() 41 | opt.epochsize = opt.epochsize > 0 and opt.epochsize or nil 42 | 43 | -- load training set 44 | local trainset = dl.loadMNIST() 45 | 46 | -- define model and criterion 47 | local inputsize = 28*28 48 | 49 | local model = nn.Sequential() 50 | model:add(nn.Convert()) 51 | model:add(nn.View(inputsize)) 52 | 53 | for i,hiddensize in ipairs(opt.hiddensize) do 54 | model:add(nn.Linear(inputsize, hiddensize)) 55 | model:add(nn[opt.transfer]()) 56 | inputsize = hiddensize 57 | end 58 | 59 | model:add(nn.Linear(inputsize, 10)) 60 | model:add(nn.LogSoftMax()) 61 | 62 | local criterion = nn.ClassNLLCriterion() 63 | 64 | -- optimize model using SGD 65 | print("Epoch, Average Loss") 66 | for epoch=1,opt.maxepoch do 67 | local sumloss, count = 0, 0 68 | for i, input, target in trainset:sampleiter(opt.batchsize, opt.epochsize) do 69 | local output = model:forward(input) 70 | sumloss = sumloss + criterion:forward(output, target) 71 | count = i 72 | 73 | local gradOutput = criterion:backward(output, target) 74 | model:zeroGradParameters() 75 | model:backward(input, gradOutput) 76 | 77 | 
model:updateParameters(opt.lr) 78 | end 79 | local avgloss = sumloss/count 80 | print(string.format("%d, %f", epoch, avgloss)) 81 | if avgloss < opt.minloss then 82 | break 83 | end 84 | end 85 | 86 | -- evaluate empirical risk and confusion matrix 87 | local cm = optim.ConfusionMatrix(10) 88 | local sumloss, count = 0, 0 89 | for i, input, target in trainset:subiter(opt.batchsize) do 90 | local output = model:forward(input) 91 | sumloss = sumloss + criterion:forward(output, target) 92 | cm:batchAdd(output, target) 93 | count = i 94 | end 95 | print(cm) 96 | print("Avg NLL:"..sumloss/count) 97 | 98 | model:clearState() 99 | torch.save("mlp-mnist.t7", model) 100 | -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/trainmlp-xor.lua: -------------------------------------------------------------------------------- 1 | require "nn" 2 | require "optim" 3 | 4 | input = torch.Tensor({{0,0},{0,1},{1,0},{1,1}}) 5 | target = torch.Tensor({{0},{1},{1},{0}}) 6 | 7 | hiddensize = 2 8 | mlp = nn.Sequential() 9 | :add(nn.Linear(2, hiddensize)) 10 | :add(nn.Tanh()) 11 | :add(nn.Linear(hiddensize, 1)) 12 | :add(nn.Sigmoid()) 13 | 14 | bce = nn.BCECriterion() 15 | 16 | for i=1,1000 do 17 | -- sample 18 | local idx = math.random(1,4) 19 | local x, y = input[idx], target[idx] 20 | 21 | -- forward 22 | local y_hat = mlp:forward(x) 23 | local loss = bce:forward(y_hat, y) 24 | 25 | -- backward 26 | local grad_y_hat = bce:backward(y_hat, y) 27 | mlp:zeroGradParameters() 28 | mlp:backward(x, grad_y_hat) 29 | 30 | -- update 31 | mlp:updateParameters(0.1) 32 | end 33 | 34 | -- confusion matrix 35 | cm = optim.ConfusionMatrix(2) 36 | 37 | for i=1,4 do 38 | local x, y = input[i], target[i] 39 | local y_hat = mlp:forward(x) 40 | cm:add(y_hat[1] > 0.5 and 2 or 1, y[1]+1) 41 | end 42 | 43 | --print(cm) 44 | cm:updateValids() 45 | assert(cm.totalValid == 1, "Run the script again until you get 100% accuracy") 46 | 47 | -- get the classification boundary curve 48 | 49 | print("x1, x2, y_hat") 50 | input = torch.Tensor(2) 51 | for x1=0,1,0.01 do 52 | for x2=0,1,0.01 do 53 | input[1], input[2] = x1, x2 54 | local output = mlp:forward(input)[1] 55 | if output < 0.52 and output > 0.48 then 56 | print(string.format("%f, %f, %f", x1, x2, output)) 57 | end 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/xor-curve.csv: -------------------------------------------------------------------------------- 1 | x1, x2, y_hat 2 | 0.000000, 0.560000, 0.482105 3 | 0.000000, 0.570000, 0.499840 4 | 0.000000, 0.580000, 0.517446 5 | 0.010000, 0.570000, 0.481324 6 | 0.010000, 0.580000, 0.499063 7 | 0.010000, 0.590000, 0.516678 8 | 0.020000, 0.580000, 0.480542 9 | 0.020000, 0.590000, 0.498287 10 | 0.020000, 0.600000, 0.515908 11 | 0.030000, 0.600000, 0.497510 12 | 0.030000, 0.610000, 0.515139 13 | 0.040000, 0.610000, 0.496733 14 | 0.040000, 0.620000, 0.514369 15 | 0.050000, 0.620000, 0.495955 16 | 0.050000, 0.630000, 0.513598 17 | 0.060000, 0.630000, 0.495178 18 | 0.060000, 0.640000, 0.512827 19 | 0.070000, 0.640000, 0.494400 20 | 0.070000, 0.650000, 0.512056 21 | 0.080000, 0.650000, 0.493622 22 | 0.080000, 0.660000, 0.511285 23 | 0.090000, 0.660000, 0.492844 24 | 0.090000, 0.670000, 0.510513 25 | 0.100000, 0.670000, 0.492065 26 | 0.100000, 0.680000, 0.509741 27 | 0.110000, 0.680000, 0.491286 28 | 0.110000, 0.690000, 0.508969 29 | 0.120000, 0.690000, 0.490507 30 | 0.120000, 0.700000, 
0.508196 31 | 0.130000, 0.700000, 0.489728 32 | 0.130000, 0.710000, 0.507423 33 | 0.140000, 0.710000, 0.488949 34 | 0.140000, 0.720000, 0.506650 35 | 0.150000, 0.720000, 0.488169 36 | 0.150000, 0.730000, 0.505876 37 | 0.160000, 0.730000, 0.487390 38 | 0.160000, 0.740000, 0.505102 39 | 0.170000, 0.740000, 0.486610 40 | 0.170000, 0.750000, 0.504328 41 | 0.180000, 0.750000, 0.485830 42 | 0.180000, 0.760000, 0.503553 43 | 0.190000, 0.760000, 0.485050 44 | 0.190000, 0.770000, 0.502778 45 | 0.200000, 0.770000, 0.484269 46 | 0.200000, 0.780000, 0.502003 47 | 0.200000, 0.790000, 0.519601 48 | 0.210000, 0.780000, 0.483489 49 | 0.210000, 0.790000, 0.501228 50 | 0.210000, 0.800000, 0.518833 51 | 0.220000, 0.790000, 0.482708 52 | 0.220000, 0.800000, 0.500452 53 | 0.220000, 0.810000, 0.518065 54 | 0.230000, 0.800000, 0.481927 55 | 0.230000, 0.810000, 0.499676 56 | 0.230000, 0.820000, 0.517297 57 | 0.240000, 0.810000, 0.481146 58 | 0.240000, 0.820000, 0.498900 59 | 0.240000, 0.830000, 0.516528 60 | 0.250000, 0.820000, 0.480365 61 | 0.250000, 0.830000, 0.498124 62 | 0.250000, 0.840000, 0.515759 63 | 0.260000, 0.840000, 0.497347 64 | 0.260000, 0.850000, 0.514989 65 | 0.270000, 0.850000, 0.496570 66 | 0.270000, 0.860000, 0.514220 67 | 0.280000, 0.860000, 0.495793 68 | 0.280000, 0.870000, 0.513449 69 | 0.290000, 0.870000, 0.495016 70 | 0.290000, 0.880000, 0.512679 71 | 0.300000, 0.880000, 0.494238 72 | 0.300000, 0.890000, 0.511908 73 | 0.310000, 0.890000, 0.493460 74 | 0.310000, 0.900000, 0.511137 75 | 0.320000, 0.900000, 0.492682 76 | 0.320000, 0.910000, 0.510365 77 | 0.330000, 0.910000, 0.491904 78 | 0.330000, 0.920000, 0.509593 79 | 0.340000, 0.920000, 0.491125 80 | 0.340000, 0.930000, 0.508821 81 | 0.350000, 0.930000, 0.490347 82 | 0.350000, 0.940000, 0.508049 83 | 0.360000, 0.940000, 0.489568 84 | 0.360000, 0.950000, 0.507276 85 | 0.370000, 0.950000, 0.488789 86 | 0.370000, 0.960000, 0.506503 87 | 0.380000, 0.960000, 0.488009 88 | 0.380000, 0.970000, 0.505729 89 | 0.390000, 0.970000, 0.487230 90 | 0.390000, 0.980000, 0.504956 91 | 0.400000, 0.980000, 0.486450 92 | 0.400000, 0.990000, 0.504181 93 | 0.410000, 0.990000, 0.485671 94 | 0.570000, 0.000000, 0.487278 95 | 0.580000, 0.000000, 0.508046 96 | 0.580000, 0.010000, 0.486961 97 | 0.590000, 0.010000, 0.507734 98 | 0.590000, 0.020000, 0.486644 99 | 0.600000, 0.020000, 0.507421 100 | 0.600000, 0.030000, 0.486328 101 | 0.610000, 0.030000, 0.507109 102 | 0.610000, 0.040000, 0.486011 103 | 0.620000, 0.040000, 0.506796 104 | 0.620000, 0.050000, 0.485695 105 | 0.630000, 0.050000, 0.506484 106 | 0.630000, 0.060000, 0.485378 107 | 0.640000, 0.060000, 0.506171 108 | 0.640000, 0.070000, 0.485061 109 | 0.650000, 0.070000, 0.505859 110 | 0.650000, 0.080000, 0.484745 111 | 0.660000, 0.080000, 0.505546 112 | 0.660000, 0.090000, 0.484428 113 | 0.670000, 0.090000, 0.505234 114 | 0.670000, 0.100000, 0.484112 115 | 0.680000, 0.100000, 0.504921 116 | 0.680000, 0.110000, 0.483795 117 | 0.690000, 0.110000, 0.504609 118 | 0.690000, 0.120000, 0.483479 119 | 0.700000, 0.120000, 0.504296 120 | 0.700000, 0.130000, 0.483162 121 | 0.710000, 0.130000, 0.503983 122 | 0.710000, 0.140000, 0.482846 123 | 0.720000, 0.140000, 0.503671 124 | 0.720000, 0.150000, 0.482529 125 | 0.730000, 0.150000, 0.503358 126 | 0.730000, 0.160000, 0.482213 127 | 0.740000, 0.160000, 0.503046 128 | 0.740000, 0.170000, 0.481897 129 | 0.750000, 0.170000, 0.502733 130 | 0.750000, 0.180000, 0.481580 131 | 0.760000, 0.180000, 0.502420 132 | 0.760000, 0.190000, 0.481264 133 | 0.770000, 0.190000, 0.502108 134 | 
0.770000, 0.200000, 0.480948 135 | 0.780000, 0.200000, 0.501795 136 | 0.780000, 0.210000, 0.480631 137 | 0.790000, 0.210000, 0.501482 138 | 0.790000, 0.220000, 0.480315 139 | 0.800000, 0.220000, 0.501170 140 | 0.810000, 0.230000, 0.500857 141 | 0.820000, 0.240000, 0.500544 142 | 0.830000, 0.250000, 0.500232 143 | 0.840000, 0.260000, 0.499919 144 | 0.850000, 0.270000, 0.499606 145 | 0.860000, 0.280000, 0.499294 146 | 0.870000, 0.280000, 0.519955 147 | 0.870000, 0.290000, 0.498981 148 | 0.880000, 0.290000, 0.519647 149 | 0.880000, 0.300000, 0.498668 150 | 0.890000, 0.300000, 0.519339 151 | 0.890000, 0.310000, 0.498355 152 | 0.900000, 0.310000, 0.519031 153 | 0.900000, 0.320000, 0.498043 154 | 0.910000, 0.320000, 0.518724 155 | 0.910000, 0.330000, 0.497730 156 | 0.920000, 0.330000, 0.518416 157 | 0.920000, 0.340000, 0.497417 158 | 0.930000, 0.340000, 0.518108 159 | 0.930000, 0.350000, 0.497104 160 | 0.940000, 0.350000, 0.517800 161 | 0.940000, 0.360000, 0.496792 162 | 0.950000, 0.360000, 0.517492 163 | 0.950000, 0.370000, 0.496479 164 | 0.960000, 0.370000, 0.517184 165 | 0.960000, 0.380000, 0.496166 166 | 0.970000, 0.380000, 0.516876 167 | 0.970000, 0.390000, 0.495854 168 | 0.980000, 0.390000, 0.516568 169 | 0.980000, 0.400000, 0.495541 170 | 0.990000, 0.400000, 0.516260 171 | 0.990000, 0.410000, 0.495228 172 | -------------------------------------------------------------------------------- /04-Generalizing-deep-neural-networks/xor-mlp.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nicholas-leonard/torch-in-action/40698bad807a57252f33eb2a888ecead05b5ebcb/04-Generalizing-deep-neural-networks/xor-mlp.xlsx -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Torch in Action 2 | 3 | This repository contains the code for the Torch in Action book. 4 | 5 | [Chapter 1](01-Meeting-Torch): Meeting Torch 6 | * [facedetect](01-Meeting-Torch/facedetect): toy face detection dataset (directory with only four samples); 7 | * [train.lua](01-Meeting-Torch/train.lua): example face detection training script (listings 1.1, 1.2 and 1.3); 8 | 9 | [Chapter 2](02-Preparing-a-dataset): Preparing a dataset 10 | * [mnist](02-Preparing-a-dataset/mnist): MNIST dataset in binary format as downloaded from [yann.lecun.com](http://yann.lecun.com/exdb/mnist/); 11 | * [createdataset.lua](02-Preparing-a-dataset/createdataset.lua): code for serializing the MNIST dataset into `.t7` files and generating samples (section 2.3); 12 | * [dataloader.lua](02-Preparing-a-dataset/dataloader.lua): code for listing 2.1, 2.2, 2.3 and 2.5. Defines the `DataLoader` and `TensorLoader` classes); 13 | * [iteratedataset.lua](02-Preparing-a-dataset/iteratedataset.lua): code for listing 2.5. This script tests the `dataloader.lua` file by iterating through it. Only works if `createdataset.lua` was executed before hand; 14 | * [getmnistsample.lua](02-Preparing-a-dataset/getmnistsample.lua): script for generating MNIST samples consolidated as a single image (used to generate figure 2.1); 15 | 16 | [Chapter 3](03-Training-simple-neural-networks): Training simple neural networks 17 | * [trainlogreg.lua](03-Training-simple-neural-networks/trainlogreg.lua): Training script for applying binary logistic regression on OR dataset. 
The model is trained using stochastic gradient descent (listing 3.1); 18 | * [logreg.log](03-Training-simple-neural-networks/logreg.log): log file created by running `th trainlogreg.lua > logreg.log`; 19 | * [trainlogreg-mnist.lua](03-Training-simple-neural-networks/trainlogreg-mnist.lua): Script for training a multinomial logistic regression model (saved as `logreg-mnist.t7`) using SGD on the MNIST dataset. Training stops after 200 epochs where each epoch consists of 10000 samples divided into mini-batches of 32 random samples, or reaching an estimated empirical risk lower than 0.007, whichever comes first. The resulting model is evaluated on the entire training set of 50000 samples and saved to disk (listing 3.2); 20 | * [logreg-mnist.log](03-Training-simple-neural-networks/logreg-mnist.log): log file created by running `th trainlogreg-mnist.lua > logreg-mnist.log`. The data can be used to generate a learning curve. Open the file from your favorite spreadsheet application (Microsoft Excel, LibreOffice Calc, etc.) and specify that values are separated by semicolons; 21 | * [backward.lua](03-Training-simple-neural-networks/backward.lua): demonstrates gradient descent through a criterion. Using the input as a parameter, the loss is minimized by taking a step in the opposite direction of the gradient (section 8.1.3); 22 | 23 | [Chapter 4](04-Generalizing-deep-neural-networks): Generalizing deep neural networks 24 | * [tanh.xlsx](04-Generalizing-deep-neural-networks/tanh.xlsx): plot of the hyperbolic tangent activation function (figure 4.2); 25 | * [trainmlp-xor.lua](04-Generalizing-deep-neural-networks/trainmlp-xor.lua): script for training an MLP with one hidden layer composed of 2 units on the XOR dataset. Used to generate `xor-mlp.xlsx` and figure 4.3; 26 | * [xor-mlp.xlsx](04-Generalizing-deep-neural-networks/xor-mlp.xlsx): diagram outlining the boundaries of an MLP trained on the XOR dataset (figure 4.3); 27 | * [overfitting.xlsx](04-Generalizing-deep-neural-networks/overfitting.xlsx): contains learning curve and model overfitting example (figures 4.4 and 4.5); 28 | * [trainmlp-mnist.lua](04-Generalizing-deep-neural-networks/trainmlp-mnist.lua): upgrades [trainlogreg-mnist.lua](03-Training-simple-neural-networks/trainlogreg-mnist.lua) by moving the definition of hyper-parameters to the cmd-line (listings 4.1 and 4.2);
29 | * [trainmlp-mnist-crossvalidate.lua](04-Generalizing-deep-neural-networks/trainmlp-mnist-crossvalidate.lua): upgrades [trainmlp-mnist.lua](04-Generalizing-deep-neural-networks/trainmlp-mnist.lua) with cross-validation (listing 4.3); 30 | * [trainmlp-mnist-earlystop.lua](04-Generalizing-deep-neural-networks/trainmlp-mnist-earlystop.lua): upgrades [trainmlp-mnist-crossvalidate.lua](04-Generalizing-deep-neural-networks/trainmlp-mnist-crossvalidate.lua) with early-stopping (listing 4.4); 31 | * [trainmlp-mnist-weightdecay.lua](04-Generalizing-deep-neural-networks/trainmlp-mnist-weightdecay.lua): upgrades [trainmlp-mnist-earlystop.lua](04-Generalizing-deep-neural-networks/trainmlp-mnist-earlystop.lua) with weight decay regularization (listing 4.5); 32 | * [trainmlp-mnist-hyperopt.lua](04-Generalizing-deep-neural-networks/trainmlp-mnist-hyperopt.lua): upgrades [trainmlp-mnist-weightdecay.lua](04-Generalizing-deep-neural-networks/trainmlp-mnist-weightdecay.lua) to facilitate hyper-parameter optimization (listing 4.6); example invocations and a short model-evaluation sketch are given at the end of this document; 33 | * [hyperopt-mnist.xlsx](04-Generalizing-deep-neural-networks/hyperopt-mnist.xlsx): spreadsheet used to hyper-optimize the [trainmlp-mnist-hyperopt.lua](04-Generalizing-deep-neural-networks/trainmlp-mnist-hyperopt.lua) script (figures 4.7 and 4.8); 34 | * [relu.xlsx](04-Generalizing-deep-neural-networks/relu.xlsx): plot of the rectified linear unit (figure 4.9). 35 | --------------------------------------------------------------------------------
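The chapter 4 scripts read their hyper-parameters from the command line via `torch.CmdLine` (see the option definitions at the top of each script). As a usage note, hypothetical invocations of the hyper-parameter optimization script might look like `th trainmlp-mnist-hyperopt.lua -progress` to run with the defaults, or `th trainmlp-mnist-hyperopt.lua -lr 0.05 -hiddensize '{400,400}' -transfer Tanh -weightdecay 1e-4 -progress` to try a different learning rate, architecture, transfer function and weight decay factor. The flag names are the ones defined in the script; the values shown here are only illustrative.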
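Once one of the training scripts has saved a model, it can be reloaded and re-evaluated. The following is a minimal sketch (not one of the book listings), assuming `trainmlp-mnist-earlystop.lua` has already been run in the current directory and produced `mlp-mnist-earlystop.t7`:

```lua
require "nn"
require "dpnn"   -- defines nn.Convert, which is needed to deserialize the saved model
require "optim"
local dl = require "dataload"

-- load MNIST; only the validation set is used here
local trainset, validset = dl.loadMNIST()

-- reload the best model found by early-stopping
local model = torch.load("mlp-mnist-earlystop.t7")
model:evaluate()

-- measure classification accuracy on the validation set
local cm = optim.ConfusionMatrix(10)
for i, input, target in validset:subiter(32) do
   cm:batchAdd(model:forward(input), target)
end
cm:updateValids()

print(string.format("Validation accuracy: %.2f%%", cm.totalValid * 100))
print("Hyper-parameters stored with the model:")
print(model.opt)
```

The last two lines rely on the fact that the training script attaches its option table to the model (`model.opt = opt`) before calling `torch.save`.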