# ├── Data Driven Code 101.pdf ├── README.md ├── demo.py ├── simple.py └── multiple.py
#
# /Data Driven Code 101.pdf:
# https://raw.githubusercontent.com/atmb4u/data-driven-code/HEAD/Data Driven Code 101.pdf
#
# /README.md:
# ### Data Driven Code 101 - Pycon Canada 2016
#
# **slides** - https://slideshare.net/atmb4u/data-driven-code
#
# **simple.py** - if you are new to neural networks, this is where you should look at
#
# **multiple.py** - same code being reused in different data scenarios in a very simple example. Code that learns from the data.
#
# **demo.py** - similar to simple.py with just code and no documentation.
#
# /demo.py:
from numpy import array, random, dot, exp

# XOR truth table: inputs are all 2-bit combinations, outputs the XOR of the bits
inputs = array([[0, 0], [0, 1], [1, 0], [1, 1]])
outputs = array([[0], [1], [1], [0]])


def learn(X, y, iterations=10000, hidden=16):
    """Train a 2-layer (one hidden layer) sigmoid network with plain backprop.

    Accepts
        X - input, shape (n_samples, n_features)
        y - target, shape (n_samples, 1), values in [0, 1]
        iterations - number of full-batch training passes (default 10000,
                     the originally hard-coded value)
        hidden - number of hidden-layer neurons (default 16, as before)
    Returns a (layer1_weights, layer2_weights) tuple for use with predict().
    """
    # initialize both layers with random weights in [-1, 1)
    l1_w = 2 * random.random((X.shape[1], hidden)) - 1
    l2_w = 2 * random.random((hidden, 1)) - 1
    for _ in range(iterations):  # range(): works on Python 3 (xrange is Py2-only)
        # forward pass - sigmoid activation at each layer
        l1 = 1 / (1 + exp(-dot(X, l1_w)))
        l2 = 1 / (1 + exp(-dot(l1, l2_w)))
        # backward pass - error scaled by the sigmoid derivative s*(1-s)
        l2_delta = (y - l2) * (l2 * (1 - l2))
        l1_delta = l2_delta.dot(l2_w.T) * (l1 * (1 - l1))
        # weight update (implicit learning rate of 1)
        l2_w += l1.T.dot(l2_delta)
        l1_w += X.T.dot(l1_delta)
    return (l1_w, l2_w)


def predict(X, weights):
    """Forward-pass X through the (layer1, layer2) weight tuple; returns sigmoid output."""
    l1 = 1 / (1 + exp(-dot(X, weights[0])))
    l2 = 1 / (1 + exp(-dot(l1, weights[1])))
    return l2


if __name__ == "__main__":
    # demo: learn the XOR table, then print the network's output for each row.
    # Guarded so importing this file does not trigger the training run.
    xor_weights = learn(inputs, outputs)
    test_set = [[0, 0], [0, 1], [1, 0], [1, 1]]
    for test_item in test_set:
        xor_prediction = predict(test_item, xor_weights)
        # print() function form runs on both Python 2 and 3
        print(str(test_item) + "\t" + str(xor_prediction))
# --------------------------------------------------------------------------------
# /simple.py:
from numpy import array, random, dot, exp

"""
This program is a basic 2 layer neural network
input -> input layer -> hidden layer -> output
"""


# LEARNING CODE
def learn(X, y, iterations=100000, hidden=16):
    """
    Training phase of a neural network.
    Accepts
        X - input, shape (n_samples, n_features)
        y - output, shape (n_samples, 1), values in [0, 1]
        iterations - number of training passes (default 100000, the original
                     hard-coded value; exposed as a parameter for reuse, matching
                     the signature style of multiple.py's learn)
        hidden - number of hidden-layer neurons (default 16, as before)
    Returns a (layer1_weights, layer2_weights) tuple for use with predict().
    """
    # initialize 2 layers with random weights from -1 to 1
    layer1_weights = 2 * random.random((X.shape[1], hidden)) - 1
    layer2_weights = 2 * random.random((hidden, 1)) - 1
    # train the network; range() is Python 3 compatible (xrange is Py2-only)
    for _ in range(iterations):
        # Find the results of the existing network with current weights and input - Sigmoid function
        layer1_estimation = 1 / (1 + exp(-dot(X, layer1_weights)))
        layer2_estimation = 1 / (1 + exp(-dot(layer1_estimation, layer2_weights)))
        # Calculate the error, scaled by the derivative of the Sigmoid: s * (1 - s)
        layer2_estimation_delta = (y - layer2_estimation) * (layer2_estimation * (1 - layer2_estimation))
        layer1_estimation_delta = layer2_estimation_delta.dot(layer2_weights.T) * (layer1_estimation * (1 - layer1_estimation))
        # Correct the weights for next pass (implicit learning rate of 1)
        layer2_weights += layer1_estimation.T.dot(layer2_estimation_delta)
        layer1_weights += X.T.dot(layer1_estimation_delta)
    return (layer1_weights, layer2_weights)  # return a tuple of the weights for both layers


# PREDICTION CODE
def predict(X, weights):
    """
    Runs the value in X through a 2 layer neural network with the supplied
    (layer1_weights, layer2_weights) tuple; returns the sigmoid output layer.
    """
    layer1_estimation = 1 / (1 + exp(-dot(X, weights[0])))
    layer2_estimation = 1 / (1 + exp(-dot(layer1_estimation, weights[1])))
    return layer2_estimation


# DATA
# Binary AND operator
X1 = array([[0, 0], [0, 1], [1, 0], [1, 1]])
y1 = array([[0, 0, 0, 1]]).T  # .T
to get transpose of 4x1 matrix 47 | 48 | # use the learn method to train over the input and output 49 | and_weights = learn(X1, y1) 50 | 51 | test_set = [[0, 0], [0, 1], [1, 0], [1, 1]] 52 | print "Predictions from AND gate trained neural network" 53 | # for each item in the test_set use the trained neural network model to predict 54 | for test_item in test_set: 55 | and_prediction = predict(test_item, and_weights) 56 | print str(test_item)+"\t"+str(and_prediction) -------------------------------------------------------------------------------- /multiple.py: -------------------------------------------------------------------------------- 1 | try: 2 | import numpy as np 3 | except ImportError: 4 | print "Install numpy - pip install numpy" 5 | """ 6 | This program is a basic 2 layer neural network 7 | input -> input layer -> hidden layer -> output 8 | 9 | There are 4 different examples, simulating AND, OR, XOR and NAND gates with the same code. 10 | """ 11 | 12 | # DATA 13 | # Binary AND operator 14 | X1 = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) 15 | y1 = np.array([[0, 0, 0, 1]]).T 16 | # Binary OR operator 17 | X2= np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) 18 | y2 = np.array([[0, 1, 1, 1]]).T 19 | # Binary XOR operator 20 | X3= np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) 21 | y3 = np.array([[0, 1, 1, 0]]).T 22 | # Binary NAND operator 23 | X4= np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) 24 | y4 = np.array([[1, 1, 1, 0]]).T 25 | 26 | 27 | # LEARNING CODE 28 | 29 | def learn(X,y,n,h=16): 30 | """ 31 | Training phase of a neural network 32 | Accepts an 33 | X - input, 34 | y - output 35 | n - number of training iterations, 36 | h - number of hidden neurons 37 | """ 38 | # initialized 2 layers with random weights from -1 to 1 39 | layer1_weights = 2 * np.random.random((X.shape[1], h)) - 1 40 | layer2_weights = 2 * np.random.random((h, 1)) - 1 41 | # train the network for n iterations 42 | for i in xrange(n): 43 | # Find the results of the existing network with current 
weights and input - Sigmoid function 44 | l1 = 1/(1+np.exp(-(np.dot(X,layer1_weights)))) 45 | l2 = 1/(1+np.exp(-(np.dot(l1,layer2_weights)))) 46 | # Calculate the error - derivative of the Sigmoid function 47 | l2_delta = (y - l2)*(l2*(1-l2)) 48 | l1_delta = l2_delta.dot(layer2_weights.T) * (l1 * (1-l1)) 49 | # Correct the weights for next pass 50 | layer2_weights += l1.T.dot(l2_delta) 51 | layer1_weights += X.T.dot(l1_delta) 52 | return (layer1_weights, layer2_weights) # return a tuple for all the weights associated for both layers 53 | 54 | # PREDICTION CODE 55 | 56 | def predict(X, weights): 57 | l1 = 1/(1+np.exp(-(np.dot(X, weights[0])))) 58 | l2 = 1/(1+np.exp(-(np.dot(l1, weights[1])))) 59 | return l2 60 | 61 | # Same neural network code used to learn about different gates 62 | 63 | 64 | and_weights = learn(X1, y1, 10000) 65 | # print and_weights 66 | or_weights = learn(X2,y2, 10000) 67 | # print or_weights 68 | xor_weights = learn(X3,y3, 10000) 69 | # print xor_weights 70 | nand_weights = learn(X4,y4, 10000) 71 | # print xor_weights 72 | test_set = [[0, 0], [0, 1], [1, 0], [1, 1]] 73 | print "ITEM\tAND\tOR\tXOR\tNAND" 74 | for test_item in test_set: 75 | and_prediction = predict(test_item, and_weights) 76 | or_prediction = predict(test_item, or_weights) 77 | xor_prediction = predict(test_item, xor_weights) 78 | nand_prediction = predict(test_item, nand_weights) 79 | # round will round off the floting point results to the nearest number (in this case, 0 or 1) 80 | print str(test_item)+"\t"+str(round(and_prediction))+"\t"+str(round(or_prediction))+ \ 81 | "\t"+str(round(xor_prediction))+"\t"+str(round(nand_prediction)) --------------------------------------------------------------------------------