├── README.md
├── TF_basics.py
└── TF_linear_regression.py

/README.md:
--------------------------------------------------------------------------------
# TensorFlow
## Tutorials from syntax basics to neural networks

FYI, these scripts are written using the Atom editor with the Hydrogen package extension, and I recommend running them in an interactive coding environment. If you prefer not to run the code inline/in real time, feel free to copy and paste it into Jupyter notebooks and run it in cells, lines, sections, blocks, or however you code best! Happy coding <3

## Installation
The easiest way to download + install this tutorial is by using git from the command line:

    git clone https://github.com/AstronomerAmber/TensorFlow.git

To run the scripts, you also need the latest version of TensorFlow. To install it:

    pip install tensorflow

or (if you want GPU support):

    pip install tensorflow-gpu

## Environment
I recommend creating a conda environment so you do not destroy your main installation in case you make a mistake somewhere:

    conda create --name tf_3.6 python=3.6 ipykernel

You can activate the new environment by running the following (on Linux):

    source activate tf_3.6

And deactivate it:

    source deactivate
--------------------------------------------------------------------------------
/TF_basics.py:
--------------------------------------------------------------------------------
# Hello everyone, and welcome to an introduction to TensorFlow syntax and basics!
# This tutorial is brought to you by @astronomer_amber <3
#  - Using Python 3.6 and the Atom editor w/ Hydrogen;
#    feel free to copy and paste into Jupyter notebooks.
# First, make sure that you have TensorFlow installed.

import tensorflow as tf
import numpy as np
print(tf.__version__)

# Let's try running a session
hmm = tf.constant("you are fabulous")
with tf.Session() as sess:  # 'with' runs our operations in a block of code, then closes the Session
    result = sess.run(hmm)

print(result)  # the b prefix means the result is a bytes literal

a = tf.constant(2)
b = tf.constant(5)

with tf.Session() as sess:
    result = sess.run(a*b)

print(result)

# Woohoo, you're doing awesome! Now let's play with some matrices
matrixA = tf.constant([[2], [3]])
matrixA.get_shape()
matrixB = tf.constant([[10, 1], [1, 10]])
matrixB
matrixB.get_shape()
matrixC = tf.fill([2, 2], 7)  # 2x2 matrix filled with 7's

with tf.Session() as sess:
    result1 = tf.matmul(matrixB, matrixA)
    x = sess.run(result1)
    result2 = tf.matmul(matrixC, matrixA)
    y = sess.run(result2)

print(x)
print(y)

sess = tf.InteractiveSession()  # Lets me run a session in between cells
# (only useful for Jupyter notebooks and Hydrogen)
result1 = tf.matmul(matrixB, matrixA)
sess.run(result1)
result1.eval()  # another way to view results
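# Housekeeping note (a small addition, not in the original tutorial): unlike
# the 'with' blocks above, an InteractiveSession stays open until you close it
# yourself, so free its resources once you're done experimenting:
sess.close()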
# Graphs
#  - sets of nodes (vertices) connected by 'edges'
#  - in TF these nodes represent operations
#  - Tensor objects include: variables (which must be initialized) & placeholders
# Let's construct and execute!

node1 = tf.constant(10)  # input variable 1
node2 = tf.constant(-2)  # input variable 2
node3 = node1 + node2    # operation

with tf.Session() as sess:
    G1 = sess.run(node3)

print(G1)

VariableA = tf.Variable(10)
TensorA = tf.random_uniform((2, 2), 0, 1)  # random values from a uniform distribution
VariableB = tf.Variable(initial_value=TensorA)

init = tf.global_variables_initializer()
# run the initialization
with tf.Session() as sess:
    G2 = sess.run(init)
    VarB = sess.run(VariableB)

print(VarB)

PlaceholderA = tf.placeholder(tf.float32)  # must specify the dtype

np.random.seed(101)
tf.set_random_seed(101)

data1 = np.random.uniform(0, 100, (3, 3))
data2 = np.random.uniform(0, 100, (3, 1))

a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)

with tf.Session() as sess:
    add_a_b = sess.run(a+b, feed_dict={a: 5, b: 10})  # can also multiply
    add_data1_data2 = sess.run(a+b, feed_dict={a: data1, b: data2})

print(add_a_b)
print(add_data1_data2)
--------------------------------------------------------------------------------
/TF_linear_regression.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

%config InlineBackend.figure_format = 'svg'  # IPython/Hydrogen magic; remove if running as a plain script

learning_rate = 0.01
epochs = 200
n_samples = 30
X_train = np.linspace(0, 20, n_samples)
y_train = 2*X_train + np.random.randn(n_samples)

plt.figure(1, figsize=(6, 4))
plt.ylabel('y_train', fontsize=20)
plt.xlabel('X_train', fontsize=20)

plt.scatter(X_train, y_train, marker='*', c='purple', s=13, label='training data')
plt.legend(loc='upper left', prop={'size': 10}, frameon=False)
leg = plt.gca().get_legend()
leg.legendHandles[0].set_visible(False)
plt.tick_params(labelsize=20)

plt.show()

# Neural network time
n_features = 10
n_neurons = 3

x = tf.placeholder(tf.float32, (None, n_features))  # (samples, n_features)
W = tf.Variable(tf.random_normal([n_features, n_neurons]), name='weights')
b = tf.ones([n_neurons], name='bias')  # bias fixed at ones here (a constant, not a trainable Variable)

z = tf.add(tf.matmul(x, W), b)
a = tf.sigmoid(z)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    layer = sess.run(a, feed_dict={x: np.random.random([1, n_features])})

print(layer)

# We need to define/use a cost function to adjust the weights (W) and bias (b), AKA backpropagation

X_data = np.linspace(0, 10, 10) - np.random.uniform(-2.0, 2.0, 10)
# or: X_data = tf.placeholder(tf.float32)
y_data = np.linspace(0, 10, 10) - np.random.uniform(-2.0, 2.0, 10)
# or: y_data = tf.placeholder(tf.float32)

# y = mx + b, y_pred = W*X + b
m = tf.Variable(np.random.randn(), name='weights')
b = tf.Variable(np.random.randn(), name='bias')

cost = 0

for x, y in zip(X_data, y_data):
    y_predicted = m*x + b
    cost += (y - y_predicted)**2  # sum-of-squared-errors cost function

# The cost function can also be written in vectorized form as:
# y_predicted = m*X_data + b
# cost = tf.reduce_sum((y_data - y_predicted)**2) / (2*len(X_data))
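# Aside (a minimal sketch, not part of the original tutorial): the same
# sum-of-squared-errors cost built as a single vectorized graph op. It is
# given a different name so it does not shadow the `cost` that the
# optimizer below actually minimizes.
cost_vectorized = tf.reduce_sum(tf.square(m*X_data + b - y_data))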
# Optimize!
# (note: this uses a hard-coded rate of 0.001 rather than the learning_rate defined at the top)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)  # initialize the variables
    training = 100  # how many training steps to actually perform

    for i in range(training):
        sess.run(optimizer)

    m_model, b_model = sess.run([m, b])

plt.plot(X_data, y_data, 'go')
# y = mx + b
y = m_model*X_data + b_model
plt.plot(X_data, y, 'r-')
plt.show()
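# Sanity check (an addition, not in the original tutorial): X_data and y_data
# are both np.linspace(0, 10, 10) plus symmetric noise, so the fit should
# recover a slope near 1 and an intercept near 0.
print('learned slope: {:.3f}, intercept: {:.3f}'.format(m_model, b_model))
--------------------------------------------------------------------------------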