├── .gitignore
├── Dockerfile
├── data
│   ├── friday.jpg
│   ├── fire_theft.xls
│   ├── friday.tfrecord
│   ├── heart.csv
│   └── heart.txt
├── setup
│   ├── requirements.txt
│   └── setup_instruction.md
├── README.md
├── docker-compose.yml
├── utils.py
├── 02_queue_example.py
├── 01_linear_regression_starter.py
├── 01_linear_regression_sol.py
├── 01_logistic_regression_mnist_starter.py
├── 01_linear_regression_starter.ipynb
├── 02_csv_reader.py
├── 02_sharing_variables.py
├── 02_word2vec_no_frills.py
├── 01_logistic_regression_mnist_sol.py
├── exercises
│   ├── e01.py
│   └── e01.ipynb
├── 02_tfrecord_example.py
├── process_data.py
├── 02_word2vec_starter.py
├── 01_logistic_regression_mnist_starter.ipynb
├── 01_logistic_regression_mnist_sol.ipynb
├── 02_word2vec_visualize.py
└── 01_linear_regression_sol.ipynb
/.gitignore:
--------------------------------------------------------------------------------
1 | graphs/
2 | __pycache__/
3 | .ipynb_*
4 | .DS_Store
5 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:latest-py3
2 |
3 | RUN pip install xlrd
4 |
--------------------------------------------------------------------------------
/data/friday.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chiphuyen/tf-oreilly/HEAD/data/friday.jpg
--------------------------------------------------------------------------------
/data/fire_theft.xls:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chiphuyen/tf-oreilly/HEAD/data/fire_theft.xls
--------------------------------------------------------------------------------
/data/friday.tfrecord:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chiphuyen/tf-oreilly/HEAD/data/friday.tfrecord
--------------------------------------------------------------------------------
/setup/requirements.txt:
--------------------------------------------------------------------------------
1 | tensorflow==1.1.0
2 | scipy==0.19.0
3 | scikit-learn==0.18.1
4 | matplotlib==2.0.0
5 | xlrd==1.0.0
6 | ipdb==0.10.3
7 | Pillow==4.1.1
8 | lxml==3.7.3
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # tf_oreilly
2 | Code for O'Reilly's "A Short Course on TensorFlow"
3 |
4 | Runs on TensorFlow 1.1.0 and Python 3.5+
5 |
6 | To get started read the [setup instructions](setup/setup_instruction.md).
7 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 |   tensorflow:
4 |     container_name: tensorflow
5 |     build: .
6 |     volumes:
7 |       - .:/notebooks
8 |     ports:
9 |       - 8888:8888
10 |
11 |   tensorboard:
12 |     container_name: tensorboard
13 |     build: .
14 |     volumes:
15 |       - .:/notebooks
16 |     ports:
17 |       - 6006:6006
18 |     command: tensorboard --logdir=/notebooks/graphs
19 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
3 | import tensorflow as tf
4 |
5 | def huber_loss(labels, predictions, delta=1.0):
6 | residual = tf.abs(predictions - labels)
7 | def f1(): return 0.5 * tf.square(residual)
8 | def f2(): return delta * residual - 0.5 * tf.square(delta)
9 | return tf.cond(residual < delta, f1, f2)
10 |
11 | def make_dir(path):
12 | """ Create a directory if there isn't one already. """
13 | try:
14 | os.mkdir(path)
15 | except OSError:
16 | pass
--------------------------------------------------------------------------------
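A quick usage sketch (not part of the repo): `huber_loss` can stand in for the squared-error loss in `01_linear_regression_sol.py`, which already hints at this in a commented-out line. Since `tf.cond` expects a scalar predicate, this form assumes `labels` and `predictions` are scalars fed one sample at a time, as in that script.

```python
import tensorflow as tf
from utils import huber_loss

# Placeholders fed one (x, y) sample at a time, as in 01_linear_regression_sol.py
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')
w = tf.Variable(0.0, name='weights')
b = tf.Variable(0.0, name='bias')
Y_predicted = X * w + b

# Huber loss in place of the squared error; delta controls where the loss
# switches from quadratic to linear, making it less sensitive to outliers.
loss = huber_loss(Y, Y_predicted, delta=1.0)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)
```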
/02_queue_example.py:
--------------------------------------------------------------------------------
1 | """ Example to demonstrate how to use queues
2 | Author: Chip Huyen
3 | Prepared for O'Reilly "A short course in TensorFlow"
4 | """
5 | from __future__ import print_function
6 |
7 | import os
8 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
9 |
10 | import numpy as np
11 | import tensorflow as tf
12 |
13 | N_SAMPLES = 1000
14 | NUM_THREADS = 4
15 | # Generating some simple data
16 | # create 1000 random samples, each a length-4 array drawn from a normal distribution with mean 1 and stddev 10
17 | data = 10 * np.random.randn(N_SAMPLES, 4) + 1
18 | # create 1000 random labels of 0 and 1
19 | target = np.random.randint(0, 2, size=N_SAMPLES)
20 |
21 | queue = tf.FIFOQueue(capacity=50, dtypes=[tf.float32, tf.int32], shapes=[[4], []])
22 |
23 | enqueue_op = queue.enqueue_many([data, target])
24 | data_sample, label_sample = queue.dequeue()
25 |
26 | # create ops that do something with data_sample and label_sample
27 |
28 | # create NUM_THREADS threads to run the enqueue op
29 | qr = tf.train.QueueRunner(queue, [enqueue_op] * NUM_THREADS)
30 | with tf.Session() as sess:
31 | # create a coordinator, launch the queue runner threads.
32 | coord = tf.train.Coordinator()
33 | enqueue_threads = qr.create_threads(sess, coord=coord, start=True)
34 | try:
35 | for step in range(100): # do to 100 iterations
36 | if coord.should_stop():
37 | break
38 | data_batch, label_batch = sess.run([data_sample, label_sample])
39 | print(data_batch)
40 | print(label_batch)
41 | except Exception as e:
42 | coord.request_stop(e)
43 | finally:
44 | coord.request_stop()
45 | coord.join(enqueue_threads)
--------------------------------------------------------------------------------
/01_linear_regression_starter.py:
--------------------------------------------------------------------------------
1 | """
2 | Simple linear regression example in TensorFlow
3 | This program tries to predict the number of thefts from
4 | the number of fires in the city of Chicago
5 | """
6 | import os
7 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
8 |
9 | import numpy as np
10 | import matplotlib.pyplot as plt
11 | import tensorflow as tf
12 | import xlrd
13 |
14 | import utils
15 |
16 | DATA_FILE = 'data/fire_theft.xls'
17 |
18 | # Phase 1: Assemble the graph
19 | # Step 1: read in data from the .xls file
20 | book = xlrd.open_workbook(DATA_FILE, encoding_override='utf-8')
21 | sheet = book.sheet_by_index(0)
22 | data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])
23 | n_samples = sheet.nrows - 1
24 |
25 | # Step 2: create placeholders for input X (number of fires) and label Y (number of thefts)
26 |
27 |
28 | # Step 3: create weight and bias, initialized to 0
29 | # name your variables w and b
30 |
31 |
32 | # Step 4: predict Y (number of thefts) from the number of fires
33 | # name your variable Y_predicted
34 |
35 |
36 | # Step 5: use the square error as the loss function
37 | # name your variable loss
38 |
39 | # Step 6: using gradient descent with learning rate of 0.01 to minimize loss
40 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)
41 |
42 | # Phase 2: Train our model
43 | with tf.Session() as sess:
44 | # Step 7: initialize the necessary variables, in this case, w and b
45 | # TO - DO
46 |
47 | # Step 8: train the model
48 | 	for i in range(50): # train the model for 50 epochs
49 | total_loss = 0
50 | for x, y in data:
51 | # Session runs optimizer to minimize loss and fetch the value of loss
52 | # TO DO: write sess.run()
53 | total_loss += l
54 | print("Epoch {0}: {1}".format(i, total_loss/n_samples))
55 |
56 | # plot the results
57 | # X, Y = data.T[0], data.T[1]
58 | # plt.plot(X, Y, 'bo', label='Real data')
59 | # plt.plot(X, X * w + b, 'r', label='Predicted data')
60 | # plt.legend()
61 | # plt.show()
--------------------------------------------------------------------------------
/01_linear_regression_sol.py:
--------------------------------------------------------------------------------
1 | """
2 | Simple linear regression example in TensorFlow
3 | This program tries to predict the number of thefts from
4 | the number of fires in the city of Chicago
5 | """
6 | import os
7 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
8 |
9 | import numpy as np
10 | import matplotlib.pyplot as plt
11 | import tensorflow as tf
12 | import xlrd
13 |
14 | import utils
15 |
16 | DATA_FILE = 'data/fire_theft.xls'
17 |
18 | # Step 1: read in data from the .xls file
19 | book = xlrd.open_workbook(DATA_FILE, encoding_override="utf-8")
20 | sheet = book.sheet_by_index(0)
21 | data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])
22 | n_samples = sheet.nrows - 1
23 |
24 | # Step 2: create placeholders for input X (number of fires) and label Y (number of thefts)
25 | X = tf.placeholder(tf.float32, name='X')
26 | Y = tf.placeholder(tf.float32, name='Y')
27 |
28 | # Step 3: create weight and bias, initialized to 0
29 | w = tf.Variable(0.0, name='weights')
30 | b = tf.Variable(0.0, name='bias')
31 |
32 | # Step 4: build model to predict Y
33 | Y_predicted = X * w + b
34 |
35 | # Step 5: use the square error as the loss function
36 | loss = tf.square(Y - Y_predicted, name='loss')
37 | # loss = utils.huber_loss(Y, Y_predicted)
38 |
39 | # Step 6: using gradient descent with learning rate of 0.001 to minimize loss
40 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)
41 |
42 | with tf.Session() as sess:
43 | # Step 7: initialize the necessary variables, in this case, w and b
44 | sess.run(tf.global_variables_initializer())
45 |
46 | writer = tf.summary.FileWriter('./graphs/linear_reg', sess.graph)
47 |
48 | # Step 8: train the model
49 | 	for i in range(50): # train the model for 50 epochs
50 | total_loss = 0
51 | for x, y in data:
52 | # Session runs train_op and fetch values of loss
53 | _, l = sess.run([optimizer, loss], feed_dict={X: x, Y:y})
54 | total_loss += l
55 | print('Epoch {0}: {1}'.format(i, total_loss/n_samples))
56 |
57 | # close the writer when you're done using it
58 | writer.close()
59 |
60 | # Step 9: output the values of w and b
61 | w, b = sess.run([w, b])
62 |
63 | # plot the results
64 | X, Y = data.T[0], data.T[1]
65 | plt.plot(X, Y, 'bo', label='Real data')
66 | plt.plot(X, X * w + b, 'r', label='Predicted data')
67 | plt.legend()
68 | plt.show()
--------------------------------------------------------------------------------
/01_logistic_regression_mnist_starter.py:
--------------------------------------------------------------------------------
1 | """
2 | Starter code for logistic regression model to solve OCR task
3 | with MNIST in TensorFlow
4 | MNIST dataset: yann.lecun.com/exdb/mnist/
5 |
6 | """
7 | import os
8 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
9 |
10 | import numpy as np
11 | import tensorflow as tf
12 | from tensorflow.examples.tutorials.mnist import input_data
13 | import time
14 |
15 | # Define parameters for the model
16 | learning_rate = 0.01
17 | batch_size = 128
18 | n_epochs = 10
19 |
20 | # Step 1: Read in data
21 | # using TF Learn's built in function to load MNIST data to the folder data/mnist
22 | mnist = input_data.read_data_sets('/data/mnist', one_hot=True)
23 |
24 | # Step 2: create placeholders for features and labels
25 | # each image in the MNIST data is of shape 28*28 = 784
26 | # therefore, each image is represented with a 1x784 tensor
27 | # there are 10 classes for each image, corresponding to digits 0 - 9.
28 | # Features are of the type float, and labels are of the type int
29 |
30 |
31 | # Step 3: create weights and bias
32 | # weights and biases are initialized to 0
33 | # shape of w depends on the dimension of X and Y so that Y = X * w + b
34 | # shape of b depends on Y
35 |
36 |
37 | # Step 4: build model
38 | # the model that returns the logits.
39 | # these logits will later be passed through a softmax layer
40 | # to get the probability distribution over the possible labels of the image
41 | # DO NOT DO SOFTMAX HERE
42 |
43 |
44 | # Step 5: define loss function
45 | # use cross entropy loss of the real labels with the softmax of logits
46 | # use the method:
47 | # tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
48 | # then use tf.reduce_mean to get the mean loss of the batch
49 |
50 |
51 | # Step 6: define training op
52 | # using gradient descent to minimize loss
53 |
54 |
55 | with tf.Session() as sess:
56 | start_time = time.time()
57 | sess.run(tf.global_variables_initializer())
58 | n_batches = int(mnist.train.num_examples/batch_size)
59 | for i in range(n_epochs): # train the model n_epochs times
60 | total_loss = 0
61 |
62 | for _ in range(n_batches):
63 | X_batch, Y_batch = mnist.train.next_batch(batch_size)
64 | # TO-DO: run optimizer + fetch loss_batch
65 | #
66 | #
67 | total_loss += loss_batch
68 | print('Average loss epoch {0}: {1}'.format(i, total_loss/n_batches))
69 |
70 | print('Total time: {0} seconds'.format(time.time() - start_time))
71 |
72 | print('Optimization Finished!') # should be around 0.35 after 25 epochs
73 |
74 | # test the model
75 | n_batches = int(mnist.test.num_examples/batch_size)
76 | total_correct_preds = 0
77 | for i in range(n_batches):
78 | X_batch, Y_batch = mnist.test.next_batch(batch_size)
79 | 		loss_batch, logits_batch = sess.run([loss, logits], feed_dict={X: X_batch, Y: Y_batch}) # don't run the train op on test data
80 | preds = tf.nn.softmax(logits_batch)
81 | correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y_batch, 1))
82 | accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32)) # need numpy.count_nonzero(boolarr) :(
83 | total_correct_preds += sess.run(accuracy)
84 |
85 | print('Accuracy {0}'.format(total_correct_preds/mnist.test.num_examples))
86 |
--------------------------------------------------------------------------------
/01_linear_regression_starter.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "\"\"\"\n",
12 | "Simple linear regression example in TensorFlow\n",
13 | "This program tries to predict the number of thefts from \n",
14 | "the number of fire in the city of Chicago\n",
15 | "\"\"\"\n",
16 | "import os\n",
17 | "os.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n",
18 | "\n",
19 | "import numpy as np\n",
20 | "import matplotlib.pyplot as plt\n",
21 | "import tensorflow as tf\n",
22 | "import xlrd\n",
23 | "\n",
24 | "import utils\n",
25 | "\n",
26 | "DATA_FILE = 'data/fire_theft.xls'\n",
27 | "\n",
28 | "# Phase 1: Assemble the graph\n",
29 | "# Step 1: read in data from the .xls file\n",
30 | "book = xlrd.open_workbook(DATA_FILE, encoding_override='utf-8')\n",
31 | "sheet = book.sheet_by_index(0)\n",
32 | "data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])\n",
33 | "n_samples = sheet.nrows - 1\n",
34 | "\n",
35 | "# Step 2: create placeholders for input X (number of fire) and label Y (number of theft)\n",
36 | "\n",
37 | "\n",
38 | "# Step 3: create weight and bias, initialized to 0\n",
39 | "# name your variables w and b\n",
40 | "\n",
41 | "\n",
42 | "# Step 4: predict Y (number of theft) from the number of fire\n",
43 | "# name your variable Y_predicted\n",
44 | "\n",
45 | "\n",
46 | "# Step 5: use the square error as the loss function\n",
47 | "# name your variable loss\n",
48 | "\n",
49 | "# Step 6: using gradient descent with learning rate of 0.01 to minimize loss\n",
50 | "optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)\n",
51 | " \n",
52 | "# Phase 2: Train our model\n",
53 | "with tf.Session() as sess:\n",
54 | "\t# Step 7: initialize the necessary variables, in this case, w and b\n",
55 | "\t# TO - DO\t\n",
56 | "\n",
57 | "\t# Step 8: train the model\n",
58 | "\tfor i in range(50): # run 100 epochs\n",
59 | "\t\ttotal_loss = 0\n",
60 | "\t\tfor x, y in data:\n",
61 | "\t\t\t# Session runs optimizer to minimize loss and fetch the value of loss\n",
62 | "\t\t\t# TO DO: write sess.run()\n",
63 | "\t\t\ttotal_loss += l\n",
64 | "\t\tprint(\"Epoch {0}: {1}\".format(i, total_loss/n_samples))\n",
65 | "\t\n",
66 | "# plot the results\n",
67 | "# X, Y = data.T[0], data.T[1]\n",
68 | "# plt.plot(X, Y, 'bo', label='Real data')\n",
69 | "# plt.plot(X, X * w + b, 'r', label='Predicted data')\n",
70 | "# plt.legend()\n",
71 | "# plt.show()"
72 | ]
73 | }
74 | ],
75 | "metadata": {
76 | "kernelspec": {
77 | "display_name": "Python 3",
78 | "language": "python",
79 | "name": "python3"
80 | },
81 | "language_info": {
82 | "codemirror_mode": {
83 | "name": "ipython",
84 | "version": 3
85 | },
86 | "file_extension": ".py",
87 | "mimetype": "text/x-python",
88 | "name": "python",
89 | "nbconvert_exporter": "python",
90 | "pygments_lexer": "ipython3",
91 | "version": "3.5.2"
92 | }
93 | },
94 | "nbformat": 4,
95 | "nbformat_minor": 2
96 | }
97 |
--------------------------------------------------------------------------------
/02_csv_reader.py:
--------------------------------------------------------------------------------
1 | """ Some people tried to use TextLineReader for the assignment 1
2 | but seem to have problems getting it work, so here is a short
3 | script demonstrating the use of CSV reader on the heart dataset.
4 | Note that the heart dataset is originally in txt so I first
5 | converted it to csv to take advantage of the already laid out columns.
6 |
7 | You can download heart.csv in the data folder.
8 | """
9 | import os
10 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
11 |
12 | import tensorflow as tf
13 |
14 | DATA_PATH = 'data/heart.csv'
15 | BATCH_SIZE = 2
16 | N_FEATURES = 9
17 |
18 | def batch_generator(filenames):
19 | """ filenames is the list of files you want to read from.
20 | In this case, it contains only heart.csv
21 | """
22 | filename_queue = tf.train.string_input_producer(filenames)
23 | reader = tf.TextLineReader(skip_header_lines=1) # skip the first line in the file
24 | _, value = reader.read(filename_queue)
25 |
26 | # record_defaults are the default values in case some of our columns are empty
27 | # This is also to tell tensorflow the format of our data (the type of the decode result)
28 | # for this dataset, out of 9 feature columns,
29 |     # 8 of them are floats (some are integers, but to make our features homogeneous,
30 |     # we consider them all floats), and 1 is a string (at position 5)
31 |     # the last column, the label, is an integer
32 |
33 | record_defaults = [[1.0] for _ in range(N_FEATURES)]
34 | record_defaults[4] = ['']
35 | record_defaults.append([1])
36 |
37 |     # decode one line of the csv file into its 10 columns
38 | content = tf.decode_csv(value, record_defaults=record_defaults)
39 |
40 | # convert the 5th column (present/absent) to the binary value 0 and 1
41 | content[4] = tf.cond(tf.equal(content[4], tf.constant('Present')), lambda: tf.constant(1.0), lambda: tf.constant(0.0))
42 |
43 | # pack all 9 features into a tensor
44 | features = tf.stack(content[:N_FEATURES])
45 |
46 | # assign the last column to label
47 | label = content[-1]
48 |
49 |     # minimum number of elements in the queue after a dequeue, used to ensure
50 | # that the samples are sufficiently mixed
51 | # I think 10 times the BATCH_SIZE is sufficient
52 | min_after_dequeue = 10 * BATCH_SIZE
53 |
54 | # the maximum number of elements in the queue
55 | capacity = 20 * BATCH_SIZE
56 |
57 | # shuffle the data to generate BATCH_SIZE sample pairs
58 | data_batch, label_batch = tf.train.shuffle_batch([features, label], batch_size=BATCH_SIZE,
59 | capacity=capacity, min_after_dequeue=min_after_dequeue)
60 |
61 | return data_batch, label_batch
62 |
63 | def generate_batches(data_batch, label_batch):
64 | with tf.Session() as sess:
65 | coord = tf.train.Coordinator()
66 | threads = tf.train.start_queue_runners(coord=coord)
67 | for _ in range(10): # generate 10 batches
68 | features, labels = sess.run([data_batch, label_batch])
69 | print(features)
70 | coord.request_stop()
71 | coord.join(threads)
72 |
73 | def main():
74 | data_batch, label_batch = batch_generator([DATA_PATH])
75 | generate_batches(data_batch, label_batch)
76 |
77 | if __name__ == '__main__':
78 | main()
79 |
--------------------------------------------------------------------------------
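The docstring above mentions converting heart.txt to heart.csv, but the conversion itself is not part of the repo. A minimal one-off sketch, assuming heart.txt is a plain delimited text file with a header row (the delimiter is guessed with `csv.Sniffer`, so check the output):

```python
import csv

# Hypothetical one-off conversion: data/heart.txt -> data/heart.csv
with open('data/heart.txt') as src:
    dialect = csv.Sniffer().sniff(src.read(4096))  # guess the delimiter
    src.seek(0)
    rows = list(csv.reader(src, dialect))

with open('data/heart.csv', 'w', newline='') as dst:
    csv.writer(dst).writerows(rows)  # rewrite as comma-separated values
```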
/02_sharing_variables.py:
--------------------------------------------------------------------------------
1 | import os
2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
3 |
4 | import tensorflow as tf
5 |
6 | # def two_hidden_layers(x):
7 | # assert x.shape.as_list() == [200, 100]
8 | # w1 = tf.Variable(tf.random_normal([100, 50]), name="h1_weights")
9 | # b1 = tf.Variable(tf.zeros([50]), name="h1_biases")
10 | # h1 = tf.matmul(x, w1) + b1
11 | # assert h1.shape.as_list() == [200, 50]
12 | # w2 = tf.Variable(tf.random_normal([50, 10]), name="h2_weights")
13 | # b2 = tf.Variable(tf.zeros([10]), name="h2_biases")
14 | # logits = tf.matmul(h1, w2) + b2
15 | # return logits
16 |
17 | # x1 = tf.truncated_normal([200, 100], name='x1')
18 | # x2 = tf.truncated_normal([200, 100], name='x2')
19 |
20 | # logits1 = two_hidden_layers(x1)
21 | # logits2 = two_hidden_layers(x2)
22 |
23 | # def two_hidden_layers(x):
24 | # assert x.shape.as_list() == [200, 100]
25 | # w1 = tf.get_variable("h1_weights", [100, 50], initializer=tf.random_normal_initializer())
26 | # b1 = tf.get_variable("h1_biases", [50], initializer=tf.constant_initializer(0.0))
27 | # h1 = tf.matmul(x, w1) + b1
28 | # assert h1.shape.as_list() == [200, 50]
29 | # w2 = tf.get_variable("h2_weights", [50, 10], initializer=tf.random_normal_initializer())
30 | # b2 = tf.get_variable("h2_biases", [10], initializer=tf.constant_initializer(0.0))
31 | # logits = tf.matmul(h1, w2) + b2
32 | # return logits
33 |
34 | # x1 = tf.truncated_normal([200, 100], name='x1')
35 | # x2 = tf.truncated_normal([200, 100], name='x2')
36 | # logits1 = two_hidden_layers(x1)
37 | # logits2 = two_hidden_layers(x2)
38 |
39 | # def two_hidden_layers(x):
40 | # # with tf.variable_scope('layers'):
41 | # assert x.shape.as_list() == [200, 100]
42 | # w1 = tf.get_variable("h1_weights", [100, 50], initializer=tf.random_normal_initializer())
43 | # b1 = tf.get_variable("h1_biases", [50], initializer=tf.constant_initializer(0.0))
44 | # h1 = tf.matmul(x, w1) + b1
45 | # assert h1.shape.as_list() == [200, 50]
46 | # w2 = tf.get_variable("h2_weights", [50, 10], initializer=tf.random_normal_initializer())
47 | # b2 = tf.get_variable("h2_biases", [10], initializer=tf.constant_initializer(0.0))
48 | # logits = tf.matmul(h1, w2) + b2
49 | # return logits
50 |
51 | # x1 = tf.truncated_normal([200, 100], name='x1')
52 | # x2 = tf.truncated_normal([200, 100], name='x2')
53 |
54 | # with tf.variable_scope('two_layers') as scope:
55 | # logits1 = two_hidden_layers(x1)
56 | # scope.reuse_variables()
57 | # logits2 = two_hidden_layers(x2)
58 |
59 | # def fully_connected(x, output_dim, scope):
60 | # with tf.variable_scope(scope) as scope:
61 | # w = tf.get_variable("weights", [x.shape[1], output_dim], initializer=tf.random_normal_initializer())
62 | # b = tf.get_variable("biases", [output_dim], initializer=tf.constant_initializer(0.0))
63 | # h = tf.matmul(x, w) + b
64 | # return h
65 |
66 | # def two_hidden_layers(x):
67 | # h1 = fully_connected(x, 50, 'h1')
68 | #     return fully_connected(h1, 10, 'h2')
69 |
70 | # x1 = tf.truncated_normal([200, 100], name='x1')
71 | # x2 = tf.truncated_normal([200, 100], name='x2')
72 |
73 | # with tf.variable_scope('two_layers') as scope:
74 | # logits1 = two_hidden_layers(x1)
75 | # scope.reuse_variables()
76 | # logits2 = two_hidden_layers(x2)
77 |
78 |
79 | writer = tf.summary.FileWriter('./graphs/cool_variables', tf.get_default_graph())
80 | writer.close()
--------------------------------------------------------------------------------
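Every block above is commented out; here is a minimal runnable version of the pattern the last block builds toward: `tf.get_variable` inside a `tf.variable_scope`, with `scope.reuse_variables()` so the second call shares the first call's weights instead of creating new ones.

```python
import tensorflow as tf

def fully_connected(x, output_dim, scope):
    with tf.variable_scope(scope):
        w = tf.get_variable('weights', [x.shape[1], output_dim],
                            initializer=tf.random_normal_initializer())
        b = tf.get_variable('biases', [output_dim],
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(x, w) + b

def two_hidden_layers(x):
    h1 = fully_connected(x, 50, 'h1')
    return fully_connected(h1, 10, 'h2')

x1 = tf.truncated_normal([200, 100], name='x1')
x2 = tf.truncated_normal([200, 100], name='x2')

with tf.variable_scope('two_layers') as scope:
    logits1 = two_hidden_layers(x1)
    scope.reuse_variables()  # second call reuses two_layers/h1/* and two_layers/h2/*
    logits2 = two_hidden_layers(x2)
```

Running this and writing the graph with the `FileWriter` above shows in TensorBoard a single set of `h1`/`h2` variables used by both towers.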
/02_word2vec_no_frills.py:
--------------------------------------------------------------------------------
1 | """ The mo frills implementation of word2vec skip-gram model using NCE loss. """
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import os
8 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
9 |
10 | import numpy as np
11 | import tensorflow as tf
12 | from tensorflow.contrib.tensorboard.plugins import projector
13 |
14 | from process_data import process_data
15 |
16 | VOCAB_SIZE = 50000
17 | BATCH_SIZE = 128
18 | EMBED_SIZE = 128 # dimension of the word embedding vectors
19 | SKIP_WINDOW = 1 # the context window
20 | NUM_SAMPLED = 64 # Number of negative examples to sample.
21 | LEARNING_RATE = 1.0
22 | NUM_TRAIN_STEPS = 10000
23 | SKIP_STEP = 2000 # how many steps to skip before reporting the loss
24 |
25 | def word2vec(batch_gen):
26 | """ Build the graph for word2vec model and train it """
27 | # Step 1: define the placeholders for input and output
28 | center_words = tf.placeholder(tf.int32, shape=[BATCH_SIZE], name='center_words')
29 | target_words = tf.placeholder(tf.int32, shape=[BATCH_SIZE, 1], name='target_words')
30 |
31 |     # Assemble this part of the graph on the CPU. You can change it to GPU if you have a GPU
32 | # Step 2: define weights. In word2vec, it's actually the weights that we care about
33 | embed_matrix = tf.Variable(tf.random_uniform([VOCAB_SIZE, EMBED_SIZE], -1.0, 1.0),
34 | name='embed_matrix')
35 |
36 | # Step 3: define the inference
37 | embed = tf.nn.embedding_lookup(embed_matrix, center_words, name='embed')
38 |
39 | # Step 4: construct variables for NCE loss
40 | nce_weight = tf.Variable(tf.truncated_normal([VOCAB_SIZE, EMBED_SIZE],
41 | stddev=1.0 / (EMBED_SIZE ** 0.5)),
42 | name='nce_weight')
43 | nce_bias = tf.Variable(tf.zeros([VOCAB_SIZE]), name='nce_bias')
44 |
45 | # define loss function to be NCE loss function
46 | loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weight,
47 | biases=nce_bias,
48 | labels=target_words,
49 | inputs=embed,
50 | num_sampled=NUM_SAMPLED,
51 | num_classes=VOCAB_SIZE), name='loss')
52 |
53 | # Step 5: define optimizer
54 | optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)
55 |
56 | with tf.Session() as sess:
57 | sess.run(tf.global_variables_initializer())
58 |
59 |         total_loss = 0.0 # we use this to calculate the average loss in the last SKIP_STEP steps
60 | writer = tf.summary.FileWriter('./graphs/no_frills/', sess.graph)
61 | for index in range(NUM_TRAIN_STEPS):
62 | centers, targets = next(batch_gen)
63 | loss_batch, _ = sess.run([loss, optimizer],
64 | feed_dict={center_words: centers, target_words: targets})
65 | total_loss += loss_batch
66 | if (index + 1) % SKIP_STEP == 0:
67 | print('Average loss at step {}: {:5.1f}'.format(index, total_loss / SKIP_STEP))
68 | total_loss = 0.0
69 | writer.close()
70 |
71 | def main():
72 | batch_gen = process_data(VOCAB_SIZE, BATCH_SIZE, SKIP_WINDOW)
73 | word2vec(batch_gen)
74 |
75 | if __name__ == '__main__':
76 | main()
--------------------------------------------------------------------------------
/01_logistic_regression_mnist_sol.py:
--------------------------------------------------------------------------------
1 | """
2 | Simple logistic regression model to solve OCR task
3 | with MNIST in TensorFlow
4 | MNIST dataset: yann.lecun.com/exdb/mnist/
5 |
6 | """
7 | import os
8 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
9 |
10 | import numpy as np
11 | import tensorflow as tf
12 | from tensorflow.examples.tutorials.mnist import input_data
13 | import time
14 |
15 | # Define parameters for the model
16 | learning_rate = 0.01
17 | batch_size = 128
18 | n_epochs = 30
19 |
20 | # Step 1: Read in data
21 | # using TF Learn's built in function to load MNIST data to the folder data/mnist
22 | mnist = input_data.read_data_sets('/data/mnist', one_hot=True)
23 |
24 | # Step 2: create placeholders for features and labels
25 | # each image in the MNIST data is of shape 28*28 = 784
26 | # therefore, each image is represented with a 1x784 tensor
27 | # there are 10 classes for each image, corresponding to digits 0 - 9.
28 | # each label is a one-hot vector.
29 | X = tf.placeholder(tf.float32, [batch_size, 784], name='X_placeholder')
30 | Y = tf.placeholder(tf.int32, [batch_size, 10], name='Y_placeholder')
31 |
32 | # Step 3: create weights and bias
33 | # w is initialized to random variables with mean of 0, stddev of 0.01
34 | # b is initialized to 0
35 | # shape of w depends on the dimension of X and Y so that Y = tf.matmul(X, w)
36 | # shape of b depends on Y
37 | w = tf.Variable(tf.random_normal(shape=[784, 10], stddev=0.01), name='weights')
38 | b = tf.Variable(tf.zeros([1, 10]), name="bias")
39 |
40 | # Step 4: build model
41 | # the model that returns the logits.
42 | # these logits will later be passed through a softmax layer
43 | logits = tf.matmul(X, w) + b
44 |
45 | # Step 5: define loss function
46 | # use cross entropy of softmax of logits as the loss function
47 | entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name='loss')
48 | loss = tf.reduce_mean(entropy) # computes the mean over all the examples in the batch
49 |
50 | # Step 6: define training op
51 | # using the Adam optimizer with learning rate of 0.01 to minimize loss
52 | optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
53 |
54 | with tf.Session() as sess:
55 | # to visualize using TensorBoard
56 | writer = tf.summary.FileWriter('./graphs/logistic_reg', sess.graph)
57 |
58 | start_time = time.time()
59 | sess.run(tf.global_variables_initializer())
60 | n_batches = int(mnist.train.num_examples/batch_size)
61 | for i in range(n_epochs): # train the model n_epochs times
62 | total_loss = 0
63 |
64 | for _ in range(n_batches):
65 | X_batch, Y_batch = mnist.train.next_batch(batch_size)
66 | _, loss_batch = sess.run([optimizer, loss], feed_dict={X: X_batch, Y:Y_batch})
67 | total_loss += loss_batch
68 | print('Average loss epoch {0}: {1}'.format(i, total_loss/n_batches))
69 |
70 | print('Total time: {0} seconds'.format(time.time() - start_time))
71 |
72 | print('Optimization Finished!') # should be around 0.35 after 25 epochs
73 |
74 | # test the model
75 | n_batches = int(mnist.test.num_examples/batch_size)
76 | total_correct_preds = 0
77 | for i in range(n_batches):
78 | X_batch, Y_batch = mnist.test.next_batch(batch_size)
79 | 		loss_batch, logits_batch = sess.run([loss, logits], feed_dict={X: X_batch, Y: Y_batch}) # don't run the train op on test data
80 | preds = tf.nn.softmax(logits_batch)
81 | correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y_batch, 1))
82 | accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32)) # need numpy.count_nonzero(boolarr) :(
83 | total_correct_preds += sess.run(accuracy)
84 |
85 | print('Accuracy {0}'.format(total_correct_preds/mnist.test.num_examples))
86 |
87 | writer.close()
88 |
--------------------------------------------------------------------------------
/exercises/e01.py:
--------------------------------------------------------------------------------
1 | """
2 | Simple TensorFlow exercises
3 | You should thoroughly test your code
4 | """
5 |
6 | import tensorflow as tf
7 |
8 | ###############################################################################
9 | # 1a: Create two random 0-d tensors x and y of any distribution.
10 | # Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.
11 | # Hint: look up tf.cond()
12 | # I do the first problem for you
13 | ###############################################################################
14 |
15 | x = tf.random_uniform([]) # Empty array as shape creates a scalar.
16 | y = tf.random_uniform([])
17 | out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))
18 |
19 | ###############################################################################
20 | # 1b: Create two 0-d tensors x and y randomly selected from -1 and 1.
21 | # Return x + y if x < y, x - y if x > y, 0 otherwise.
22 | # Hint: Look up tf.case().
23 | ###############################################################################
24 |
25 | # YOUR CODE
26 |
27 | ###############################################################################
28 | # 1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]]
29 | # and y as a tensor of zeros with the same shape as x.
30 | # Return a boolean tensor that yields Trues if x equals y element-wise.
31 | # Hint: Look up tf.equal().
32 | ###############################################################################
33 |
34 | # YOUR CODE
35 |
36 | ###############################################################################
37 | # 1d: Create the tensor x of value
38 | # [29.05088806, 27.61298943, 31.19073486, 29.35532951,
39 | # 30.97266006, 26.67541885, 38.08450317, 20.74983215,
40 | # 34.94445419, 34.45999146, 29.06485367, 36.01657104,
41 | # 27.88236427, 20.56035233, 30.20379066, 29.51215172,
42 | # 33.71149445, 28.59134293, 36.05556488, 28.66994858].
43 | # Get the indices of elements in x whose values are greater than 30.
44 | # Hint: Use tf.where().
45 | # Then extract elements whose values are greater than 30.
46 | # Hint: Use tf.gather().
47 | ###############################################################################
48 |
49 | # YOUR CODE
50 |
51 | ###############################################################################
52 | # 1e: Create a diagonal 2-d tensor of size 6 x 6 with the diagonal values of 1,
53 | # 2, ..., 6
54 | # Hint: Use tf.range() and tf.diag().
55 | ###############################################################################
56 |
57 | # YOUR CODE
58 |
59 | ###############################################################################
60 | # 1f: Create a random 2-d tensor of size 10 x 10 from any distribution.
61 | # Calculate its determinant.
62 | # Hint: Look at tf.matrix_determinant().
63 | ###############################################################################
64 |
65 | # YOUR CODE
66 |
67 | ###############################################################################
68 | # 1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].
69 | # Return the unique elements in x
70 | # Hint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.
71 | ###############################################################################
72 |
73 | # YOUR CODE
74 |
75 | ###############################################################################
76 | # 1h: Create two tensors x and y of shape 300 from any normal distribution,
77 | # as long as they are from the same distribution.
78 | # Use tf.cond() to return:
79 | # - The mean squared error of (x - y) if the average of all elements in (x - y)
80 | # is negative, or
81 | # - The sum of absolute value of all elements in the tensor (x - y) otherwise.
82 | # Hint: see the Huber loss function in the slides for lecture 3.
83 | ###############################################################################
84 |
85 | # YOUR CODE
--------------------------------------------------------------------------------
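The header says to test your code thoroughly; a minimal sketch of how to do that for the ops built above (here just problem 1a): open a session and evaluate the tensors in a single `run` call so the sampled values are consistent.

```python
import tensorflow as tf

x = tf.random_uniform([])  # scalar, as in problem 1a
y = tf.random_uniform([])
out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))

with tf.Session() as sess:
    x_val, y_val, out_val = sess.run([x, y, out])  # one run -> consistent samples
    print(x_val, y_val, out_val)
```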
/02_tfrecord_example.py:
--------------------------------------------------------------------------------
1 | """ Examples to demonstrate how to write an image file to a TFRecord,
2 | and how to read a TFRecord file using TFRecordReader.
3 | """
4 | import os
5 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
6 |
7 | from PIL import Image
8 | import numpy as np
9 | import matplotlib.pyplot as plt
10 | import tensorflow as tf
11 |
12 | # image supposed to have shape: 480 x 640 x 3 = 921600
13 | IMAGE_PATH = 'data/'
14 |
15 | def _int64_feature(value):
16 | return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
17 |
18 | def _bytes_feature(value):
19 | return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
20 |
21 | def get_image_binary(filename):
22 | """ You can read in the image using tensorflow too, but it's a drag
23 | since you have to create graphs. It's much easier using Pillow and NumPy
24 | """
25 | image = Image.open(filename)
26 | image = np.asarray(image, np.uint8)
27 | shape = np.array(image.shape, np.int32)
28 | return shape.tobytes(), image.tobytes() # convert image to raw data bytes in the array.
29 |
30 | def write_to_tfrecord(label, shape, binary_image, tfrecord_file):
31 |     """ This example writes a single sample to a TFRecord file. If you want to write
32 | more samples, just use a loop.
33 | """
34 | writer = tf.python_io.TFRecordWriter(tfrecord_file)
35 | # write label, shape, and image content to the TFRecord file
36 | example = tf.train.Example(features=tf.train.Features(feature={
37 | 'label': _int64_feature(label),
38 | 'shape': _bytes_feature(shape),
39 | 'image': _bytes_feature(binary_image)
40 | }))
41 | writer.write(example.SerializeToString())
42 | writer.close()
43 |
44 | def write_tfrecord(label, image_file, tfrecord_file):
45 | shape, binary_image = get_image_binary(image_file)
46 | write_to_tfrecord(label, shape, binary_image, tfrecord_file)
47 |
48 | def read_from_tfrecord(filenames):
49 | tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
50 | reader = tf.TFRecordReader()
51 | _, tfrecord_serialized = reader.read(tfrecord_file_queue)
52 |
53 | # label and image are stored as bytes but could be stored as
54 | # int64 or float64 values in a serialized tf.Example protobuf.
55 | tfrecord_features = tf.parse_single_example(tfrecord_serialized,
56 | features={
57 | 'label': tf.FixedLenFeature([], tf.int64),
58 | 'shape': tf.FixedLenFeature([], tf.string),
59 | 'image': tf.FixedLenFeature([], tf.string),
60 | }, name='features')
61 | # image was saved as uint8, so we have to decode as uint8.
62 | image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
63 | shape = tf.decode_raw(tfrecord_features['shape'], tf.int32)
64 | # the image tensor is flattened out, so we have to reconstruct the shape
65 | image = tf.reshape(image, shape)
66 | label = tfrecord_features['label']
67 | return label, shape, image
68 |
69 | def read_tfrecord(tfrecord_file):
70 | label, shape, image = read_from_tfrecord([tfrecord_file])
71 |
72 | with tf.Session() as sess:
73 | coord = tf.train.Coordinator()
74 | threads = tf.train.start_queue_runners(coord=coord)
75 | label, image, shape = sess.run([label, image, shape])
76 | coord.request_stop()
77 | coord.join(threads)
78 | print(label)
79 | print(shape)
80 | plt.imshow(image)
81 | plt.show()
82 |
83 | def main():
84 | # assume the image has the label Chihuahua, which corresponds to class number 1
85 | label = 1
86 | image_file = IMAGE_PATH + 'friday.jpg'
87 | tfrecord_file = IMAGE_PATH + 'friday.tfrecord'
88 | write_tfrecord(label, image_file, tfrecord_file)
89 | read_tfrecord(tfrecord_file)
90 |
91 | if __name__ == '__main__':
92 | main()
93 |
94 |
--------------------------------------------------------------------------------
/process_data.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | from collections import Counter
6 | import random
7 | import os
8 | import zipfile
9 |
10 | import numpy as np
11 | from six.moves import urllib
12 | import tensorflow as tf
13 |
14 | import utils
15 |
16 | # Parameters for downloading data
17 | DOWNLOAD_URL = 'http://mattmahoney.net/dc/'
18 | EXPECTED_BYTES = 31344016
19 | DATA_FOLDER = '/Users/Chip/data/'
20 | FILE_NAME = 'text8.zip'
21 |
22 | def download(file_name, expected_bytes):
23 | """ Download the dataset text8 if it's not already downloaded """
24 | file_path = DATA_FOLDER + file_name
25 | if os.path.exists(file_path):
26 | print("Dataset ready")
27 | return file_path
28 | file_name, _ = urllib.request.urlretrieve(DOWNLOAD_URL + file_name, file_path)
29 | file_stat = os.stat(file_path)
30 | if file_stat.st_size == expected_bytes:
31 | print('Successfully downloaded the file', file_name)
32 | else:
33 | raise Exception('File ' + file_name +
34 | ' might be corrupted. You should try downloading it with a browser.')
35 | return file_path
36 |
37 | def read_data(file_path):
38 | """ Read data into a list of tokens
39 | There should be 17,005,207 tokens
40 | """
41 | with zipfile.ZipFile(file_path) as f:
42 | words = tf.compat.as_str(f.read(f.namelist()[0])).split()
43 | # tf.compat.as_str() converts the input into the string
44 | return words
45 |
46 | def build_vocab(words, vocab_size):
47 | """ Build vocabulary of VOCAB_SIZE most frequent words """
48 | dictionary = dict()
49 | count = [('UNK', -1)]
50 | count.extend(Counter(words).most_common(vocab_size - 1))
51 | index = 0
52 | utils.make_dir('processed')
53 | with open('processed/vocab_1000.tsv', "w") as f:
54 | for word, _ in count:
55 | dictionary[word] = index
56 | if index < 1000:
57 | f.write(word + "\n")
58 | index += 1
59 | index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
60 | return dictionary, index_dictionary
61 |
62 | def convert_words_to_index(words, dictionary):
63 | """ Replace each word in the dataset with its index in the dictionary """
64 | return [dictionary[word] if word in dictionary else 0 for word in words]
65 |
66 | def generate_sample(index_words, context_window_size):
67 | """ Form training pairs according to the skip-gram model. """
68 | for index, center in enumerate(index_words):
69 | context = random.randint(1, context_window_size)
70 | # get a random target before the center word
71 | for target in index_words[max(0, index - context): index]:
72 | yield center, target
73 |         # get a random target after the center word
74 | for target in index_words[index + 1: index + context + 1]:
75 | yield center, target
76 |
77 | def get_batch(iterator, batch_size):
78 | """ Group a numerical stream into batches and yield them as Numpy arrays. """
79 | while True:
80 | center_batch = np.zeros(batch_size, dtype=np.int32)
81 | target_batch = np.zeros([batch_size, 1])
82 | for index in range(batch_size):
83 | center_batch[index], target_batch[index] = next(iterator)
84 | yield center_batch, target_batch
85 |
86 | def process_data(vocab_size, batch_size, skip_window):
87 | file_path = download(FILE_NAME, EXPECTED_BYTES)
88 | words = read_data(file_path)
89 | dictionary, _ = build_vocab(words, vocab_size)
90 | index_words = convert_words_to_index(words, dictionary)
91 | del words # to save memory
92 | single_gen = generate_sample(index_words, skip_window)
93 | return get_batch(single_gen, batch_size)
94 |
95 | def get_index_vocab(vocab_size):
96 | file_path = download(FILE_NAME, EXPECTED_BYTES)
97 | words = read_data(file_path)
98 | return build_vocab(words, vocab_size)
99 |
--------------------------------------------------------------------------------
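A toy illustration (not part of the repo) of what `generate_sample` and `get_batch` yield, using a hand-made list of word indices. With `context_window_size=1` each center word is paired with its immediate neighbors, and `get_batch` shapes the targets as a column so they can feed `tf.nn.nce_loss` in the word2vec scripts.

```python
from process_data import generate_sample, get_batch

index_words = [5, 2, 7, 9, 4]   # a tiny "corpus" already converted to word indices
pairs = generate_sample(index_words, context_window_size=1)
centers, targets = next(get_batch(pairs, batch_size=4))

print(centers)        # center words, e.g. [5 2 2 7]
print(targets.shape)  # (4, 1): context words as a column, ready for tf.nn.nce_loss
```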
/setup/setup_instruction.md:
--------------------------------------------------------------------------------
1 | TensorFlow supports both Python 2.7 and Python 3.3+. Note that for Windows, TensorFlow supports only 64-bit Python 3.5.
2 | For this course, I will use Python 3. But you’re welcome to use either Python 2 or Python 3 for the assignments. The starter code, though, will be in Python 3.
3 |
4 | Google has pretty detailed instructions on how to download and set up TensorFlow. You can follow them here: https://www.tensorflow.org/get_started/os_setup
5 |
6 | Unless your computer has a GPU, you should install TensorFlow without GPU support. My recommendation is to always set up TensorFlow using virtualenv. For the list of dependencies, please consult the file requirements.txt. This list will be updated as the course progresses.
7 |
8 | Below are simpler instructions on how to install TensorFlow for people using Mac OS. If you have any problems installing TensorFlow, feel free to post them to the class forum.
9 |
10 | ## Install TensorFlow
11 | ### For Mac OS
12 |
13 | #### With Docker Compose
14 |
15 | 1. Install [Docker for Mac](https://docs.docker.com/docker-for-mac/install/)
16 |
17 | 2. Clone Github Repo:
18 |
19 | ```
20 | $ git clone https://github.com/chiphuyen/tf_oreilly.git
21 | $ cd tf_oreilly/
22 | ```
23 | 3. Run containers:
24 |
25 | ```
26 | $ docker-compose up
27 | tensorflow | Copy/paste this URL into your browser when...
28 | tensorflow | to login with a token:
29 | tensorflow | http://localhost:8888/?token=TOKEN
30 | tensorboard | Starting TensorBoard b'47' at http://0.0.0.0:6006
31 | tensorboard | (Press CTRL+C to quit)
32 | ```
33 | 4. Open iPython Notebook using the URL with the token: http://localhost:8888/?token=TOKEN
34 | 5. Open TensorBoard at: http://localhost:6006
35 |
36 | #### With System Python
37 |
38 | If you get a “permission denied” error from any command, prefix that command with “sudo”.
39 |
40 | You will need pip3 (or pip if you use Python 2) and virtualenv.
41 |
42 | 1. set up pip and virtual environment
43 | ```bash
44 | $ sudo easy_install pip
45 | $ sudo easy_install --upgrade six
46 | $ pip3 install virtualenv
47 | ```
48 |
49 | 2. set up a project directory. You will do all work for this class in this directory
50 | ```bash
51 | $ mkdir [my project]
52 | ```
53 |
54 | 3. set up virtual environment for the project directory.
55 | ```bash
56 | $ cd [my project]
57 | $ virtualenv venv --distribute
58 | ```
59 |
60 | These commands create a venv subdirectory in your project where everything is installed.
61 |
62 | 4. to activate the virtual environment
63 | ```bash
64 | $ source venv/bin/activate
65 | ```
66 |
67 | If you type:
68 | ```bash
69 | $ pip freeze
70 | ```
71 |
72 | You will see that nothing is shown, which means no packages are installed in your virtual environment, so you have to install all the packages that you need. For the list of packages you need for this class, refer to requirements.txt.
73 | 5. install TensorFlow and other dependencies
74 | ```bash
75 | $ pip install tensorflow
76 | $ pip freeze > requirements.txt
77 | ```
78 |
79 | 6. to exit the virtual environment, use:
80 |
81 | ```bash
82 | $ deactivate
83 | ```
84 |
85 | If you want your virtual environment to inherit globally installed packages (not recommended), use:
86 | ```bash
87 | $ virtualenv venv --distribute --system-site-packages
88 | ```
89 | ### For Ubuntu
90 |
91 |
92 | ### For Windows
93 |
94 |
95 | ### On the cloud
96 | If you don't want to install TensorFlow, you can use TensorFlow over the web.
97 |
98 | #### SageMath
99 | You can use TensorFlow over the web at https://cloud.sagemath.com/
100 | Simply click on the link, create an account (or log in with your GitHub), and create a TensorFlow project.
101 |
102 | #### Jupyter
103 | You can also use Jupyter notebook to write TensorFlow programs.
104 |
105 | ### Possible set up problem
106 |
107 | If you have problems using Matplotlib in a virtual environment, here is a simple fix.
108 | If you installed matplotlib using pip, there is a directory in your home directory called ~/.matplotlib. Create a file ~/.matplotlib/matplotlibrc there and add the following line: backend: TkAgg
109 |
110 | Or you can simply add this after importing matplotlib: matplotlib.use("TkAgg")
111 |
--------------------------------------------------------------------------------
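For reference, the Matplotlib fix mentioned at the end of setup_instruction.md, written out as code (a minimal sketch of the second option; the backend must be selected before pyplot is imported):

```python
import matplotlib
matplotlib.use("TkAgg")          # pick the TkAgg backend, as suggested above
import matplotlib.pyplot as plt  # import pyplot only after the backend is set

plt.plot([0, 1, 2], [0, 1, 4])
plt.show()
```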
/02_word2vec_starter.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import os
6 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
7 |
8 | import numpy as np
9 | import tensorflow as tf
10 | from tensorflow.contrib.tensorboard.plugins import projector
11 |
12 | from process_data import process_data
13 |
14 | VOCAB_SIZE = 50000
15 | BATCH_SIZE = 128
16 | EMBED_SIZE = 128 # dimension of the word embedding vectors
17 | SKIP_WINDOW = 1 # the context window
18 | NUM_SAMPLED = 64 # Number of negative examples to sample.
19 | LEARNING_RATE = 1.0
20 | NUM_TRAIN_STEPS = 20000
21 | SKIP_STEP = 2000 # how many steps to skip before reporting the loss
22 |
23 | def word2vec(batch_gen):
24 | """ Build the graph for word2vec model and train it """
25 | # Step 1: define the placeholders for input and output
26 | # center_words have to be int to work on embedding lookup
27 |
28 | # TO DO
29 | with tf.name_scope('data'):
30 | center_words = tf.placeholder(tf.int32, [BATCH_SIZE], name='center_words')
31 | target_words = tf.placeholder(tf.int32, [BATCH_SIZE, 1], name='target_words')
32 |
33 | # Step 2: define weights. In word2vec, it's actually the weights that we care about
34 | # vocab size x embed size
35 | # initialized to random uniform -1 to 1
36 |
37 |     # TO DO
38 | with tf.name_scope('embedding_matrix'):
39 |         embed_matrix = tf.Variable(tf.random_uniform([VOCAB_SIZE, EMBED_SIZE], -1.0, 1.0), name='embed_matrix')
40 |
41 | # Step 3: define the inference
42 | # get the embed of input words using tf.nn.embedding_lookup
43 | # embed = tf.nn.embedding_lookup(embed_matrix, center_words, name='embed')
44 |
45 | # TO DO
46 | # with tf.name_scope('embedding_lookup'):
47 | # embed = tf.nn.embedding_lookup(embed_matrix, center_words, name='embed')
48 |
49 | # Step 4: construct variables for NCE loss
50 | # tf.nn.nce_loss(weights, biases, labels, inputs, num_sampled, num_classes, ...)
51 |     # nce_weight (vocab size x embed size), initialized to truncated_normal stddev=1.0 / (EMBED_SIZE ** 0.5)
52 | # bias: vocab size, initialized to 0
53 |
54 | # TO DO
55 | with tf.name_scope('nce_loss'):
56 | embed = tf.nn.embedding_lookup(embed_matrix, center_words, name='embed')
57 |         nce_weights = tf.Variable(tf.truncated_normal([VOCAB_SIZE, EMBED_SIZE],
58 |                                                        stddev=1.0 / (EMBED_SIZE ** 0.5)),
59 |                                   name='nce_weights')
60 | nce_biases = tf.Variable(tf.zeros([VOCAB_SIZE]), name='nce_biases')
61 |
62 | # define loss function to be NCE loss function
63 | # tf.nn.nce_loss(weights, biases, labels, inputs, num_sampled, num_classes, ...)
64 |     # need to get the mean across the batch
65 | # note: you should use embedding of center words for inputs, not center words themselves
66 |
67 | # TO DO
68 | loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights,
69 | biases=nce_biases,
70 | labels=target_words,
71 | inputs=embed,
72 | num_sampled=NUM_SAMPLED,
73 | num_classes=VOCAB_SIZE,
74 | name='loss'))
75 |
76 | # Step 5: define optimizer
77 |
78 | # TO DO
79 | with tf.name_scope('optimizer'):
80 | optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)
81 |
82 | with tf.Session() as sess:
83 | # TO DO: initialize variables
84 | sess.run(tf.global_variables_initializer())
85 |
86 | total_loss = 0.0 # we use this to calculate the average loss in the last SKIP_STEP steps
87 | writer = tf.summary.FileWriter('./graphs/no_frills/', sess.graph)
88 | for index in range(NUM_TRAIN_STEPS):
89 | centers, targets = next(batch_gen)
90 | # TO DO: create feed_dict, run optimizer, fetch loss_batch
91 | _, loss_batch = sess.run([optimizer, loss], feed_dict={center_words: centers, target_words: targets})
92 |
93 | total_loss += loss_batch
94 | if (index + 1) % SKIP_STEP == 0:
95 | print('Average loss at step {}: {:5.1f}'.format(index, total_loss / SKIP_STEP))
96 | total_loss = 0.0
97 | writer.close()
98 |
99 | def main():
100 | batch_gen = process_data(VOCAB_SIZE, BATCH_SIZE, SKIP_WINDOW)
101 | word2vec(batch_gen)
102 |
103 | if __name__ == '__main__':
104 | main()
--------------------------------------------------------------------------------
/01_logistic_regression_mnist_starter.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "\"\"\"\n",
12 | "Starter code for logistic regression model to solve OCR task \n",
13 | "with MNIST in TensorFlow\n",
14 | "MNIST dataset: yann.lecun.com/exdb/mnist/\n",
15 | "\n",
16 | "\"\"\"\n",
17 | "import os\n",
18 | "os.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n",
19 | "\n",
20 | "import numpy as np\n",
21 | "import tensorflow as tf\n",
22 | "from tensorflow.examples.tutorials.mnist import input_data\n",
23 | "import time\n",
24 | "\n",
25 | "# Define paramaters for the model\n",
26 | "learning_rate = 0.01\n",
27 | "batch_size = 128\n",
28 | "n_epochs = 10\n",
29 | "\n",
30 | "# Step 1: Read in data\n",
31 | "# using TF Learn's built in function to load MNIST data to the folder data/mnist\n",
32 | "mnist = input_data.read_data_sets('/data/mnist', one_hot=True) \n",
33 | "\n",
34 | "# Step 2: create placeholders for features and labels\n",
35 | "# each image in the MNIST data is of shape 28*28 = 784\n",
36 | "# therefore, each image is represented with a 1x784 tensor\n",
37 | "# there are 10 classes for each image, corresponding to digits 0 - 9. \n",
38 | "# Features are of the type float, and labels are of the type int\n",
39 | "\n",
40 | "\n",
41 | "# Step 3: create weights and bias\n",
42 | "# weights and biases are initialized to 0\n",
43 | "# shape of w depends on the dimension of X and Y so that Y = X * w + b\n",
44 | "# shape of b depends on Y\n",
45 | "\n",
46 | "\n",
47 | "# Step 4: build model\n",
48 | "# the model that returns the logits.\n",
49 | "# this logits will be later passed through softmax layer\n",
50 | "# to get the probability distribution of possible label of the image\n",
51 | "# DO NOT DO SOFTMAX HERE\n",
52 | "\n",
53 | "\n",
54 | "# Step 5: define loss function\n",
55 | "# use cross entropy loss of the real labels with the softmax of logits\n",
56 | "# use the method:\n",
57 | "# tf.nn.softmax_cross_entropy_with_logits(logits, Y)\n",
58 | "# then use tf.reduce_mean to get the mean loss of the batch\n",
59 | "\n",
60 | "\n",
61 | "# Step 6: define training op\n",
62 | "# using gradient descent to minimize loss\n",
63 | "\n",
64 | "\n",
65 | "with tf.Session() as sess:\n",
66 | "\tstart_time = time.time()\n",
67 | "\tsess.run(tf.global_variables_initializer())\t\n",
68 | "\tn_batches = int(mnist.train.num_examples/batch_size)\n",
69 | "\tfor i in range(n_epochs): # train the model n_epochs times\n",
70 | "\t\ttotal_loss = 0\n",
71 | "\n",
72 | "\t\tfor _ in range(n_batches):\n",
73 | "\t\t\tX_batch, Y_batch = mnist.train.next_batch(batch_size)\n",
74 | "\t\t\t# TO-DO: run optimizer + fetch loss_batch\n",
75 | "\t\t\t# \n",
76 | "\t\t\t# \n",
77 | "\t\t\ttotal_loss += loss_batch\n",
78 | "\t\tprint('Average loss epoch {0}: {1}'.format(i, total_loss/n_batches))\n",
79 | "\n",
80 | "\tprint('Total time: {0} seconds'.format(time.time() - start_time))\n",
81 | "\n",
82 | "\tprint('Optimization Finished!') # should be around 0.35 after 25 epochs\n",
83 | "\n",
84 | "\t# test the model\n",
85 | "\tn_batches = int(mnist.test.num_examples/batch_size)\n",
86 | "\ttotal_correct_preds = 0\n",
87 | "\tfor i in range(n_batches):\n",
88 | "\t\tX_batch, Y_batch = mnist.test.next_batch(batch_size)\n",
89 | "\t\t_, loss_batch, logits_batch = sess.run([optimizer, loss, logits], feed_dict={X: X_batch, Y:Y_batch}) \n",
90 | "\t\tpreds = tf.nn.softmax(logits_batch)\n",
91 | "\t\tcorrect_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y_batch, 1))\n",
92 | "\t\taccuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32)) # need numpy.count_nonzero(boolarr) :(\n",
93 | "\t\ttotal_correct_preds += sess.run(accuracy)\t\n",
94 | "\t\n",
95 | "\tprint('Accuracy {0}'.format(total_correct_preds/mnist.test.num_examples))\n"
96 | ]
97 | }
98 | ],
99 | "metadata": {
100 | "kernelspec": {
101 | "display_name": "Python 3",
102 | "language": "python",
103 | "name": "python3"
104 | },
105 | "language_info": {
106 | "codemirror_mode": {
107 | "name": "ipython",
108 | "version": 3
109 | },
110 | "file_extension": ".py",
111 | "mimetype": "text/x-python",
112 | "name": "python",
113 | "nbconvert_exporter": "python",
114 | "pygments_lexer": "ipython3",
115 | "version": "3.5.2"
116 | }
117 | },
118 | "nbformat": 4,
119 | "nbformat_minor": 2
120 | }
121 |
--------------------------------------------------------------------------------
/exercises/e01.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "\"\"\"\n",
12 | "Simple TensorFlow exercises\n",
13 | "You should thoroughly test your code\n",
14 | "\"\"\"\n",
15 | "\n",
16 | "import tensorflow as tf"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": 2,
22 | "metadata": {
23 | "collapsed": true
24 | },
25 | "outputs": [],
26 | "source": [
27 | "###############################################################################\n",
28 | "# 1a: Create two random 0-d tensors x and y of any distribution.\n",
29 | "# Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.\n",
30 | "# Hint: look up tf.cond()\n",
31 | "# I do the first problem for you\n",
32 | "###############################################################################\n",
33 | "\n",
34 | "x = tf.random_uniform([]) # Empty array as shape creates a scalar.\n",
35 | "y = tf.random_uniform([])\n",
36 | "out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": 3,
42 | "metadata": {
43 | "collapsed": true
44 | },
45 | "outputs": [],
46 | "source": [
47 | "###############################################################################\n",
48 | "# 1b: Create two 0-d tensors x and y randomly selected from -1 and 1.\n",
49 | "# Return x + y if x < y, x - y if x > y, 0 otherwise.\n",
50 | "# Hint: Look up tf.case().\n",
51 | "###############################################################################\n",
52 | "\n",
53 | "# YOUR CODE"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": 4,
59 | "metadata": {
60 | "collapsed": true
61 | },
62 | "outputs": [],
63 | "source": [
64 | "###############################################################################\n",
65 | "# 1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]] \n",
66 | "# and y as a tensor of zeros with the same shape as x.\n",
67 | "# Return a boolean tensor that yields Trues if x equals y element-wise.\n",
68 | "# Hint: Look up tf.equal().\n",
69 | "###############################################################################\n",
70 | "\n",
71 | "# YOUR CODE"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 5,
77 | "metadata": {
78 | "collapsed": true
79 | },
80 | "outputs": [],
81 | "source": [
82 | "###############################################################################\n",
83 | "# 1d: Create the tensor x of value \n",
84 | "# [29.05088806, 27.61298943, 31.19073486, 29.35532951,\n",
85 | "# 30.97266006, 26.67541885, 38.08450317, 20.74983215,\n",
86 | "# 34.94445419, 34.45999146, 29.06485367, 36.01657104,\n",
87 | "# 27.88236427, 20.56035233, 30.20379066, 29.51215172,\n",
88 | "# 33.71149445, 28.59134293, 36.05556488, 28.66994858].\n",
89 | "# Get the indices of elements in x whose values are greater than 30.\n",
90 | "# Hint: Use tf.where().\n",
91 | "# Then extract elements whose values are greater than 30.\n",
92 | "# Hint: Use tf.gather().\n",
93 | "###############################################################################\n",
94 | "\n",
95 | "# YOUR CODE"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": 6,
101 | "metadata": {
102 | "collapsed": true
103 | },
104 | "outputs": [],
105 | "source": [
106 | "###############################################################################\n",
107 | "# 1e: Create a diagnoal 2-d tensor of size 6 x 6 with the diagonal values of 1,\n",
108 | "# 2, ..., 6\n",
109 | "# Hint: Use tf.range() and tf.diag().\n",
110 | "###############################################################################\n",
111 | "\n",
112 | "# YOUR CODE"
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": 7,
118 | "metadata": {
119 | "collapsed": true
120 | },
121 | "outputs": [],
122 | "source": [
123 | "###############################################################################\n",
124 | "# 1f: Create a random 2-d tensor of size 10 x 10 from any distribution.\n",
125 | "# Calculate its determinant.\n",
126 | "# Hint: Look at tf.matrix_determinant().\n",
127 | "###############################################################################\n",
128 | "\n",
129 | "# YOUR CODE"
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": 8,
135 | "metadata": {
136 | "collapsed": true
137 | },
138 | "outputs": [],
139 | "source": [
140 | "###############################################################################\n",
141 | "# 1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].\n",
142 | "# Return the unique elements in x\n",
143 | "# Hint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.\n",
144 | "###############################################################################\n",
145 | "\n",
146 | "# YOUR CODE"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "metadata": {
153 | "collapsed": true
154 | },
155 | "outputs": [],
156 | "source": [
157 | "###############################################################################\n",
158 | "# 1h: Create two tensors x and y of shape 300 from any normal distribution,\n",
159 | "# as long as they are from the same distribution.\n",
160 | "# Use tf.cond() to return:\n",
161 | "# - The mean squared error of (x - y) if the average of all elements in (x - y)\n",
162 | "# is negative, or\n",
163 | "# - The sum of absolute value of all elements in the tensor (x - y) otherwise.\n",
164 | "# Hint: see the Huber loss function in the lecture slides 3.\n",
165 | "###############################################################################\n",
166 | "\n",
167 | "# YOUR CODE"
168 | ]
169 | }
170 | ],
171 | "metadata": {
172 | "kernelspec": {
173 | "display_name": "Python 3",
174 | "language": "python",
175 | "name": "python3"
176 | },
177 | "language_info": {
178 | "codemirror_mode": {
179 | "name": "ipython",
180 | "version": 3
181 | },
182 | "file_extension": ".py",
183 | "mimetype": "text/x-python",
184 | "name": "python",
185 | "nbconvert_exporter": "python",
186 | "pygments_lexer": "ipython3",
187 | "version": "3.5.2"
188 | }
189 | },
190 | "nbformat": 4,
191 | "nbformat_minor": 2
192 | }
193 |
--------------------------------------------------------------------------------
/01_logistic_regression_mnist_sol.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "name": "stdout",
10 | "output_type": "stream",
11 | "text": [
12 | "Extracting /data/mnist/train-images-idx3-ubyte.gz\n",
13 | "Extracting /data/mnist/train-labels-idx1-ubyte.gz\n",
14 | "Extracting /data/mnist/t10k-images-idx3-ubyte.gz\n",
15 | "Extracting /data/mnist/t10k-labels-idx1-ubyte.gz\n",
16 | "Average loss epoch 0: 0.36955618330370854\n",
17 | "Average loss epoch 1: 0.293480447712756\n",
18 | "Average loss epoch 2: 0.28623841154130747\n",
19 | "Average loss epoch 3: 0.27665123294839217\n",
20 | "Average loss epoch 4: 0.277344136996008\n",
21 | "Average loss epoch 5: 0.27262849688946783\n",
22 | "Average loss epoch 6: 0.2691011475898447\n",
23 | "Average loss epoch 7: 0.267174185885416\n",
24 | "Average loss epoch 8: 0.26553179556395345\n",
25 | "Average loss epoch 9: 0.2650559436885905\n",
26 | "Average loss epoch 10: 0.2634954330075037\n",
27 | "Average loss epoch 11: 0.2619154796455846\n",
28 | "Average loss epoch 12: 0.26072470520759794\n",
29 | "Average loss epoch 13: 0.2619401936729749\n",
30 | "Average loss epoch 14: 0.2622848469720576\n",
31 | "Average loss epoch 15: 0.25875225717023814\n",
32 | "Average loss epoch 16: 0.25856056364821944\n",
33 | "Average loss epoch 17: 0.2561449474266498\n",
34 | "Average loss epoch 18: 0.25908740677616815\n",
35 | "Average loss epoch 19: 0.2584952840056175\n",
36 | "Average loss epoch 20: 0.2550625791360726\n",
37 | "Average loss epoch 21: 0.25346926433014705\n",
38 | "Average loss epoch 22: 0.2567733477427687\n",
39 | "Average loss epoch 23: 0.2564429994021262\n",
40 | "Average loss epoch 24: 0.2523712360393473\n",
41 | "Average loss epoch 25: 0.25367757440099625\n",
42 | "Average loss epoch 26: 0.25561104912857907\n",
43 | "Average loss epoch 27: 0.25363389703082595\n",
44 | "Average loss epoch 28: 0.2511840765397032\n",
45 | "Average loss epoch 29: 0.2511802302165465\n",
46 | "Total time: 17.81419563293457 seconds\n",
47 | "Optimization Finished!\n",
48 | "Accuracy 0.9135\n"
49 | ]
50 | }
51 | ],
52 | "source": [
53 | "\"\"\"\n",
54 | "Simple logistic regression model to solve OCR task \n",
55 | "with MNIST in TensorFlow\n",
56 | "MNIST dataset: yann.lecun.com/exdb/mnist/\n",
57 | "\n",
58 | "\"\"\"\n",
59 | "import os\n",
60 | "os.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n",
61 | "\n",
62 | "import numpy as np\n",
63 | "import tensorflow as tf\n",
64 | "from tensorflow.examples.tutorials.mnist import input_data\n",
65 | "import time\n",
66 | "\n",
67 | "# Define paramaters for the model\n",
68 | "learning_rate = 0.01\n",
69 | "batch_size = 128\n",
70 | "n_epochs = 30\n",
71 | "\n",
72 | "# Step 1: Read in data\n",
73 | "# using TF Learn's built in function to load MNIST data to the folder data/mnist\n",
74 | "mnist = input_data.read_data_sets('/data/mnist', one_hot=True) \n",
75 | "\n",
76 | "# Step 2: create placeholders for features and labels\n",
77 | "# each image in the MNIST data is of shape 28*28 = 784\n",
78 | "# therefore, each image is represented with a 1x784 tensor\n",
79 | "# there are 10 classes for each image, corresponding to digits 0 - 9. \n",
80 | "# each lable is one hot vector.\n",
81 | "X = tf.placeholder(tf.float32, [batch_size, 784], name='X_placeholder') \n",
82 | "Y = tf.placeholder(tf.int32, [batch_size, 10], name='Y_placeholder')\n",
83 | "\n",
84 | "# Step 3: create weights and bias\n",
85 | "# w is initialized to random variables with mean of 0, stddev of 0.01\n",
86 | "# b is initialized to 0\n",
87 | "# shape of w depends on the dimension of X and Y so that Y = tf.matmul(X, w)\n",
88 | "# shape of b depends on Y\n",
89 | "w = tf.Variable(tf.random_normal(shape=[784, 10], stddev=0.01), name='weights')\n",
90 | "b = tf.Variable(tf.zeros([1, 10]), name=\"bias\")\n",
91 | "\n",
92 | "# Step 4: build model\n",
93 | "# the model that returns the logits.\n",
94 | "# this logits will be later passed through softmax layer\n",
95 | "logits = tf.matmul(X, w) + b \n",
96 | "\n",
97 | "# Step 5: define loss function\n",
98 | "# use cross entropy of softmax of logits as the loss function\n",
99 | "entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name='loss')\n",
100 | "loss = tf.reduce_mean(entropy) # computes the mean over all the examples in the batch\n",
101 | "\n",
102 | "# Step 6: define training op\n",
103 | "# using gradient descent with learning rate of 0.01 to minimize loss\n",
104 | "optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n",
105 | "\n",
106 | "with tf.Session() as sess:\n",
107 | "\t# to visualize using TensorBoard\n",
108 | "\twriter = tf.summary.FileWriter('./graphs/logistic_reg', sess.graph)\n",
109 | "\n",
110 | "\tstart_time = time.time()\n",
111 | "\tsess.run(tf.global_variables_initializer())\t\n",
112 | "\tn_batches = int(mnist.train.num_examples/batch_size)\n",
113 | "\tfor i in range(n_epochs): # train the model n_epochs times\n",
114 | "\t\ttotal_loss = 0\n",
115 | "\n",
116 | "\t\tfor _ in range(n_batches):\n",
117 | "\t\t\tX_batch, Y_batch = mnist.train.next_batch(batch_size)\n",
118 | "\t\t\t_, loss_batch = sess.run([optimizer, loss], feed_dict={X: X_batch, Y:Y_batch}) \n",
119 | "\t\t\ttotal_loss += loss_batch\n",
120 | "\t\tprint('Average loss epoch {0}: {1}'.format(i, total_loss/n_batches))\n",
121 | "\n",
122 | "\tprint('Total time: {0} seconds'.format(time.time() - start_time))\n",
123 | "\n",
124 | "\tprint('Optimization Finished!') # should be around 0.35 after 25 epochs\n",
125 | "\n",
126 | "\t# test the model\n",
127 | "\tn_batches = int(mnist.test.num_examples/batch_size)\n",
128 | "\ttotal_correct_preds = 0\n",
129 | "\tfor i in range(n_batches):\n",
130 | "\t\tX_batch, Y_batch = mnist.test.next_batch(batch_size)\n",
131 | "\t\t_, loss_batch, logits_batch = sess.run([optimizer, loss, logits], feed_dict={X: X_batch, Y:Y_batch}) \n",
132 | "\t\tpreds = tf.nn.softmax(logits_batch)\n",
133 | "\t\tcorrect_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y_batch, 1))\n",
134 | "\t\taccuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32)) # need numpy.count_nonzero(boolarr) :(\n",
135 | "\t\ttotal_correct_preds += sess.run(accuracy)\t\n",
136 | "\t\n",
137 | "\tprint('Accuracy {0}'.format(total_correct_preds/mnist.test.num_examples))\n",
138 | "\n",
139 | "\twriter.close()"
140 | ]
141 | }
142 | ],
143 | "metadata": {
144 | "kernelspec": {
145 | "display_name": "Python 3",
146 | "language": "python",
147 | "name": "python3"
148 | },
149 | "language_info": {
150 | "codemirror_mode": {
151 | "name": "ipython",
152 | "version": 3
153 | },
154 | "file_extension": ".py",
155 | "mimetype": "text/x-python",
156 | "name": "python",
157 | "nbconvert_exporter": "python",
158 | "pygments_lexer": "ipython3",
159 | "version": "3.5.2"
160 | }
161 | },
162 | "nbformat": 4,
163 | "nbformat_minor": 2
164 | }
165 |
--------------------------------------------------------------------------------
/02_word2vec_visualize.py:
--------------------------------------------------------------------------------
1 | """ word2vec with NCE loss
2 | and code to visualize the embeddings on TensorBoard
3 | """
4 |
5 | from __future__ import absolute_import
6 | from __future__ import division
7 | from __future__ import print_function
8 |
9 | import os
10 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
11 |
12 | import numpy as np
13 | from tensorflow.contrib.tensorboard.plugins import projector
14 | import tensorflow as tf
15 |
16 | from process_data import process_data
17 | import utils
18 |
19 | VOCAB_SIZE = 50000
20 | BATCH_SIZE = 128
21 | EMBED_SIZE = 128 # dimension of the word embedding vectors
22 | SKIP_WINDOW = 1 # the context window
23 | NUM_SAMPLED = 64 # Number of negative examples to sample.
24 | LEARNING_RATE = 1.0
25 | NUM_TRAIN_STEPS = 100000
26 | WEIGHTS_FLD = 'processed/'
27 | SKIP_STEP = 2000
28 |
29 | class SkipGramModel:
30 | """ Build the graph for word2vec model """
31 | def __init__(self, vocab_size, embed_size, batch_size, num_sampled, learning_rate):
32 | self.vocab_size = vocab_size
33 | self.embed_size = embed_size
34 | self.batch_size = batch_size
35 | self.num_sampled = num_sampled
36 | self.lr = learning_rate
37 | self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
38 |
39 | def _create_placeholders(self):
40 | """ Step 1: define the placeholders for input and output """
41 | with tf.name_scope("data"):
42 | self.center_words = tf.placeholder(tf.int32, shape=[self.batch_size], name='center_words')
43 | self.target_words = tf.placeholder(tf.int32, shape=[self.batch_size, 1], name='target_words')
44 |
45 | def _create_embedding(self):
46 | """ Step 2: define weights. In word2vec, it's actually the weights that we care about """
47 |         # Assemble this part of the graph on the CPU. You can change it to GPU if you have one.
48 | with tf.device('/cpu:0'):
49 | with tf.name_scope("embed"):
50 | self.embed_matrix = tf.Variable(tf.random_uniform([self.vocab_size,
51 | self.embed_size], -1.0, 1.0),
52 | name='embed_matrix')
53 |
54 | def _create_loss(self):
55 | """ Step 3 + 4: define the model + the loss function """
56 | with tf.device('/cpu:0'):
57 | with tf.name_scope("loss"):
58 | # Step 3: define the inference
59 | embed = tf.nn.embedding_lookup(self.embed_matrix, self.center_words, name='embed')
60 |
61 | # Step 4: define loss function
62 | # construct variables for NCE loss
63 | nce_weight = tf.Variable(tf.truncated_normal([self.vocab_size, self.embed_size],
64 | stddev=1.0 / (self.embed_size ** 0.5)),
65 | name='nce_weight')
66 |                 nce_bias = tf.Variable(tf.zeros([self.vocab_size]), name='nce_bias')
67 |
68 | # define loss function to be NCE loss function
69 | self.loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weight,
70 | biases=nce_bias,
71 | labels=self.target_words,
72 | inputs=embed,
73 | num_sampled=self.num_sampled,
74 | num_classes=self.vocab_size), name='loss')
75 | def _create_optimizer(self):
76 | """ Step 5: define optimizer """
77 | with tf.device('/cpu:0'):
78 | self.optimizer = tf.train.GradientDescentOptimizer(self.lr).minimize(self.loss,
79 | global_step=self.global_step)
80 |
81 | def _create_summaries(self):
82 | with tf.name_scope("summaries"):
83 | tf.summary.scalar("loss", self.loss)
84 | tf.summary.histogram("histogram loss", self.loss)
85 |             # because we have several summaries, we merge them all
86 | # into one op to make it easier to manage
87 | self.summary_op = tf.summary.merge_all()
88 |
89 | def build_graph(self):
90 | """ Build the graph for our model """
91 | self._create_placeholders()
92 | self._create_embedding()
93 | self._create_loss()
94 | self._create_optimizer()
95 | self._create_summaries()
96 |
97 | def train_model(model, batch_gen, num_train_steps, weights_fld):
98 | saver = tf.train.Saver() # defaults to saving all variables - in this case embed_matrix, nce_weight, nce_bias
99 |
100 | initial_step = 0
101 | utils.make_dir('checkpoints')
102 | with tf.Session() as sess:
103 | sess.run(tf.global_variables_initializer())
104 | ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/checkpoint'))
105 | # if that checkpoint exists, restore from checkpoint
106 | if ckpt and ckpt.model_checkpoint_path:
107 | saver.restore(sess, ckpt.model_checkpoint_path)
108 |
109 |         total_loss = 0.0 # we use this to calculate the average loss over the last SKIP_STEP steps
110 | writer = tf.summary.FileWriter('improved_graph/lr' + str(LEARNING_RATE), sess.graph)
111 | initial_step = model.global_step.eval()
112 | for index in range(initial_step, initial_step + num_train_steps):
113 | centers, targets = next(batch_gen)
114 | feed_dict={model.center_words: centers, model.target_words: targets}
115 | loss_batch, _, summary = sess.run([model.loss, model.optimizer, model.summary_op],
116 | feed_dict=feed_dict)
117 | writer.add_summary(summary, global_step=index)
118 | total_loss += loss_batch
119 | if (index + 1) % SKIP_STEP == 0:
120 | print('Average loss at step {}: {:5.1f}'.format(index, total_loss / SKIP_STEP))
121 | total_loss = 0.0
122 | saver.save(sess, 'checkpoints/skip-gram', index)
123 |
124 | ####################
125 | # code to visualize the embeddings. uncomment the below to visualize embeddings
126 | # run "'tensorboard --logdir='processed'" to see the embeddings
127 | # final_embed_matrix = sess.run(model.embed_matrix)
128 |
129 |             # # it has to be a variable; constants don't work here. you can't reuse model.embed_matrix
130 | # embedding_var = tf.Variable(final_embed_matrix[:1000], name='embedding')
131 | # sess.run(embedding_var.initializer)
132 |
133 | # config = projector.ProjectorConfig()
134 | # summary_writer = tf.summary.FileWriter('processed')
135 |
136 | # # add embedding to the config file
137 | # embedding = config.embeddings.add()
138 | # embedding.tensor_name = embedding_var.name
139 |
140 |             # # link this tensor to its metadata file, in this case the first 1000 words of vocab
141 | # embedding.metadata_path = 'processed/vocab_1000.tsv'
142 |
143 | # # saves a configuration file that TensorBoard will read during startup.
144 | # projector.visualize_embeddings(summary_writer, config)
145 | # saver_embed = tf.train.Saver([embedding_var])
146 | # saver_embed.save(sess, 'processed/model3.ckpt', 1)
147 |
148 | def main():
149 | model = SkipGramModel(VOCAB_SIZE, EMBED_SIZE, BATCH_SIZE, NUM_SAMPLED, LEARNING_RATE)
150 | model.build_graph()
151 | batch_gen = process_data(VOCAB_SIZE, BATCH_SIZE, SKIP_WINDOW)
152 | train_model(model, batch_gen, NUM_TRAIN_STEPS, WEIGHTS_FLD)
153 |
154 | if __name__ == '__main__':
155 | main()
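156 | 
157 | # Optional helper: a minimal sketch of how one might sanity-check the trained
158 | # embeddings by looking up nearest neighbours with cosine similarity. It assumes
159 | # `embed_matrix` is the numpy array obtained from sess.run(model.embed_matrix)
160 | # after training; it is not called by the training code above.
161 | def nearest_neighbors(embed_matrix, word_index, top_k=8):
162 |     # normalize rows so that dot products become cosine similarities
163 |     normalized = embed_matrix / np.linalg.norm(embed_matrix, axis=1, keepdims=True)
164 |     similarities = normalized.dot(normalized[word_index])
165 |     # position 0 of the sorted result is the query word itself, so skip it
166 |     return np.argsort(-similarities)[1:top_k + 1]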
--------------------------------------------------------------------------------
/01_linear_regression_sol.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "name": "stdout",
10 | "output_type": "stream",
11 | "text": [
12 | "Epoch 0: 2069.6319333978354\n",
13 | "Epoch 1: 2117.0123581953535\n",
14 | "Epoch 2: 2092.302723001866\n",
15 | "Epoch 3: 2068.5080461938464\n",
16 | "Epoch 4: 2045.591184088162\n",
17 | "Epoch 5: 2023.5146448101316\n",
18 | "Epoch 6: 2002.2447619835536\n",
19 | "Epoch 7: 1981.748338803649\n",
20 | "Epoch 8: 1961.9944411260742\n",
21 | "Epoch 9: 1942.9520116143283\n",
22 | "Epoch 10: 1924.5930823644712\n",
23 | "Epoch 11: 1906.8898800636332\n",
24 | "Epoch 12: 1889.8164505837929\n",
25 | "Epoch 13: 1873.347133841543\n",
26 | "Epoch 14: 1857.4588400604468\n",
27 | "Epoch 15: 1842.1278742424079\n",
28 | "Epoch 16: 1827.332495119955\n",
29 | "Epoch 17: 1813.0520579712022\n",
30 | "Epoch 18: 1799.2660847636982\n",
31 | "Epoch 19: 1785.9562132299961\n",
32 | "Epoch 20: 1773.1024853109072\n",
33 | "Epoch 21: 1760.689129482884\n",
34 | "Epoch 22: 1748.6984157081515\n",
35 | "Epoch 23: 1737.1138680398553\n",
36 | "Epoch 24: 1725.920873066732\n",
37 | "Epoch 25: 1715.1046249579008\n",
38 | "Epoch 26: 1704.6500954309377\n",
39 | "Epoch 27: 1694.5447134910141\n",
40 | "Epoch 28: 1684.7746311347667\n",
41 | "Epoch 29: 1675.328450968245\n",
42 | "Epoch 30: 1666.1935385839038\n",
43 | "Epoch 31: 1657.3584002084322\n",
44 | "Epoch 32: 1648.8122658529207\n",
45 | "Epoch 33: 1640.5440742547091\n",
46 | "Epoch 34: 1632.5446836102221\n",
47 | "Epoch 35: 1624.8043315147183\n",
48 | "Epoch 36: 1617.3126799958602\n",
49 | "Epoch 37: 1610.0622532456405\n",
50 | "Epoch 38: 1603.0433557207386\n",
51 | "Epoch 39: 1596.2479176106197\n",
52 | "Epoch 40: 1589.668056331575\n",
53 | "Epoch 41: 1583.2965242617897\n",
54 | "Epoch 42: 1577.126371285745\n",
55 | "Epoch 43: 1571.1501190634\n",
56 | "Epoch 44: 1565.360979151513\n",
57 | "Epoch 45: 1559.7523780798629\n",
58 | "Epoch 46: 1554.3184364555138\n",
59 | "Epoch 47: 1549.0529469620615\n",
60 | "Epoch 48: 1543.950059985476\n",
61 | "Epoch 49: 1539.0050282141283\n"
62 | ]
63 | },
64 | {
65 | "data": {
66 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAD8CAYAAAB5Pm/hAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XuUVOWZ7/HvI2IQ4rVpjeHSTaKIEeTWEgiO44ga4gVl\nokGDSmZY4nhJnHFGRZ0VzSScwWXi7cRIiFFw6CNqFMUcjRgv0ZhI0igmBFRQQUC0u1E4QHuh4Tl/\n7Cqqurtu3XXbtfv3WatXV+29q/bDbvrXb717v+82d0dERKJrr3IXICIixaWgFxGJOAW9iEjEKehF\nRCJOQS8iEnEKehGRiFPQi4hEnIJeRCTiFPQiIhG3d7kLAOjbt6/X1taWuwwRkYqybNmyZnevzrZd\nKIK+traWhoaGcpchIlJRzGxdLtup60ZEJOIU9CIiEaegFxGJuFD00aeyc+dONmzYwCeffFLuUiQH\nvXr1on///vTs2bPcpYhIO6EN+g0bNrDffvtRW1uLmZW7HMnA3dm8eTMbNmxg0KBB5S5HRNoJbdfN\nJ598QlVVlUK+ApgZVVVV+vQl0gn19VBbC3vtFXyvry/evkLbogcU8hVEPyuR3NXXw4wZ0NISPF+3\nLngOMHVq4fcX2ha9iEhUXX99IuTjWlqC5cWgoM+gR48ejBgxgqFDh3LGGWewZcuWLr9XbW0tzc3N\nGbeZN28el19+ecZtnn/+ef7whz90uQ4RKb933+3c8nxFJuiL0d+17777snz5clasWMHBBx/MnXfe\nmf+b5klBL1L5Bg7s3PJ8RSLo4/1d69aBe6K/q5AnN8aNG8fGjRv3PL/55ps59thjOeaYY7jhhhv2\nLD/rrLMYPXo0Rx99NHPnzs36vvfeey+DBw9mzJgxvPTSS3uWP/7443z1q19l5MiRnHTSSXzwwQes\nXbuWOXPmcOuttzJixAhefPHFlNuJSLjNmgW9e7dd1rt3sLwo3L3sX6NHj/b2Vq5c2WFZOjU17kHE\nt/2qqcn5LVLq06ePu7u3trb62Wef7U8++aS7uz/11FN+0UUX+e7du33Xrl1+2mmn+e9+9zt3d9+8\nebO7u7e0tPjRRx/tzc3NsRprvKmpqc37v/feez5gwABvbGz0Tz/91L/2ta/5ZZdd5u7uH374oe/e\nvdvd3X/xi1/4lVde6e7uN9xwg99888173iPdduXQmZ+ZSHe3YEGQUWbB9wULOv8eQIPnkLGhvuom\nV8Xq7/r4448ZMWIEGzdu5KijjuLkk08GYMmSJSxZsoSRI0cCsH37dlavXs3xxx/PHXfcwaJFiwBY\nv349q1evpqqqKuX7L126lBNOOIHq6mDyuSlTpvDmm28CwTiCKVOmsGnTJj777LO016fnup2IhMvU\nqcW5wiaVSHTdFKu/K95Hv27dOtx9Tx+9u3PttdeyfPlyli9fzpo1a5g+fTrPP/88v/3tb/njH//I\na6+9xsiRI7t8bfl3v/tdLr/8cv7617/y85//PO375LqdiHRfWYPezO4xs0YzW5Fi3b+bmZtZ39hz\nM7M7zGyNmf3FzEYVo+j2it3f1bt3b+644w5+8pOf0Nrayte//nXuuecetm/fDsDGjRtpbGxk69at\nHHTQQfTu3ZvXX3+dl19+OeP7fvWrX+V3v/sdmzdvZufOnTz00EN71m3dupV+/foBMH/+/D3L99tv\nP7Zt25Z1OxGRuFxa9POAie0XmtkA4BQguYPkG8ARsa8ZwF35l5jd1Kkwdy7U1IBZ8H3u3MJ+LBo5\nciTHHHMM999/P6eccgrf/va3GTduHMOGDePss89m27ZtTJw4kdbWVo466ihmzpzJ2LFjM77nYYcd\nxo033si4ceMYP348Rx111J51N954I+eccw6jR4+mb9++e5afccYZLFq0aM/J2HTbiYjEWdCfn2Uj\ns1rg1+4+NGnZr4AfAo8Bde7ebGY/B5539/tj27wBnODumzK9f11dnbe/8ciqVavaBJ+En35mIqVl\nZsvcvS7bdl3qozezM4GN7v5au1X9gPVJzzfElomISJl0+qobM+sNXEfQbdNlZjaDoHuHgcUaJSAi\nIl1q0X8ZGAS8ZmZrgf7AK2b2BWAjMCBp2/6xZR24+1x3r3P3uvjlhSIiUnidDnp3/6u7H+Lute5e\nS9A9M8rd3wcWAxfGrr4ZC2zN1j8vIiLFlcvllfcDfwSONLMNZjY9w+ZPAG8Da4BfAJcWpEoREemy\nrH307n5elvW1SY8duCz/skREpFAiMTK2WJKnKT7nnHNoaT+BdCc8//zznH766QAsXryY2bNnp912\ny5Yt/OxnP+v0Pm688UZ+/OMfZ93u85//fMb1Xd2/iISTgj6D5GmK99lnH+bMmdNmvbuze/fuTr/v\npEmTmDlzZtr15Q7acu9fRApLQZ+jv/u7v2PNmjWsXbuWI488kgsvvJChQ4eyfv16lixZwrhx4xg1\nahTnnHPOnqkRfvOb3zBkyBBGjRrFI488sue9km8w8sEHHzB58mSGDx/O8OHD+cMf/sDMmTN56623\nGDFiBFdddRWQflrkWbNmMXjwYI477jjeeOONlLW/8847e0bx/ud//uee5du3b2fChAmMGjWKYcOG\n8dhjjwF02H+67USkMlTG7JX/+q+wfHlh33PECLjttpw2bW1t5cknn2TixGAmiNWrVzN//nzGjh1L\nc3MzP/rRj/jtb39Lnz59uOmmm7jlllu4+uqrueiii3j22Wc5/PDDmTJlSsr3/t73vsff//3fs2jR\nInbt2sX27duZPXs2K1asYHns37xkyRJWr17Nn/70J9ydSZMm8cILL9CnTx8WLlzI8uXLaW1tZdSo\nUYwePbrDPq644gouueQSLrzwwjY3T+nVqxeLFi1i//33p7m5mbFjxzJp0qQO+29tbU25ne4TK1IZ\nKiPoyyQ+TTEELfrp06fz3nvvUVNTs2cem5dffpmVK1cyfvx4AD777DPGjRvH66+/zqBBgzjiiCMA\nOP/881PeiOTZZ5/lvvvuA4JzAgcccAAfffRRm23STYu8bds2Jk+eTO/YjG6TJk1K+e946aWXePjh\nhwG44IILuOaaa4Cg6+m6667jhRdeYK+99mLjxo0pb1ySbrsvfOELnTiaIlIulRH0Oba8Cy3eR99e\nnz599jx2d04++WTuv//+Ntukel1XxadFvvjii9ssv60TxyVV67u+vp6mpiaWLVtGz549qa2tTTnN\nca7biUg4qY8+T2PHjuWll15izZo1AOzYsYM333yTIUOGsHbtWt566y2ADn8I4iZMmMBddwWTfO7a\ntYutW7d2mIo43bTIxx9/PI8++igff/wx27Zt4/HHH0+5j/Hjx7Nw4UIgCO24rVu3csghh9CzZ0+e\ne+451q1bB6SeCjnVdiJSGRT0eaqurmbevHmcd955HHPMMXu6bXr16sXcuXM57bTTGDVqFIccckjK\n199+++0899xzDBs2jNGjR7Ny5U
qqqqoYP348Q4cO5aqrrko7LfKoUaOYMmUKw4cP5xvf+AbHHnts\n2n3ceeedDBs2rM19b6dOnUpDQwPDhg3jvvvuY8iQIQAd9p9uOxGpDDlNU1xsmqY4GvQzEymtok5T\nLCIilUNBLyIScaEO+jB0K0lu9LMSCa/QBn2vXr3YvHmzAqQCuDubN2+mV69e5S5FRFII7XX0/fv3\nZ8OGDTQ1NZW7FMlBr1696N+/f7nLEJEUQhv0PXv2ZNCgQeUuQ0Sk4oW260ZERApDQS8iEnEKehGR\niFPQi4hEXC43B7/HzBrNbEXSspvN7HUz+4uZLTKzA5PWXWtma8zsDTP7erEKFxGR3OTSop8HTGy3\n7GlgqLsfA7wJXAtgZl8BzgWOjr3mZ2bWo2DViohIp2UNend/Afiw3bIl7t4ae/oyEL+A+kxgobt/\n6u7vAGuAMQWsV0REOqkQffT/DDwZe9wPWJ+0bkNsmYiIlEleQW9m1wOtQH22bVO8doaZNZhZg0a/\niogUT5eD3sy+A5wOTPXEhDQbgQFJm/WPLevA3ee6e52711VXV3e1DBERyaJLQW9mE4GrgUnu3pK0\najFwrpl9zswGAUcAf8q/TBER6aqsc92Y2f3ACUBfM9sA3EBwlc3ngKdjN51+2d3/xd3/ZmYPAisJ\nunQuc/ddxSpeRESyC+2tBEVEJDPdSlBERAAFvYhI5CnoRUQiTkEvIhJxCnoRkYhT0IuIRJyCXkQk\n4hT0IiIRp6AXEYk4Bb2ISMQp6EVEIk5BLyIScQp6EZGIU9CLiEScgl5EJOIU9CIiEaegFxGJOAW9\niEjEKehFRCIua9Cb2T1m1mhmK5KWHWxmT5vZ6tj3g2LLzczuMLM1ZvYXMxtVzOJFRCS7XFr084CJ\n7ZbNBJ5x9yOAZ2LPAb4BHBH7mgHcVZgyRUSkq7IGvbu/AHzYbvGZwPzY4/nAWUnL7/PAy8CBZnZY\noYoVEZHO62of/aHuvin2+H3g0NjjfsD6pO02xJZ1YGYzzKzBzBqampq6WIaIiGST98lYd3fAu/C6\nue5e5+511dXV+ZYhIiJpdDXoP4h3ycS+N8aWbwQGJG3XP7ZMRETKpKtBvxiYFns8DXgsafmFsatv\nxgJbk7p4RESkDPbOtoGZ3Q+cAPQ1sw3ADcBs4EEzmw6sA74V2/wJ4FRgDdAC/FMRahYRkU7IGvTu\nfl6aVRNSbOvAZfkWJSIihaORsSIiEaegFxGJOAW9iEjEKehFRCJOQS8iUg47d8ITT8COHUXflYJe\nRKRUdu+GW28FM9hnHzjtNFi4sOi7zXp5pYiI5MEdfvlLuOiijuvOOgsuvLDoJSjoRUSK4cEHYcqU\njssnTIB774UBAzquKxIFvYhIoTz5JEyeDJ9+2nb5mDHwP/8DgweXpSwFvYhIPl54Ab75TWhubrt8\nyJCg/3348PLUlURBLyLSWcuWwTnnwDvvtF3erx889BCMG1eeutJQ0IuI5GLlSvj2t+G119ou339/\nePhhOOmk8tSVAwW9iEg677wTXBXz+993XPfII0F/fAXQdfQiIsk2bYJTTw2udf/Sl9qG/H33BdfC\nu1dMyIOCXkQENm+Gc88Nwv2LXwyunom7807YtSsI9wsuCLapMAp6Eemetm0LBjGZQd++8MADiXX/\n/d/Q2hqE+6WXwl6VHZWVXb2ISGd8/DFceWUQ7vvvD3ffnVh33XXB9e/uMHMm9OhRvjoLTCdjRSTa\ndu6EWbPgBz/ouO6734XZs6F379LXVUJ5tejN7N/M7G9mtsLM7jezXmY2yMyWmtkaM3vAzPYpVLEi\nIjnZtQt+8pPE5GHJIT9tGmzZErTc77gj8iEPeQS9mfUDvgfUuftQoAdwLnATcKu7Hw58BEwvRKEi\nIhm5w9y5QbjvvTf8x38k1v3jP0JjY7DNvHlwwAFlK7Mc8u2j3xvY18z2BnoDm4ATgV/F1s8Hzspz\nHyIi6U2eHIT7XnvBxRcnlp98MqxfH4T7ww9DdXX5aiyzLge9u28Efgy8SxDwW4FlwBZ3b41ttgHo\nl2+RIiJtXHppEO5m8OijieVHHAGrVwfhvmQJ9O9fvhpDJJ+um4OAM4FBwBeBPsDETrx+hpk1mFlD\nU1NTV8sQke7ihz9MhPtdd7VdV18fhPubb8Lhh5envhDLp+vmJOAdd29y953AI8B44MBYVw5Af2Bj\nqhe7+1x3r3P3uupu/JFKRDKYMycR7t//ftt1t98ehLt7MAeNpJVP0L8LjDWz3mZmwARgJfAccHZs\nm2nAY/mVKCLdykMPJcL9kkvarrv++kS4f+975amvAnX5Onp3X2pmvwJeAVqBV4G5wP8FFprZj2LL\nflmIQkUkwp55Jv3sj9Ontx3YJJ2W11U37n6Duw9x96HufoG7f+rub7v7GHc/3N3PcfdPs7+TFEN9\nPdTWBhcj1NYGz0VCo6Eh0XJvH/KnnpqYPEwhnzeNjI2o+nqYMQNaWoLn69YFzwGmTi1fXdLNvfkm\nHHlk6nUjRgQ39KjweWXCSEc0oq6/PhHycS0twXKRktq4MdFybx/yffsm5pd59VWFfJGoRR9R777b\nueUiBfXhh1BVlX799u3Qp0/p6unm9OczogYO7Nxykby1tCRa7qlCvrk5ccWMQr6kFPQRNWtWx7ma\nevcOlosUzM6dcOihQbinCu/4FATumVv4UlQK+oiaOjWY36mmJvgdrKkJnutErORt924YPToxM2Rj\nY9v1q1Ylwl1TEISC+ugjbOpUBbsUiDuceSY8/njq9UuXwpgxpa1JcqagF5H0+vWD995Lve6pp+CU\nU0pbj3SJum5EpK34CVWzjiG/cGGiWyZEIa/BgZkp6EUEjj46Ee7tTZuWCPcpU0pfWxbxwYHr1gUl\nxgcHKuwTFPQi3dU3v5kI95UrO66Ph/u8eSUvrTM0ODA7Bb1Id3L11Ylwf+SRjuvj4e5e+tq6SIMD\ns1PQi0Tdz36WCPebb+64Pj55WAWFezINDsxOQS8SRY8+mgj3yy7ruL61NRHuqfrlK4gGB2anoBeJ\nipdfToT75Mkd1+/YkQj3Hj1KX1+RaHBgdrqOXqSSrV4NgwenX9/UFMwQGXEaHJiZgl6k0jQ2BvPL\npLNmDXz5y6WrR0JPXTcilSB5ZshUIb90aaJbRiEv7SjoRcJq165EuKeaGXLx4kS4a54ZySCvoDez\nA83sV2b2upmtMrNxZnawmT1tZqtj3w8qVLEikRe/CsYM9k7RszpnTiLczzij9PVJRcq3RX878Bt3\nHwIMB1YBM4Fn3P0I4JnYcxHJJB7uqW6ld+21iXC/+OLS1yYVr8snY83sAOB44DsA7v4Z8JmZnQmc\nENtsPvA8cE0+RYpEUqbr17/1LXjggdLVIpGWT4t+ENAE3Gtmr5rZ3WbWBzjU3TfFtnkfyHB5gEg3\nM3hw+snDRoxItNwV8lJA+QT93sAo4C53HwnsoF03jbs7kHJctZnNMLMGM2toamrKowyRkPv
SlxLh\nvnp123V7750I91dfLU99Enn5BP0GYIO7L409/xVB8H9gZocBxL43pnqxu8919zp3r6uurs6jDJEQ\nmjQpEe7vvNNxfTzcd+4sfW3S7XQ56N39fWC9mR0ZWzQBWAksBqbFlk0DHsurQpFKcc01iXBPdcu9\nCpwZUqIh35Gx3wXqzWwf4G3gnwj+eDxoZtOBdcC38tyHSHjNmQOXXJJ+fWtrpOaVkcqUV9C7+3Kg\nLsWqCfm8r0ioLVwI552Xfv2OHR2nUxQpI811I5KLl16C445Lv/799zPPPyNSRpoCIWR0k+MQefvt\nRJ97qpB/5ZVEn7tCXkJMLfoQid/kOH7/y/hNjkFTsJbMRx/BwQenX//443D66aWrR6QA1KIPEd3k\nuEw++yzRck8V8nfckWi5K+SlAqlFHyK6yXEJuaeeVybussvgpz8tXT0iRaSgD5GBA4PumlTLpUAy\nzS9z3HHw4oulq0WkRNR1EyK6yXGRxLtlUoV8796JbhmFvESUgj5EdJPjAsoU7pAI9x07SluXSBmo\n6yZkdJPjPPTrB++9l369ph6QbkoteqlsEycmWu6pQl7zy4go6KUC/du/JcL9qac6rle4i7ShoO+m\nkkfg9u0bfIV6NO5Pf5oI99tu67h+1y6Fu0ga6qPvhtqPwN28ObEuVKNxH388mNc9nZYW2Hff0tUj\nUqHUou+GUo3ATVbW0bgNDYmWe6qQb2xMtNwV8iI5UdBXkEJNeJbLSNuSjsZduzYR7sce23H9G28k\nwl13IxPpNAV9hYh3t6xbF+RdvIslVdhn+4OQy0jboo/G3bIlEe6DBnVc/+KLiXAfPLjIxYhEm4K+\nQuQy4Vl9fXBS9fzzM/9BSDUCN1nRRuMmTx520EEd1y9cmAj3THO/i0inKOgrRLYJz+It/uQTq3Ht\n/yC0H4FbVRV8FWU0rnsi3D/3uY7rZ89OhPuUKQXaqYgkyzvozayHmb1qZr+OPR9kZkvNbI2ZPRC7\nn6zkKV1XSnx5thOs7f9QTJ0adI3v3g3NzcHX7t3BsoKEfDzcU80QOX16ItyvuaYAOxPJX5Rv+lOI\nFv0VwKqk5zcBt7r74cBHwPQC7KPbyzbhWbaTpyWZATPT/DJjxybC/e67U748yr9oEm6dOQdWkdy9\ny19Af+AZ4ETg14ABzcDesfXjgKeyvc/o0aNdsluwwL2mxt0s+L5gQWJdTU3ycNC2X717t922oNLt\nFNz33Tfnt1mwIKizZHWLJEn3+1NTU+7KMgMaPIeszrdFfxtwNbA79rwK2OLurbHnG4B+ee6j22rf\nwoVEd0v7LpZ0J1irqoowA2bfvrnNDJmpL6kd3V1LyinqN/3pctCb2elAo7sv6+LrZ5hZg5k1NDU1\ndbWMyOrsR8lUUxwvWBD0vRck5E88MRHuqc745jm/TNR/0STcsp0Dq3T5tOjHA5PMbC2wkKD75nbg\nQDOLT63QH9iY6sXuPtfd69y9rrobDYLJtR+6Ky3c5BOsBTmpevnliXB/7rmO6ws4eVjUf9Ek3CJ/\n059c+neyfQEnAL+OPX4IODf2eA5wabbXd5c++s70Q5ul7jM0K3KRt9ySud99166i7FZ99FJumc6B\nhRUl6qNP5RrgSjNbQ9Bn/8si7KMidaaV3pUWbpevWlm0KNFyv/LKjus//jiRv5luqJ0H3V1Lyq3g\nn4hDxLwAH7vzVVdX5w0NDeUuo+j22it1L4dZ8J8rWfsZJiH4KJku/Dq7Pc8+CxMmpC+2uTk4kysi\noWVmy9y9Ltt2GhlbQp1ppXe2hZvu08K0aXDppUEL/yu2KtFyTxXya9YkWu4KeYnR+IbKpxZ9CXW6\n1d0J6T4tVNNII4emf+HTT8NJJ+W3c4msYv6flfypRR9CxeyHTv5UsC8tOIZjKUN+OndTWxNruSvk\nJQONb4gGBX0GxfjIWqwTPrN+uHtPuLfQp8P6m7g6tta5h+kVeX16tp+HuhgKT+MbokG3Ekyj/UfW\nUN1iL1lsdGqqklZwNMNYkfJllXZ9erafR8X8vCrMwIHBsUy1XCqHWvRpFPIja8FbmpkmDwP69A5a\n7ulCvhIHgmT7eZSyi6E7fXKI/ECi7iKXi+2L/RXGAVOFGrBUsIFAmQYxQYd9Jg/8uOSS8g0EKdQg\nlGw/j1INMOuOA7sqcSBRd0GOA6bKHvIe0qDPdTa7bL8EVVW5vU9KnQj3MCpkKGb7eZRq9sGu7kdh\nKcWgoM9TLiGVbZsFC9JndNqWZoWHe7JChm8ux7oULe2ufHLojp8CpDQU9AWQrRXW1VZmh7AbMiQy\n4Z6s0N0p2X4epWg1d+WPV6XOdS7hp6AvsFQh0tV+Y3B/+2tTM4d7kSYPK6UoBlxXWudlm6Cui9TN\nVDkU9AWU7pc7W/97+6CbwZzM4d7SUs5/ZsFFtcuis0FYSX/wovoziyoFfQGl+0Wtqsreb3zG557K\nHO6bNpX131Zsah1WVnhW0h8lyT3odR19DtKNAvzwwzRTGoxZDWZMPd9Y/OnXO75w1arE79AXvlDc\n4qXsKmkKZo2EjSZNapaD2trUowNraoJpDIBgWt9Md8p69ln4h38oQnXhpQmxKk9O/9clNDSpWQGl\nGx343zd8khihmirkFy1KtNy7WciDJsSqRBoJG00K+hzEP3pXVYERTB62o8U475/37bjxbbclwv2s\ns0pfbIioG6DyVFI3k+QuMkFf7PlHpp5vNG82dtOj48rrr0+E+xVXFHbHFawcN/zuTvPQFEuUb6nX\nXUUi6ON9wevWBVkbn7mwmJOHLWMUhgfzuv/oR3nuKJpK3Q1QtP8HIhWuyydjzWwAcB9wKODAXHe/\n3cwOBh4AaoG1wLfc/aNM75XvydiCnkBKMyPkntW0PV6p7vcqCfX1wQeed98NWvKzZhWvhagTidLd\n5HoyNp+gPww4zN1fMbP9gGXAWcB3gA/dfbaZzQQOcvdrMr1XvkHfmZtup7TPPrBzZ/r17gqRCpD3\n/wORClP0q27cfZO7vxJ7vA1YBfQDzgTmxzabTxD+RdWlvuAxYxLdMilCvrbG2cuCrpn6el2NUAnK\ncU5ApBIUpI/ezGqBkcBS4FB33xRb9T5kujN1YeQcwj/4QSLc//znjm/kTv0Cp09v79DPC7oaIez0\nx1gkjVyGz2b6Aj5P0G3zj7HnW9qt/yjN62YADUDDwIED8x4KnHao/ZNPZp6CYPfuNu9TqCHgGvpf\nHjru0p2Q4xQIeY2MNbOewK+Bp9z9ltiyN4AT3H1TrB//eXc/MtP7FHxk7J//HHTNpHH4wM/4wf/q\nmbI1Xoh+Xo0IFZFSKHofvZkZ8EtgVTzkYxYD02KPpwGPdXUfnbJ6daJbJkXI9913B0ZwL9W33u3J\njBlw6aUdr7kuRD+vRoSKSJjk00c/HrgAONHMlse+TgVmAyeb2WrgpNjz4vn+94NwHzy447rNm4Mr\nZmqczR+37bxtaYE5czpec33qqfn382pEqIiEyd
5dfaG7/x5Id9H5hK6+b6csXw4//GHbZWvXBmdK\nk6QL2PZdNC0t8MQTQRdLPtd+DxyY+lJMXf0hIuVQ2SNjhw3jmWueYtiALYlLIX9f02GzzgTsu+/m\nPwRcV3+ISJhUdNDXL+zBpP99CivWH5BxyPusWVkHvO5RiFa3JoYSkTCp6PnoOzNaNZeg15UxIlJJ\nusV89J056VnTsUcHgB491OoWkWir2KCvrw8ui0wlVfdLun7z+fM71xevaXBFpNJUZNDHByTt2tVx\nXbqTnoXoN9c0uCJSiSqyjz5d33yPHkELXdPgikh3EOk++nR986la+KXYrwZCiUiYVWTQZ7oEsphd\nKZoGV0QqUUUGfaoTq3HFnFNGA6FEpBJVZNDHT6ymU6yuFA2EEpFKVJEnY+N0clREurNIn4yNU1eK\niEh2FR306koREcmuooMe8p9pUqQrNEJaKkmX56MX6a7a3yoy+QbyamhIGFV8i16k1HSrSKk0CnqR\nTtIIaak0CnqRTtIIaak0RQt6M5toZm+Y2Rozm1ms/YiUmi7rlUpTlKA3sx7AncA3gK8A55nZV4qx\nL5FS02W9UmmKddXNGGCNu78NYGYLgTOBlUXan0hJTZ2qYJfKUayum37A+qTnG2LL9jCzGWbWYGYN\nTU1NRSq0Z9B0AAAEYUlEQVRDRETKdjLW3ee6e52711VXV5erDBGRyCtW0G8EBiQ97x9bJiIiJVas\noP8zcISZDTKzfYBzgcVF2peIiGRQlJOx7t5qZpcDTwE9gHvc/W/F2JeIiGQWivnozawJSDGzfGj0\nBZrLXUQGqi9/Ya9R9eUv7DV2pb4ad896kjMUQR92ZtaQy+T+5aL68hf2GlVf/sJeYzHr0xQIIiIR\np6AXEYk4BX1uMtyKPBRUX/7CXqPqy1/YayxafeqjFxGJOLXoRUQiTkGfgZmtNbO/mtlyM2sodz0A\nZnaPmTWa2YqkZQeb2dNmtjr2/aCQ1XejmW2MHcflZnZqGesbYGbPmdlKM/ubmV0RWx6KY5ihvjAd\nw15m9iczey1W4w9iyweZ2dLY1OQPxAZLhqm+eWb2TtIxHFGO+pLq7GFmr5rZr2PPi3b8FPTZ/YO7\njwjRZVnzgIntls0EnnH3I4BnYs/LZR4d6wO4NXYcR7j7EyWuKVkr8O/u/hVgLHBZbArtsBzDdPVB\neI7hp8CJ7j4cGAFMNLOxwE2xGg8HPgKmh6w+gKuSjuHyMtUXdwWwKul50Y6fgr7CuPsLwIftFp8J\nzI89ng+cVdKikqSpLzTcfZO7vxJ7vI3gF60fITmGGeoLDQ9sjz3tGfty4ETgV7Hl5TyG6eoLDTPr\nD5wG3B17bhTx+CnoM3NgiZktM7MZ5S4mg0PdfVPs8fvAoeUsJo3Lzewvsa6dsnUtJTOzWmAksJQQ\nHsN29UGIjmGs22E50Ag8DbwFbHH31tgmHaYmL2d97h4/hrNix/BWM/tcueoDbgOuBnbHnldRxOOn\noM/sOHcfRXCnrMvM7PhyF5SNB5dRhar1AtwFfJngY/Qm4CflLQfM7PPAw8C/uvv/S14XhmOYor5Q\nHUN33+XuIwhmph0DDClnPe21r8/MhgLXEtR5LHAwcE05ajOz04FGd19Wqn0q6DNw942x743AIoL/\n0GH0gZkdBhD73ljmetpw9w9iv3i7gV9Q5uNoZj0JQrTe3R+JLQ7NMUxVX9iOYZy7bwGeA8YBB5pZ\nfKLEUExNnlTfxFi3mLv7p8C9lO8YjgcmmdlaYCFBl83tFPH4KejTMLM+ZrZf/DFwCrAi86vKZjEw\nLfZ4GvBYGWvpIB6gMZMp43GM9YX+Eljl7rckrQrFMUxXX8iOYbWZHRh7vC9wMsG5hOeAs2OblfMY\npqrv9aQ/5EbQ/12WY+ju17p7f3evJZjC/Vl3n0oRj58GTKVhZl8iaMVDMJ3z/3H3WWUsCQAzux84\ngWCmuw+AG4BHgQeBgQSzgH7L3ctyQjRNfScQdDk4sBa4OKk/vNT1HQe8CPyVRP/odQT94GU/hhnq\nO4/wHMNjCE4W9iBoLD7o7v8V+51ZSNAt8ipwfqz1HJb6ngWqAQOWA/+SdNK2LMzsBOA/3P30Yh4/\nBb2ISMSp60ZEJOIU9CIiEaegFxGJOAW9iEjEKehFRCJOQS8iEnEKehGRiFPQi4hE3P8Hzc2lhtVL\nXfoAAAAASUVORK5CYII=\n",
67 | "text/plain": [
68 | ""
69 | ]
70 | },
71 | "metadata": {},
72 | "output_type": "display_data"
73 | }
74 | ],
75 | "source": [
76 | "\"\"\"\n",
77 | "Simple linear regression example in TensorFlow\n",
78 | "This program tries to predict the number of thefts from \n",
79 | "the number of fire in the city of Chicago\n",
80 | "\"\"\"\n",
81 | "import os\n",
82 | "os.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n",
83 | "\n",
84 | "import numpy as np\n",
85 | "import matplotlib.pyplot as plt\n",
86 | "import tensorflow as tf\n",
87 | "import xlrd\n",
88 | "\n",
89 | "import utils\n",
90 | "\n",
91 | "DATA_FILE = 'data/fire_theft.xls'\n",
92 | "\n",
93 | "# Step 1: read in data from the .xls file\n",
94 | "book = xlrd.open_workbook(DATA_FILE, encoding_override=\"utf-8\")\n",
95 | "sheet = book.sheet_by_index(0)\n",
96 | "data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])\n",
97 | "n_samples = sheet.nrows - 1\n",
98 | "\n",
99 | "# Step 2: create placeholders for input X (number of fire) and label Y (number of theft)\n",
100 | "X = tf.placeholder(tf.float32, name='X')\n",
101 | "Y = tf.placeholder(tf.float32, name='Y')\n",
102 | "\n",
103 | "# Step 3: create weight and bias, initialized to 0\n",
104 | "w = tf.Variable(0.0, name='weights')\n",
105 | "b = tf.Variable(0.0, name='bias')\n",
106 | "\n",
107 | "# Step 4: build model to predict Y\n",
108 | "Y_predicted = X * w + b \n",
109 | "\n",
110 | "# Step 5: use the square error as the loss function\n",
111 | "loss = tf.square(Y - Y_predicted, name='loss')\n",
112 | "# loss = utils.huber_loss(Y, Y_predicted)\n",
113 | "\n",
114 | "# Step 6: using gradient descent with learning rate of 0.01 to minimize loss\n",
115 | "optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)\n",
116 | "\n",
117 | "with tf.Session() as sess:\n",
118 | "\t# Step 7: initialize the necessary variables, in this case, w and b\n",
119 | "\tsess.run(tf.global_variables_initializer()) \n",
120 | "\t\n",
121 | "\twriter = tf.summary.FileWriter('./graphs/linear_reg', sess.graph)\n",
122 | "\t\n",
123 | "\t# Step 8: train the model\n",
124 | "\tfor i in range(50): # train the model 100 epochs\n",
125 | "\t\ttotal_loss = 0\n",
126 | "\t\tfor x, y in data:\n",
127 | "\t\t\t# Session runs train_op and fetch values of loss\n",
128 | "\t\t\t_, l = sess.run([optimizer, loss], feed_dict={X: x, Y:y}) \n",
129 | "\t\t\ttotal_loss += l\n",
130 | "\t\tprint('Epoch {0}: {1}'.format(i, total_loss/n_samples))\n",
131 | "\n",
132 | "\t# close the writer when you're done using it\n",
133 | "\twriter.close() \n",
134 | "\t\n",
135 | "\t# Step 9: output the values of w and b\n",
136 | "\tw, b = sess.run([w, b]) \n",
137 | "\n",
138 | "# plot the results\n",
139 | "X, Y = data.T[0], data.T[1]\n",
140 | "plt.plot(X, Y, 'bo', label='Real data')\n",
141 | "plt.plot(X, X * w + b, 'r', label='Predicted data')\n",
142 | "plt.legend()\n",
143 | "plt.show()"
144 | ]
145 | }
146 | ],
147 | "metadata": {
148 | "kernelspec": {
149 | "display_name": "Python 3",
150 | "language": "python",
151 | "name": "python3"
152 | },
153 | "language_info": {
154 | "codemirror_mode": {
155 | "name": "ipython",
156 | "version": 3
157 | },
158 | "file_extension": ".py",
159 | "mimetype": "text/x-python",
160 | "name": "python",
161 | "nbconvert_exporter": "python",
162 | "pygments_lexer": "ipython3",
163 | "version": "3.5.2"
164 | }
165 | },
166 | "nbformat": 4,
167 | "nbformat_minor": 2
168 | }
169 |
--------------------------------------------------------------------------------
/data/heart.csv:
--------------------------------------------------------------------------------
1 | sbp,tobacco,ldl,adiposity,famhist,typea,obesity,alcohol,age,chd
2 | 160,12,5.73,23.11,Present,49,25.3,97.2,52,1
3 | 144,0.01,4.41,28.61,Absent,55,28.87,2.06,63,1
4 | 118,0.08,3.48,32.28,Present,52,29.14,3.81,46,0
5 | 170,7.5,6.41,38.03,Present,51,31.99,24.26,58,1
6 | 134,13.6,3.5,27.78,Present,60,25.99,57.34,49,1
7 | 132,6.2,6.47,36.21,Present,62,30.77,14.14,45,0
8 | 142,4.05,3.38,16.2,Absent,59,20.81,2.62,38,0
9 | 114,4.08,4.59,14.6,Present,62,23.11,6.72,58,1
10 | 114,0,3.83,19.4,Present,49,24.86,2.49,29,0
11 | 132,0,5.8,30.96,Present,69,30.11,0,53,1
12 | 206,6,2.95,32.27,Absent,72,26.81,56.06,60,1
13 | 134,14.1,4.44,22.39,Present,65,23.09,0,40,1
14 | 118,0,1.88,10.05,Absent,59,21.57,0,17,0
15 | 132,0,1.87,17.21,Absent,49,23.63,0.97,15,0
16 | 112,9.65,2.29,17.2,Present,54,23.53,0.68,53,0
17 | 117,1.53,2.44,28.95,Present,35,25.89,30.03,46,0
18 | 120,7.5,15.33,22,Absent,60,25.31,34.49,49,0
19 | 146,10.5,8.29,35.36,Present,78,32.73,13.89,53,1
20 | 158,2.6,7.46,34.07,Present,61,29.3,53.28,62,1
21 | 124,14,6.23,35.96,Present,45,30.09,0,59,1
22 | 106,1.61,1.74,12.32,Absent,74,20.92,13.37,20,1
23 | 132,7.9,2.85,26.5,Present,51,26.16,25.71,44,0
24 | 150,0.3,6.38,33.99,Present,62,24.64,0,50,0
25 | 138,0.6,3.81,28.66,Absent,54,28.7,1.46,58,0
26 | 142,18.2,4.34,24.38,Absent,61,26.19,0,50,0
27 | 124,4,12.42,31.29,Present,54,23.23,2.06,42,1
28 | 118,6,9.65,33.91,Absent,60,38.8,0,48,0
29 | 145,9.1,5.24,27.55,Absent,59,20.96,21.6,61,1
30 | 144,4.09,5.55,31.4,Present,60,29.43,5.55,56,0
31 | 146,0,6.62,25.69,Absent,60,28.07,8.23,63,1
32 | 136,2.52,3.95,25.63,Absent,51,21.86,0,45,1
33 | 158,1.02,6.33,23.88,Absent,66,22.13,24.99,46,1
34 | 122,6.6,5.58,35.95,Present,53,28.07,12.55,59,1
35 | 126,8.75,6.53,34.02,Absent,49,30.25,0,41,1
36 | 148,5.5,7.1,25.31,Absent,56,29.84,3.6,48,0
37 | 122,4.26,4.44,13.04,Absent,57,19.49,48.99,28,1
38 | 140,3.9,7.32,25.05,Absent,47,27.36,36.77,32,0
39 | 110,4.64,4.55,30.46,Absent,48,30.9,15.22,46,0
40 | 130,0,2.82,19.63,Present,70,24.86,0,29,0
41 | 136,11.2,5.81,31.85,Present,75,27.68,22.94,58,1
42 | 118,0.28,5.8,33.7,Present,60,30.98,0,41,1
43 | 144,0.04,3.38,23.61,Absent,30,23.75,4.66,30,0
44 | 120,0,1.07,16.02,Absent,47,22.15,0,15,0
45 | 130,2.61,2.72,22.99,Present,51,26.29,13.37,51,1
46 | 114,0,2.99,9.74,Absent,54,46.58,0,17,0
47 | 128,4.65,3.31,22.74,Absent,62,22.95,0.51,48,0
48 | 162,7.4,8.55,24.65,Present,64,25.71,5.86,58,1
49 | 116,1.91,7.56,26.45,Present,52,30.01,3.6,33,1
50 | 114,0,1.94,11.02,Absent,54,20.17,38.98,16,0
51 | 126,3.8,3.88,31.79,Absent,57,30.53,0,30,0
52 | 122,0,5.75,30.9,Present,46,29.01,4.11,42,0
53 | 134,2.5,3.66,30.9,Absent,52,27.19,23.66,49,0
54 | 152,0.9,9.12,30.23,Absent,56,28.64,0.37,42,1
55 | 134,8.08,1.55,17.5,Present,56,22.65,66.65,31,1
56 | 156,3,1.82,27.55,Absent,60,23.91,54,53,0
57 | 152,5.99,7.99,32.48,Absent,45,26.57,100.32,48,0
58 | 118,0,2.99,16.17,Absent,49,23.83,3.22,28,0
59 | 126,5.1,2.96,26.5,Absent,55,25.52,12.34,38,1
60 | 103,0.03,4.21,18.96,Absent,48,22.94,2.62,18,0
61 | 121,0.8,5.29,18.95,Present,47,22.51,0,61,0
62 | 142,0.28,1.8,21.03,Absent,57,23.65,2.93,33,0
63 | 138,1.15,5.09,27.87,Present,61,25.65,2.34,44,0
64 | 152,10.1,4.71,24.65,Present,65,26.21,24.53,57,0
65 | 140,0.45,4.3,24.33,Absent,41,27.23,10.08,38,0
66 | 130,0,1.82,10.45,Absent,57,22.07,2.06,17,0
67 | 136,7.36,2.19,28.11,Present,61,25,61.71,54,0
68 | 124,4.82,3.24,21.1,Present,48,28.49,8.42,30,0
69 | 112,0.41,1.88,10.29,Absent,39,22.08,20.98,27,0
70 | 118,4.46,7.27,29.13,Present,48,29.01,11.11,33,0
71 | 122,0,3.37,16.1,Absent,67,21.06,0,32,1
72 | 118,0,3.67,12.13,Absent,51,19.15,0.6,15,0
73 | 130,1.72,2.66,10.38,Absent,68,17.81,11.1,26,0
74 | 130,5.6,3.37,24.8,Absent,58,25.76,43.2,36,0
75 | 126,0.09,5.03,13.27,Present,50,17.75,4.63,20,0
76 | 128,0.4,6.17,26.35,Absent,64,27.86,11.11,34,0
77 | 136,0,4.12,17.42,Absent,52,21.66,12.86,40,0
78 | 134,0,5.9,30.84,Absent,49,29.16,0,55,0
79 | 140,0.6,5.56,33.39,Present,58,27.19,0,55,1
80 | 168,4.5,6.68,28.47,Absent,43,24.25,24.38,56,1
81 | 108,0.4,5.91,22.92,Present,57,25.72,72,39,0
82 | 114,3,7.04,22.64,Present,55,22.59,0,45,1
83 | 140,8.14,4.93,42.49,Absent,53,45.72,6.43,53,1
84 | 148,4.8,6.09,36.55,Present,63,25.44,0.88,55,1
85 | 148,12.2,3.79,34.15,Absent,57,26.38,14.4,57,1
86 | 128,0,2.43,13.15,Present,63,20.75,0,17,0
87 | 130,0.56,3.3,30.86,Absent,49,27.52,33.33,45,0
88 | 126,10.5,4.49,17.33,Absent,67,19.37,0,49,1
89 | 140,0,5.08,27.33,Present,41,27.83,1.25,38,0
90 | 126,0.9,5.64,17.78,Present,55,21.94,0,41,0
91 | 122,0.72,4.04,32.38,Absent,34,28.34,0,55,0
92 | 116,1.03,2.83,10.85,Absent,45,21.59,1.75,21,0
93 | 120,3.7,4.02,39.66,Absent,61,30.57,0,64,1
94 | 143,0.46,2.4,22.87,Absent,62,29.17,15.43,29,0
95 | 118,4,3.95,18.96,Absent,54,25.15,8.33,49,1
96 | 194,1.7,6.32,33.67,Absent,47,30.16,0.19,56,0
97 | 134,3,4.37,23.07,Absent,56,20.54,9.65,62,0
98 | 138,2.16,4.9,24.83,Present,39,26.06,28.29,29,0
99 | 136,0,5,27.58,Present,49,27.59,1.47,39,0
100 | 122,3.2,11.32,35.36,Present,55,27.07,0,51,1
101 | 164,12,3.91,19.59,Absent,51,23.44,19.75,39,0
102 | 136,8,7.85,23.81,Present,51,22.69,2.78,50,0
103 | 166,0.07,4.03,29.29,Absent,53,28.37,0,27,0
104 | 118,0,4.34,30.12,Present,52,32.18,3.91,46,0
105 | 128,0.42,4.6,26.68,Absent,41,30.97,10.33,31,0
106 | 118,1.5,5.38,25.84,Absent,64,28.63,3.89,29,0
107 | 158,3.6,2.97,30.11,Absent,63,26.64,108,64,0
108 | 108,1.5,4.33,24.99,Absent,66,22.29,21.6,61,1
109 | 170,7.6,5.5,37.83,Present,42,37.41,6.17,54,1
110 | 118,1,5.76,22.1,Absent,62,23.48,7.71,42,0
111 | 124,0,3.04,17.33,Absent,49,22.04,0,18,0
112 | 114,0,8.01,21.64,Absent,66,25.51,2.49,16,0
113 | 168,9,8.53,24.48,Present,69,26.18,4.63,54,1
114 | 134,2,3.66,14.69,Absent,52,21.03,2.06,37,0
115 | 174,0,8.46,35.1,Present,35,25.27,0,61,1
116 | 116,31.2,3.17,14.99,Absent,47,19.4,49.06,59,1
117 | 128,0,10.58,31.81,Present,46,28.41,14.66,48,0
118 | 140,4.5,4.59,18.01,Absent,63,21.91,22.09,32,1
119 | 154,0.7,5.91,25,Absent,13,20.6,0,42,0
120 | 150,3.5,6.99,25.39,Present,50,23.35,23.48,61,1
121 | 130,0,3.92,25.55,Absent,68,28.02,0.68,27,0
122 | 128,2,6.13,21.31,Absent,66,22.86,11.83,60,0
123 | 120,1.4,6.25,20.47,Absent,60,25.85,8.51,28,0
124 | 120,0,5.01,26.13,Absent,64,26.21,12.24,33,0
125 | 138,4.5,2.85,30.11,Absent,55,24.78,24.89,56,1
126 | 153,7.8,3.96,25.73,Absent,54,25.91,27.03,45,0
127 | 123,8.6,11.17,35.28,Present,70,33.14,0,59,1
128 | 148,4.04,3.99,20.69,Absent,60,27.78,1.75,28,0
129 | 136,3.96,2.76,30.28,Present,50,34.42,18.51,38,0
130 | 134,8.8,7.41,26.84,Absent,35,29.44,29.52,60,1
131 | 152,12.18,4.04,37.83,Present,63,34.57,4.17,64,0
132 | 158,13.5,5.04,30.79,Absent,54,24.79,21.5,62,0
133 | 132,2,3.08,35.39,Absent,45,31.44,79.82,58,1
134 | 134,1.5,3.73,21.53,Absent,41,24.7,11.11,30,1
135 | 142,7.44,5.52,33.97,Absent,47,29.29,24.27,54,0
136 | 134,6,3.3,28.45,Absent,65,26.09,58.11,40,0
137 | 122,4.18,9.05,29.27,Present,44,24.05,19.34,52,1
138 | 116,2.7,3.69,13.52,Absent,55,21.13,18.51,32,0
139 | 128,0.5,3.7,12.81,Present,66,21.25,22.73,28,0
140 | 120,0,3.68,12.24,Absent,51,20.52,0.51,20,0
141 | 124,0,3.95,36.35,Present,59,32.83,9.59,54,0
142 | 160,14,5.9,37.12,Absent,58,33.87,3.52,54,1
143 | 130,2.78,4.89,9.39,Present,63,19.3,17.47,25,1
144 | 128,2.8,5.53,14.29,Absent,64,24.97,0.51,38,0
145 | 130,4.5,5.86,37.43,Absent,61,31.21,32.3,58,0
146 | 109,1.2,6.14,29.26,Absent,47,24.72,10.46,40,0
147 | 144,0,3.84,18.72,Absent,56,22.1,4.8,40,0
148 | 118,1.05,3.16,12.98,Present,46,22.09,16.35,31,0
149 | 136,3.46,6.38,32.25,Present,43,28.73,3.13,43,1
150 | 136,1.5,6.06,26.54,Absent,54,29.38,14.5,33,1
151 | 124,15.5,5.05,24.06,Absent,46,23.22,0,61,1
152 | 148,6,6.49,26.47,Absent,48,24.7,0,55,0
153 | 128,6.6,3.58,20.71,Absent,55,24.15,0,52,0
154 | 122,0.28,4.19,19.97,Absent,61,25.63,0,24,0
155 | 108,0,2.74,11.17,Absent,53,22.61,0.95,20,0
156 | 124,3.04,4.8,19.52,Present,60,21.78,147.19,41,1
157 | 138,8.8,3.12,22.41,Present,63,23.33,120.03,55,1
158 | 127,0,2.81,15.7,Absent,42,22.03,1.03,17,0
159 | 174,9.45,5.13,35.54,Absent,55,30.71,59.79,53,0
160 | 122,0,3.05,23.51,Absent,46,25.81,0,38,0
161 | 144,6.75,5.45,29.81,Absent,53,25.62,26.23,43,1
162 | 126,1.8,6.22,19.71,Absent,65,24.81,0.69,31,0
163 | 208,27.4,3.12,26.63,Absent,66,27.45,33.07,62,1
164 | 138,0,2.68,17.04,Absent,42,22.16,0,16,0
165 | 148,0,3.84,17.26,Absent,70,20,0,21,0
166 | 122,0,3.08,16.3,Absent,43,22.13,0,16,0
167 | 132,7,3.2,23.26,Absent,77,23.64,23.14,49,0
168 | 110,12.16,4.99,28.56,Absent,44,27.14,21.6,55,1
169 | 160,1.52,8.12,29.3,Present,54,25.87,12.86,43,1
170 | 126,0.54,4.39,21.13,Present,45,25.99,0,25,0
171 | 162,5.3,7.95,33.58,Present,58,36.06,8.23,48,0
172 | 194,2.55,6.89,33.88,Present,69,29.33,0,41,0
173 | 118,0.75,2.58,20.25,Absent,59,24.46,0,32,0
174 | 124,0,4.79,34.71,Absent,49,26.09,9.26,47,0
175 | 160,0,2.42,34.46,Absent,48,29.83,1.03,61,0
176 | 128,0,2.51,29.35,Present,53,22.05,1.37,62,0
177 | 122,4,5.24,27.89,Present,45,26.52,0,61,1
178 | 132,2,2.7,21.57,Present,50,27.95,9.26,37,0
179 | 120,0,2.42,16.66,Absent,46,20.16,0,17,0
180 | 128,0.04,8.22,28.17,Absent,65,26.24,11.73,24,0
181 | 108,15,4.91,34.65,Absent,41,27.96,14.4,56,0
182 | 166,0,4.31,34.27,Absent,45,30.14,13.27,56,0
183 | 152,0,6.06,41.05,Present,51,40.34,0,51,0
184 | 170,4.2,4.67,35.45,Present,50,27.14,7.92,60,1
185 | 156,4,2.05,19.48,Present,50,21.48,27.77,39,1
186 | 116,8,6.73,28.81,Present,41,26.74,40.94,48,1
187 | 122,4.4,3.18,11.59,Present,59,21.94,0,33,1
188 | 150,20,6.4,35.04,Absent,53,28.88,8.33,63,0
189 | 129,2.15,5.17,27.57,Absent,52,25.42,2.06,39,0
190 | 134,4.8,6.58,29.89,Present,55,24.73,23.66,63,0
191 | 126,0,5.98,29.06,Present,56,25.39,11.52,64,1
192 | 142,0,3.72,25.68,Absent,48,24.37,5.25,40,1
193 | 128,0.7,4.9,37.42,Present,72,35.94,3.09,49,1
194 | 102,0.4,3.41,17.22,Present,56,23.59,2.06,39,1
195 | 130,0,4.89,25.98,Absent,72,30.42,14.71,23,0
196 | 138,0.05,2.79,10.35,Absent,46,21.62,0,18,0
197 | 138,0,1.96,11.82,Present,54,22.01,8.13,21,0
198 | 128,0,3.09,20.57,Absent,54,25.63,0.51,17,0
199 | 162,2.92,3.63,31.33,Absent,62,31.59,18.51,42,0
200 | 160,3,9.19,26.47,Present,39,28.25,14.4,54,1
201 | 148,0,4.66,24.39,Absent,50,25.26,4.03,27,0
202 | 124,0.16,2.44,16.67,Absent,65,24.58,74.91,23,0
203 | 136,3.15,4.37,20.22,Present,59,25.12,47.16,31,1
204 | 134,2.75,5.51,26.17,Absent,57,29.87,8.33,33,0
205 | 128,0.73,3.97,23.52,Absent,54,23.81,19.2,64,0
206 | 122,3.2,3.59,22.49,Present,45,24.96,36.17,58,0
207 | 152,3,4.64,31.29,Absent,41,29.34,4.53,40,0
208 | 162,0,5.09,24.6,Present,64,26.71,3.81,18,0
209 | 124,4,6.65,30.84,Present,54,28.4,33.51,60,0
210 | 136,5.8,5.9,27.55,Absent,65,25.71,14.4,59,0
211 | 136,8.8,4.26,32.03,Present,52,31.44,34.35,60,0
212 | 134,0.05,8.03,27.95,Absent,48,26.88,0,60,0
213 | 122,1,5.88,34.81,Present,69,31.27,15.94,40,1
214 | 116,3,3.05,30.31,Absent,41,23.63,0.86,44,0
215 | 132,0,0.98,21.39,Absent,62,26.75,0,53,0
216 | 134,0,2.4,21.11,Absent,57,22.45,1.37,18,0
217 | 160,7.77,8.07,34.8,Absent,64,31.15,0,62,1
218 | 180,0.52,4.23,16.38,Absent,55,22.56,14.77,45,1
219 | 124,0.81,6.16,11.61,Absent,35,21.47,10.49,26,0
220 | 114,0,4.97,9.69,Absent,26,22.6,0,25,0
221 | 208,7.4,7.41,32.03,Absent,50,27.62,7.85,57,0
222 | 138,0,3.14,12,Absent,54,20.28,0,16,0
223 | 164,0.5,6.95,39.64,Present,47,41.76,3.81,46,1
224 | 144,2.4,8.13,35.61,Absent,46,27.38,13.37,60,0
225 | 136,7.5,7.39,28.04,Present,50,25.01,0,45,1
226 | 132,7.28,3.52,12.33,Absent,60,19.48,2.06,56,0
227 | 143,5.04,4.86,23.59,Absent,58,24.69,18.72,42,0
228 | 112,4.46,7.18,26.25,Present,69,27.29,0,32,1
229 | 134,10,3.79,34.72,Absent,42,28.33,28.8,52,1
230 | 138,2,5.11,31.4,Present,49,27.25,2.06,64,1
231 | 188,0,5.47,32.44,Present,71,28.99,7.41,50,1
232 | 110,2.35,3.36,26.72,Present,54,26.08,109.8,58,1
233 | 136,13.2,7.18,35.95,Absent,48,29.19,0,62,0
234 | 130,1.75,5.46,34.34,Absent,53,29.42,0,58,1
235 | 122,0,3.76,24.59,Absent,56,24.36,0,30,0
236 | 138,0,3.24,27.68,Absent,60,25.7,88.66,29,0
237 | 130,18,4.13,27.43,Absent,54,27.44,0,51,1
238 | 126,5.5,3.78,34.15,Absent,55,28.85,3.18,61,0
239 | 176,5.76,4.89,26.1,Present,46,27.3,19.44,57,0
240 | 122,0,5.49,19.56,Absent,57,23.12,14.02,27,0
241 | 124,0,3.23,9.64,Absent,59,22.7,0,16,0
242 | 140,5.2,3.58,29.26,Absent,70,27.29,20.17,45,1
243 | 128,6,4.37,22.98,Present,50,26.01,0,47,0
244 | 190,4.18,5.05,24.83,Absent,45,26.09,82.85,41,0
245 | 144,0.76,10.53,35.66,Absent,63,34.35,0,55,1
246 | 126,4.6,7.4,31.99,Present,57,28.67,0.37,60,1
247 | 128,0,2.63,23.88,Absent,45,21.59,6.54,57,0
248 | 136,0.4,3.91,21.1,Present,63,22.3,0,56,1
249 | 158,4,4.18,28.61,Present,42,25.11,0,60,0
250 | 160,0.6,6.94,30.53,Absent,36,25.68,1.42,64,0
251 | 124,6,5.21,33.02,Present,64,29.37,7.61,58,1
252 | 158,6.17,8.12,30.75,Absent,46,27.84,92.62,48,0
253 | 128,0,6.34,11.87,Absent,57,23.14,0,17,0
254 | 166,3,3.82,26.75,Absent,45,20.86,0,63,1
255 | 146,7.5,7.21,25.93,Present,55,22.51,0.51,42,0
256 | 161,9,4.65,15.16,Present,58,23.76,43.2,46,0
257 | 164,13.02,6.26,29.38,Present,47,22.75,37.03,54,1
258 | 146,5.08,7.03,27.41,Present,63,36.46,24.48,37,1
259 | 142,4.48,3.57,19.75,Present,51,23.54,3.29,49,0
260 | 138,12,5.13,28.34,Absent,59,24.49,32.81,58,1
261 | 154,1.8,7.13,34.04,Present,52,35.51,39.36,44,0
262 | 118,0,2.39,12.13,Absent,49,18.46,0.26,17,1
263 | 124,0.61,2.69,17.15,Present,61,22.76,11.55,20,0
264 | 124,1.04,2.84,16.42,Present,46,20.17,0,61,0
265 | 136,5,4.19,23.99,Present,68,27.8,25.86,35,0
266 | 132,9.9,4.63,27.86,Present,46,23.39,0.51,52,1
267 | 118,0.12,1.96,20.31,Absent,37,20.01,2.42,18,0
268 | 118,0.12,4.16,9.37,Absent,57,19.61,0,17,0
269 | 134,12,4.96,29.79,Absent,53,24.86,8.23,57,0
270 | 114,0.1,3.95,15.89,Present,57,20.31,17.14,16,0
271 | 136,6.8,7.84,30.74,Present,58,26.2,23.66,45,1
272 | 130,0,4.16,39.43,Present,46,30.01,0,55,1
273 | 136,2.2,4.16,38.02,Absent,65,37.24,4.11,41,1
274 | 136,1.36,3.16,14.97,Present,56,24.98,7.3,24,0
275 | 154,4.2,5.59,25.02,Absent,58,25.02,1.54,43,0
276 | 108,0.8,2.47,17.53,Absent,47,22.18,0,55,1
277 | 136,8.8,4.69,36.07,Present,38,26.56,2.78,63,1
278 | 174,2.02,6.57,31.9,Present,50,28.75,11.83,64,1
279 | 124,4.25,8.22,30.77,Absent,56,25.8,0,43,0
280 | 114,0,2.63,9.69,Absent,45,17.89,0,16,0
281 | 118,0.12,3.26,12.26,Absent,55,22.65,0,16,0
282 | 106,1.08,4.37,26.08,Absent,67,24.07,17.74,28,1
283 | 146,3.6,3.51,22.67,Absent,51,22.29,43.71,42,0
284 | 206,0,4.17,33.23,Absent,69,27.36,6.17,50,1
285 | 134,3,3.17,17.91,Absent,35,26.37,15.12,27,0
286 | 148,15,4.98,36.94,Present,72,31.83,66.27,41,1
287 | 126,0.21,3.95,15.11,Absent,61,22.17,2.42,17,0
288 | 134,0,3.69,13.92,Absent,43,27.66,0,19,0
289 | 134,0.02,2.8,18.84,Absent,45,24.82,0,17,0
290 | 123,0.05,4.61,13.69,Absent,51,23.23,2.78,16,0
291 | 112,0.6,5.28,25.71,Absent,55,27.02,27.77,38,1
292 | 112,0,1.71,15.96,Absent,42,22.03,3.5,16,0
293 | 101,0.48,7.26,13,Absent,50,19.82,5.19,16,0
294 | 150,0.18,4.14,14.4,Absent,53,23.43,7.71,44,0
295 | 170,2.6,7.22,28.69,Present,71,27.87,37.65,56,1
296 | 134,0,5.63,29.12,Absent,68,32.33,2.02,34,0
297 | 142,0,4.19,18.04,Absent,56,23.65,20.78,42,1
298 | 132,0.1,3.28,10.73,Absent,73,20.42,0,17,0
299 | 136,0,2.28,18.14,Absent,55,22.59,0,17,0
300 | 132,12,4.51,21.93,Absent,61,26.07,64.8,46,1
301 | 166,4.1,4,34.3,Present,32,29.51,8.23,53,0
302 | 138,0,3.96,24.7,Present,53,23.8,0,45,0
303 | 138,2.27,6.41,29.07,Absent,58,30.22,2.93,32,1
304 | 170,0,3.12,37.15,Absent,47,35.42,0,53,0
305 | 128,0,8.41,28.82,Present,60,26.86,0,59,1
306 | 136,1.2,2.78,7.12,Absent,52,22.51,3.41,27,0
307 | 128,0,3.22,26.55,Present,39,26.59,16.71,49,0
308 | 150,14.4,5.04,26.52,Present,60,28.84,0,45,0
309 | 132,8.4,3.57,13.68,Absent,42,18.75,15.43,59,1
310 | 142,2.4,2.55,23.89,Absent,54,26.09,59.14,37,0
311 | 130,0.05,2.44,28.25,Present,67,30.86,40.32,34,0
312 | 174,3.5,5.26,21.97,Present,36,22.04,8.33,59,1
313 | 114,9.6,2.51,29.18,Absent,49,25.67,40.63,46,0
314 | 162,1.5,2.46,19.39,Present,49,24.32,0,59,1
315 | 174,0,3.27,35.4,Absent,58,37.71,24.95,44,0
316 | 190,5.15,6.03,36.59,Absent,42,30.31,72,50,0
317 | 154,1.4,1.72,18.86,Absent,58,22.67,43.2,59,0
318 | 124,0,2.28,24.86,Present,50,22.24,8.26,38,0
319 | 114,1.2,3.98,14.9,Absent,49,23.79,25.82,26,0
320 | 168,11.4,5.08,26.66,Present,56,27.04,2.61,59,1
321 | 142,3.72,4.24,32.57,Absent,52,24.98,7.61,51,0
322 | 154,0,4.81,28.11,Present,56,25.67,75.77,59,0
323 | 146,4.36,4.31,18.44,Present,47,24.72,10.8,38,0
324 | 166,6,3.02,29.3,Absent,35,24.38,38.06,61,0
325 | 140,8.6,3.9,32.16,Present,52,28.51,11.11,64,1
326 | 136,1.7,3.53,20.13,Absent,56,19.44,14.4,55,0
327 | 156,0,3.47,21.1,Absent,73,28.4,0,36,1
328 | 132,0,6.63,29.58,Present,37,29.41,2.57,62,0
329 | 128,0,2.98,12.59,Absent,65,20.74,2.06,19,0
330 | 106,5.6,3.2,12.3,Absent,49,20.29,0,39,0
331 | 144,0.4,4.64,30.09,Absent,30,27.39,0.74,55,0
332 | 154,0.31,2.33,16.48,Absent,33,24,11.83,17,0
333 | 126,3.1,2.01,32.97,Present,56,28.63,26.74,45,0
334 | 134,6.4,8.49,37.25,Present,56,28.94,10.49,51,1
335 | 152,19.45,4.22,29.81,Absent,28,23.95,0,59,1
336 | 146,1.35,6.39,34.21,Absent,51,26.43,0,59,1
337 | 162,6.94,4.55,33.36,Present,52,27.09,32.06,43,0
338 | 130,7.28,3.56,23.29,Present,20,26.8,51.87,58,1
339 | 138,6,7.24,37.05,Absent,38,28.69,0,59,0
340 | 148,0,5.32,26.71,Present,52,32.21,32.78,27,0
341 | 124,4.2,2.94,27.59,Absent,50,30.31,85.06,30,0
342 | 118,1.62,9.01,21.7,Absent,59,25.89,21.19,40,0
343 | 116,4.28,7.02,19.99,Present,68,23.31,0,52,1
344 | 162,6.3,5.73,22.61,Present,46,20.43,62.54,53,1
345 | 138,0.87,1.87,15.89,Absent,44,26.76,42.99,31,0
346 | 137,1.2,3.14,23.87,Absent,66,24.13,45,37,0
347 | 198,0.52,11.89,27.68,Present,48,28.4,78.99,26,1
348 | 154,4.5,4.75,23.52,Present,43,25.76,0,53,1
349 | 128,5.4,2.36,12.98,Absent,51,18.36,6.69,61,0
350 | 130,0.08,5.59,25.42,Present,50,24.98,6.27,43,1
351 | 162,5.6,4.24,22.53,Absent,29,22.91,5.66,60,0
352 | 120,10.5,2.7,29.87,Present,54,24.5,16.46,49,0
353 | 136,3.99,2.58,16.38,Present,53,22.41,27.67,36,0
354 | 176,1.2,8.28,36.16,Present,42,27.81,11.6,58,1
355 | 134,11.79,4.01,26.57,Present,38,21.79,38.88,61,1
356 | 122,1.7,5.28,32.23,Present,51,24.08,0,54,0
357 | 134,0.9,3.18,23.66,Present,52,23.26,27.36,58,1
358 | 134,0,2.43,22.24,Absent,52,26.49,41.66,24,0
359 | 136,6.6,6.08,32.74,Absent,64,33.28,2.72,49,0
360 | 132,4.05,5.15,26.51,Present,31,26.67,16.3,50,0
361 | 152,1.68,3.58,25.43,Absent,50,27.03,0,32,0
362 | 132,12.3,5.96,32.79,Present,57,30.12,21.5,62,1
363 | 124,0.4,3.67,25.76,Absent,43,28.08,20.57,34,0
364 | 140,4.2,2.91,28.83,Present,43,24.7,47.52,48,0
365 | 166,0.6,2.42,34.03,Present,53,26.96,54,60,0
366 | 156,3.02,5.35,25.72,Present,53,25.22,28.11,52,1
367 | 132,0.72,4.37,19.54,Absent,48,26.11,49.37,28,0
368 | 150,0,4.99,27.73,Absent,57,30.92,8.33,24,0
369 | 134,0.12,3.4,21.18,Present,33,26.27,14.21,30,0
370 | 126,3.4,4.87,15.16,Present,65,22.01,11.11,38,0
371 | 148,0.5,5.97,32.88,Absent,54,29.27,6.43,42,0
372 | 148,8.2,7.75,34.46,Present,46,26.53,6.04,64,1
373 | 132,6,5.97,25.73,Present,66,24.18,145.29,41,0
374 | 128,1.6,5.41,29.3,Absent,68,29.38,23.97,32,0
375 | 128,5.16,4.9,31.35,Present,57,26.42,0,64,0
376 | 140,0,2.4,27.89,Present,70,30.74,144,29,0
377 | 126,0,5.29,27.64,Absent,25,27.62,2.06,45,0
378 | 114,3.6,4.16,22.58,Absent,60,24.49,65.31,31,0
379 | 118,1.25,4.69,31.58,Present,52,27.16,4.11,53,0
380 | 126,0.96,4.99,29.74,Absent,66,33.35,58.32,38,0
381 | 154,4.5,4.68,39.97,Absent,61,33.17,1.54,64,1
382 | 112,1.44,2.71,22.92,Absent,59,24.81,0,52,0
383 | 140,8,4.42,33.15,Present,47,32.77,66.86,44,0
384 | 140,1.68,11.41,29.54,Present,74,30.75,2.06,38,1
385 | 128,2.6,4.94,21.36,Absent,61,21.3,0,31,0
386 | 126,19.6,6.03,34.99,Absent,49,26.99,55.89,44,0
387 | 160,4.2,6.76,37.99,Present,61,32.91,3.09,54,1
388 | 144,0,4.17,29.63,Present,52,21.83,0,59,0
389 | 148,4.5,10.49,33.27,Absent,50,25.92,2.06,53,1
390 | 146,0,4.92,18.53,Absent,57,24.2,34.97,26,0
391 | 164,5.6,3.17,30.98,Present,44,25.99,43.2,53,1
392 | 130,0.54,3.63,22.03,Present,69,24.34,12.86,39,1
393 | 154,2.4,5.63,42.17,Present,59,35.07,12.86,50,1
394 | 178,0.95,4.75,21.06,Absent,49,23.74,24.69,61,0
395 | 180,3.57,3.57,36.1,Absent,36,26.7,19.95,64,0
396 | 134,12.5,2.73,39.35,Absent,48,35.58,0,48,0
397 | 142,0,3.54,16.64,Absent,58,25.97,8.36,27,0
398 | 162,7,7.67,34.34,Present,33,30.77,0,62,0
399 | 218,11.2,2.77,30.79,Absent,38,24.86,90.93,48,1
400 | 126,8.75,6.06,32.72,Present,33,27,62.43,55,1
401 | 126,0,3.57,26.01,Absent,61,26.3,7.97,47,0
402 | 134,6.1,4.77,26.08,Absent,47,23.82,1.03,49,0
403 | 132,0,4.17,36.57,Absent,57,30.61,18,49,0
404 | 178,5.5,3.79,23.92,Present,45,21.26,6.17,62,1
405 | 208,5.04,5.19,20.71,Present,52,25.12,24.27,58,1
406 | 160,1.15,10.19,39.71,Absent,31,31.65,20.52,57,0
407 | 116,2.38,5.67,29.01,Present,54,27.26,15.77,51,0
408 | 180,25.01,3.7,38.11,Present,57,30.54,0,61,1
409 | 200,19.2,4.43,40.6,Present,55,32.04,36,60,1
410 | 112,4.2,3.58,27.14,Absent,52,26.83,2.06,40,0
411 | 120,0,3.1,26.97,Absent,41,24.8,0,16,0
412 | 178,20,9.78,33.55,Absent,37,27.29,2.88,62,1
413 | 166,0.8,5.63,36.21,Absent,50,34.72,28.8,60,0
414 | 164,8.2,14.16,36.85,Absent,52,28.5,17.02,55,1
415 | 216,0.92,2.66,19.85,Present,49,20.58,0.51,63,1
416 | 146,6.4,5.62,33.05,Present,57,31.03,0.74,46,0
417 | 134,1.1,3.54,20.41,Present,58,24.54,39.91,39,1
418 | 158,16,5.56,29.35,Absent,36,25.92,58.32,60,0
419 | 176,0,3.14,31.04,Present,45,30.18,4.63,45,0
420 | 132,2.8,4.79,20.47,Present,50,22.15,11.73,48,0
421 | 126,0,4.55,29.18,Absent,48,24.94,36,41,0
422 | 120,5.5,3.51,23.23,Absent,46,22.4,90.31,43,0
423 | 174,0,3.86,21.73,Absent,42,23.37,0,63,0
424 | 150,13.8,5.1,29.45,Present,52,27.92,77.76,55,1
425 | 176,6,3.98,17.2,Present,52,21.07,4.11,61,1
426 | 142,2.2,3.29,22.7,Absent,44,23.66,5.66,42,1
427 | 132,0,3.3,21.61,Absent,42,24.92,32.61,33,0
428 | 142,1.32,7.63,29.98,Present,57,31.16,72.93,33,0
429 | 146,1.16,2.28,34.53,Absent,50,28.71,45,49,0
430 | 132,7.2,3.65,17.16,Present,56,23.25,0,34,0
431 | 120,0,3.57,23.22,Absent,58,27.2,0,32,0
432 | 118,0,3.89,15.96,Absent,65,20.18,0,16,0
433 | 108,0,1.43,26.26,Absent,42,19.38,0,16,0
434 | 136,0,4,19.06,Absent,40,21.94,2.06,16,0
435 | 120,0,2.46,13.39,Absent,47,22.01,0.51,18,0
436 | 132,0,3.55,8.66,Present,61,18.5,3.87,16,0
437 | 136,0,1.77,20.37,Absent,45,21.51,2.06,16,0
438 | 138,0,1.86,18.35,Present,59,25.38,6.51,17,0
439 | 138,0.06,4.15,20.66,Absent,49,22.59,2.49,16,0
440 | 130,1.22,3.3,13.65,Absent,50,21.4,3.81,31,0
441 | 130,4,2.4,17.42,Absent,60,22.05,0,40,0
442 | 110,0,7.14,28.28,Absent,57,29,0,32,0
443 | 120,0,3.98,13.19,Present,47,21.89,0,16,0
444 | 166,6,8.8,37.89,Absent,39,28.7,43.2,52,0
445 | 134,0.57,4.75,23.07,Absent,67,26.33,0,37,0
446 | 142,3,3.69,25.1,Absent,60,30.08,38.88,27,0
447 | 136,2.8,2.53,9.28,Present,61,20.7,4.55,25,0
448 | 142,0,4.32,25.22,Absent,47,28.92,6.53,34,1
449 | 130,0,1.88,12.51,Present,52,20.28,0,17,0
450 | 124,1.8,3.74,16.64,Present,42,22.26,10.49,20,0
451 | 144,4,5.03,25.78,Present,57,27.55,90,48,1
452 | 136,1.81,3.31,6.74,Absent,63,19.57,24.94,24,0
453 | 120,0,2.77,13.35,Absent,67,23.37,1.03,18,0
454 | 154,5.53,3.2,28.81,Present,61,26.15,42.79,42,0
455 | 124,1.6,7.22,39.68,Present,36,31.5,0,51,1
456 | 146,0.64,4.82,28.02,Absent,60,28.11,8.23,39,1
457 | 128,2.24,2.83,26.48,Absent,48,23.96,47.42,27,1
458 | 170,0.4,4.11,42.06,Present,56,33.1,2.06,57,0
459 | 214,0.4,5.98,31.72,Absent,64,28.45,0,58,0
460 | 182,4.2,4.41,32.1,Absent,52,28.61,18.72,52,1
461 | 108,3,1.59,15.23,Absent,40,20.09,26.64,55,0
462 | 118,5.4,11.61,30.79,Absent,64,27.35,23.97,40,0
463 | 132,0,4.82,33.41,Present,62,14.7,0,46,1
--------------------------------------------------------------------------------
/data/heart.txt:
--------------------------------------------------------------------------------
1 | "sbp" "tobacco" "ldl" "adiposity" "famhist" "typea" "obesity" "alcohol" "age" "chd"
2 | 160 12 5.73 23.11 "Present" 49 25.3 97.2 52 1
3 | 144 0.01 4.41 28.61 "Absent" 55 28.87 2.06 63 1
4 | 118 0.08 3.48 32.28 "Present" 52 29.14 3.81 46 0
5 | 170 7.5 6.41 38.03 "Present" 51 31.99 24.26 58 1
6 | 134 13.6 3.5 27.78 "Present" 60 25.99 57.34 49 1
7 | 132 6.2 6.47 36.21 "Present" 62 30.77 14.14 45 0
8 | 142 4.05 3.38 16.2 "Absent" 59 20.81 2.62 38 0
9 | 114 4.08 4.59 14.6 "Present" 62 23.11 6.72 58 1
10 | 114 0 3.83 19.4 "Present" 49 24.86 2.49 29 0
11 | 132 0 5.8 30.96 "Present" 69 30.11 0 53 1
12 | 206 6 2.95 32.27 "Absent" 72 26.81 56.06 60 1
13 | 134 14.1 4.44 22.39 "Present" 65 23.09 0 40 1
14 | 118 0 1.88 10.05 "Absent" 59 21.57 0 17 0
15 | 132 0 1.87 17.21 "Absent" 49 23.63 0.97 15 0
16 | 112 9.65 2.29 17.2 "Present" 54 23.53 0.68 53 0
17 | 117 1.53 2.44 28.95 "Present" 35 25.89 30.03 46 0
18 | 120 7.5 15.33 22 "Absent" 60 25.31 34.49 49 0
19 | 146 10.5 8.29 35.36 "Present" 78 32.73 13.89 53 1
20 | 158 2.6 7.46 34.07 "Present" 61 29.3 53.28 62 1
21 | 124 14 6.23 35.96 "Present" 45 30.09 0 59 1
22 | 106 1.61 1.74 12.32 "Absent" 74 20.92 13.37 20 1
23 | 132 7.9 2.85 26.5 "Present" 51 26.16 25.71 44 0
24 | 150 0.3 6.38 33.99 "Present" 62 24.64 0 50 0
25 | 138 0.6 3.81 28.66 "Absent" 54 28.7 1.46 58 0
26 | 142 18.2 4.34 24.38 "Absent" 61 26.19 0 50 0
27 | 124 4 12.42 31.29 "Present" 54 23.23 2.06 42 1
28 | 118 6 9.65 33.91 "Absent" 60 38.8 0 48 0
29 | 145 9.1 5.24 27.55 "Absent" 59 20.96 21.6 61 1
30 | 144 4.09 5.55 31.4 "Present" 60 29.43 5.55 56 0
31 | 146 0 6.62 25.69 "Absent" 60 28.07 8.23 63 1
32 | 136 2.52 3.95 25.63 "Absent" 51 21.86 0 45 1
33 | 158 1.02 6.33 23.88 "Absent" 66 22.13 24.99 46 1
34 | 122 6.6 5.58 35.95 "Present" 53 28.07 12.55 59 1
35 | 126 8.75 6.53 34.02 "Absent" 49 30.25 0 41 1
36 | 148 5.5 7.1 25.31 "Absent" 56 29.84 3.6 48 0
37 | 122 4.26 4.44 13.04 "Absent" 57 19.49 48.99 28 1
38 | 140 3.9 7.32 25.05 "Absent" 47 27.36 36.77 32 0
39 | 110 4.64 4.55 30.46 "Absent" 48 30.9 15.22 46 0
40 | 130 0 2.82 19.63 "Present" 70 24.86 0 29 0
41 | 136 11.2 5.81 31.85 "Present" 75 27.68 22.94 58 1
42 | 118 0.28 5.8 33.7 "Present" 60 30.98 0 41 1
43 | 144 0.04 3.38 23.61 "Absent" 30 23.75 4.66 30 0
44 | 120 0 1.07 16.02 "Absent" 47 22.15 0 15 0
45 | 130 2.61 2.72 22.99 "Present" 51 26.29 13.37 51 1
46 | 114 0 2.99 9.74 "Absent" 54 46.58 0 17 0
47 | 128 4.65 3.31 22.74 "Absent" 62 22.95 0.51 48 0
48 | 162 7.4 8.55 24.65 "Present" 64 25.71 5.86 58 1
49 | 116 1.91 7.56 26.45 "Present" 52 30.01 3.6 33 1
50 | 114 0 1.94 11.02 "Absent" 54 20.17 38.98 16 0
51 | 126 3.8 3.88 31.79 "Absent" 57 30.53 0 30 0
52 | 122 0 5.75 30.9 "Present" 46 29.01 4.11 42 0
53 | 134 2.5 3.66 30.9 "Absent" 52 27.19 23.66 49 0
54 | 152 0.9 9.12 30.23 "Absent" 56 28.64 0.37 42 1
55 | 134 8.08 1.55 17.5 "Present" 56 22.65 66.65 31 1
56 | 156 3 1.82 27.55 "Absent" 60 23.91 54 53 0
57 | 152 5.99 7.99 32.48 "Absent" 45 26.57 100.32 48 0
58 | 118 0 2.99 16.17 "Absent" 49 23.83 3.22 28 0
59 | 126 5.1 2.96 26.5 "Absent" 55 25.52 12.34 38 1
60 | 103 0.03 4.21 18.96 "Absent" 48 22.94 2.62 18 0
61 | 121 0.8 5.29 18.95 "Present" 47 22.51 0 61 0
62 | 142 0.28 1.8 21.03 "Absent" 57 23.65 2.93 33 0
63 | 138 1.15 5.09 27.87 "Present" 61 25.65 2.34 44 0
64 | 152 10.1 4.71 24.65 "Present" 65 26.21 24.53 57 0
65 | 140 0.45 4.3 24.33 "Absent" 41 27.23 10.08 38 0
66 | 130 0 1.82 10.45 "Absent" 57 22.07 2.06 17 0
67 | 136 7.36 2.19 28.11 "Present" 61 25 61.71 54 0
68 | 124 4.82 3.24 21.1 "Present" 48 28.49 8.42 30 0
69 | 112 0.41 1.88 10.29 "Absent" 39 22.08 20.98 27 0
70 | 118 4.46 7.27 29.13 "Present" 48 29.01 11.11 33 0
71 | 122 0 3.37 16.1 "Absent" 67 21.06 0 32 1
72 | 118 0 3.67 12.13 "Absent" 51 19.15 0.6 15 0
73 | 130 1.72 2.66 10.38 "Absent" 68 17.81 11.1 26 0
74 | 130 5.6 3.37 24.8 "Absent" 58 25.76 43.2 36 0
75 | 126 0.09 5.03 13.27 "Present" 50 17.75 4.63 20 0
76 | 128 0.4 6.17 26.35 "Absent" 64 27.86 11.11 34 0
77 | 136 0 4.12 17.42 "Absent" 52 21.66 12.86 40 0
78 | 134 0 5.9 30.84 "Absent" 49 29.16 0 55 0
79 | 140 0.6 5.56 33.39 "Present" 58 27.19 0 55 1
80 | 168 4.5 6.68 28.47 "Absent" 43 24.25 24.38 56 1
81 | 108 0.4 5.91 22.92 "Present" 57 25.72 72 39 0
82 | 114 3 7.04 22.64 "Present" 55 22.59 0 45 1
83 | 140 8.14 4.93 42.49 "Absent" 53 45.72 6.43 53 1
84 | 148 4.8 6.09 36.55 "Present" 63 25.44 0.88 55 1
85 | 148 12.2 3.79 34.15 "Absent" 57 26.38 14.4 57 1
86 | 128 0 2.43 13.15 "Present" 63 20.75 0 17 0
87 | 130 0.56 3.3 30.86 "Absent" 49 27.52 33.33 45 0
88 | 126 10.5 4.49 17.33 "Absent" 67 19.37 0 49 1
89 | 140 0 5.08 27.33 "Present" 41 27.83 1.25 38 0
90 | 126 0.9 5.64 17.78 "Present" 55 21.94 0 41 0
91 | 122 0.72 4.04 32.38 "Absent" 34 28.34 0 55 0
92 | 116 1.03 2.83 10.85 "Absent" 45 21.59 1.75 21 0
93 | 120 3.7 4.02 39.66 "Absent" 61 30.57 0 64 1
94 | 143 0.46 2.4 22.87 "Absent" 62 29.17 15.43 29 0
95 | 118 4 3.95 18.96 "Absent" 54 25.15 8.33 49 1
96 | 194 1.7 6.32 33.67 "Absent" 47 30.16 0.19 56 0
97 | 134 3 4.37 23.07 "Absent" 56 20.54 9.65 62 0
98 | 138 2.16 4.9 24.83 "Present" 39 26.06 28.29 29 0
99 | 136 0 5 27.58 "Present" 49 27.59 1.47 39 0
100 | 122 3.2 11.32 35.36 "Present" 55 27.07 0 51 1
101 | 164 12 3.91 19.59 "Absent" 51 23.44 19.75 39 0
102 | 136 8 7.85 23.81 "Present" 51 22.69 2.78 50 0
103 | 166 0.07 4.03 29.29 "Absent" 53 28.37 0 27 0
104 | 118 0 4.34 30.12 "Present" 52 32.18 3.91 46 0
105 | 128 0.42 4.6 26.68 "Absent" 41 30.97 10.33 31 0
106 | 118 1.5 5.38 25.84 "Absent" 64 28.63 3.89 29 0
107 | 158 3.6 2.97 30.11 "Absent" 63 26.64 108 64 0
108 | 108 1.5 4.33 24.99 "Absent" 66 22.29 21.6 61 1
109 | 170 7.6 5.5 37.83 "Present" 42 37.41 6.17 54 1
110 | 118 1 5.76 22.1 "Absent" 62 23.48 7.71 42 0
111 | 124 0 3.04 17.33 "Absent" 49 22.04 0 18 0
112 | 114 0 8.01 21.64 "Absent" 66 25.51 2.49 16 0
113 | 168 9 8.53 24.48 "Present" 69 26.18 4.63 54 1
114 | 134 2 3.66 14.69 "Absent" 52 21.03 2.06 37 0
115 | 174 0 8.46 35.1 "Present" 35 25.27 0 61 1
116 | 116 31.2 3.17 14.99 "Absent" 47 19.4 49.06 59 1
117 | 128 0 10.58 31.81 "Present" 46 28.41 14.66 48 0
118 | 140 4.5 4.59 18.01 "Absent" 63 21.91 22.09 32 1
119 | 154 0.7 5.91 25 "Absent" 13 20.6 0 42 0
120 | 150 3.5 6.99 25.39 "Present" 50 23.35 23.48 61 1
121 | 130 0 3.92 25.55 "Absent" 68 28.02 0.68 27 0
122 | 128 2 6.13 21.31 "Absent" 66 22.86 11.83 60 0
123 | 120 1.4 6.25 20.47 "Absent" 60 25.85 8.51 28 0
124 | 120 0 5.01 26.13 "Absent" 64 26.21 12.24 33 0
125 | 138 4.5 2.85 30.11 "Absent" 55 24.78 24.89 56 1
126 | 153 7.8 3.96 25.73 "Absent" 54 25.91 27.03 45 0
127 | 123 8.6 11.17 35.28 "Present" 70 33.14 0 59 1
128 | 148 4.04 3.99 20.69 "Absent" 60 27.78 1.75 28 0
129 | 136 3.96 2.76 30.28 "Present" 50 34.42 18.51 38 0
130 | 134 8.8 7.41 26.84 "Absent" 35 29.44 29.52 60 1
131 | 152 12.18 4.04 37.83 "Present" 63 34.57 4.17 64 0
132 | 158 13.5 5.04 30.79 "Absent" 54 24.79 21.5 62 0
133 | 132 2 3.08 35.39 "Absent" 45 31.44 79.82 58 1
134 | 134 1.5 3.73 21.53 "Absent" 41 24.7 11.11 30 1
135 | 142 7.44 5.52 33.97 "Absent" 47 29.29 24.27 54 0
136 | 134 6 3.3 28.45 "Absent" 65 26.09 58.11 40 0
137 | 122 4.18 9.05 29.27 "Present" 44 24.05 19.34 52 1
138 | 116 2.7 3.69 13.52 "Absent" 55 21.13 18.51 32 0
139 | 128 0.5 3.7 12.81 "Present" 66 21.25 22.73 28 0
140 | 120 0 3.68 12.24 "Absent" 51 20.52 0.51 20 0
141 | 124 0 3.95 36.35 "Present" 59 32.83 9.59 54 0
142 | 160 14 5.9 37.12 "Absent" 58 33.87 3.52 54 1
143 | 130 2.78 4.89 9.39 "Present" 63 19.3 17.47 25 1
144 | 128 2.8 5.53 14.29 "Absent" 64 24.97 0.51 38 0
145 | 130 4.5 5.86 37.43 "Absent" 61 31.21 32.3 58 0
146 | 109 1.2 6.14 29.26 "Absent" 47 24.72 10.46 40 0
147 | 144 0 3.84 18.72 "Absent" 56 22.1 4.8 40 0
148 | 118 1.05 3.16 12.98 "Present" 46 22.09 16.35 31 0
149 | 136 3.46 6.38 32.25 "Present" 43 28.73 3.13 43 1
150 | 136 1.5 6.06 26.54 "Absent" 54 29.38 14.5 33 1
151 | 124 15.5 5.05 24.06 "Absent" 46 23.22 0 61 1
152 | 148 6 6.49 26.47 "Absent" 48 24.7 0 55 0
153 | 128 6.6 3.58 20.71 "Absent" 55 24.15 0 52 0
154 | 122 0.28 4.19 19.97 "Absent" 61 25.63 0 24 0
155 | 108 0 2.74 11.17 "Absent" 53 22.61 0.95 20 0
156 | 124 3.04 4.8 19.52 "Present" 60 21.78 147.19 41 1
157 | 138 8.8 3.12 22.41 "Present" 63 23.33 120.03 55 1
158 | 127 0 2.81 15.7 "Absent" 42 22.03 1.03 17 0
159 | 174 9.45 5.13 35.54 "Absent" 55 30.71 59.79 53 0
160 | 122 0 3.05 23.51 "Absent" 46 25.81 0 38 0
161 | 144 6.75 5.45 29.81 "Absent" 53 25.62 26.23 43 1
162 | 126 1.8 6.22 19.71 "Absent" 65 24.81 0.69 31 0
163 | 208 27.4 3.12 26.63 "Absent" 66 27.45 33.07 62 1
164 | 138 0 2.68 17.04 "Absent" 42 22.16 0 16 0
165 | 148 0 3.84 17.26 "Absent" 70 20 0 21 0
166 | 122 0 3.08 16.3 "Absent" 43 22.13 0 16 0
167 | 132 7 3.2 23.26 "Absent" 77 23.64 23.14 49 0
168 | 110 12.16 4.99 28.56 "Absent" 44 27.14 21.6 55 1
169 | 160 1.52 8.12 29.3 "Present" 54 25.87 12.86 43 1
170 | 126 0.54 4.39 21.13 "Present" 45 25.99 0 25 0
171 | 162 5.3 7.95 33.58 "Present" 58 36.06 8.23 48 0
172 | 194 2.55 6.89 33.88 "Present" 69 29.33 0 41 0
173 | 118 0.75 2.58 20.25 "Absent" 59 24.46 0 32 0
174 | 124 0 4.79 34.71 "Absent" 49 26.09 9.26 47 0
175 | 160 0 2.42 34.46 "Absent" 48 29.83 1.03 61 0
176 | 128 0 2.51 29.35 "Present" 53 22.05 1.37 62 0
177 | 122 4 5.24 27.89 "Present" 45 26.52 0 61 1
178 | 132 2 2.7 21.57 "Present" 50 27.95 9.26 37 0
179 | 120 0 2.42 16.66 "Absent" 46 20.16 0 17 0
180 | 128 0.04 8.22 28.17 "Absent" 65 26.24 11.73 24 0
181 | 108 15 4.91 34.65 "Absent" 41 27.96 14.4 56 0
182 | 166 0 4.31 34.27 "Absent" 45 30.14 13.27 56 0
183 | 152 0 6.06 41.05 "Present" 51 40.34 0 51 0
184 | 170 4.2 4.67 35.45 "Present" 50 27.14 7.92 60 1
185 | 156 4 2.05 19.48 "Present" 50 21.48 27.77 39 1
186 | 116 8 6.73 28.81 "Present" 41 26.74 40.94 48 1
187 | 122 4.4 3.18 11.59 "Present" 59 21.94 0 33 1
188 | 150 20 6.4 35.04 "Absent" 53 28.88 8.33 63 0
189 | 129 2.15 5.17 27.57 "Absent" 52 25.42 2.06 39 0
190 | 134 4.8 6.58 29.89 "Present" 55 24.73 23.66 63 0
191 | 126 0 5.98 29.06 "Present" 56 25.39 11.52 64 1
192 | 142 0 3.72 25.68 "Absent" 48 24.37 5.25 40 1
193 | 128 0.7 4.9 37.42 "Present" 72 35.94 3.09 49 1
194 | 102 0.4 3.41 17.22 "Present" 56 23.59 2.06 39 1
195 | 130 0 4.89 25.98 "Absent" 72 30.42 14.71 23 0
196 | 138 0.05 2.79 10.35 "Absent" 46 21.62 0 18 0
197 | 138 0 1.96 11.82 "Present" 54 22.01 8.13 21 0
198 | 128 0 3.09 20.57 "Absent" 54 25.63 0.51 17 0
199 | 162 2.92 3.63 31.33 "Absent" 62 31.59 18.51 42 0
200 | 160 3 9.19 26.47 "Present" 39 28.25 14.4 54 1
201 | 148 0 4.66 24.39 "Absent" 50 25.26 4.03 27 0
202 | 124 0.16 2.44 16.67 "Absent" 65 24.58 74.91 23 0
203 | 136 3.15 4.37 20.22 "Present" 59 25.12 47.16 31 1
204 | 134 2.75 5.51 26.17 "Absent" 57 29.87 8.33 33 0
205 | 128 0.73 3.97 23.52 "Absent" 54 23.81 19.2 64 0
206 | 122 3.2 3.59 22.49 "Present" 45 24.96 36.17 58 0
207 | 152 3 4.64 31.29 "Absent" 41 29.34 4.53 40 0
208 | 162 0 5.09 24.6 "Present" 64 26.71 3.81 18 0
209 | 124 4 6.65 30.84 "Present" 54 28.4 33.51 60 0
210 | 136 5.8 5.9 27.55 "Absent" 65 25.71 14.4 59 0
211 | 136 8.8 4.26 32.03 "Present" 52 31.44 34.35 60 0
212 | 134 0.05 8.03 27.95 "Absent" 48 26.88 0 60 0
213 | 122 1 5.88 34.81 "Present" 69 31.27 15.94 40 1
214 | 116 3 3.05 30.31 "Absent" 41 23.63 0.86 44 0
215 | 132 0 0.98 21.39 "Absent" 62 26.75 0 53 0
216 | 134 0 2.4 21.11 "Absent" 57 22.45 1.37 18 0
217 | 160 7.77 8.07 34.8 "Absent" 64 31.15 0 62 1
218 | 180 0.52 4.23 16.38 "Absent" 55 22.56 14.77 45 1
219 | 124 0.81 6.16 11.61 "Absent" 35 21.47 10.49 26 0
220 | 114 0 4.97 9.69 "Absent" 26 22.6 0 25 0
221 | 208 7.4 7.41 32.03 "Absent" 50 27.62 7.85 57 0
222 | 138 0 3.14 12 "Absent" 54 20.28 0 16 0
223 | 164 0.5 6.95 39.64 "Present" 47 41.76 3.81 46 1
224 | 144 2.4 8.13 35.61 "Absent" 46 27.38 13.37 60 0
225 | 136 7.5 7.39 28.04 "Present" 50 25.01 0 45 1
226 | 132 7.28 3.52 12.33 "Absent" 60 19.48 2.06 56 0
227 | 143 5.04 4.86 23.59 "Absent" 58 24.69 18.72 42 0
228 | 112 4.46 7.18 26.25 "Present" 69 27.29 0 32 1
229 | 134 10 3.79 34.72 "Absent" 42 28.33 28.8 52 1
230 | 138 2 5.11 31.4 "Present" 49 27.25 2.06 64 1
231 | 188 0 5.47 32.44 "Present" 71 28.99 7.41 50 1
232 | 110 2.35 3.36 26.72 "Present" 54 26.08 109.8 58 1
233 | 136 13.2 7.18 35.95 "Absent" 48 29.19 0 62 0
234 | 130 1.75 5.46 34.34 "Absent" 53 29.42 0 58 1
235 | 122 0 3.76 24.59 "Absent" 56 24.36 0 30 0
236 | 138 0 3.24 27.68 "Absent" 60 25.7 88.66 29 0
237 | 130 18 4.13 27.43 "Absent" 54 27.44 0 51 1
238 | 126 5.5 3.78 34.15 "Absent" 55 28.85 3.18 61 0
239 | 176 5.76 4.89 26.1 "Present" 46 27.3 19.44 57 0
240 | 122 0 5.49 19.56 "Absent" 57 23.12 14.02 27 0
241 | 124 0 3.23 9.64 "Absent" 59 22.7 0 16 0
242 | 140 5.2 3.58 29.26 "Absent" 70 27.29 20.17 45 1
243 | 128 6 4.37 22.98 "Present" 50 26.01 0 47 0
244 | 190 4.18 5.05 24.83 "Absent" 45 26.09 82.85 41 0
245 | 144 0.76 10.53 35.66 "Absent" 63 34.35 0 55 1
246 | 126 4.6 7.4 31.99 "Present" 57 28.67 0.37 60 1
247 | 128 0 2.63 23.88 "Absent" 45 21.59 6.54 57 0
248 | 136 0.4 3.91 21.1 "Present" 63 22.3 0 56 1
249 | 158 4 4.18 28.61 "Present" 42 25.11 0 60 0
250 | 160 0.6 6.94 30.53 "Absent" 36 25.68 1.42 64 0
251 | 124 6 5.21 33.02 "Present" 64 29.37 7.61 58 1
252 | 158 6.17 8.12 30.75 "Absent" 46 27.84 92.62 48 0
253 | 128 0 6.34 11.87 "Absent" 57 23.14 0 17 0
254 | 166 3 3.82 26.75 "Absent" 45 20.86 0 63 1
255 | 146 7.5 7.21 25.93 "Present" 55 22.51 0.51 42 0
256 | 161 9 4.65 15.16 "Present" 58 23.76 43.2 46 0
257 | 164 13.02 6.26 29.38 "Present" 47 22.75 37.03 54 1
258 | 146 5.08 7.03 27.41 "Present" 63 36.46 24.48 37 1
259 | 142 4.48 3.57 19.75 "Present" 51 23.54 3.29 49 0
260 | 138 12 5.13 28.34 "Absent" 59 24.49 32.81 58 1
261 | 154 1.8 7.13 34.04 "Present" 52 35.51 39.36 44 0
262 | 118 0 2.39 12.13 "Absent" 49 18.46 0.26 17 1
263 | 124 0.61 2.69 17.15 "Present" 61 22.76 11.55 20 0
264 | 124 1.04 2.84 16.42 "Present" 46 20.17 0 61 0
265 | 136 5 4.19 23.99 "Present" 68 27.8 25.86 35 0
266 | 132 9.9 4.63 27.86 "Present" 46 23.39 0.51 52 1
267 | 118 0.12 1.96 20.31 "Absent" 37 20.01 2.42 18 0
268 | 118 0.12 4.16 9.37 "Absent" 57 19.61 0 17 0
269 | 134 12 4.96 29.79 "Absent" 53 24.86 8.23 57 0
270 | 114 0.1 3.95 15.89 "Present" 57 20.31 17.14 16 0
271 | 136 6.8 7.84 30.74 "Present" 58 26.2 23.66 45 1
272 | 130 0 4.16 39.43 "Present" 46 30.01 0 55 1
273 | 136 2.2 4.16 38.02 "Absent" 65 37.24 4.11 41 1
274 | 136 1.36 3.16 14.97 "Present" 56 24.98 7.3 24 0
275 | 154 4.2 5.59 25.02 "Absent" 58 25.02 1.54 43 0
276 | 108 0.8 2.47 17.53 "Absent" 47 22.18 0 55 1
277 | 136 8.8 4.69 36.07 "Present" 38 26.56 2.78 63 1
278 | 174 2.02 6.57 31.9 "Present" 50 28.75 11.83 64 1
279 | 124 4.25 8.22 30.77 "Absent" 56 25.8 0 43 0
280 | 114 0 2.63 9.69 "Absent" 45 17.89 0 16 0
281 | 118 0.12 3.26 12.26 "Absent" 55 22.65 0 16 0
282 | 106 1.08 4.37 26.08 "Absent" 67 24.07 17.74 28 1
283 | 146 3.6 3.51 22.67 "Absent" 51 22.29 43.71 42 0
284 | 206 0 4.17 33.23 "Absent" 69 27.36 6.17 50 1
285 | 134 3 3.17 17.91 "Absent" 35 26.37 15.12 27 0
286 | 148 15 4.98 36.94 "Present" 72 31.83 66.27 41 1
287 | 126 0.21 3.95 15.11 "Absent" 61 22.17 2.42 17 0
288 | 134 0 3.69 13.92 "Absent" 43 27.66 0 19 0
289 | 134 0.02 2.8 18.84 "Absent" 45 24.82 0 17 0
290 | 123 0.05 4.61 13.69 "Absent" 51 23.23 2.78 16 0
291 | 112 0.6 5.28 25.71 "Absent" 55 27.02 27.77 38 1
292 | 112 0 1.71 15.96 "Absent" 42 22.03 3.5 16 0
293 | 101 0.48 7.26 13 "Absent" 50 19.82 5.19 16 0
294 | 150 0.18 4.14 14.4 "Absent" 53 23.43 7.71 44 0
295 | 170 2.6 7.22 28.69 "Present" 71 27.87 37.65 56 1
296 | 134 0 5.63 29.12 "Absent" 68 32.33 2.02 34 0
297 | 142 0 4.19 18.04 "Absent" 56 23.65 20.78 42 1
298 | 132 0.1 3.28 10.73 "Absent" 73 20.42 0 17 0
299 | 136 0 2.28 18.14 "Absent" 55 22.59 0 17 0
300 | 132 12 4.51 21.93 "Absent" 61 26.07 64.8 46 1
301 | 166 4.1 4 34.3 "Present" 32 29.51 8.23 53 0
302 | 138 0 3.96 24.7 "Present" 53 23.8 0 45 0
303 | 138 2.27 6.41 29.07 "Absent" 58 30.22 2.93 32 1
304 | 170 0 3.12 37.15 "Absent" 47 35.42 0 53 0
305 | 128 0 8.41 28.82 "Present" 60 26.86 0 59 1
306 | 136 1.2 2.78 7.12 "Absent" 52 22.51 3.41 27 0
307 | 128 0 3.22 26.55 "Present" 39 26.59 16.71 49 0
308 | 150 14.4 5.04 26.52 "Present" 60 28.84 0 45 0
309 | 132 8.4 3.57 13.68 "Absent" 42 18.75 15.43 59 1
310 | 142 2.4 2.55 23.89 "Absent" 54 26.09 59.14 37 0
311 | 130 0.05 2.44 28.25 "Present" 67 30.86 40.32 34 0
312 | 174 3.5 5.26 21.97 "Present" 36 22.04 8.33 59 1
313 | 114 9.6 2.51 29.18 "Absent" 49 25.67 40.63 46 0
314 | 162 1.5 2.46 19.39 "Present" 49 24.32 0 59 1
315 | 174 0 3.27 35.4 "Absent" 58 37.71 24.95 44 0
316 | 190 5.15 6.03 36.59 "Absent" 42 30.31 72 50 0
317 | 154 1.4 1.72 18.86 "Absent" 58 22.67 43.2 59 0
318 | 124 0 2.28 24.86 "Present" 50 22.24 8.26 38 0
319 | 114 1.2 3.98 14.9 "Absent" 49 23.79 25.82 26 0
320 | 168 11.4 5.08 26.66 "Present" 56 27.04 2.61 59 1
321 | 142 3.72 4.24 32.57 "Absent" 52 24.98 7.61 51 0
322 | 154 0 4.81 28.11 "Present" 56 25.67 75.77 59 0
323 | 146 4.36 4.31 18.44 "Present" 47 24.72 10.8 38 0
324 | 166 6 3.02 29.3 "Absent" 35 24.38 38.06 61 0
325 | 140 8.6 3.9 32.16 "Present" 52 28.51 11.11 64 1
326 | 136 1.7 3.53 20.13 "Absent" 56 19.44 14.4 55 0
327 | 156 0 3.47 21.1 "Absent" 73 28.4 0 36 1
328 | 132 0 6.63 29.58 "Present" 37 29.41 2.57 62 0
329 | 128 0 2.98 12.59 "Absent" 65 20.74 2.06 19 0
330 | 106 5.6 3.2 12.3 "Absent" 49 20.29 0 39 0
331 | 144 0.4 4.64 30.09 "Absent" 30 27.39 0.74 55 0
332 | 154 0.31 2.33 16.48 "Absent" 33 24 11.83 17 0
333 | 126 3.1 2.01 32.97 "Present" 56 28.63 26.74 45 0
334 | 134 6.4 8.49 37.25 "Present" 56 28.94 10.49 51 1
335 | 152 19.45 4.22 29.81 "Absent" 28 23.95 0 59 1
336 | 146 1.35 6.39 34.21 "Absent" 51 26.43 0 59 1
337 | 162 6.94 4.55 33.36 "Present" 52 27.09 32.06 43 0
338 | 130 7.28 3.56 23.29 "Present" 20 26.8 51.87 58 1
339 | 138 6 7.24 37.05 "Absent" 38 28.69 0 59 0
340 | 148 0 5.32 26.71 "Present" 52 32.21 32.78 27 0
341 | 124 4.2 2.94 27.59 "Absent" 50 30.31 85.06 30 0
342 | 118 1.62 9.01 21.7 "Absent" 59 25.89 21.19 40 0
343 | 116 4.28 7.02 19.99 "Present" 68 23.31 0 52 1
344 | 162 6.3 5.73 22.61 "Present" 46 20.43 62.54 53 1
345 | 138 0.87 1.87 15.89 "Absent" 44 26.76 42.99 31 0
346 | 137 1.2 3.14 23.87 "Absent" 66 24.13 45 37 0
347 | 198 0.52 11.89 27.68 "Present" 48 28.4 78.99 26 1
348 | 154 4.5 4.75 23.52 "Present" 43 25.76 0 53 1
349 | 128 5.4 2.36 12.98 "Absent" 51 18.36 6.69 61 0
350 | 130 0.08 5.59 25.42 "Present" 50 24.98 6.27 43 1
351 | 162 5.6 4.24 22.53 "Absent" 29 22.91 5.66 60 0
352 | 120 10.5 2.7 29.87 "Present" 54 24.5 16.46 49 0
353 | 136 3.99 2.58 16.38 "Present" 53 22.41 27.67 36 0
354 | 176 1.2 8.28 36.16 "Present" 42 27.81 11.6 58 1
355 | 134 11.79 4.01 26.57 "Present" 38 21.79 38.88 61 1
356 | 122 1.7 5.28 32.23 "Present" 51 24.08 0 54 0
357 | 134 0.9 3.18 23.66 "Present" 52 23.26 27.36 58 1
358 | 134 0 2.43 22.24 "Absent" 52 26.49 41.66 24 0
359 | 136 6.6 6.08 32.74 "Absent" 64 33.28 2.72 49 0
360 | 132 4.05 5.15 26.51 "Present" 31 26.67 16.3 50 0
361 | 152 1.68 3.58 25.43 "Absent" 50 27.03 0 32 0
362 | 132 12.3 5.96 32.79 "Present" 57 30.12 21.5 62 1
363 | 124 0.4 3.67 25.76 "Absent" 43 28.08 20.57 34 0
364 | 140 4.2 2.91 28.83 "Present" 43 24.7 47.52 48 0
365 | 166 0.6 2.42 34.03 "Present" 53 26.96 54 60 0
366 | 156 3.02 5.35 25.72 "Present" 53 25.22 28.11 52 1
367 | 132 0.72 4.37 19.54 "Absent" 48 26.11 49.37 28 0
368 | 150 0 4.99 27.73 "Absent" 57 30.92 8.33 24 0
369 | 134 0.12 3.4 21.18 "Present" 33 26.27 14.21 30 0
370 | 126 3.4 4.87 15.16 "Present" 65 22.01 11.11 38 0
371 | 148 0.5 5.97 32.88 "Absent" 54 29.27 6.43 42 0
372 | 148 8.2 7.75 34.46 "Present" 46 26.53 6.04 64 1
373 | 132 6 5.97 25.73 "Present" 66 24.18 145.29 41 0
374 | 128 1.6 5.41 29.3 "Absent" 68 29.38 23.97 32 0
375 | 128 5.16 4.9 31.35 "Present" 57 26.42 0 64 0
376 | 140 0 2.4 27.89 "Present" 70 30.74 144 29 0
377 | 126 0 5.29 27.64 "Absent" 25 27.62 2.06 45 0
378 | 114 3.6 4.16 22.58 "Absent" 60 24.49 65.31 31 0
379 | 118 1.25 4.69 31.58 "Present" 52 27.16 4.11 53 0
380 | 126 0.96 4.99 29.74 "Absent" 66 33.35 58.32 38 0
381 | 154 4.5 4.68 39.97 "Absent" 61 33.17 1.54 64 1
382 | 112 1.44 2.71 22.92 "Absent" 59 24.81 0 52 0
383 | 140 8 4.42 33.15 "Present" 47 32.77 66.86 44 0
384 | 140 1.68 11.41 29.54 "Present" 74 30.75 2.06 38 1
385 | 128 2.6 4.94 21.36 "Absent" 61 21.3 0 31 0
386 | 126 19.6 6.03 34.99 "Absent" 49 26.99 55.89 44 0
387 | 160 4.2 6.76 37.99 "Present" 61 32.91 3.09 54 1
388 | 144 0 4.17 29.63 "Present" 52 21.83 0 59 0
389 | 148 4.5 10.49 33.27 "Absent" 50 25.92 2.06 53 1
390 | 146 0 4.92 18.53 "Absent" 57 24.2 34.97 26 0
391 | 164 5.6 3.17 30.98 "Present" 44 25.99 43.2 53 1
392 | 130 0.54 3.63 22.03 "Present" 69 24.34 12.86 39 1
393 | 154 2.4 5.63 42.17 "Present" 59 35.07 12.86 50 1
394 | 178 0.95 4.75 21.06 "Absent" 49 23.74 24.69 61 0
395 | 180 3.57 3.57 36.1 "Absent" 36 26.7 19.95 64 0
396 | 134 12.5 2.73 39.35 "Absent" 48 35.58 0 48 0
397 | 142 0 3.54 16.64 "Absent" 58 25.97 8.36 27 0
398 | 162 7 7.67 34.34 "Present" 33 30.77 0 62 0
399 | 218 11.2 2.77 30.79 "Absent" 38 24.86 90.93 48 1
400 | 126 8.75 6.06 32.72 "Present" 33 27 62.43 55 1
401 | 126 0 3.57 26.01 "Absent" 61 26.3 7.97 47 0
402 | 134 6.1 4.77 26.08 "Absent" 47 23.82 1.03 49 0
403 | 132 0 4.17 36.57 "Absent" 57 30.61 18 49 0
404 | 178 5.5 3.79 23.92 "Present" 45 21.26 6.17 62 1
405 | 208 5.04 5.19 20.71 "Present" 52 25.12 24.27 58 1
406 | 160 1.15 10.19 39.71 "Absent" 31 31.65 20.52 57 0
407 | 116 2.38 5.67 29.01 "Present" 54 27.26 15.77 51 0
408 | 180 25.01 3.7 38.11 "Present" 57 30.54 0 61 1
409 | 200 19.2 4.43 40.6 "Present" 55 32.04 36 60 1
410 | 112 4.2 3.58 27.14 "Absent" 52 26.83 2.06 40 0
411 | 120 0 3.1 26.97 "Absent" 41 24.8 0 16 0
412 | 178 20 9.78 33.55 "Absent" 37 27.29 2.88 62 1
413 | 166 0.8 5.63 36.21 "Absent" 50 34.72 28.8 60 0
414 | 164 8.2 14.16 36.85 "Absent" 52 28.5 17.02 55 1
415 | 216 0.92 2.66 19.85 "Present" 49 20.58 0.51 63 1
416 | 146 6.4 5.62 33.05 "Present" 57 31.03 0.74 46 0
417 | 134 1.1 3.54 20.41 "Present" 58 24.54 39.91 39 1
418 | 158 16 5.56 29.35 "Absent" 36 25.92 58.32 60 0
419 | 176 0 3.14 31.04 "Present" 45 30.18 4.63 45 0
420 | 132 2.8 4.79 20.47 "Present" 50 22.15 11.73 48 0
421 | 126 0 4.55 29.18 "Absent" 48 24.94 36 41 0
422 | 120 5.5 3.51 23.23 "Absent" 46 22.4 90.31 43 0
423 | 174 0 3.86 21.73 "Absent" 42 23.37 0 63 0
424 | 150 13.8 5.1 29.45 "Present" 52 27.92 77.76 55 1
425 | 176 6 3.98 17.2 "Present" 52 21.07 4.11 61 1
426 | 142 2.2 3.29 22.7 "Absent" 44 23.66 5.66 42 1
427 | 132 0 3.3 21.61 "Absent" 42 24.92 32.61 33 0
428 | 142 1.32 7.63 29.98 "Present" 57 31.16 72.93 33 0
429 | 146 1.16 2.28 34.53 "Absent" 50 28.71 45 49 0
430 | 132 7.2 3.65 17.16 "Present" 56 23.25 0 34 0
431 | 120 0 3.57 23.22 "Absent" 58 27.2 0 32 0
432 | 118 0 3.89 15.96 "Absent" 65 20.18 0 16 0
433 | 108 0 1.43 26.26 "Absent" 42 19.38 0 16 0
434 | 136 0 4 19.06 "Absent" 40 21.94 2.06 16 0
435 | 120 0 2.46 13.39 "Absent" 47 22.01 0.51 18 0
436 | 132 0 3.55 8.66 "Present" 61 18.5 3.87 16 0
437 | 136 0 1.77 20.37 "Absent" 45 21.51 2.06 16 0
438 | 138 0 1.86 18.35 "Present" 59 25.38 6.51 17 0
439 | 138 0.06 4.15 20.66 "Absent" 49 22.59 2.49 16 0
440 | 130 1.22 3.3 13.65 "Absent" 50 21.4 3.81 31 0
441 | 130 4 2.4 17.42 "Absent" 60 22.05 0 40 0
442 | 110 0 7.14 28.28 "Absent" 57 29 0 32 0
443 | 120 0 3.98 13.19 "Present" 47 21.89 0 16 0
444 | 166 6 8.8 37.89 "Absent" 39 28.7 43.2 52 0
445 | 134 0.57 4.75 23.07 "Absent" 67 26.33 0 37 0
446 | 142 3 3.69 25.1 "Absent" 60 30.08 38.88 27 0
447 | 136 2.8 2.53 9.28 "Present" 61 20.7 4.55 25 0
448 | 142 0 4.32 25.22 "Absent" 47 28.92 6.53 34 1
449 | 130 0 1.88 12.51 "Present" 52 20.28 0 17 0
450 | 124 1.8 3.74 16.64 "Present" 42 22.26 10.49 20 0
451 | 144 4 5.03 25.78 "Present" 57 27.55 90 48 1
452 | 136 1.81 3.31 6.74 "Absent" 63 19.57 24.94 24 0
453 | 120 0 2.77 13.35 "Absent" 67 23.37 1.03 18 0
454 | 154 5.53 3.2 28.81 "Present" 61 26.15 42.79 42 0
455 | 124 1.6 7.22 39.68 "Present" 36 31.5 0 51 1
456 | 146 0.64 4.82 28.02 "Absent" 60 28.11 8.23 39 1
457 | 128 2.24 2.83 26.48 "Absent" 48 23.96 47.42 27 1
458 | 170 0.4 4.11 42.06 "Present" 56 33.1 2.06 57 0
459 | 214 0.4 5.98 31.72 "Absent" 64 28.45 0 58 0
460 | 182 4.2 4.41 32.1 "Absent" 52 28.61 18.72 52 1
461 | 108 3 1.59 15.23 "Absent" 40 20.09 26.64 55 0
462 | 118 5.4 11.61 30.79 "Absent" 64 27.35 23.97 40 0
463 | 132 0 4.82 33.41 "Present" 62 14.7 0 46 1
--------------------------------------------------------------------------------