├── aymericdamien-Examples
│ ├── examples
│ │ ├── recurrent_network.py
│ │ ├── a06-rocksetta-alexnet.sh
│ │ ├── a05-rocksetta-nearest_neighbor.sh
│ │ ├── a03-rocksetta-linear_regression.sh
│ │ ├── a09-rocksetta-recurrent_network.sh
│ │ ├── a10-rocksetta-multigpu_basics.sh
│ │ ├── a11-rocksetta-graph_visualization.sh
│ │ ├── a04-rocksetta-logistic_regression.sh
│ │ ├── a07-rocksetta-convolutional_network.sh
│ │ ├── a08-rocksetta-multilayer_perceptron.sh
│ │ ├── a12-rocksetta-loss_visualization.sh
│ │ ├── a01-rocksetta-helloworld.sh
│ │ ├── a02-rocksetta-basic_operations.sh
│ │ ├── helloworld.py
│ │ ├── nearest_neighbor.py
│ │ ├── logistic_regression.py
│ │ ├── multigpu_basics.py
│ │ ├── basic_operations.py
│ │ ├── graph_visualization.py
│ │ ├── linear_regression.py
│ │ ├── loss_visualization.py
│ │ ├── multilayer_perceptron.py
│ │ ├── convolutional_network.py
│ │ ├── alexnet.py
│ │ └── input_data.py
│ ├── README.md
│ ├── multigpu_basics.py
│ ├── input_data.py
│ └── Setup_TensorFlow.md
├── rocksetta-examples
│ ├── Thumbs.db
│ ├── myImage.jpg
│ ├── cloud9-tf.png
│ ├── new-workspace.png
│ ├── tensorflow-teacher.png
│ ├── hello42.py
│ ├── a01-rocksetta-hello42.sh
│ ├── a02-rocksetta-good2d.sh
│ ├── a05-rocksetta-summary_writer_test.sh
│ ├── a06-rocksetta-classify_image.sh
│ ├── a07-rocksetta-myImage.sh
│ ├── README.md
│ ├── practice.ipynb
│ ├── a11-rocksetta-tb-normal.sh
│ ├── a12-rocksetta-tensorBoard-advanced.sh
│ ├── a03-rocksetta-tb-example.sh
│ ├── a04-rocksetta-tensorboard.sh
│ ├── good2d.py
│ ├── z00-template-notebook.ipynb
│ ├── z12-advanced-tensorboard-noptebook.ipynb
│ ├── tb-normal.py
│ ├── tb-example.py
│ └── input_data.py
├── bcomposes-examples
│ ├── simdata
│ │ ├── .RData
│ │ ├── linear_data_train.jpg
│ │ ├── moon_data_train.jpg
│ │ ├── saturn_data_train.jpg
│ │ ├── plot_data.R
│ │ ├── plot_hidden_curve.R
│ │ ├── generate_moon_data.py
│ │ ├── plot_hyperplane.R
│ │ ├── output_curve_hidden_nodes.txt
│ │ ├── generate_linear_data.R
│ │ ├── generate_saturn_data.R
│ │ ├── saturn_data_eval.csv
│ │ ├── linear_data_eval.csv
│ │ └── saturn_data_train.csv
│ ├── a02-rocksetta-softmax.sh
│ ├── a03-rocksetta-softmax.sh
│ ├── a01-rocksetta-hidden.sh
│ ├── a04-rocksetta-hidden.sh
│ ├── z01-hidden-notebook.ipynb
│ ├── z00-template-notebook.ipynb
│ ├── z02-softmax-notebook.ipynb
│ ├── README.md
│ ├── softmax.py
│ ├── truncnorm_hidden.py
│ ├── hidden.py
│ ├── hidden-backup.py
│ └── LICENSE
├── hello42.py
├── reload-jupyter-notbook.sh
├── a01-rocksetta-hello42.sh
├── old
│ ├── setup01.sh
│ ├── setup02.sh
│ ├── setup03.sh
│ ├── ipython-01-notebook-setup.sh
│ ├── setup05.sh
│ ├── setup06.sh
│ ├── future.txt
│ └── setup-new01.sh
├── where-is-notebook.sh
├── tmp5
│ └── README.md
├── LICENSE
├── reminder.txt
├── setup.sh
├── README.md
└── setup-new02.sh
/aymericdamien-Examples/examples/recurrent_network.py:
--------------------------------------------------------------------------------
1 | under dev..
2 |
--------------------------------------------------------------------------------
/rocksetta-examples/Thumbs.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hpssjellis/easy-tensorflow-on-cloud9/HEAD/rocksetta-examples/Thumbs.db
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a06-rocksetta-alexnet.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | python alexnet.py
4 |
--------------------------------------------------------------------------------
/rocksetta-examples/myImage.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hpssjellis/easy-tensorflow-on-cloud9/HEAD/rocksetta-examples/myImage.jpg
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/.RData:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hpssjellis/easy-tensorflow-on-cloud9/HEAD/bcomposes-examples/simdata/.RData
--------------------------------------------------------------------------------
/rocksetta-examples/cloud9-tf.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hpssjellis/easy-tensorflow-on-cloud9/HEAD/rocksetta-examples/cloud9-tf.png
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a05-rocksetta-nearest_neighbor.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | python nearest_neighbor.py
4 |
--------------------------------------------------------------------------------
/rocksetta-examples/new-workspace.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hpssjellis/easy-tensorflow-on-cloud9/HEAD/rocksetta-examples/new-workspace.png
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a03-rocksetta-linear_regression.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | python linear_regression.py
4 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a09-rocksetta-recurrent_network.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | python recurrent_network.py
4 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a10-rocksetta-multigpu_basics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | python multigpu_basics.py
4 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a11-rocksetta-graph_visualization.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | python graph_visualization.py
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a04-rocksetta-logistic_regression.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | python logistic_regression.py
4 |
--------------------------------------------------------------------------------
/rocksetta-examples/tensorflow-teacher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hpssjellis/easy-tensorflow-on-cloud9/HEAD/rocksetta-examples/tensorflow-teacher.png
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a07-rocksetta-convolutional_network.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | python convolutional_network.py
4 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a08-rocksetta-multilayer_perceptron.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | python multilayer_perceptron.py
4 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a12-rocksetta-loss_visualization.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | python loss_visualization.py
4 |
5 |
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/linear_data_train.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hpssjellis/easy-tensorflow-on-cloud9/HEAD/bcomposes-examples/simdata/linear_data_train.jpg
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/moon_data_train.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hpssjellis/easy-tensorflow-on-cloud9/HEAD/bcomposes-examples/simdata/moon_data_train.jpg
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/saturn_data_train.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hpssjellis/easy-tensorflow-on-cloud9/HEAD/bcomposes-examples/simdata/saturn_data_train.jpg
--------------------------------------------------------------------------------
/hello42.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | hello = tf.constant('Hello, TensorFlow!')
3 | sess = tf.Session()
4 | print(sess.run(hello))
5 |
6 | a = tf.constant(10)
7 | b = tf.constant(32)
8 | print(sess.run(a + b))
9 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/README.md:
--------------------------------------------------------------------------------
1 | Great set of examples originally at https://github.com/aymericdamien/TensorFlow-Examples by Aymeric Damien
2 |
3 | I have condensed the files into one example folder
4 |
5 | .
6 |
7 |
--------------------------------------------------------------------------------
/rocksetta-examples/hello42.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | hello = tf.constant('Hello, TensorFlow!')
3 | sess = tf.Session()
4 | print(sess.run(hello))
5 |
6 | a = tf.constant(10)
7 | b = tf.constant(32)
8 | print(sess.run(a + b))
9 |
--------------------------------------------------------------------------------
/reload-jupyter-notbook.sh:
--------------------------------------------------------------------------------
1 |
2 |
3 | #!/bin/bash
4 |
5 | # only need to run this script with the command (do not type the #)
6 | # bash reload-jupyter-notbook.sh
7 | source ~/virtual-tf/bin/activate
8 |
9 |
10 |
11 |
12 | jupyter notebook --ip=0.0.0.0 --port=8080 --no-browser
13 |
--------------------------------------------------------------------------------
/a01-rocksetta-hello42.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 |
6 |
7 | echo "running python hello42.py from the rocksetta-examples folder"
8 | echo ""
9 | echo ""
10 |
11 | python hello42.py
12 |
13 | echo ""
14 | echo "--------------------Done--------------------------"
15 |
--------------------------------------------------------------------------------
/rocksetta-examples/a01-rocksetta-hello42.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 | echo "running python hello42.py from the rocksetta-examples folder"
5 | echo ""
6 | echo ""
7 |
8 | python hello42.py
9 |
10 | echo ""
11 | echo "--------------------Done--------------------------"
12 |
--------------------------------------------------------------------------------
/rocksetta-examples/a02-rocksetta-good2d.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 |
6 |
7 | echo "running python good2d.py from the rocksetta-examples folder"
8 | echo ""
9 | echo ""
10 |
11 | python good2d.py
12 |
13 | echo ""
14 | echo "--------------------Done--------------------------"
15 |
--------------------------------------------------------------------------------
/old/setup01.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "This is a shell script"
4 |
5 | ls -lah
6 |
7 | echo "I am done running ls"
8 |
9 | SOMEVAR='text stuff'
10 |
11 | echo "$SOMEVAR"
12 |
13 |
14 | git clone --recurse-submodules https://github.com/tensorflow/tensorflow
15 |
16 |
17 |
18 |
19 | echo "run by bash myFilename.sh"
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a01-rocksetta-helloworld.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 | echo "running python helloworld.py from the aymericdamien-Examples folder"
6 | echo ""
7 | echo ""
8 |
9 | python helloworld.py
10 |
11 | echo ""
12 | echo "--------------------Done--------------------------"
13 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/a02-rocksetta-basic_operations.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 |
6 |
7 |
8 |
9 | echo "running python basic_operations.py from the aymericdamien-Examples folder"
10 | echo ""
11 | echo ""
12 |
13 | python basic_operations.py
14 |
15 | echo ""
16 | echo "--------------------Done--------------------------"
17 |
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/plot_data.R:
--------------------------------------------------------------------------------
1 | filename = "linear_data_train"
2 | #filename = "moon_data_train"
3 | #filename = "saturn_data_train"
4 |
5 | data = read.table(paste(filename,".csv",sep=''),header=F,sep=',')
6 | colnames(data) = c("label","x","y")
7 |
8 | jpeg(paste(filename,".jpg",sep=''))
9 | plot(data[,c("x","y")],pch=21,bg=c("orange","blue")[data[,"label"]+1])
10 | dev.off()
11 |
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/plot_hidden_curve.R:
--------------------------------------------------------------------------------
1 | d = read.table("output_curve_hidden_nodes.txt",sep=",")
2 | d$V2 = d$V2*100
3 | means = aggregate(V2 ~ V1, d, mean)
4 |
5 | #jpeg("hidden_node_curve.jpg")
6 | plot(means$V1,means$V2,xlab="Number of hidden nodes.",ylab="Accuracy",ylim=c(40,100),pch=21,bg="blue")
7 | lines(means$V1,means$V2)
8 | points(d$V1,jitter(d$V2,1),pch=1,cex=.5)
9 | #dev.off()
--------------------------------------------------------------------------------
/where-is-notebook.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # only need to run this script with the command (do not type the #)
4 | # bash where-is-notebook.sh
5 |
6 |
7 |
8 |
9 | echo "Click this window, then click the link on the last line, then click open"
10 | echo "If the notebook is not running, then right click run or bash reload-jupyter-notbook.sh"
11 |
12 | echo "http://0.0.0.0:8080/"
13 |
--------------------------------------------------------------------------------
/rocksetta-examples/a05-rocksetta-summary_writer_test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | echo "Running true tensorFlow examples!"
4 | echo "running summary_writer_test.py in the folder /home/ubuntu/workspace/tensorflow/tensorflow/python/training"
5 | echo ""
6 | echo ""
7 |
8 | python /home/ubuntu/workspace/tensorflow/tensorflow/python/training/summary_writer_test.py
9 |
10 | echo ""
11 | echo "--------------------Done--------------------------"
12 |
--------------------------------------------------------------------------------
/rocksetta-examples/a06-rocksetta-classify_image.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | echo "Running true tensorFlow examples!"
4 | echo "running classify_image.py in the folder /home/ubuntu/workspace/tensorflow/tensorflow/models/image/imagenet"
5 | echo ""
6 | echo ""
7 |
8 | python /home/ubuntu/workspace/tensorflow/tensorflow/models/image/imagenet/classify_image.py
9 |
10 | echo ""
11 | echo "--------------------Done--------------------------"
12 |
--------------------------------------------------------------------------------
/bcomposes-examples/a02-rocksetta-softmax.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 |
6 |
7 |
8 | echo "running python softmax.py --train simdata/linear_data_train.csv --test simdata/linear_data_eval.csv"
9 | echo "from the bcomposes-examples folder"
10 | echo ""
11 |
12 |
13 | python softmax.py --train simdata/linear_data_train.csv --test simdata/linear_data_eval.csv
14 |
15 |
16 | echo ""
17 | echo "--------------------Done--------------------------"
18 |
--------------------------------------------------------------------------------
/tmp5/README.md:
--------------------------------------------------------------------------------
1 | This folder is visible in the cloud 9 directory tree and is needed for the tensorBoard web application.
2 | The normal location for these files is /tmp but that is out of view for cloud9 (you can still get there using cd /tmp)
3 |
4 | To use this directory properly, search your python files for /tmp and change it to
5 | ``` bash
6 | /home/ubuntu/workspace/tmp5
7 | ```
8 |
9 | As long as the tensorBoard application is linked to the same directory, everything should be fine.
10 |
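11 | For example, a minimal sketch for the old TensorFlow 0.x API used in these examples (the same line appears as a comment in the a03/a04 scripts); adjust it to wherever your script writes its logs:
12 |
13 | ``` python
14 | import tensorflow as tf
15 |
16 | sess = tf.Session()
17 | # point the log writer at the visible tmp5 folder so tensorBoard can find it
18 | summary_writer = tf.train.SummaryWriter('/home/ubuntu/workspace/tmp5', sess.graph_def)
19 | ```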
--------------------------------------------------------------------------------
/bcomposes-examples/a03-rocksetta-softmax.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 | echo "running python softmax.py --train simdata/linear_data_train.csv --test simdata/linear_data_eval.csv --num_epochs 2"
6 | echo "from the bcomposes-examples folder"
7 | echo ""
8 |
9 | python softmax.py --train simdata/linear_data_train.csv --test simdata/linear_data_eval.csv --num_epochs 2
10 |
11 |
12 |
13 | echo ""
14 | echo "--------------------Done--------------------------"
15 |
--------------------------------------------------------------------------------
/rocksetta-examples/a07-rocksetta-myImage.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 | echo "Running true tensorFlow examples!"
4 | echo "running classify_image.py in the folder /home/ubuntu/workspace/tensorflow/tensorflow/models/image/imagenet"
5 | echo ""
6 | echo ""
7 |
8 | python /home/ubuntu/workspace/tensorflow/tensorflow/models/image/imagenet/classify_image.py --image_file=myImage.jpg
9 |
10 | echo ""
11 | echo "--------------------Done--------------------------"
12 |
13 |
14 |
--------------------------------------------------------------------------------
/bcomposes-examples/a01-rocksetta-hidden.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 |
6 |
7 | echo "running python hidden.py --train simdata/moon_data_train.csv --test simdata/moon_data_eval.csv --num_epochs 100 --num_hidden 3"
8 | echo "from the bcomposes-examples folder"
9 | echo ""
10 |
11 | python hidden.py --train simdata/moon_data_train.csv --test simdata/moon_data_eval.csv --num_epochs 100 --num_hidden 3
12 |
13 |
14 | echo ""
15 | echo "--------------------Done--------------------------"
16 |
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/generate_moon_data.py:
--------------------------------------------------------------------------------
1 | from sklearn import datasets
2 |
3 | X, y = datasets.make_moons(2000, noise=0.20)
4 |
5 | # Can't believe I'm doing it this way, but join doesn't work
6 | # on numpy strings and I'm on a plane unable to lookup the
7 | # right way to join a column to a matrix and output as CSV.
8 | for x_i,y_i in zip(X,y):
9 | output = ''
10 | output += str(y_i)
11 | for j in range(0,len(x_i)):
12 | output += ','
13 | output += str(x_i[j])
14 | print output
15 |
16 |
17 |
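18 | # (A possible alternative, untested here: numpy can write the CSV in one call,
19 | #  e.g. np.savetxt(sys.stdout, np.column_stack((y, X)), delimiter=',', fmt='%g'),
20 | #  which would need "import sys" and "import numpy as np" at the top.)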
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/plot_hyperplane.R:
--------------------------------------------------------------------------------
1 | filename = "linear_data_train"
2 |
3 | data = read.table(paste(filename,".csv",sep=''),header=F,sep=',')
4 | colnames(data) = c("label","x","y")
5 |
6 | #jpeg("linear_data_hyperplane.jpg")
7 | plot(data[,c("x","y")],pch=21,bg=c("orange","blue")[data[,"label"]+1],xlim=c(-.25,1),ylim=c(-.25,1))
8 | abline(h=0,lty=2)
9 | abline(v=0,lty=2)
10 |
11 | w0 = -1.87038445
12 | w1 = -2.23716712
13 | b = 1.57296884
14 | slope = -1*(w0/w1)
15 | intercept = -1*(b/w1)
16 | abline(coef=c(intercept,slope),lwd=3)
17 | #dev.off()
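18 | # (The plotted line is the solution of w0*x + w1*y + b = 0, i.e. y = -(w0/w1)*x - (b/w1),
19 | #  which is where the slope and intercept above come from.)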
--------------------------------------------------------------------------------
/bcomposes-examples/a04-rocksetta-hidden.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 | echo "running python hidden.py --train simdata/saturn_data_train.csv --test simdata/saturn_data_eval.csv --num_epochs 50 --num_hidden 2"
11 | echo "from the bcomposes-examples folder"
12 | echo ""
13 |
14 | python hidden.py --train simdata/saturn_data_train.csv --test simdata/saturn_data_eval.csv --num_epochs 50 --num_hidden 2
15 |
16 |
17 |
18 |
19 | echo ""
20 | echo "--------------------Done--------------------------"
21 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/helloworld.py:
--------------------------------------------------------------------------------
1 | '''
2 | HelloWorld example using TensorFlow library.
3 |
4 | Author: Aymeric Damien
5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
6 | '''
7 |
8 | import tensorflow as tf
9 |
10 | #Simple hello world using TensorFlow
11 |
12 | # Create a Constant op
13 | # The op is added as a node to the default graph.
14 | #
15 | # The value returned by the constructor represents the output
16 | # of the Constant op.
17 | hello = tf.constant('Hello, TensorFlow!')
18 |
19 | # Start tf session
20 | sess = tf.Session()
21 |
22 | print sess.run(hello)
23 |
--------------------------------------------------------------------------------
/rocksetta-examples/README.md:
--------------------------------------------------------------------------------
1 | First try the program hello42.py by getting into this directory from anywhere by typing in a terminal
2 |
3 | ``` bash
4 | cd /home/ubuntu/workspace/rocksetta-examples/
5 | ```
6 |
7 | Remember to use the tab key, as it auto-completes. Using the cd and ls commands you can move around the workspace.
8 | Note: cd .. goes up a directory
9 |
10 |
11 | Then try the examples in this order
12 |
13 | 1. hello42
14 | 1.
15 | 1.
16 | 1.
17 |
18 |
19 | The commands all work the same by running
20 |
21 | python filename.py
22 |
23 | ``` bash
24 | python hello42.py
25 | ```
26 |
27 | .
28 |
29 |
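30 | For example, assuming the ~/virtual-tf virtual environment from the setup scripts, a full manual run looks like this:
31 |
32 | ``` bash
33 | cd /home/ubuntu/workspace/rocksetta-examples/
34 | source ~/virtual-tf/bin/activate
35 | python hello42.py
36 | deactivate
37 | ```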
--------------------------------------------------------------------------------
/rocksetta-examples/practice.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": []
11 | }
12 | ],
13 | "metadata": {
14 | "kernelspec": {
15 | "display_name": "Python 2",
16 | "language": "python",
17 | "name": "python2"
18 | },
19 | "language_info": {
20 | "codemirror_mode": {
21 | "name": "ipython",
22 | "version": 2
23 | },
24 | "file_extension": ".py",
25 | "mimetype": "text/x-python",
26 | "name": "python",
27 | "nbconvert_exporter": "python",
28 | "pygments_lexer": "ipython2",
29 | "version": "2.7.6"
30 | }
31 | },
32 | "nbformat": 4,
33 | "nbformat_minor": 0
34 | }
35 |
--------------------------------------------------------------------------------
/old/setup02.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "This is a shell script"
4 |
5 | ls -lah
6 |
7 | echo "I am done running ls"
8 |
9 | SOMEVAR='text stuff'
10 |
11 | echo "$SOMEVAR"
12 |
13 |
14 | python manage.py migrate
15 |
16 | python manage.py runserver $IP:$PORT
17 |
18 | mysql-ctl start
19 |
20 | python manage.py syncdb
21 |
22 | curl https://raw.github.com/pypa/pip/master/contrib/get-pip.py | python
23 |
24 | sudo apt-get install python-pip python-dev
25 |
26 | sudo pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.6.0-cp27-none-linux_x86_64.whl
27 |
28 |
29 |
30 | python -c 'import site; print("\n".join(site.getsitepackages()))'
31 |
32 |
33 |
34 | git clone --recurse-submodules https://github.com/tensorflow/tensorflow
35 |
36 |
37 |
38 |
39 | echo "run by bash myFilename.sh"
40 |
--------------------------------------------------------------------------------
/rocksetta-examples/a11-rocksetta-tb-normal.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 |
6 |
7 |
8 | echo "running tb-normal.py from the rocksetta-examples folder. "
9 | echo "You need to also run a12-rocksetta-tensorboard-advanced.sh with this program!"
10 | echo ""
11 |
12 |
13 |
14 | python tb-normal.py
15 |
16 |
17 |
18 |
19 | echo "On cloud9 run the menu item preview-->preview running application "
20 | echo "To see the tensorBoard website"
21 | echo "This one uses the sort of hidden /tmp folder"
22 | echo "The /tmp folder may work, but is hidden from the main cloud9 folders"
23 |
24 |
25 | echo "TO SEE TENSORBOARD YOU MUST NOW ALSO RUN A12-ROCKSETTA-TENSORBOARD-ADVANCED.SH"
26 | echo "--------------------Done--------------------------"
27 |
28 |
29 |
30 | # assumes something like the following in your code
31 | # summary_writer = tf.train.SummaryWriter('/tmp', sess.graph_def)
32 |
--------------------------------------------------------------------------------
/rocksetta-examples/a12-rocksetta-tensorBoard-advanced.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 |
6 |
7 |
8 | echo "running tensorboard from the pip folders"
9 | echo ""
10 | echo ""
11 |
12 | #tensorboard --logdir=/tmp --host $IP --port $PORT
13 |
14 | #since $PORT 8080 is now taken by jupyter notebook can use 8082
15 | tensorboard --logdir=/home/ubuntu/workspace/tmp5 --host $IP --port=8082
16 |
17 | echo "On cloud9 run the menu item preview-->preview running application "
18 | echo "To see the tensorBoard website"
19 | echo "The /tmp directory is not visible in the regular workspace directory."
20 | echo "You can view the /tmp folder using cd /tmp and then dir or ls commands"
21 | echo "--------------------Done--------------------------"
22 |
23 |
24 |
25 | # assumes something like the following in your code
26 | # summary_writer = tf.train.SummaryWriter('/tmp/tensorflow-logs', sess.graph_def)
27 |
--------------------------------------------------------------------------------
/rocksetta-examples/a03-rocksetta-tb-example.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 |
6 |
7 |
8 | echo "running tb-example.py from the rocksetta-examples folder. "
9 | echo "You need to also run a04-rocksetta-tensorboard.sh with this program!"
10 | echo ""
11 |
12 |
13 |
14 | python tb-example.py
15 |
16 |
17 |
18 |
19 | echo "On cloud9 run the menu item preview-->preview running application "
20 | echo "To see the tensorBoard website"
21 | echo "Check that the .py file uses /home/ubuntu/workspace/tmp5 instead of the /tmp directory"
22 | echo "The /tmp folder may work, but is hidden from the main cloud9 folders"
23 |
24 |
25 | echo "TO SEE TENSORBOARD YOU MUST NOW ALSO RUN A04-ROCKSETTA-TENSORBOARD.SH"
26 | echo "--------------------Done--------------------------"
27 |
28 |
29 |
30 | # assumes something like the following in your code
31 | # summary_writer = tf.train.SummaryWriter('/home/ubuntu/workspace/tmp5', sess.graph_def)
32 |
--------------------------------------------------------------------------------
/rocksetta-examples/a04-rocksetta-tensorboard.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ~/virtual-tf/bin/activate
3 |
4 |
5 |
6 |
7 |
8 | echo "running tensorboard from the pip folders"
9 | echo ""
10 | echo ""
11 |
12 | #tensorboard --logdir=/home/ubuntu/workspace/tmp5 --host $IP --port $PORT
13 |
14 | #since $PORT 8080 is now taken by jupyter notebook can use 8081
15 | tensorboard --logdir=/home/ubuntu/workspace/tmp5 --host $IP --port=8081
16 |
17 |
18 | echo "On cloud9 run the menu item preview-->preview running application "
19 | echo "To see the tensorBoard website"
20 | echo "Check that the .py file uses /home/ubuntu/workspace/tmp5 instead of the /tmp directory"
21 | echo "The /tmp folder may work, but is hidden from the main cloud9 folders"
22 | echo "--------------------Done--------------------------"
23 |
24 |
25 |
26 | # assumes something like the following in your code
27 | # summary_writer = tf.train.SummaryWriter('/home/ubuntu/workspace/tmp5', sess.graph_def)
28 |
--------------------------------------------------------------------------------
/old/setup03.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # only need to run this script with the command (do not type the #)
4 | # bash setup.sh
5 |
6 |
7 | echo "First checking the Python version"
8 | python --version
9 |
10 | echo "--------------------------------------------------------------"
11 | echo ". "
12 |
13 |
14 | echo "Now checking if pip is installed"
15 |
16 | pip list
17 |
18 | echo "--------------------------------------------------------------"
19 | echo ". "
20 |
21 |
22 | echo "next list this directory"
23 | pwd
24 | ls -lah
25 |
26 | echo "--------------------------------------------------------------"
27 | echo "Some other useful commands are cd cd .. dir ls pwd "
28 | echo "."
29 |
30 |
31 |
32 |
33 | echo "Now installing TensorFlow"
34 |
35 | sudo pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.6.0-cp27-none-linux_x86_64.whl
36 |
37 |
38 | echo "--------------------------------------------------------------"
39 | echo ". "
40 |
--------------------------------------------------------------------------------
/rocksetta-examples/good2d.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 |
4 | # Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
5 | x_data = np.random.rand(100).astype("float32")
6 | y_data = x_data * 0.1 + 0.3
7 |
8 | # Try to find values for W and b that compute y_data = W * x_data + b
9 | # (We know that W should be 0.1 and b 0.3, but Tensorflow will
10 | # figure that out for us.)
11 | W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
12 | b = tf.Variable(tf.zeros([1]))
13 | y = W * x_data + b
14 |
15 | # Minimize the mean squared errors.
16 | loss = tf.reduce_mean(tf.square(y - y_data))
17 | optimizer = tf.train.GradientDescentOptimizer(0.5)
18 | train = optimizer.minimize(loss)
19 |
20 | # Before starting, initialize the variables. We will 'run' this first.
21 | init = tf.initialize_all_variables()
22 |
23 | # Launch the graph.
24 | sess = tf.Session()
25 | sess.run(init)
26 |
27 | # Fit the line.
28 | for step in xrange(201):
29 | sess.run(train)
30 | if step % 20 == 0:
31 | print(step, sess.run(W), sess.run(b))
32 |
33 | # Learns best fit is W: [0.1], b: [0.3]
34 |
--------------------------------------------------------------------------------
/old/ipython-01-notebook-setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # only need to run this script with the command (do not type the #)
4 | # bash ipython-01-notebook-setup.sh
5 |
6 |
7 | wget https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh
8 | chmod a+x Miniconda-latest-Linux-x86_64.sh
9 | bash Miniconda-latest-Linux-x86_64.sh
10 | rm Miniconda-latest-Linux-x86_64.sh
11 | conda create -n myPy27 python
12 | source activate myPy27
13 |
14 | # for ipython 3
15 | #wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
16 | #chmod a+x Miniconda3-latest-Linux-x86_64.sh
17 | #bash Miniconda3-latest-Linux-x86_64.sh
18 | #rm Miniconda3-latest-Linux-x86_64.sh
19 | #conda create -n py3 python=3 ipython
20 | #source activate py3
21 |
22 |
23 |
24 |
25 |
26 | export IPYTHON_HOME=/home/ubuntu/miniconda2
27 | export PATH=$PATH:$IPYTHON_HOME/bin
28 |
29 |
30 |
31 |
32 |
33 |
34 | conda install ipython-notebook
35 |
36 |
37 | ipython notebook --ip=0.0.0.0 --port=8080 --no-browser
38 |
39 | echo "You can load the browser by clicking on the following and selecting open"
40 | echo "http://$C9_HOSTNAME:8080/tree"
41 |
42 |
--------------------------------------------------------------------------------
/bcomposes-examples/z01-hidden-notebook.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [
10 |
11 | ],
12 | "source": [
13 | "!python hidden.py --train simdata/moon_data_train.csv --test simdata/moon_data_eval.csv --num_epochs 100 --num_hidden 3"
14 | ]
15 | },
16 | {
17 | "cell_type": "code",
18 | "execution_count": null,
19 | "metadata": {
20 | "collapsed": true
21 | },
22 | "outputs": [],
23 | "source": [
24 | "!load hidden.py"
25 | ]
26 | }
27 | ],
28 | "metadata": {
29 | "kernelspec": {
30 | "display_name": "Python 2",
31 | "language": "python",
32 | "name": "python2"
33 | },
34 | "language_info": {
35 | "codemirror_mode": {
36 | "name": "ipython",
37 | "version": 2
38 | },
39 | "file_extension": ".py",
40 | "mimetype": "text/x-python",
41 | "name": "python",
42 | "nbconvert_exporter": "python",
43 | "pygments_lexer": "ipython2",
44 | "version": "2.7.6"
45 | }
46 | },
47 | "nbformat": 4,
48 | "nbformat_minor": 0
49 | }
50 |
--------------------------------------------------------------------------------
/bcomposes-examples/z00-template-notebook.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "#Enter a bash command below and click run to activate\n",
12 | "!"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {
19 | "collapsed": true
20 | },
21 | "outputs": [],
22 | "source": [
23 | "#Enter a python file to load and edit. Changes will be automatically saved!\n",
24 | "%load "
25 | ]
26 | }
27 | ],
28 | "metadata": {
29 | "kernelspec": {
30 | "display_name": "Python 2",
31 | "language": "python",
32 | "name": "python2"
33 | },
34 | "language_info": {
35 | "codemirror_mode": {
36 | "name": "ipython",
37 | "version": 2
38 | },
39 | "file_extension": ".py",
40 | "mimetype": "text/x-python",
41 | "name": "python",
42 | "nbconvert_exporter": "python",
43 | "pygments_lexer": "ipython2",
44 | "version": "2.7.6"
45 | }
46 | },
47 | "nbformat": 4,
48 | "nbformat_minor": 0
49 | }
50 |
--------------------------------------------------------------------------------
/rocksetta-examples/z00-template-notebook.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "#Enter a bash command below and click run to activate\n",
12 | "!"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {
19 | "collapsed": true
20 | },
21 | "outputs": [],
22 | "source": [
23 | "#Enter a python file to load and edit. Changes will be automatically saved!\n",
24 | "%load "
25 | ]
26 | }
27 | ],
28 | "metadata": {
29 | "kernelspec": {
30 | "display_name": "Python 2",
31 | "language": "python",
32 | "name": "python2"
33 | },
34 | "language_info": {
35 | "codemirror_mode": {
36 | "name": "ipython",
37 | "version": 2
38 | },
39 | "file_extension": ".py",
40 | "mimetype": "text/x-python",
41 | "name": "python",
42 | "nbconvert_exporter": "python",
43 | "pygments_lexer": "ipython2",
44 | "version": "2.7.6"
45 | }
46 | },
47 | "nbformat": 4,
48 | "nbformat_minor": 0
49 | }
50 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 Jeremy Ellis
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
23 |
--------------------------------------------------------------------------------
/bcomposes-examples/z02-softmax-notebook.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "#Enter a bash command below and click run to activate\n",
12 | "!python softmax.py --train simdata/linear_data_train.csv --test simdata/linear_data_eval.csv"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {
19 | "collapsed": true
20 | },
21 | "outputs": [],
22 | "source": [
23 | "#Enter a python file to load and edit. Changes will be automatically saved!\n",
24 | "%load softmax.py"
25 | ]
26 | }
27 | ],
28 | "metadata": {
29 | "kernelspec": {
30 | "display_name": "Python 2",
31 | "language": "python",
32 | "name": "python2"
33 | },
34 | "language_info": {
35 | "codemirror_mode": {
36 | "name": "ipython",
37 | "version": 2
38 | },
39 | "file_extension": ".py",
40 | "mimetype": "text/x-python",
41 | "name": "python",
42 | "nbconvert_exporter": "python",
43 | "pygments_lexer": "ipython2",
44 | "version": "2.7.6"
45 | }
46 | },
47 | "nbformat": 4,
48 | "nbformat_minor": 0
49 | }
50 |
--------------------------------------------------------------------------------
/rocksetta-examples/z12-advanced-tensorboard-noptebook.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "#Enter a bash command below and click run to activate\n",
12 | "!echo \"http://$C9_HOSTNAME:8082/\""
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {
19 | "collapsed": true
20 | },
21 | "outputs": [],
22 | "source": [
23 | "#Enter a python file to load and edit. Changes will be automatically saved!\n",
24 | "!tensorboard --logdir=/home/ubuntu/workspace/tmp5 --host $IP --port=8082"
25 | ]
26 | }
27 | ],
28 | "metadata": {
29 | "kernelspec": {
30 | "display_name": "Python 2",
31 | "language": "python",
32 | "name": "python2"
33 | },
34 | "language_info": {
35 | "codemirror_mode": {
36 | "name": "ipython",
37 | "version": 2
38 | },
39 | "file_extension": ".py",
40 | "mimetype": "text/x-python",
41 | "name": "python",
42 | "nbconvert_exporter": "python",
43 | "pygments_lexer": "ipython2",
44 | "version": "2.7.6"
45 | }
46 | },
47 | "nbformat": 4,
48 | "nbformat_minor": 0
49 | }
50 |
--------------------------------------------------------------------------------
/reminder.txt:
--------------------------------------------------------------------------------
1 | Right click on the rocksetta???.sh files and select run to see the examples working
2 |
3 | You can always run any of the python files the normal way by opening a terminal and entering
4 |
5 | source ~/virtual-tf/bin/activate
6 |
7 |
8 | then enter
9 | python filename.py
10 |
11 | reminder to type deactivate to stop running the virtual environment
12 |
13 | A few extra reminders
14 |
15 | type a few letters of a file and click the tab key to autocomplete
16 | cd .. goes up a level
17 | cd foldername goes into the folder
18 |
19 | ls or dir lists a folder's files
20 |
21 | the rocksetta bash files can also be run this way by typing
22 | bash rocksetta??.sh
23 |
24 |
25 |
26 |
27 | NOTE: The above manual method presently will have to be used for true TensorFlow examples as I have not made rocksetta files for those yet.
28 |
29 | Note: with the tensorflow examples you may have to change the /tmp file path so that the examples work with tensorboard. I use this file path, since the normal /tmp is invisible to the easy cloud9 directory structure (it still works using the manual method)
30 |
31 | /home/ubuntu/workspace/tmp5
32 |
33 |
34 |
35 |
36 | P.S. Always close your run windows, as some of them may have processes still running that then stop new processes from running.
37 |
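38 | For example, a complete manual run of one of the bcomposes examples (the same command the a02 script uses) looks like:
39 |
40 |   cd /home/ubuntu/workspace/bcomposes-examples
41 |   source ~/virtual-tf/bin/activate
42 |   python softmax.py --train simdata/linear_data_train.csv --test simdata/linear_data_eval.csv
43 |   deactivate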
--------------------------------------------------------------------------------
/old/setup05.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # only need to run this script with the command (do not type the #)
4 | # bash setup.sh
5 |
6 |
7 | echo "First checking the Python version"
8 | python --version
9 |
10 | echo "--------------------------------------------------------------"
11 | echo ". "
12 |
13 |
14 | echo "Now checking if pip is installed"
15 |
16 | pip list
17 |
18 | echo "--------------------------------------------------------------"
19 | echo ". "
20 |
21 |
22 | echo "next list this directory"
23 | pwd
24 | ls -lah
25 |
26 | echo "--------------------------------------------------------------"
27 | echo "Some other useful commands are cd cd .. dir ls pwd "
28 | echo "."
29 |
30 |
31 |
32 |
33 | echo "Installing PIP and Virtual Environment"
34 |
35 | sudo apt-get install python-pip python-dev python-virtualenv
36 |
37 | echo "--------------------------------------------------------------"
38 | echo ". "
39 |
40 | echo "make the tensorflow environment"
41 |
42 | virtualenv --system-site-packages ~/tensorflow
43 |
44 | echo "--------------------------------------------------------------"
45 | echo ". "
46 |
47 |
48 | echo "Activate the environment; use deactivate to get your cursor back"
49 | source ~/tensorflow/bin/activate
50 |
51 |
52 | echo "--------------------------------------------------------------"
53 | echo ". "
54 |
55 | echo "Now install TensorFlow into the environment"
56 |
57 | pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
58 |
59 | echo "--------------------------------------------------------------"
60 | echo ". "
61 |
62 |
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/output_curve_hidden_nodes.txt:
--------------------------------------------------------------------------------
1 | 1,0.52
2 | 1,0.67
3 | 1,0.55
4 | 1,0.48
5 | 1,0.64
6 | 1,0.52
7 | 1,0.53
8 | 1,0.52
9 | 1,0.58
10 | 1,0.51
11 | 2,0.79
12 | 2,0.88
13 | 2,0.77
14 | 2,0.82
15 | 2,0.81
16 | 2,0.74
17 | 2,0.61
18 | 2,0.81
19 | 2,0.55
20 | 2,0.85
21 | 3,0.84
22 | 3,0.83
23 | 3,0.78
24 | 3,0.98
25 | 3,0.97
26 | 3,0.95
27 | 3,0.74
28 | 3,0.76
29 | 3,0.99
30 | 3,0.88
31 | 4,0.96
32 | 4,0.94
33 | 4,0.87
34 | 4,0.84
35 | 4,0.75
36 | 4,0.97
37 | 4,0.91
38 | 4,0.98
39 | 4,0.89
40 | 4,1.0
41 | 5,0.88
42 | 5,0.93
43 | 5,0.99
44 | 5,0.97
45 | 5,0.96
46 | 5,1.0
47 | 5,0.77
48 | 5,0.94
49 | 5,0.94
50 | 5,0.8
51 | 6,0.99
52 | 6,0.98
53 | 6,1.0
54 | 6,0.97
55 | 6,0.98
56 | 6,0.99
57 | 6,0.89
58 | 6,0.9
59 | 6,0.96
60 | 6,0.93
61 | 7,1.0
62 | 7,0.99
63 | 7,0.96
64 | 7,0.98
65 | 7,1.0
66 | 7,1.0
67 | 7,0.96
68 | 7,1.0
69 | 7,0.96
70 | 7,0.98
71 | 8,1.0
72 | 8,0.97
73 | 8,1.0
74 | 8,0.99
75 | 8,0.96
76 | 8,0.99
77 | 8,0.96
78 | 8,1.0
79 | 8,0.93
80 | 8,0.99
81 | 9,1.0
82 | 9,1.0
83 | 9,0.98
84 | 9,0.95
85 | 9,0.94
86 | 9,1.0
87 | 9,1.0
88 | 9,0.93
89 | 9,1.0
90 | 9,1.0
91 | 10,0.97
92 | 10,0.99
93 | 10,1.0
94 | 10,1.0
95 | 10,1.0
96 | 10,0.97
97 | 10,0.92
98 | 10,1.0
99 | 10,1.0
100 | 10,1.0
101 | 15,1.0
102 | 15,0.99
103 | 15,0.98
104 | 15,0.99
105 | 15,1.0
106 | 15,1.0
107 | 15,0.99
108 | 15,1.0
109 | 15,1.0
110 | 15,1.0
111 | 14,1.0
112 | 14,0.99
113 | 14,1.0
114 | 14,0.98
115 | 14,1.0
116 | 14,0.99
117 | 14,1.0
118 | 14,1.0
119 | 14,1.0
120 | 14,1.0
121 | 13,0.98
122 | 13,1.0
123 | 13,1.0
124 | 13,1.0
125 | 13,1.0
126 | 13,1.0
127 | 13,0.99
128 | 13,0.97
129 | 13,1.0
130 | 13,0.97
131 | 12,0.98
132 | 12,0.99
133 | 12,0.99
134 | 12,1.0
135 | 12,1.0
136 | 12,1.0
137 | 12,1.0
138 | 12,0.99
139 | 12,0.96
140 | 12,1.0
141 | 11,1.0
142 | 11,0.98
143 | 11,0.92
144 | 11,0.99
145 | 11,1.0
146 | 11,0.99
147 | 11,0.95
148 | 11,0.99
149 | 11,1.0
150 | 11,1.0
151 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/nearest_neighbor.py:
--------------------------------------------------------------------------------
1 | '''
2 | A nearest neighbor learning algorithm example using TensorFlow library.
3 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
4 |
5 | Author: Aymeric Damien
6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
7 | '''
8 |
9 | import numpy as np
10 | import tensorflow as tf
11 |
12 | # Import MINST data
13 | import input_data
14 | mnist = input_data.read_data_sets("/home/ubuntu/workspace/tmp5/data/", one_hot=True)
15 |
16 | # In this example, we limit mnist data
17 | Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)
18 | Xte, Yte = mnist.test.next_batch(200) #200 for testing
19 |
20 | # Reshape images to 1D
21 | Xtr = np.reshape(Xtr, newshape=(-1, 28*28))
22 | Xte = np.reshape(Xte, newshape=(-1, 28*28))
23 |
24 | # tf Graph Input
25 | xtr = tf.placeholder("float", [None, 784])
26 | xte = tf.placeholder("float", [784])
27 |
28 | # Nearest Neighbor calculation using L1 Distance
29 | # Calculate L1 Distance
30 | distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.neg(xte))), reduction_indices=1)
31 | # Predict: Get min distance index (Nearest neighbor)
32 | pred = tf.arg_min(distance, 0)
33 |
34 | accuracy = 0.
35 |
36 | # Initializing the variables
37 | init = tf.initialize_all_variables()
38 |
39 | # Launch the graph
40 | with tf.Session() as sess:
41 | sess.run(init)
42 |
43 | # loop over test data
44 | for i in range(len(Xte)):
45 | # Get nearest neighbor
46 | nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i,:]})
47 | # Get nearest neighbor class label and compare it to its true label
48 | print "Test", i, "Prediction:", np.argmax(Ytr[nn_index]), "True Class:", np.argmax(Yte[i])
49 | # Calculate accuracy
50 | if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
51 | accuracy += 1./len(Xte)
52 | print "Done!"
53 | print "Accuracy:", accuracy
54 |
55 |
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/generate_linear_data.R:
--------------------------------------------------------------------------------
1 | ### 10/29/2015
2 | ### Simulate a two-class linearly separable classification problem.
3 | ### Label 0 is the "negative" class.
4 | ### Label 1 is the "positive" class.
5 | ### Author: Jason Baldridge
6 |
7 | # Create a matrix given a label, the class means of some number of
8 | # dimensions, the number of items, and the standard deviation. Values
9 | # are sampled normally according to the mean and stdev for each
10 | # column.
11 | create_matrix = function(label, mu, n, dev=.1) {
12 | d = length(mu)
13 | x = t(matrix(rnorm(n*d, mu, dev), ncol=n))
14 | cbind(rep(label,n),x)
15 | }
16 |
17 | # Num input dimensions (the "features").
18 | numDimensions = 2
19 |
20 | # Sample the means for the dimensions for a positive class.
21 | #pos = runif(numDimensions,min=0,max=1)
22 | pos = c(.7,.5) # Use a fixed 2-dimensional center.
23 |
24 | # Sample the means for the dimensions for a negative class.
25 | #neg = runif(numDimensions,min=0,max=1)
26 | neg = c(.3,.1) # Use a fixed 2-dimensional center.
27 |
28 | # Create training data.
29 | numTraining = 500
30 | trainDev = .1
31 | training_data = as.matrix(rbind(create_matrix(1,pos,numTraining,trainDev),create_matrix(0,neg,numTraining,trainDev)))
32 | shuffled_training_data = training_data[sample(nrow(training_data)),]
33 | write.table(shuffled_training_data,file="linear_data_train.csv",row.names=FALSE,col.names=FALSE,quote=FALSE,sep=",")
34 |
35 | # Create eval data. Possibly make the stdev bigger to make it a bit more interesting.
36 | numEval = 100
37 | evalDev = .1
38 | eval_data = as.matrix(rbind(create_matrix(1,pos,numEval,evalDev),create_matrix(0,neg,numEval,evalDev)))
39 | shuffled_eval_data = eval_data[sample(nrow(eval_data)),]
40 | write.table(shuffled_eval_data,file="linear_data_eval.csv",row.names=FALSE,col.names=FALSE,quote=FALSE,sep=",")
41 |
42 | #Plot the training items, if desired.
43 | #colnames(training_data) = c("label","x","y")
44 | #plot(training_data[,c("x","y")],pch=21,bg=c("orange","blue")[training_data[,"label"]+1])
45 |
46 |
--------------------------------------------------------------------------------
/old/setup06.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # only need to run this script with the command (do not type the #)
4 | # bash setup.sh
5 |
6 |
7 | echo "First checking the Python version"
8 | python --version
9 |
10 | echo "--------------------------------------------------------------"
11 | echo ". "
12 |
13 |
14 | echo "Now checking if pip is installed"
15 |
16 | pip list
17 |
18 | echo "--------------------------------------------------------------"
19 | echo ". "
20 |
21 |
22 | echo "next list this directory"
23 | pwd
24 | ls -lah
25 |
26 | echo "--------------------------------------------------------------"
27 | echo "Some other useful commands are cd cd .. dir ls pwd "
28 | echo "."
29 |
30 |
31 |
32 |
33 | echo "Installing PIP and Virtual Environment"
34 |
35 | sudo apt-get install python-pip python-dev python-virtualenv
36 |
37 | echo "--------------------------------------------------------------"
38 | echo ". "
39 |
40 | echo "make the tensorflow environment"
41 |
42 | virtualenv --system-site-packages ~/tensorflow
43 |
44 | echo "--------------------------------------------------------------"
45 | echo ". "
46 |
47 |
48 | echo "Activate the environment; use deactivate to get your cursor back"
49 | source ~/tensorflow/bin/activate
50 |
51 |
52 | echo "--------------------------------------------------------------"
53 | echo ". "
54 |
55 | echo "Now install TensorFlow into the environment"
56 |
57 | pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
58 |
59 | echo "--------------------------------------------------------------"
60 | echo ". "
61 |
62 | echo "Unfortunately on cloud 9 Pip hides the TensorFlow folder so let's clone it for our use"
63 | echo "Kind of wasteful but it isn't on our computer anyway"
64 |
65 | deactivate
66 |
67 | git clone --recurse-submodules https://github.com/tensorflow/tensorflow
68 |
69 |
70 | echo "--------------------------------------------------------------"
71 | echo ". "
72 |
73 | echo "Now how to setup tensorBoard????"
74 |
75 |
76 | echo "--------------------------------------------------------------"
77 | echo ". "
78 |
79 | echo "--------------------------------------------------------------"
80 | echo ". You can close this window by clicking the close x"
81 |
82 |
83 |
84 |
85 |
86 |
87 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/multigpu_basics.py:
--------------------------------------------------------------------------------
1 | #Multi GPU Basic example
2 | '''
3 | This tutorial requires your machine to have 2 GPUs
4 | "/cpu:0": The CPU of your machine.
5 | "/gpu:0": The first GPU of your machine
6 | "/gpu:1": The second GPU of your machine
7 | '''
8 |
9 | import numpy as np
10 | import tensorflow as tf
11 | import datetime
12 |
13 | #Processing Units logs
14 | log_device_placement = True
15 |
16 | #num of multiplications to perform
17 | n = 10
18 |
19 | '''
20 | Example: compute A^n + B^n on 2 GPUs
21 | Results on 8 cores with 2 GTX-980:
22 | * Single GPU computation time: 0:00:11.277449
23 | * Multi GPU computation time: 0:00:07.131701
24 | '''
25 | #Create random large matrix
26 | A = np.random.rand(10000, 10000).astype('float32')
27 | B = np.random.rand(10000, 10000).astype('float32')
28 |
29 | # Creates a graph to store results
30 | c1 = []
31 | c2 = []
32 |
33 | def matpow(M, n):
34 | if n < 1: #Abstract cases where n < 1
35 | return M
36 | else:
37 | return tf.matmul(M, matpow(M, n-1))
38 |
39 | '''
40 | Single GPU computing
41 | '''
42 | with tf.device('/gpu:0'):
43 | a = tf.constant(A)
44 | b = tf.constant(B)
45 | #compute A^n and B^n and store results in c1
46 | c1.append(matpow(a, n))
47 | c1.append(matpow(b, n))
48 |
49 | with tf.device('/cpu:0'):
50 | sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n
51 |
52 | t1_1 = datetime.datetime.now()
53 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
54 | # Runs the op.
55 | sess.run(sum)
56 | t2_1 = datetime.datetime.now()
57 |
58 |
59 | '''
60 | Multi GPU computing
61 | '''
62 | #GPU:0 computes A^n
63 | with tf.device('/gpu:0'):
64 | #compute A^n and store result in c2
65 | a = tf.constant(A)
66 | c2.append(matpow(a, n))
67 |
68 | #GPU:1 computes B^n
69 | with tf.device('/gpu:1'):
70 | #compute B^n and store result in c2
71 | b = tf.constant(B)
72 | c2.append(matpow(b, n))
73 |
74 | with tf.device('/cpu:0'):
75 | sum = tf.add_n(c2) #Addition of all elements in c2, i.e. A^n + B^n
76 |
77 | t1_2 = datetime.datetime.now()
78 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
79 | # Runs the op.
80 | sess.run(sum)
81 | t2_2 = datetime.datetime.now()
82 |
83 |
84 | print "Single GPU computation time: " + str(t2_1-t1_1)
85 | print "Multi GPU computation time: " + str(t2_2-t1_2)
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/logistic_regression.py:
--------------------------------------------------------------------------------
1 | '''
2 | A logistic regression learning algorithm example using TensorFlow library.
3 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
4 |
5 | Author: Aymeric Damien
6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
7 | '''
8 |
9 | # Import MINST data
10 | import input_data
11 | mnist = input_data.read_data_sets("/home/ubuntu/workspace/tmp5/data/", one_hot=True)
12 |
13 | import tensorflow as tf
14 |
15 | # Parameters
16 | learning_rate = 0.01
17 | training_epochs = 25
18 | batch_size = 100
19 | display_step = 1
20 |
21 | # tf Graph Input
22 | x = tf.placeholder("float", [None, 784]) # mnist data image of shape 28*28=784
23 | y = tf.placeholder("float", [None, 10]) # 0-9 digits recognition => 10 classes
24 |
25 | # Create model
26 |
27 | # Set model weights
28 | W = tf.Variable(tf.zeros([784, 10]))
29 | b = tf.Variable(tf.zeros([10]))
30 |
31 | # Construct model
32 | activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
33 |
34 | # Minimize error using cross entropy
35 | cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
36 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
37 |
38 | # Initializing the variables
39 | init = tf.initialize_all_variables()
40 |
41 | # Launch the graph
42 | with tf.Session() as sess:
43 | sess.run(init)
44 |
45 | # Training cycle
46 | for epoch in range(training_epochs):
47 | avg_cost = 0.
48 | total_batch = int(mnist.train.num_examples/batch_size)
49 | # Loop over all batches
50 | for i in range(total_batch):
51 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
52 | # Fit training using batch data
53 | sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
54 | # Compute average loss
55 | avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
56 | # Display logs per epoch step
57 | if epoch % display_step == 0:
58 | print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
59 |
60 | print "Optimization Finished!"
61 |
62 | # Test model
63 | correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
64 | # Calculate accuracy
65 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
66 | print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
67 |
--------------------------------------------------------------------------------
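The graph above is plain softmax regression: activation = softmax(xW + b) with a summed cross-entropy cost. A minimal NumPy sketch of the same forward pass and loss, on a hypothetical 4-image batch (not part of the original example), makes those two TF lines concrete:

``` python
import numpy as np

def softmax(z):
    z = z - z.max(axis=1, keepdims=True)   # subtract row max for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

rng = np.random.RandomState(0)
x = rng.rand(4, 784).astype('float32')     # hypothetical batch of 4 flattened images
y = np.eye(10)[[3, 1, 4, 1]]               # one-hot labels for the batch

W = np.zeros((784, 10), dtype='float32')   # same zero initialization as the TF variables
b = np.zeros(10, dtype='float32')

activation = softmax(x.dot(W) + b)          # like tf.nn.softmax(tf.matmul(x, W) + b)
cost = -np.sum(y * np.log(activation))      # like -tf.reduce_sum(y * tf.log(activation))
print(cost)   # with zero weights each class gets p = 0.1, so cost = 4 * ln(10) ≈ 9.21
```

Gradient descent then nudges W and b to lower this cost one mini-batch at a time, which is what the training loop above does with sess.run(optimizer, ...).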
/bcomposes-examples/simdata/generate_saturn_data.R:
--------------------------------------------------------------------------------
1 | ### 10/29/2015
2 | ### Simulate a two-class "Saturn" classification problem
3 | ### Label 0 is the planet
4 | ### Label 1 is the ring
5 | ### Author: James Scott
6 |
7 | # @n: number of points
8 | # @frac: fraction of points to simulate from class 1
9 | # @d: Euclidean dimension
10 | # @radius: a 2-vector of radii for class 0 and class 1
11 | sim_saturn_data = function(n, d, radius, sigma, frac = 0.5) {
12 |
13 | # Argument checking
14 | stopifnot(d >= 2, length(radius) == 2)
15 |
16 | # We work in radial coordinates.
17 | # Uniformly sample d-1 angular coordinates for each point
18 | phi = matrix(runif(n*(d-1), 0, 2*pi), nrow=n, ncol=d-1)
19 |
20 | # Sample a class indicator for each simulated data point
21 | gamma = rbinom(n, 1, frac)
22 | n1 = sum(gamma)
23 |
24 | # Simulate a radial distance for each point
25 | r = rep(0, n)
26 | r[gamma==0] = runif(n-n1, 0, radius[1])
27 | r[gamma==1] = rnorm(n1, radius[2], sigma)
28 |
29 | # convert to Euclidean coordinates
30 | x = matrix(0, nrow=n, ncol=d)
31 | x[,1] = r*cos(phi[,1])
32 | x[,d] = r*apply(sin(phi), 1, prod)
33 | if(d >= 3) {
34 | for(j in 2:(d-1)) {
35 | prod_of_sines = apply(matrix(sin(phi[,1:(j-1)]), nrow=n), 1, prod)
36 | x[,j] = r*prod_of_sines*cos(phi[,j])
37 | }
38 | }
39 |
40 | list(labels = gamma, features = x)
41 | }
42 |
43 | ### Testing: simulate some data and plot it.
44 | mycols = c('blue','orange')
45 |
46 | # 2d example
47 | #out = sim_saturn_data(1000, 2, c(3, 10), sigma = 1)
48 | #plot(out$features,pch=21,bg=mycols[out$labels+1],xlab="x",ylab="y")
49 |
50 | # 3d example (need rgl installed for the visualization)
51 | #out = sim_saturn_data(1000, 3, c(3, 10), sigma = 1.0)
52 | #rgl::plot3d(out$features, col=mycols[out$labels+1],xlab="x",ylab="y",zlab="z")
53 |
54 | ### Actually create simulated data.
55 | numDimensions = 2
56 |
57 | # Create training data.
58 | numTraining = 500
59 | training_out = sim_saturn_data(numTraining, numDimensions, c(5, 10), sigma = 1.0)
60 | training_data = cbind(training_out$labels,training_out$features)
61 | write.table(training_data,file="saturn_data_train.csv",row.names=FALSE,col.names=FALSE,quote=FALSE,sep=",")
62 |
63 | # Create eval data. Perhaps make sigma bigger to make it a bit more interesting.
64 | numEval = 100
65 | eval_out = sim_saturn_data(numEval, numDimensions, c(5, 10), sigma = 1.0)
66 | eval_data = cbind(eval_out$labels,eval_out$features)
67 | write.table(eval_data,file="saturn_data_eval.csv",row.names=FALSE,col.names=FALSE,quote=FALSE,sep=",")
68 |
69 |
--------------------------------------------------------------------------------
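The R function works in radial coordinates: planet points (label 0) get a radius drawn uniformly inside radius[1], ring points (label 1) get a radius drawn from a normal centred on radius[2], and both are mapped back to Euclidean coordinates. A minimal Python sketch of the 2-D case, with illustrative names that are not part of the repository:

``` python
import numpy as np

def sim_saturn_2d(n, radius=(5.0, 10.0), sigma=1.0, frac=0.5, seed=0):
    """Two-class 'Saturn' data: label 0 is the planet, label 1 is the ring."""
    rng = np.random.RandomState(seed)
    phi = rng.uniform(0.0, 2.0 * np.pi, n)          # angular coordinate per point
    labels = rng.binomial(1, frac, n)               # class indicator per point
    r = np.where(labels == 0,
                 rng.uniform(0.0, radius[0], n),    # planet: uniform radius inside the disc
                 rng.normal(radius[1], sigma, n))   # ring: normal around the outer radius
    features = np.column_stack([r * np.cos(phi), r * np.sin(phi)])
    return labels, features

labels, features = sim_saturn_2d(500)
print(labels.shape, features.shape)   # (500,) (500, 2)
```

Writing the label in the first column followed by the features would match the saturn_data_train.csv / saturn_data_eval.csv layout produced by the R script.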
/aymericdamien-Examples/examples/multigpu_basics.py:
--------------------------------------------------------------------------------
1 | '''
2 | Basic Multi GPU computation example using TensorFlow library.
3 |
4 | Author: Aymeric Damien
5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
6 | '''
7 |
8 | '''
9 | This tutorial requires your machine to have 2 GPUs
10 | "/cpu:0": The CPU of your machine.
11 | "/gpu:0": The first GPU of your machine
12 | "/gpu:1": The second GPU of your machine
13 | '''
14 |
15 | import numpy as np
16 | import tensorflow as tf
17 | import datetime
18 |
19 | #Processing Units logs
20 | log_device_placement = True
21 |
22 | #num of multiplications to perform
23 | n = 10
24 |
25 | '''
26 | Example: compute A^n + B^n on 2 GPUs
27 | Results on 8 cores with 2 GTX-980:
28 | * Single GPU computation time: 0:00:11.277449
29 | * Multi GPU computation time: 0:00:07.131701
30 | '''
31 | #Create random large matrix
32 | A = np.random.rand(10000, 10000).astype('float32')
33 | B = np.random.rand(10000, 10000).astype('float32')
34 |
35 | # Creates a graph to store results
36 | c1 = []
37 | c2 = []
38 |
39 | def matpow(M, n):
40 |     if n < 1: # Base case: return M once n drops below 1
41 | return M
42 | else:
43 | return tf.matmul(M, matpow(M, n-1))
44 |
45 | '''
46 | Single GPU computing
47 | '''
48 | with tf.device('/gpu:0'):
49 | a = tf.constant(A)
50 | b = tf.constant(B)
51 | #compute A^n and B^n and store results in c1
52 | c1.append(matpow(a, n))
53 | c1.append(matpow(b, n))
54 |
55 | with tf.device('/cpu:0'):
56 | sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n
57 |
58 | t1_1 = datetime.datetime.now()
59 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
60 | # Runs the op.
61 | sess.run(sum)
62 | t2_1 = datetime.datetime.now()
63 |
64 |
65 | '''
66 | Multi GPU computing
67 | '''
68 | #GPU:0 computes A^n
69 | with tf.device('/gpu:0'):
70 | #compute A^n and store result in c2
71 | a = tf.constant(A)
72 | c2.append(matpow(a, n))
73 |
74 | #GPU:1 computes B^n
75 | with tf.device('/gpu:1'):
76 | #compute B^n and store result in c2
77 | b = tf.constant(B)
78 | c2.append(matpow(b, n))
79 |
80 | with tf.device('/cpu:0'):
81 | sum = tf.add_n(c2) #Addition of all elements in c2, i.e. A^n + B^n
82 |
83 | t1_2 = datetime.datetime.now()
84 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
85 | # Runs the op.
86 | sess.run(sum)
87 | t2_2 = datetime.datetime.now()
88 |
89 |
90 | print "Single GPU computation time: " + str(t2_1-t1_1)
91 | print "Multi GPU computation time: " + str(t2_2-t1_2)
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/basic_operations.py:
--------------------------------------------------------------------------------
1 | '''
2 | Basic Operations example using TensorFlow library.
3 |
4 | Author: Aymeric Damien
5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
6 | '''
7 |
8 | import tensorflow as tf
9 |
10 | # Basic constant operations
11 | # The value returned by the constructor represents the output
12 | # of the Constant op.
13 | a = tf.constant(2)
14 | b = tf.constant(3)
15 |
16 | # Launch the default graph.
17 | with tf.Session() as sess:
18 | print "a=2, b=3"
19 | print "Addition with constants: %i" % sess.run(a+b)
20 | print "Multiplication with constants: %i" % sess.run(a*b)
21 |
22 | # Basic Operations with variable as graph input
23 | # The value returned by the constructor represents the output
24 | # of the Variable op. (define as input when running session)
25 | # tf Graph input
26 | a = tf.placeholder(tf.types.int16)
27 | b = tf.placeholder(tf.types.int16)
28 |
29 | # Define some operations
30 | add = tf.add(a, b)
31 | mul = tf.mul(a, b)
32 |
33 | # Launch the default graph.
34 | with tf.Session() as sess:
35 | # Run every operation with variable input
36 | print "Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3})
37 | print "Multiplication with variables: %i" % sess.run(mul, feed_dict={a: 2, b: 3})
38 |
39 |
40 | # ----------------
41 | # More in details:
42 | # Matrix Multiplication from TensorFlow official tutorial
43 |
44 | # Create a Constant op that produces a 1x2 matrix. The op is
45 | # added as a node to the default graph.
46 | #
47 | # The value returned by the constructor represents the output
48 | # of the Constant op.
49 | matrix1 = tf.constant([[3., 3.]])
50 |
51 | # Create another Constant that produces a 2x1 matrix.
52 | matrix2 = tf.constant([[2.],[2.]])
53 |
54 | # Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.
55 | # The returned value, 'product', represents the result of the matrix
56 | # multiplication.
57 | product = tf.matmul(matrix1, matrix2)
58 |
59 | # To run the matmul op we call the session 'run()' method, passing 'product'
60 | # which represents the output of the matmul op. This indicates to the call
61 | # that we want to get the output of the matmul op back.
62 | #
63 | # All inputs needed by the op are run automatically by the session. They
64 | # typically are run in parallel.
65 | #
66 | # The call 'run(product)' thus causes the execution of three ops in the
67 | # graph: the two constants and matmul.
68 | #
69 | # The output of the op is returned in 'result' as a numpy `ndarray` object.
70 | with tf.Session() as sess:
71 | result = sess.run(product)
72 | print result
73 | # ==> [[ 12.]]
74 |
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # only need to run this script with the command (do not type the #)
4 | # bash setup.sh
5 |
6 |
7 | echo "First checking the Python version"
8 | python --version
9 |
10 | echo "--------------------------------------------------------------"
11 | echo ". "
12 |
13 |
14 | echo "Now checking is pip is installed"
15 |
16 | pip list
17 |
18 | echo "--------------------------------------------------------------"
19 | echo ". "
20 |
21 |
22 | echo "next list this directory"
23 | pwd
24 | ls -lah
25 |
26 | echo "--------------------------------------------------------------"
27 | echo "Some other useful commands are cd cd .. dir ls pwd "
28 | echo "."
29 |
30 |
31 |
32 |
33 | echo "Installing PIP and Virtual Environment"
34 |
35 | sudo apt-get install python-pip python-dev python-virtualenv
36 |
37 | echo "--------------------------------------------------------------"
38 | echo ". "
39 |
40 | echo "make the tensorflow environment"
41 |
42 | virtualenv --system-site-packages ~/virtual-tf
43 |
44 | echo "--------------------------------------------------------------"
45 | echo ". "
46 |
47 |
48 | echo "Activate the environemtn use deactivate to get your cursor back"
49 | source ~/virtual-tf/bin/activate
50 |
51 |
52 | echo "--------------------------------------------------------------"
53 | echo ". "
54 |
55 | echo "Now intall tensorFlow into the enviroment"
56 |
57 | pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
58 |
59 | echo "--------------------------------------------------------------"
60 | echo ". "
61 |
62 | echo "Unfortunately on cloud 9 Pip hides the TensorFlow folder so lets clone it for our use"
63 | echo "Kind of wasteful but it isn't on our computer anyway"
64 |
65 | deactivate
66 |
67 | git clone --recurse-submodules https://github.com/tensorflow/tensorflow
68 |
69 |
70 | echo "--------------------------------------------------------------"
71 | echo ". "
72 |
73 |
74 |
75 |
76 | echo "--------------------------------------------------------------"
77 | echo ". "
78 |
79 | echo "--------------------------------------------------------------"
80 | echo "You can close this window by clicking the close x"
81 | echo "Right click rocksetta files and select run "
82 | echo "you can also run them manually by entering the virtual environment"
83 | echo "source ~/virtual-tf/bin/activate "
84 | echo "Then bash rocksetta????.sh"
85 | echo "deactivate to get out of the virtual enviroment"
86 | echo "-----------------------------------------"
87 | echo ""
88 | echo "If you have made it this far the installation has finished"
89 | echo "SETUP.SH HAS FINISHED INSTALLING. You can close this terminal window by clicking the x"
90 | echo "-----------------------------------------"
91 | echo ""
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/graph_visualization.py:
--------------------------------------------------------------------------------
1 | '''
2 | Graph Visualization with TensorFlow.
3 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
4 |
5 | Author: Aymeric Damien
6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
7 | '''
8 |
9 | import tensorflow as tf
10 | import numpy
11 |
12 | # Import MNIST data
13 | import input_data
14 | mnist = input_data.read_data_sets("/home/ubuntu/workspace/tmp5/data/", one_hot=True)
15 |
16 | # Use Logistic Regression from our previous example
17 |
18 | # Parameters
19 | learning_rate = 0.01
20 | training_epochs = 10
21 | batch_size = 100
22 | display_step = 1
23 |
24 | # tf Graph Input
25 | x = tf.placeholder("float", [None, 784], name='x') # mnist data image of shape 28*28=784
26 | y = tf.placeholder("float", [None, 10], name='y') # 0-9 digits recognition => 10 classes
27 |
28 | # Create model
29 |
30 | # Set model weights
31 | W = tf.Variable(tf.zeros([784, 10]), name="weights")
32 | b = tf.Variable(tf.zeros([10]), name="bias")
33 |
34 | # Construct model
35 | activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
36 |
37 | # Minimize error using cross entropy
38 | cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
39 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
40 |
41 | # Initializing the variables
42 | init = tf.initialize_all_variables()
43 |
44 | # Launch the graph
45 | with tf.Session() as sess:
46 | sess.run(init)
47 |
48 | # Set logs writer into folder /tmp/tensorflow_logs
49 | summary_writer = tf.train.SummaryWriter('/home/ubuntu/workspace/tmp5/tensorflow_logs', graph_def=sess.graph_def)
50 |
51 | # Training cycle
52 | for epoch in range(training_epochs):
53 | avg_cost = 0.
54 | total_batch = int(mnist.train.num_examples/batch_size)
55 | # Loop over all batches
56 | for i in range(total_batch):
57 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
58 | # Fit training using batch data
59 | sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
60 | # Compute average loss
61 | avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
62 | # Display logs per epoch step
63 | if epoch % display_step == 0:
64 | print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
65 |
66 | print "Optimization Finished!"
67 |
68 | # Test model
69 | correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
70 | # Calculate accuracy
71 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
72 | print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
73 |
74 | '''
75 | Run from the command line: tensorboard --logdir=/home/ubuntu/workspace/tmp5/tensorflow_logs
76 | Open http://localhost:6006/ in your web browser
77 | '''
78 |
79 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/linear_regression.py:
--------------------------------------------------------------------------------
1 | '''
2 | A linear regression learning algorithm example using TensorFlow library.
3 |
4 | Author: Aymeric Damien
5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
6 | '''
7 |
8 | import tensorflow as tf
9 | import numpy
10 | import matplotlib.pyplot as plt
11 | rng = numpy.random
12 |
13 | # Parameters
14 | learning_rate = 0.01
15 | training_epochs = 2000
16 | display_step = 50
17 |
18 | # Training Data
19 | train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
20 | train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
21 | n_samples = train_X.shape[0]
22 |
23 | # tf Graph Input
24 | X = tf.placeholder("float")
25 | Y = tf.placeholder("float")
26 |
27 | # Create Model
28 |
29 | # Set model weights
30 | W = tf.Variable(rng.randn(), name="weight")
31 | b = tf.Variable(rng.randn(), name="bias")
32 |
33 | # Construct a linear model
34 | activation = tf.add(tf.mul(X, W), b)
35 |
36 | # Minimize the squared errors
37 | cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
38 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
39 |
40 | # Initializing the variables
41 | init = tf.initialize_all_variables()
42 |
43 | # Launch the graph
44 | with tf.Session() as sess:
45 | sess.run(init)
46 |
47 | # Fit all training data
48 | for epoch in range(training_epochs):
49 | for (x, y) in zip(train_X, train_Y):
50 | sess.run(optimizer, feed_dict={X: x, Y: y})
51 |
52 | #Display logs per epoch step
53 | if epoch % display_step == 0:
54 | print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
55 | "W=", sess.run(W), "b=", sess.run(b)
56 |
57 | print "Optimization Finished!"
58 | training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
59 | print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'
60 |
61 |
62 | # Testing example, as requested (Issue #2)
63 | test_X = numpy.asarray([6.83,4.668,8.9,7.91,5.7,8.7,3.1,2.1])
64 | test_Y = numpy.asarray([1.84,2.273,3.2,2.831,2.92,3.24,1.35,1.03])
65 |
66 | print "Testing... (L2 loss Comparison)"
67 | testing_cost = sess.run(tf.reduce_sum(tf.pow(activation-Y, 2))/(2*test_X.shape[0]),
68 | feed_dict={X: test_X, Y: test_Y}) #same function as cost above
69 | print "Testing cost=", testing_cost
70 | print "Absolute l2 loss difference:", abs(training_cost - testing_cost)
71 |
72 | #Graphic display
73 | plt.plot(train_X, train_Y, 'ro', label='Original data')
74 | plt.plot(test_X, test_Y, 'bo', label='Testing data')
75 | plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
76 | plt.legend()
77 | plt.show()
--------------------------------------------------------------------------------
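Since the model above is just activation = W*X + b with an L2 cost, the values gradient descent converges toward can be sanity-checked against the closed-form least-squares fit on the same training data. A small NumPy sketch of that check (not part of the original example):

``` python
import numpy as np

train_X = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59,
                      2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53,
                      1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])

# ordinary least squares: solve for [W, b] in train_Y ≈ W * train_X + b
A = np.column_stack([train_X, np.ones_like(train_X)])
(W, b), *_ = np.linalg.lstsq(A, train_Y, rcond=None)

cost = np.sum((W * train_X + b - train_Y) ** 2) / (2 * len(train_X))  # same L2 cost as above
print("W =", W, "b =", b, "cost =", cost)
```

The W, b and training cost printed by the TensorFlow run after 2000 epochs should land close to these values.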
/old/future.txt:
--------------------------------------------------------------------------------
1 | pip install jupyter
2 | works but can't find path since double tensorFlow
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 | PYTHONUSERBASE=/opt/mysterypackage-1.0/python-deps pip install --user --upgrade numpy scipy
14 | ..to install the scipy and numpy package most recent versions into a directory which you can then include in your PYTHONPATH like so (using bash and for python 2.6 on CentOS 6 for this example):
15 |
16 | export PYTHONPATH=/opt/mysterypackage-1.0/python-deps/lib64/python2.6/site-packages:$PYTHONPATH
17 | export PATH=/opt/mysterypackage-1.0/python-deps/bin:$PATH
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 | http://www.motleytech.net/en/2015/11/08/jupyter-python-27-and-35/
40 |
41 | source env.sh
42 | pip install jupyter
43 | We will now configure ipython and jupyter to our liking...
44 |
45 | # create the ipython_config folder contents
46 | ipython locate
47 |
48 | # create a new config that we will edit soon
49 | jupyter notebook --generate-config
50 | We will configure the ip address, port, enable mathjax and password protect our jupyter notebook.
51 |
52 | Open jupyter_config/jupyter_notebook_config.py in an editor
53 |
54 | vim jupyter_config/jupyter_notebook_config.py
55 | and edit the following fields... (to generate a new password hash, run ipython and type in from notebook.auth import passwd; passwd(). Enter your new password twice and copy the resulting hash to the c.NotebookApp.password field.)
56 |
57 | # The IP address the notebook server will listen on.
58 | c.NotebookApp.ip = '0.0.0.0'
59 |
60 | # The port the notebook server will listen on.
61 | c.NotebookApp.port = 8008
62 |
63 | # The directory to use for notebooks and kernels.
64 | c.NotebookApp.notebook_dir = 'books'
65 |
66 | # When disabled, equations etc. will appear as their untransformed TeX source.
67 | c.NotebookApp.enable_mathjax = True
68 |
69 | # To generate, type in a python/IPython shell:
70 | #
71 | # from notebook.auth import passwd; passwd()
72 | #
73 | # The string should be of the form type:salt:hashed-password.
74 | c.NotebookApp.password = u'sha1:db4b151b2368:bc24e9d50c456b9c22131d53f009ab3c26a0bb9a'
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 | wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
89 |
90 | chmod a+x Miniconda3-latest-Linux-x86_64.sh
91 |
92 | run it
93 |
94 | conda create -n py3 python=3 ipython
95 |
96 |
97 | source activate py3
98 |
99 | pip freeze
100 |
101 | conda install ipython-notebook
102 |
103 |
104 |
105 |
106 | ipython notebook --ip=process.env.IP --port=process.env.PORT --no-browser
107 |
108 |
109 | ipython notebook --ip=0.0.0.0 --port=8080 --no-browser
110 |
111 |
112 | ipython notebook --ip=0.0.0.0 --port=8080
113 |
114 |
115 | sudo pip install ipython[notebook]
116 |
117 | sudo pip install ipython[jupyter]
118 |
119 |
120 | pip install --upgrade ipython[all]
121 |
122 | ipython notebook
123 |
124 |
125 | pip install pyzmq jinja2 tornado mistune jsonschema pygments terminado
126 |
127 |
128 |
129 | https://github.com/binder-project/binder
130 |
--------------------------------------------------------------------------------
/rocksetta-examples/tb-normal.py:
--------------------------------------------------------------------------------
1 | '''
2 | Loss Visualization with TensorFlow.
3 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
4 |
5 | Author: Aymeric Damien
6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
7 | '''
8 |
9 | import tensorflow as tf
10 | import numpy
11 |
12 | # Import MNIST data
13 | import input_data
14 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
15 |
16 | # Use Logistic Regression from our previous example
17 |
18 | # Parameters
19 | learning_rate = 0.01
20 | training_epochs = 10
21 | batch_size = 100
22 | display_step = 1
23 |
24 | # tf Graph Input
25 | x = tf.placeholder("float", [None, 784], name='x') # mnist data image of shape 28*28=784
26 | y = tf.placeholder("float", [None, 10], name='y') # 0-9 digits recognition => 10 classes
27 |
28 | # Create model
29 |
30 | # Set model weights
31 | W = tf.Variable(tf.zeros([784, 10]), name="weights")
32 | b = tf.Variable(tf.zeros([10]), name="bias")
33 |
34 | # Construct model
35 | activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
36 |
37 | # Minimize error using cross entropy
38 | cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
39 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
40 |
41 | # Initializing the variables
42 | init = tf.initialize_all_variables()
43 |
44 | # Create a summary to monitor cost function
45 | tf.scalar_summary("loss", cost)
46 |
47 | # Merge all summaries to a single operator
48 | merged_summary_op = tf.merge_all_summaries()
49 |
50 | # Launch the graph
51 | with tf.Session() as sess:
52 | sess.run(init)
53 |
54 |     # Set logs writer into folder /tmp/tensorflow_logs
55 | summary_writer = tf.train.SummaryWriter('/tmp/tensorflow_logs', graph_def=sess.graph_def)
56 |
57 | # Training cycle
58 | for epoch in range(training_epochs):
59 | avg_cost = 0.
60 | total_batch = int(mnist.train.num_examples/batch_size)
61 | # Loop over all batches
62 | for i in range(total_batch):
63 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
64 | # Fit training using batch data
65 | sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
66 | # Compute average loss
67 | avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
68 | # Write logs at every iteration
69 | summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
70 | summary_writer.add_summary(summary_str, epoch*total_batch + i)
71 | # Display logs per epoch step
72 | if epoch % display_step == 0:
73 | print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
74 |
75 | print "Optimization Finished!"
76 |
77 | # Test model
78 | correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
79 | # Calculate accuracy
80 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
81 | print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
82 |
83 | '''
84 | Run from the command line: tensorboard --logdir=/tmp/tensorflow_logs
85 | Open http://localhost:6006/ in your web browser
86 | '''
87 |
--------------------------------------------------------------------------------
/rocksetta-examples/tb-example.py:
--------------------------------------------------------------------------------
1 | '''
2 | Loss Visualization with TensorFlow.
3 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
4 |
5 | Author: Aymeric Damien
6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
7 | '''
8 |
9 | import tensorflow as tf
10 | import numpy
11 |
12 | # Import MNIST data
13 | import input_data
14 | mnist = input_data.read_data_sets("/home/ubuntu/workspace/tmp5/data/", one_hot=True)
15 |
16 | # Use Logistic Regression from our previous example
17 |
18 | # Parameters
19 | learning_rate = 0.01
20 | training_epochs = 10
21 | batch_size = 100
22 | display_step = 1
23 |
24 | # tf Graph Input
25 | x = tf.placeholder("float", [None, 784], name='x') # mnist data image of shape 28*28=784
26 | y = tf.placeholder("float", [None, 10], name='y') # 0-9 digits recognition => 10 classes
27 |
28 | # Create model
29 |
30 | # Set model weights
31 | W = tf.Variable(tf.zeros([784, 10]), name="weights")
32 | b = tf.Variable(tf.zeros([10]), name="bias")
33 |
34 | # Construct model
35 | activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
36 |
37 | # Minimize error using cross entropy
38 | cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
39 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
40 |
41 | # Initializing the variables
42 | init = tf.initialize_all_variables()
43 |
44 | # Create a summary to monitor cost function
45 | tf.scalar_summary("loss", cost)
46 |
47 | # Merge all summaries to a single operator
48 | merged_summary_op = tf.merge_all_summaries()
49 |
50 | # Launch the graph
51 | with tf.Session() as sess:
52 | sess.run(init)
53 |
54 | # Set logs writer into folder /home/ubuntu/workspace/tmp5/tensorflow_logs
55 | summary_writer = tf.train.SummaryWriter('/home/ubuntu/workspace/tmp5/tensorflow_logs', graph_def=sess.graph_def)
56 |
57 | # Training cycle
58 | for epoch in range(training_epochs):
59 | avg_cost = 0.
60 | total_batch = int(mnist.train.num_examples/batch_size)
61 | # Loop over all batches
62 | for i in range(total_batch):
63 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
64 | # Fit training using batch data
65 | sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
66 | # Compute average loss
67 | avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
68 | # Write logs at every iteration
69 | summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
70 | summary_writer.add_summary(summary_str, epoch*total_batch + i)
71 | # Display logs per epoch step
72 | if epoch % display_step == 0:
73 | print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
74 |
75 | print "Optimization Finished!"
76 |
77 | # Test model
78 | correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
79 | # Calculate accuracy
80 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
81 | print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
82 |
83 | '''
84 | Run from the command line: tensorboard --logdir=/home/ubuntu/workspace/tmp5/tensorflow_logs
85 | Open http://localhost:6006/ in your web browser
86 | '''
87 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/loss_visualization.py:
--------------------------------------------------------------------------------
1 | '''
2 | Loss Visualization with TensorFlow.
3 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
4 |
5 | Author: Aymeric Damien
6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
7 | '''
8 |
9 | import tensorflow as tf
10 | import numpy
11 |
12 | # Import MNIST data
13 | import input_data
14 | mnist = input_data.read_data_sets("/home/ubuntu/workspace/tmp5/data/", one_hot=True)
15 |
16 | # Use Logistic Regression from our previous example
17 |
18 | # Parameters
19 | learning_rate = 0.01
20 | training_epochs = 10
21 | batch_size = 100
22 | display_step = 1
23 |
24 | # tf Graph Input
25 | x = tf.placeholder("float", [None, 784], name='x') # mnist data image of shape 28*28=784
26 | y = tf.placeholder("float", [None, 10], name='y') # 0-9 digits recognition => 10 classes
27 |
28 | # Create model
29 |
30 | # Set model weights
31 | W = tf.Variable(tf.zeros([784, 10]), name="weights")
32 | b = tf.Variable(tf.zeros([10]), name="bias")
33 |
34 | # Construct model
35 | activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
36 |
37 | # Minimize error using cross entropy
38 | cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
39 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
40 |
41 | # Initializing the variables
42 | init = tf.initialize_all_variables()
43 |
44 | # Create a summary to monitor cost function
45 | tf.scalar_summary("loss", cost)
46 |
47 | # Merge all summaries to a single operator
48 | merged_summary_op = tf.merge_all_summaries()
49 |
50 | # Launch the graph
51 | with tf.Session() as sess:
52 | sess.run(init)
53 |
54 | # Set logs writer into folder /home/ubuntu/workspace/tmp5/tensorflow_logs
55 | summary_writer = tf.train.SummaryWriter('/home/ubuntu/workspace/tmp5/tensorflow_logs', graph_def=sess.graph_def)
56 |
57 | # Training cycle
58 | for epoch in range(training_epochs):
59 | avg_cost = 0.
60 | total_batch = int(mnist.train.num_examples/batch_size)
61 | # Loop over all batches
62 | for i in range(total_batch):
63 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
64 | # Fit training using batch data
65 | sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
66 | # Compute average loss
67 | avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
68 | # Write logs at every iteration
69 | summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
70 | summary_writer.add_summary(summary_str, epoch*total_batch + i)
71 | # Display logs per epoch step
72 | if epoch % display_step == 0:
73 | print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
74 |
75 | print "Optimization Finished!"
76 |
77 | # Test model
78 | correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
79 | # Calculate accuracy
80 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
81 | print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
82 |
83 | '''
84 | Run from the command line: tensorboard --logdir=/home/ubuntu/workspace/tmp5/tensorflow_logs
85 | Open http://localhost:6006/ in your web browser
86 | '''
87 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # easy-tensorflow-on-cloud9
2 | Hopefully the easiest way to get started with Google's TensorFlow Deep Learning Neural Network Python Library
3 |
4 |
5 | Feb 7, 2016 Updated after the video
6 |
7 | Added bash file setup-new02.sh which automates setting up ipython notebook (now called jupyter).
8 |
9 |
10 |
11 |
12 |
13 |
14 | Installation Video at https://youtu.be/kMtrOIPLpR0
15 |
16 |
17 |
18 |
19 |
20 | [![Instructional video](http://img.youtube.com/vi/kMtrOIPLpR0/0.jpg)](https://youtu.be/kMtrOIPLpR0)
21 |
22 |
23 |
24 |
25 |
26 |
27 | My TensorFlow Teacher webpage is at
28 |
29 | http://rocksetta.com/tensorflow-teacher/
30 |
31 |
32 |
33 | Install onto Cloud9 (http://c9.io) as a custom workspace
34 |
35 | using the URL for this repository:
36 |
37 | https://github.com/hpssjellis/easy-tensorflow-on-cloud9.git
38 |
39 |
40 | In the terminal type:
41 | ``` bash
42 | bash setup.sh
43 | ```
44 |
45 | OR JUST RIGHT CLICK AND SELECT RUN ON THE FILE setup.sh
46 |
47 | Note: Any of these examples can be run by just right clicking on the files and selecting run.
48 |
49 | Any file whose name starts with rocksetta and ends with .sh can be run this way.
50 |
51 |
52 | If you have never used Cloud9 it may look hard, but compared to setting up your own Linux server, Cloud9 is a breeze:
53 |
54 | 1. register for a free account
55 | 1. click on the big plus sign to make a new workspace
56 |
57 |
58 | 
59 |
60 | 1. Fill out the form as needed; the main field is the URL for this repository (make sure it has .git at the end:
61 | https://github.com/hpssjellis/easy-tensorflow-on-cloud9.git)
62 |
63 | 1. Make sure the default custom box is selected
64 |
65 | 
66 |
67 | 1. Then just right click and run setup.sh and take a break for about 10 minutes
68 |
69 |
70 | .
71 |
72 | .
73 |
74 | .
75 |
76 |
77 | Then try some of the examples in the rocksetta-examples folder.
78 |
79 | My TensorFlow API diagram is at http://rocksetta.com/tensorflow-teacher/tensorflow-svg.html which is a clickable version of
80 |
81 | 
82 |
83 | My Tensorflow-teacher site is at http://rocksetta.com/tensorflow-teacher/
84 |
85 |
86 |
87 |
88 | Other people's examples are in the other folders.
89 |
90 | A good starting point is the bcomposes-examples folder (originally try-tf), explained at this website:
91 |
92 | https://bcomposes.wordpress.com/2015/11/26/simple-end-to-end-tensorflow-examples/
93 |
94 |
95 |
96 |
97 |
98 | By Jeremy Ellis, maker of rocksetta.com
99 |
100 | twitter @rocksetta
101 |
102 |
103 |
104 | Side note:
105 |
106 |
107 | using
108 |
109 | ``` bash
110 | source ~/virtual-tf/bin/activate
111 | ```
112 |
113 | sets up the environment
114 |
115 | note just type
116 |
117 | ``` bash
118 | deactivate
119 | ```
120 |
121 | to get your cursor back
122 |
123 | I include this code in each of my .sh files but you could run the code in the command line and call the files normally.
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 | to find tensorflow
143 |
144 |
145 |
146 | cd /usr/local/lib/python2.7/site-packages/tensorflow
147 |
148 |
149 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/multilayer_perceptron.py:
--------------------------------------------------------------------------------
1 | '''
2 | A Multilayer Perceptron implementation example using TensorFlow library.
3 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
4 |
5 | Author: Aymeric Damien
6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
7 | '''
8 |
9 | # Import MNIST data
10 | import input_data
11 | mnist = input_data.read_data_sets("/home/ubuntu/workspace/tmp5/data/", one_hot=True)
12 |
13 | import tensorflow as tf
14 |
15 | # Parameters
16 | learning_rate = 0.001
17 | training_epochs = 15
18 | batch_size = 100
19 | display_step = 1
20 |
21 | # Network Parameters
22 | n_hidden_1 = 256 # 1st layer num features
23 | n_hidden_2 = 256 # 2nd layer num features
24 | n_input = 784 # MNIST data input (img shape: 28*28)
25 | n_classes = 10 # MNIST total classes (0-9 digits)
26 |
27 | # tf Graph input
28 | x = tf.placeholder("float", [None, n_input])
29 | y = tf.placeholder("float", [None, n_classes])
30 |
31 | # Create model
32 | def multilayer_perceptron(_X, _weights, _biases):
33 | layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1'])) #Hidden layer with RELU activation
34 | layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2'])) #Hidden layer with RELU activation
35 | return tf.matmul(layer_2, _weights['out']) + _biases['out']
36 |
37 | # Store layers weight & bias
38 | weights = {
39 | 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
40 | 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
41 | 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
42 | }
43 | biases = {
44 | 'b1': tf.Variable(tf.random_normal([n_hidden_1])),
45 | 'b2': tf.Variable(tf.random_normal([n_hidden_2])),
46 | 'out': tf.Variable(tf.random_normal([n_classes]))
47 | }
48 |
49 | # Construct model
50 | pred = multilayer_perceptron(x, weights, biases)
51 |
52 | # Define loss and optimizer
53 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss
54 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
55 |
56 | # Initializing the variables
57 | init = tf.initialize_all_variables()
58 |
59 | # Launch the graph
60 | with tf.Session() as sess:
61 | sess.run(init)
62 |
63 | # Training cycle
64 | for epoch in range(training_epochs):
65 | avg_cost = 0.
66 | total_batch = int(mnist.train.num_examples/batch_size)
67 | # Loop over all batches
68 | for i in range(total_batch):
69 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
70 | # Fit training using batch data
71 | sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
72 | # Compute average loss
73 | avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
74 | # Display logs per epoch step
75 | if epoch % display_step == 0:
76 | print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
77 |
78 | print "Optimization Finished!"
79 |
80 | # Test model
81 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
82 | # Calculate accuracy
83 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
84 | print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
85 |
--------------------------------------------------------------------------------
/old/setup-new01.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # only need to run this script with the command (do not type the #)
4 | # bash setup-new.sh
5 |
6 |
7 | echo "First checking the Python version"
8 | python --version
9 |
10 | echo "--------------------------------------------------------------"
11 | echo ". "
12 |
13 |
14 | echo "Now checking is pip is installed"
15 |
16 | pip list
17 |
18 | echo "--------------------------------------------------------------"
19 | echo ". "
20 |
21 |
22 | echo "next list this directory"
23 | pwd
24 | ls -lah
25 |
26 | echo "--------------------------------------------------------------"
27 | echo "Some other useful commands are cd cd .. dir ls pwd "
28 | echo "."
29 |
30 |
31 |
32 |
33 | echo "Installing PIP and Virtual Environment"
34 |
35 | sudo apt-get install python-pip python-dev python-virtualenv
36 |
37 | echo "--------------------------------------------------------------"
38 | echo ". "
39 |
40 | echo "make the tensorflow environment"
41 |
42 | virtualenv --system-site-packages ~/virtual-tf
43 |
44 | echo "--------------------------------------------------------------"
45 | echo ". "
46 |
47 |
48 | echo "Activate the environemtn use deactivate to get your cursor back"
49 | source ~/virtual-tf/bin/activate
50 |
51 |
52 | echo "--------------------------------------------------------------"
53 | echo ". "
54 |
55 | echo "Now intall tensorFlow into the enviroment"
56 |
57 | pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
58 |
59 | #PYTHONUSERBASE=/home/ubuntu/workspace/tensorflow pip install --user --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
60 | #export PYTHONPATH=/home/ubuntu/workspace/tensorflow:$PYTHONPATH
61 | #export PATH=/home/ubuntu/workspace/tensorflow/bin:$PATH
62 |
63 |
64 |
65 |
66 | echo "--------------------------------------------------------------"
67 | echo ". "
68 |
69 | echo "Unfortunately on cloud 9 Pip hides the TensorFlow folder so lets clone it for our use"
70 | echo "Kind of wasteful but it isn't on our computer anyway"
71 |
72 | #deactivate
73 |
74 | git clone --recurse-submodules https://github.com/tensorflow/tensorflow
75 |
76 |
77 | echo "--------------------------------------------------------------"
78 | echo ". "
79 |
80 |
81 | echo "Installing a few extra packages"
82 |
83 |
84 |
85 | pip install matplotlib
86 |
87 |
88 | pip install git+git://github.com/tensorflow/skflow.git
89 |
90 |
91 |
92 | echo "------------------------Tensorflow installed--------------------------------------"
93 | echo "--------------------------------------------------------------"
94 | echo "--------------------------------------------------------------"
95 | echo "--------------------------------------------------------------"
96 | echo "--------------------------------------------------------------"
97 | echo "--------------------------------------------------------------"
98 | echo "--------------------------------------------------------------"
99 |
100 | echo ". "
101 |
102 | #echo "Unfortunately on cloud 9 Pip hides the TensorFlow folder so lets clone it for our use"
103 | #echo "Kind of wasteful but it isn't on our computer anyway"
104 |
105 | #deactivate
106 |
107 | git clone --recurse-submodules https://github.com/tensorflow/tensorflow
108 |
109 |
110 | echo "--------------------------------------------------------------"
111 | echo ". "
112 |
113 |
114 | pip install --upgrade ipython
115 | pip install --upgrade jupyter
116 |
117 | jupyter notebook --ip=0.0.0.0 --port=8080 --no-browser
118 |
119 |
120 |
121 | echo "deactivate to get out of the virtual enviroment"
122 |
123 |
124 |
125 |
126 |
127 |
128 |
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/saturn_data_eval.csv:
--------------------------------------------------------------------------------
1 | 0,-2.95364440838872,0.4240724153547
2 | 1,9.05324338708622,3.8332299316063
3 | 1,-9.41898808565923,-5.15292208063294
4 | 0,0.0259142145221616,0.0131769510996405
5 | 1,-5.98582112483023,9.74872712136347
6 | 1,-7.49101797542126,-5.48394497181704
7 | 1,8.65910174722262,-2.32602493955759
8 | 1,8.26922103849562,-6.56763171920307
9 | 0,2.40182827343185,1.10657173843081
10 | 0,-0.465755780497113,-0.807678174831206
11 | 1,10.2846732789519,0.828400996154731
12 | 0,1.88926691446674,-3.06093070941668
13 | 1,6.04467655182339,7.74140971333368
14 | 1,9.61871090831236,-3.80296065221943
15 | 0,2.92950492924933,0.665512002904368
16 | 0,1.87992216704095,-4.43105250577418
17 | 1,7.28892290086414,2.66698918808811
18 | 0,4.02450666602898,2.244583780411
19 | 1,-7.66630871112399,-5.29749623431877
20 | 0,-0.781453169961538,0.562678104064411
21 | 0,0.635011298824518,-3.6878002110401
22 | 1,8.22170742942158,2.88799387574692
23 | 1,-1.47455736050755,9.2203860297421
24 | 0,-0.121270871486501,0.063986309790505
25 | 1,-2.31266607975405,-10.9717639669406
26 | 1,2.83009424760612,-9.8210260630275
27 | 1,-9.19635813111571,-3.34855954713015
28 | 0,0.147725367512453,-0.437736939119105
29 | 1,-9.17331746858361,6.36392553718533
30 | 0,-2.28980339192164,1.60501617702423
31 | 1,5.13240717849589,9.86906556059293
32 | 0,1.65654360679859,-1.66581386791298
33 | 0,-0.00622256336644169,-0.0832218750907117
34 | 0,0.903538741758324,-1.27861276550525
35 | 0,1.25052117606507,-1.41863885064092
36 | 1,2.27623616139974,8.35271948733344
37 | 1,-2.19622946975649,9.26066202994795
38 | 1,4.65886462517106,7.92763500009444
39 | 1,-9.12600785652553,6.38516605589955
40 | 0,3.4851777563707,-2.41070406961164
41 | 1,-10.0955814178556,0.00676320264738719
42 | 0,-1.06378102886385,0.501713925048591
43 | 1,3.88451320638887,-8.9013454545313
44 | 1,9.6704324419551,1.31561096269033
45 | 1,-4.17881680474775,8.00173835147622
46 | 1,-5.00775406999558,9.24837974467148
47 | 1,3.7598328281536,-9.63895057965262
48 | 0,-1.46568630445723,1.02514709218113
49 | 1,2.52889846201837,-9.2666832637993
50 | 0,0.0667992413611389,0.28670701698946
51 | 1,9.10554765145899,5.68039941265474
52 | 0,-0.118192087098342,1.29788524800616
53 | 0,-2.19266842228583,-2.44434085822266
54 | 0,-0.0631441415283714,0.0365706184771397
55 | 0,0.756437258940677,3.85698106911192
56 | 0,-2.46969508412011,1.83238321355437
57 | 0,1.10193728949443,0.756008287697793
58 | 0,-1.01028268728193,-4.87447977282223
59 | 0,0.0946061442932678,0.536838992880969
60 | 1,4.5727928037117,9.40983787276743
61 | 0,-1.34938481807617,-2.68034970483454
62 | 1,-1.60719444645805,9.93588541231975
63 | 1,-7.93697056741509,3.05689582684673
64 | 1,-9.00829730587327,4.07249342645771
65 | 1,-11.2684518042161,-0.843134900869058
66 | 0,-3.36107317587188,0.663007407488394
67 | 0,-4.39609285144382,1.41304472178053
68 | 0,0.993790652959637,1.61726905315253
69 | 1,9.05467956228427,-6.47174612409913
70 | 1,7.96844249991419,5.37736349820582
71 | 1,-9.36976664961836,0.871088431804888
72 | 0,-0.758472521152815,-0.65404711174728
73 | 1,9.82751795911665,3.82413930092729
74 | 1,0.662620432079108,-11.8200982882237
75 | 1,-9.02908779011979,0.153310302751011
76 | 1,-0.101067080565379,-9.88393539126925
77 | 1,10.0054197909241,-1.50409519211489
78 | 1,-2.3857895511175,-9.91232681015645
79 | 1,9.64950043910508,-1.62499249925805
80 | 1,3.67888830791673,-10.1744112831109
81 | 0,-0.622873408062069,1.4372817097535
82 | 0,-0.45644767226127,1.33286205033499
83 | 0,3.77501530804962,-0.20492061753698
84 | 0,-0.705977520233487,0.535090235637785
85 | 0,0.952722120603435,1.39849898225992
86 | 0,2.43241042072037,-4.01804860356159
87 | 1,6.17248393242332,-6.79019861171794
88 | 0,0.332296597197245,2.84862313836927
89 | 0,-2.08466292404208,-1.26782105518914
90 | 1,7.90726565243414,5.19311974848883
91 | 1,-1.23513454896338,9.24435498482269
92 | 0,1.57853483990754,1.55662803439644
93 | 0,-0.615468713457144,0.177447373766895
94 | 1,-5.8458798259574,7.88083060792
95 | 0,1.07584883346741,0.281565147442874
96 | 1,11.3917396542548,1.58992609468347
97 | 1,-7.54119390428137,8.01151373936753
98 | 0,3.71898328296341,3.01997451231519
99 | 0,2.5103810625784,1.47566193245145
100 | 0,-1.81246514486709,2.65784907064089
101 |
--------------------------------------------------------------------------------
/setup-new02.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # only need to run this script with the command (do not type the #)
4 | # bash setup-new.sh
5 |
6 |
7 | echo "First checking the Python version"
8 | python --version
9 |
10 | echo "--------------------------------------------------------------"
11 | echo ". "
12 |
13 |
14 | echo "Now checking is pip is installed"
15 |
16 | pip list
17 |
18 | echo "--------------------------------------------------------------"
19 | echo ". "
20 |
21 |
22 | echo "next list this directory"
23 | pwd
24 | ls -lah
25 |
26 | echo "--------------------------------------------------------------"
27 | echo "Some other useful commands are cd cd .. dir ls pwd "
28 | echo "."
29 |
30 |
31 |
32 |
33 | echo "Installing PIP and Virtual Environment"
34 |
35 | sudo apt-get install python-pip python-dev python-virtualenv
36 |
37 | echo "--------------------------------------------------------------"
38 | echo ". "
39 |
40 | echo "make the tensorflow environment"
41 |
42 | virtualenv --system-site-packages ~/virtual-tf
43 |
44 | echo "--------------------------------------------------------------"
45 | echo ". "
46 |
47 |
48 | echo "Activate the environemtn use deactivate to get your cursor back"
49 | source ~/virtual-tf/bin/activate
50 |
51 | printf "\n\nsource ~/virtual-tf/bin/activate " >> ~/.profile
52 | printf "\necho 'enter deactivate to get out of the virtual enviroment'" >> ~/.profile
53 |
54 |
55 | echo "--------------------------------------------------------------"
56 | echo ". "
57 |
58 | echo "Now intall tensorFlow into the enviroment"
59 |
60 | pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
61 |
62 | #PYTHONUSERBASE=/home/ubuntu/workspace/tensorflow pip install --user --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
63 | #export PYTHONPATH=/home/ubuntu/workspace/tensorflow:$PYTHONPATH
64 | #export PATH=/home/ubuntu/workspace/tensorflow/bin:$PATH
65 |
66 |
67 |
68 |
69 | echo "--------------------------------------------------------------"
70 | echo ". "
71 |
72 | echo "Unfortunately on cloud 9 Pip hides the TensorFlow folder so lets clone it for our use"
73 | echo "Kind of wasteful but it isn't on our computer anyway"
74 |
75 | #deactivate
76 |
77 | git clone --recurse-submodules https://github.com/tensorflow/tensorflow
78 |
79 |
80 | echo "--------------------------------------------------------------"
81 | echo ". "
82 |
83 |
84 | echo "Installing a few extra packages"
85 |
86 |
87 |
88 | pip install matplotlib
89 |
90 | pip install scikit-learn
91 |
92 | pip install git+git://github.com/tensorflow/skflow.git
93 |
94 |
95 |
96 | echo "------------------------Tensorflow installed--------------------------------------"
97 | echo "--------------------------------------------------------------"
98 | echo "--------------------------------------------------------------"
99 | echo "--------------------------------------------------------------"
100 | echo "--------------------------------------------------------------"
101 | echo "--------------------------------------------------------------"
102 | echo "--------------------------------------------------------------"
103 |
104 | echo ". "
105 |
106 | #echo "Unfortunately on cloud 9 Pip hides the TensorFlow folder so lets clone it for our use"
107 | #echo "Kind of wasteful but it isn't on our computer anyway"
108 |
109 | #deactivate
110 |
111 | git clone --recurse-submodules https://github.com/tensorflow/tensorflow
112 |
113 | echo "Make a quick link to udacity files"
114 |
115 | ln -s /home/ubuntu/workspace/tensorflow/tensorflow/examples/udacity /home/ubuntu/workspace/udacityLink
116 |
117 |
118 |
119 | echo "enter deactivate to get out of the virtual enviroment"
120 |
121 |
122 |
123 | echo "--------------------------------------------------------------"
124 | echo ". "
125 |
126 |
127 | pip install --upgrade ipython
128 | pip install --upgrade jupyter
129 |
130 |
131 |
132 |
133 |
134 | #jupyter notebook --ip=0.0.0.0 --port=8080 --no-browser
135 | jupyter notebook --ip $IP --port $PORT --no-browser
136 |
137 | # bash file stays here so no more commands will work
138 |
139 |
140 |
141 |
142 |
143 |
144 |
--------------------------------------------------------------------------------
/bcomposes-examples/README.md:
--------------------------------------------------------------------------------
1 | Try TensorFlow
2 | ====
3 |
4 | Example code to try out [TensorFlow](http://www.tensorflow.org/). See the blog post [Simple end-to-end TensorFlow examples](https://bcomposes.wordpress.com/2015/11/26/simple-end-to-end-tensorflow-examples/) for more discussion and context.
5 |
6 | You need to have [TensorFlow installed](http://www.tensorflow.org/get_started/os_setup.md).
7 |
8 | ## Instructions for simulated data
9 |
10 | The subdirectory `try-tf/simdata` contains train and evaluation data sets for three simulated data set types: linear, moon, and saturn. It also contains some simple R and Python scripts for generating and viewing the data.
11 |
12 | ### Linearly separable data
13 |
14 | The data:
15 |
16 | * `try-tf/simdata/linear_data_train.csv`
17 | * `try-tf/simdata/linear_data_eval.csv`
18 |
19 | The training data set looks like this.
20 |
21 | 
22 |
23 | Softmax regression is perfectly capable of handling this data. If you run the command below, you should see output similar to that provided here.
24 |
25 | ```
26 | $ python softmax.py --train simdata/linear_data_train.csv --test simdata/linear_data_eval.csv --num_epochs 5 --verbose True
27 | Initialized!
28 |
29 | Training.
30 | 0 1 2 3 4 5 6 7 8 9
31 | 10 11 12 13 14 15 16 17 18 19
32 | 20 21 22 23 24 25 26 27 28 29
33 | 30 31 32 33 34 35 36 37 38 39
34 | 40 41 42 43 44 45 46 47 48 49
35 |
36 | Weight matrix.
37 | [[-1.87038445 1.87038457]
38 | [-2.23716712 2.23716712]]
39 |
40 | Bias vector.
41 | [ 1.57296884 -1.57296848]
42 |
43 | Applying model to first test instance.
44 | Point = [[ 0.14756215 0.24351828]]
45 | Wx+b = [[ 0.7521798 -0.75217938]]
46 | softmax(Wx+b) = [[ 0.81822371 0.18177626]]
47 |
48 | Accuracy: 1.0
49 | ```
50 |
51 | ### Moon data
52 |
53 | The data:
54 |
55 | * `try-tf/simdata/moon_data_train.csv`
56 | * `try-tf/simdata/moon_data_eval.csv`
57 |
58 | The training data set looks like this.
59 |
60 | 
61 |
62 | The softmax network performs poorly, but a network with a five node hidden layer works great.
63 |
64 | ```
65 | $ python softmax.py --train simdata/moon_data_train.csv --test simdata/moon_data_eval.csv --num_epochs 100
66 | Accuracy: 0.861
67 |
68 | $ python hidden.py --train simdata/moon_data_train.csv --test simdata/moon_data_eval.csv --num_epochs 100 --num_hidden 5
69 | Accuracy: 0.971
70 | ```
71 |
72 | ### Saturn data
73 |
74 | The data:
75 |
76 | * `try-tf/simdata/saturn_data_train.csv`
77 | * `try-tf/simdata/saturn_data_eval.csv`
78 |
79 | The training data set looks like this.
80 |
81 | 
82 |
83 | Again, a softmax network performs poorly, but a network with a five node hidden layer works great.
84 |
85 | ```
86 | $ python softmax.py --train simdata/saturn_data_train.csv --test simdata/saturn_data_eval.csv --num_epochs 100
87 | Accuracy: 0.43
88 |
89 | $ python hidden.py --train simdata/saturn_data_train.csv --test simdata/saturn_data_eval.csv --num_epochs 100 --num_hidden 15
90 | Accuracy: 1.0
91 | ```
92 |
93 | ## Generating simulated data.
94 |
95 | Feel free to play around with the code to generate data to make it harder, add more dimensions, etc. You can then generate new data as follows (while in the simdata directory):
96 |
97 | ```
98 | $ Rscript generate_linear_data.R
99 | $ python generate_moon_data.py
100 | $ Rscript generate_saturn_data.R
101 | ```
102 |
103 | The R scripts generate both train and test sets. For the moon data, you'll need to split the output into train and eval files using the Unix `head` and `tail` commands.
104 |
105 | ## Creating plots of the data.
106 |
107 | To prepare the blog post for this repository, I created a few R scripts to plot data. They are simple, but I figured I'd include them in case they are useful starting points for others for changing things or plotting related data.
108 |
109 | Go into the `simdata` directory.
110 |
111 | Open `plot_data.R` in an editor and uncomment the data set you'd like to plot, save it, and then run:
112 |
113 | ```
114 | $ Rscript plot_data.R
115 | ```
116 |
117 | For plotting the image with the hyperplane, start up R and then provide the command `source("plot_hyperplane.R")` to R.
118 |
119 | For plotting the graph relating the number of hidden nodes to accuracy, start up R and then provide the command `source("plot_hidden_curve.R")` to R.
120 |
121 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/convolutional_network.py:
--------------------------------------------------------------------------------
1 | '''
2 | A Convolutional Network implementation example using TensorFlow library.
3 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
4 |
5 | Author: Aymeric Damien
6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
7 | '''
8 |
9 | # Import MNIST data
10 | import input_data
11 | mnist = input_data.read_data_sets("/home/ubuntu/workspace/tmp5/data/", one_hot=True)
12 |
13 | import tensorflow as tf
14 |
15 | # Parameters
16 | learning_rate = 0.001
17 | training_iters = 100000
18 | batch_size = 128
19 | display_step = 10
20 |
21 | # Network Parameters
22 | n_input = 784 # MNIST data input (img shape: 28*28)
23 | n_classes = 10 # MNIST total classes (0-9 digits)
24 | dropout = 0.75 # Dropout, probability to keep units
25 |
26 | # tf Graph input
27 | x = tf.placeholder(tf.types.float32, [None, n_input])
28 | y = tf.placeholder(tf.types.float32, [None, n_classes])
29 | keep_prob = tf.placeholder(tf.types.float32) #dropout (keep probability)
30 |
31 | # Create model
32 | def conv2d(img, w, b):
33 | return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, 1, 1, 1], padding='SAME'),b))
34 |
35 | def max_pool(img, k):
36 | return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
37 |
38 | def conv_net(_X, _weights, _biases, _dropout):
39 | # Reshape input picture
40 | _X = tf.reshape(_X, shape=[-1, 28, 28, 1])
41 |
42 | # Convolution Layer
43 | conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'])
44 | # Max Pooling (down-sampling)
45 | conv1 = max_pool(conv1, k=2)
46 | # Apply Dropout
47 | conv1 = tf.nn.dropout(conv1, _dropout)
48 |
49 | # Convolution Layer
50 | conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'])
51 | # Max Pooling (down-sampling)
52 | conv2 = max_pool(conv2, k=2)
53 | # Apply Dropout
54 | conv2 = tf.nn.dropout(conv2, _dropout)
55 |
56 | # Fully connected layer
57 | dense1 = tf.reshape(conv2, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape conv2 output to fit dense layer input
58 | dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1'])) # Relu activation
59 | dense1 = tf.nn.dropout(dense1, _dropout) # Apply Dropout
60 |
61 | # Output, class prediction
62 | out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])
63 | return out
64 |
65 | # Store layers weight & bias
66 | weights = {
67 | 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])), # 5x5 conv, 1 input, 32 outputs
68 | 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])), # 5x5 conv, 32 inputs, 64 outputs
69 | 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])), # fully connected, 7*7*64 inputs, 1024 outputs
70 | 'out': tf.Variable(tf.random_normal([1024, n_classes])) # 1024 inputs, 10 outputs (class prediction)
71 | }
72 |
73 | biases = {
74 | 'bc1': tf.Variable(tf.random_normal([32])),
75 | 'bc2': tf.Variable(tf.random_normal([64])),
76 | 'bd1': tf.Variable(tf.random_normal([1024])),
77 | 'out': tf.Variable(tf.random_normal([n_classes]))
78 | }
79 |
80 | # Construct model
81 | pred = conv_net(x, weights, biases, keep_prob)
82 |
83 | # Define loss and optimizer
84 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
85 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
86 |
87 | # Evaluate model
88 | correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
89 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.types.float32))
90 |
91 | # Initializing the variables
92 | init = tf.initialize_all_variables()
93 |
94 | # Launch the graph
95 | with tf.Session() as sess:
96 | sess.run(init)
97 | step = 1
98 | # Keep training until reach max iterations
99 | while step * batch_size < training_iters:
100 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
101 | # Fit training using batch data
102 | sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
103 | if step % display_step == 0:
104 | # Calculate batch accuracy
105 | acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
106 | # Calculate batch loss
107 | loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
108 | print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
109 | step += 1
110 | print "Optimization Finished!"
111 | # Calculate accuracy for 256 mnist test images
112 | print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})
113 |
--------------------------------------------------------------------------------
/bcomposes-examples/softmax.py:
--------------------------------------------------------------------------------
1 | import tensorflow.python.platform
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | # Global variables.
7 | NUM_LABELS = 2 # The number of labels.
8 | BATCH_SIZE = 100 # The number of training examples to use per training step.
9 |
10 | # Define the flags useable from the command line.
11 | tf.app.flags.DEFINE_string('train', None,
12 | 'File containing the training data (labels & features).')
13 | tf.app.flags.DEFINE_string('test', None,
14 | 'File containing the test data (labels & features).')
15 | tf.app.flags.DEFINE_integer('num_epochs', 1,
16 |                             'Number of passes over the '
17 |                             'training data.')
18 | tf.app.flags.DEFINE_boolean('verbose', False, 'Produce verbose output.')
19 | FLAGS = tf.app.flags.FLAGS
20 |
21 | # Extract numpy representations of the labels and features given rows consisting of:
22 | # label, feat_0, feat_1, ..., feat_n
23 | def extract_data(filename):
24 |
25 | # Arrays to hold the labels and feature vectors.
26 | labels = []
27 | fvecs = []
28 |
29 | # Iterate over the rows, splitting the label from the features. Convert labels
30 | # to integers and features to floats.
31 | for line in file(filename):
32 | row = line.split(",")
33 | labels.append(int(row[0]))
34 | fvecs.append([float(x) for x in row[1:]])
35 |
36 | # Convert the array of float arrays into a numpy float matrix.
37 | fvecs_np = np.matrix(fvecs).astype(np.float32)
38 |
39 | # Convert the array of int labels into a numpy array.
40 | labels_np = np.array(labels).astype(dtype=np.uint8)
41 |
42 | # Convert the int numpy array into a one-hot matrix.
43 | labels_onehot = (np.arange(NUM_LABELS) == labels_np[:, None]).astype(np.float32)
44 |
45 | # Return a pair of the feature matrix and the one-hot label matrix.
46 | return fvecs_np,labels_onehot
47 |
48 | def main(argv=None):
49 | # Be verbose?
50 | verbose = FLAGS.verbose
51 |
52 | # Get the data.
53 | train_data_filename = FLAGS.train
54 | test_data_filename = FLAGS.test
55 |
56 | # Extract it into numpy matrices.
57 | train_data,train_labels = extract_data(train_data_filename)
58 | test_data, test_labels = extract_data(test_data_filename)
59 |
60 | # Get the shape of the training data.
61 | train_size,num_features = train_data.shape
62 |
63 | # Get the number of epochs for training.
64 | num_epochs = FLAGS.num_epochs
65 |
66 | # This is where training samples and labels are fed to the graph.
67 | # These placeholder nodes will be fed a batch of training data at each
68 | # training step using the {feed_dict} argument to the Run() call below.
69 | x = tf.placeholder("float", shape=[None, num_features])
70 | y_ = tf.placeholder("float", shape=[None, NUM_LABELS])
71 |
72 | # For the test data, hold the entire dataset in one constant node.
73 | test_data_node = tf.constant(test_data)
74 |
75 | # Define and initialize the network.
76 |
77 | # These are the weights that inform how much each feature contributes to
78 | # the classification.
79 | W = tf.Variable(tf.zeros([num_features,NUM_LABELS]))
80 | b = tf.Variable(tf.zeros([NUM_LABELS]))
81 | y = tf.nn.softmax(tf.matmul(x,W) + b)
82 |
83 | # Optimization.
84 | cross_entropy = -tf.reduce_sum(y_*tf.log(y))
85 | train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
86 |
87 | # Evaluation.
88 | correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
89 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
90 |
91 | # Create a local session to run this computation.
92 | with tf.Session() as s:
93 | # Run all the initializers to prepare the trainable parameters.
94 | tf.initialize_all_variables().run()
95 | if verbose:
96 | print 'Initialized!'
97 | print
98 | print 'Training.'
99 |
100 | # Iterate and train.
101 | for step in xrange(num_epochs * train_size // BATCH_SIZE):
102 | if verbose:
103 | print step,
104 |
105 | offset = (step * BATCH_SIZE) % train_size
106 | batch_data = train_data[offset:(offset + BATCH_SIZE), :]
107 | batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
108 | train_step.run(feed_dict={x: batch_data, y_: batch_labels})
109 |
110 | if verbose and offset >= train_size-BATCH_SIZE:
111 | print
112 |
113 | # Give very detailed output.
114 | if verbose:
115 | print
116 | print 'Weight matrix.'
117 | print s.run(W)
118 | print
119 | print 'Bias vector.'
120 | print s.run(b)
121 | print
122 | print "Applying model to first test instance."
123 | first = test_data[:1]
124 | print "Point =", first
125 | print "Wx+b = ", s.run(tf.matmul(first,W)+b)
126 | print "softmax(Wx+b) = ", s.run(tf.nn.softmax(tf.matmul(first,W)+b))
127 | print
128 |
129 | print "Accuracy:", accuracy.eval(feed_dict={x: test_data, y_: test_labels})
130 |
131 | if __name__ == '__main__':
132 | tf.app.run()
133 |
--------------------------------------------------------------------------------
/bcomposes-examples/truncnorm_hidden.py:
--------------------------------------------------------------------------------
1 | import tensorflow.python.platform
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | # Global variables.
7 | NUM_LABELS = 2 # The number of labels.
8 | BATCH_SIZE = 100 # The number of training examples to use per training step.
9 | SEED = None # Set to None for random seed.
10 |
11 | tf.app.flags.DEFINE_string('train', None,
12 | 'File containing the training data (labels & features).')
13 | tf.app.flags.DEFINE_string('test', None,
14 | 'File containing the test data (labels & features).')
15 | tf.app.flags.DEFINE_integer('num_epochs', 1,
16 | 'Number of passes over the training data.')
17 | tf.app.flags.DEFINE_integer('num_hidden', 1,
18 | 'Number of nodes in the hidden layer.')
19 | tf.app.flags.DEFINE_boolean('verbose', False, 'Produce verbose output.')
20 | FLAGS = tf.app.flags.FLAGS
21 |
22 | # Extract numpy representations of the labels and features given rows consisting of:
23 | # label, feat_0, feat_1, ..., feat_n
24 | def extract_data(filename):
25 |
26 | # Arrays to hold the labels and feature vectors.
27 | labels = []
28 | fvecs = []
29 |
30 | # Iterate over the rows, splitting the label from the features. Convert labels
31 | # to integers and features to floats.
32 | for line in file(filename):
33 | row = line.split(",")
34 | labels.append(int(row[0]))
35 | fvecs.append([float(x) for x in row[1:]])
36 |
37 | # Convert the array of float arrays into a numpy float matrix.
38 | fvecs_np = np.matrix(fvecs).astype(np.float32)
39 |
40 | # Convert the array of int labels into a numpy array.
41 | labels_np = np.array(labels).astype(dtype=np.uint8)
42 |
43 | # Convert the int numpy array into a one-hot matrix.
44 | labels_onehot = (np.arange(NUM_LABELS) == labels_np[:, None]).astype(np.float32)
45 |
46 | # Return a pair of the feature matrix and the one-hot label matrix.
47 | return fvecs_np,labels_onehot
48 |
49 | def main(argv=None):
50 | # Be verbose?
51 | verbose = FLAGS.verbose
52 |
53 | # Get the data.
54 | train_data_filename = FLAGS.train
55 | test_data_filename = FLAGS.test
56 |
57 | # Extract it into numpy arrays.
58 | train_data,train_labels = extract_data(train_data_filename)
59 | test_data, test_labels = extract_data(test_data_filename)
60 |
61 | # Get the shape of the training data.
62 | train_size,num_features = train_data.shape
63 |
64 | # Get the number of epochs for training.
65 | num_epochs = FLAGS.num_epochs
66 |
67 | # Get the size of layer one.
68 | num_hidden = FLAGS.num_hidden
69 |
70 | # This is where training samples and labels are fed to the graph.
71 | # These placeholder nodes will be fed a batch of training data at each
72 | # training step using the {feed_dict} argument to the Run() call below.
73 | x = tf.placeholder("float", shape=[None, num_features])
74 | y_ = tf.placeholder("float", shape=[None, NUM_LABELS])
75 |
76 | # For the test data, hold the entire dataset in one constant node.
77 | test_data_node = tf.constant(test_data)
78 |
79 | # Define and initialize the network.
80 |
81 | # Initialize the hidden weights and biases.
82 | w_hidden = tf.Variable(
83 | tf.truncated_normal([num_features, num_hidden],
84 | stddev=0.1,
85 | seed=SEED))
86 |
87 | b_hidden = tf.Variable(tf.constant(0.1, shape=[num_hidden]))
88 |
89 | # The hidden layer.
90 | hidden = tf.nn.relu(tf.matmul(x,w_hidden) + b_hidden)
91 |
92 | # Initialize the output weights and biases.
93 | w_out = tf.Variable(
94 | tf.truncated_normal([num_hidden, NUM_LABELS],
95 | stddev=0.1,
96 | seed=SEED))
97 |
98 | b_out = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))
99 |
100 | # The output layer.
101 | y = tf.nn.softmax(tf.matmul(hidden, w_out) + b_out)
102 |
103 | # Optimization.
104 | cross_entropy = -tf.reduce_sum(y_*tf.log(y))
105 | train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
106 |
107 | # Evaluation.
108 | correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
109 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
110 |
111 | # Create a local session to run this computation.
112 | with tf.Session() as s:
113 | # Run all the initializers to prepare the trainable parameters.
114 | tf.initialize_all_variables().run()
115 | if verbose:
116 | print 'Initialized!'
117 | print
118 | print 'Training.'
119 |
120 | # Iterate and train.
121 | for step in xrange(num_epochs * train_size // BATCH_SIZE):
122 | if verbose:
123 | print step,
124 |
125 | offset = (step * BATCH_SIZE) % train_size
126 | batch_data = train_data[offset:(offset + BATCH_SIZE), :]
127 | batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
128 | train_step.run(feed_dict={x: batch_data, y_: batch_labels})
129 | if verbose and offset >= train_size-BATCH_SIZE:
130 | print
131 | print "Accuracy:", accuracy.eval(feed_dict={x: test_data, y_: test_labels})
132 |
133 | if __name__ == '__main__':
134 | tf.app.run()
135 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/alexnet.py:
--------------------------------------------------------------------------------
1 | '''
2 | AlexNet implementation example using TensorFlow library.
3 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
4 | AlexNet Paper (http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)
5 |
6 | Author: Aymeric Damien
7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/
8 | '''
9 |
10 | # Import MNIST data
11 | import input_data
12 | mnist = input_data.read_data_sets("/home/ubuntu/workspace/tmp5/data/", one_hot=True)
13 |
14 | import tensorflow as tf
15 |
16 | # Parameters
17 | learning_rate = 0.001
18 | training_iters = 200000
19 | batch_size = 64
20 | display_step = 20
21 |
22 | # Network Parameters
23 | n_input = 784 # MNIST data input (img shape: 28*28)
24 | n_classes = 10 # MNIST total classes (0-9 digits)
25 | dropout = 0.8 # Dropout, probability to keep units
26 |
27 | # tf Graph input
28 | x = tf.placeholder(tf.types.float32, [None, n_input])
29 | y = tf.placeholder(tf.types.float32, [None, n_classes])
30 | keep_prob = tf.placeholder(tf.types.float32) # dropout (keep probability)
31 |
32 | # Create AlexNet model
33 | def conv2d(name, l_input, w, b):
34 | return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'),b), name=name)
35 |
36 | def max_pool(name, l_input, k):
37 | return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)
38 |
39 | def norm(name, l_input, lsize=4):
40 | return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)
41 |
42 | def alex_net(_X, _weights, _biases, _dropout):
43 | # Reshape input picture
44 | _X = tf.reshape(_X, shape=[-1, 28, 28, 1])
45 |
46 | # Convolution Layer
47 | conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])
48 | # Max Pooling (down-sampling)
49 | pool1 = max_pool('pool1', conv1, k=2)
50 | # Apply Normalization
51 | norm1 = norm('norm1', pool1, lsize=4)
52 | # Apply Dropout
53 | norm1 = tf.nn.dropout(norm1, _dropout)
54 |
55 | # Convolution Layer
56 | conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
57 | # Max Pooling (down-sampling)
58 | pool2 = max_pool('pool2', conv2, k=2)
59 | # Apply Normalization
60 | norm2 = norm('norm2', pool2, lsize=4)
61 | # Apply Dropout
62 | norm2 = tf.nn.dropout(norm2, _dropout)
63 |
64 | # Convolution Layer
65 | conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
66 | # Max Pooling (down-sampling)
67 | pool3 = max_pool('pool3', conv3, k=2)
68 | # Apply Normalization
69 | norm3 = norm('norm3', pool3, lsize=4)
70 | # Apply Dropout
71 | norm3 = tf.nn.dropout(norm3, _dropout)
72 |
73 | # Fully connected layer
74 | dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape conv3 output to fit dense layer input
75 | dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1') # Relu activation
76 |
77 | dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2') # Relu activation
78 |
79 | # Output, class prediction
80 | out = tf.matmul(dense2, _weights['out']) + _biases['out']
81 | return out
82 |
83 | # Store layers weight & bias
84 | weights = {
85 | 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),
86 | 'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),
87 | 'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
88 | 'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),
89 | 'wd2': tf.Variable(tf.random_normal([1024, 1024])),
90 | 'out': tf.Variable(tf.random_normal([1024, 10]))
91 | }
92 | biases = {
93 | 'bc1': tf.Variable(tf.random_normal([64])),
94 | 'bc2': tf.Variable(tf.random_normal([128])),
95 | 'bc3': tf.Variable(tf.random_normal([256])),
96 | 'bd1': tf.Variable(tf.random_normal([1024])),
97 | 'bd2': tf.Variable(tf.random_normal([1024])),
98 | 'out': tf.Variable(tf.random_normal([n_classes]))
99 | }
100 |
101 | # Construct model
102 | pred = alex_net(x, weights, biases, keep_prob)
103 |
104 | # Define loss and optimizer
105 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
106 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
107 |
108 | # Evaluate model
109 | correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
110 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.types.float32))
111 |
112 | # Initializing the variables
113 | init = tf.initialize_all_variables()
114 |
115 | # Launch the graph
116 | with tf.Session() as sess:
117 | sess.run(init)
118 | step = 1
119 | # Keep training until reach max iterations
120 | while step * batch_size < training_iters:
121 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
122 | # Fit training using batch data
123 | sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
124 | if step % display_step == 0:
125 | # Calculate batch accuracy
126 | acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
127 | # Calculate batch loss
128 | loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
129 | print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
130 | step += 1
131 | print "Optimization Finished!"
132 | # Calculate accuracy for 256 mnist test images
133 | print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})
134 |
--------------------------------------------------------------------------------
/bcomposes-examples/hidden.py:
--------------------------------------------------------------------------------
1 | import tensorflow.python.platform
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | # Global variables.
7 | NUM_LABELS = 2 # The number of labels.
8 | BATCH_SIZE = 100 # The number of training examples to use per training step.
9 |
10 | tf.app.flags.DEFINE_string('train', None,
11 | 'File containing the training data (labels & features).')
12 | tf.app.flags.DEFINE_string('test', None,
13 | 'File containing the test data (labels & features).')
14 | tf.app.flags.DEFINE_integer('num_epochs', 1,
15 | 'Number of passes over the training data.')
16 | tf.app.flags.DEFINE_integer('num_hidden', 1,
17 | 'Number of nodes in the hidden layer.')
18 | tf.app.flags.DEFINE_boolean('verbose', False, 'Produce verbose output.')
19 | FLAGS = tf.app.flags.FLAGS
20 |
21 | # Extract numpy representations of the labels and features given rows consisting of:
22 | # label, feat_0, feat_1, ..., feat_n
23 | def extract_data(filename):
24 |
25 | # Arrays to hold the labels and feature vectors.
26 | labels = []
27 | fvecs = []
28 |
29 | # Iterate over the rows, splitting the label from the features. Convert labels
30 | # to integers and features to floats.
31 | for line in file(filename):
32 | row = line.split(",")
33 | labels.append(int(row[0]))
34 | fvecs.append([float(x) for x in row[1:]])
35 |
36 | # Convert the array of float arrays into a numpy float matrix.
37 | fvecs_np = np.matrix(fvecs).astype(np.float32)
38 |
39 | # Convert the array of int labels into a numpy array.
40 | labels_np = np.array(labels).astype(dtype=np.uint8)
41 |
42 | # Convert the int numpy array into a one-hot matrix.
43 | labels_onehot = (np.arange(NUM_LABELS) == labels_np[:, None]).astype(np.float32)
44 |
45 | # Return a pair of the feature matrix and the one-hot label matrix.
46 | return fvecs_np,labels_onehot
47 |
48 | # Init weights method. (Lifted from Delip Rao: http://deliprao.com/archives/100)
49 | def init_weights(shape, init_method='xavier', xavier_params = (None, None)):
50 | if init_method == 'zeros':
51 | return tf.Variable(tf.zeros(shape, dtype=tf.float32))
52 | elif init_method == 'uniform':
53 | return tf.Variable(tf.random_normal(shape, stddev=0.01, dtype=tf.float32))
54 | else: #xavier
55 | (fan_in, fan_out) = xavier_params
56 | low = -4*np.sqrt(6.0/(fan_in + fan_out)) # {sigmoid:4, tanh:1}
57 | high = 4*np.sqrt(6.0/(fan_in + fan_out))
58 | return tf.Variable(tf.random_uniform(shape, minval=low, maxval=high, dtype=tf.float32))
59 |
60 | def main(argv=None):
61 | # Be verbose?
62 | verbose = FLAGS.verbose
63 |
64 | # Get the data.
65 | train_data_filename = FLAGS.train
66 | test_data_filename = FLAGS.test
67 |
68 | # Extract it into numpy arrays.
69 | train_data,train_labels = extract_data(train_data_filename)
70 | test_data, test_labels = extract_data(test_data_filename)
71 |
72 | # Get the shape of the training data.
73 | train_size,num_features = train_data.shape
74 |
75 | # Get the number of epochs for training.
76 | num_epochs = FLAGS.num_epochs
77 |
78 | # Get the size of layer one.
79 | num_hidden = FLAGS.num_hidden
80 |
81 | # This is where training samples and labels are fed to the graph.
82 | # These placeholder nodes will be fed a batch of training data at each
83 | # training step using the {feed_dict} argument to the Run() call below.
84 | x = tf.placeholder("float", shape=[None, num_features])
85 | y_ = tf.placeholder("float", shape=[None, NUM_LABELS])
86 |
87 | # For the test data, hold the entire dataset in one constant node.
88 | test_data_node = tf.constant(test_data)
89 |
90 | # Define and initialize the network.
91 |
92 | # Initialize the hidden weights and biases.
93 | w_hidden = init_weights(
94 | [num_features, num_hidden],
95 | 'xavier',
96 | xavier_params=(num_features, num_hidden))
97 |
98 | b_hidden = init_weights([1,num_hidden],'zeros')
99 |
100 | # The hidden layer.
101 | hidden = tf.nn.tanh(tf.matmul(x,w_hidden) + b_hidden)
102 |
103 | # Initialize the output weights and biases.
104 | w_out = init_weights(
105 | [num_hidden, NUM_LABELS],
106 | 'xavier',
107 | xavier_params=(num_hidden, NUM_LABELS))
108 |
109 | b_out = init_weights([1,NUM_LABELS],'zeros')
110 |
111 | # The output layer.
112 | y = tf.nn.softmax(tf.matmul(hidden, w_out) + b_out)
113 |
114 | # Optimization.
115 | cross_entropy = -tf.reduce_sum(y_*tf.log(y))
116 | train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
117 |
118 | # Evaluation.
119 | correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
120 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
121 |
122 | # Create a local session to run this computation.
123 | with tf.Session() as s:
124 | # Run all the initializers to prepare the trainable parameters.
125 | tf.initialize_all_variables().run()
126 | if verbose:
127 | print 'Initialized!'
128 | print
129 | print 'Training.'
130 |
131 | # Iterate and train.
132 | for step in xrange(num_epochs * train_size // BATCH_SIZE):
133 | if verbose:
134 | print step,
135 |
136 | offset = (step * BATCH_SIZE) % train_size
137 | batch_data = train_data[offset:(offset + BATCH_SIZE), :]
138 | batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
139 | train_step.run(feed_dict={x: batch_data, y_: batch_labels})
140 | if verbose and offset >= train_size-BATCH_SIZE:
141 | print
142 | print "Accuracy:", accuracy.eval(feed_dict={x: test_data, y_: test_labels})
143 |
144 | if __name__ == '__main__':
145 | tf.app.run()
146 |
--------------------------------------------------------------------------------
/bcomposes-examples/hidden-backup.py:
--------------------------------------------------------------------------------
1 | import tensorflow.python.platform
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | # Global variables.
7 | NUM_LABELS = 2 # The number of labels.
8 | BATCH_SIZE = 100 # The number of training examples to use per training step.
9 |
10 | tf.app.flags.DEFINE_string('train', None,
11 | 'File containing the training data (labels & features).')
12 | tf.app.flags.DEFINE_string('test', None,
13 | 'File containing the test data (labels & features).')
14 | tf.app.flags.DEFINE_integer('num_epochs', 1,
15 | 'Number of passes over the training data.')
16 | tf.app.flags.DEFINE_integer('num_hidden', 1,
17 | 'Number of nodes in the hidden layer.')
18 | tf.app.flags.DEFINE_boolean('verbose', False, 'Produce verbose output.')
19 | FLAGS = tf.app.flags.FLAGS
20 |
21 | # Extract numpy representations of the labels and features given rows consisting of:
22 | # label, feat_0, feat_1, ..., feat_n
23 | def extract_data(filename):
24 |
25 | # Arrays to hold the labels and feature vectors.
26 | labels = []
27 | fvecs = []
28 |
29 | # Iterate over the rows, splitting the label from the features. Convert labels
30 | # to integers and features to floats.
31 | for line in file(filename):
32 | row = line.split(",")
33 | labels.append(int(row[0]))
34 | fvecs.append([float(x) for x in row[1:]])
35 |
36 | # Convert the array of float arrays into a numpy float matrix.
37 | fvecs_np = np.matrix(fvecs).astype(np.float32)
38 |
39 | # Convert the array of int labels into a numpy array.
40 | labels_np = np.array(labels).astype(dtype=np.uint8)
41 |
42 | # Convert the int numpy array into a one-hot matrix.
43 | labels_onehot = (np.arange(NUM_LABELS) == labels_np[:, None]).astype(np.float32)
44 |
45 | # Return a pair of the feature matrix and the one-hot label matrix.
46 | return fvecs_np,labels_onehot
47 |
48 | # Init weights method. (Lifted from Delip Rao: http://deliprao.com/archives/100)
49 | def init_weights(shape, init_method='xavier', xavier_params = (None, None)):
50 | if init_method == 'zeros':
51 | return tf.Variable(tf.zeros(shape, dtype=tf.float32))
52 | elif init_method == 'uniform':
53 | return tf.Variable(tf.random_normal(shape, stddev=0.01, dtype=tf.float32))
54 | else: #xavier
55 | (fan_in, fan_out) = xavier_params
56 | low = -4*np.sqrt(6.0/(fan_in + fan_out)) # {sigmoid:4, tanh:1}
57 | high = 4*np.sqrt(6.0/(fan_in + fan_out))
58 | return tf.Variable(tf.random_uniform(shape, minval=low, maxval=high, dtype=tf.float32))
59 |
60 | def main(argv=None):
61 | # Be verbose?
62 | verbose = FLAGS.verbose
63 |
64 | # Get the data.
65 | train_data_filename = FLAGS.train
66 | test_data_filename = FLAGS.test
67 |
68 | # Extract it into numpy arrays.
69 | train_data,train_labels = extract_data(train_data_filename)
70 | test_data, test_labels = extract_data(test_data_filename)
71 |
72 | # Get the shape of the training data.
73 | train_size,num_features = train_data.shape
74 |
75 | # Get the number of epochs for training.
76 | num_epochs = FLAGS.num_epochs
77 |
78 | # Get the size of layer one.
79 | num_hidden = FLAGS.num_hidden
80 |
81 | # This is where training samples and labels are fed to the graph.
82 | # These placeholder nodes will be fed a batch of training data at each
83 | # training step using the {feed_dict} argument to the Run() call below.
84 | x = tf.placeholder("float", shape=[None, num_features])
85 | y_ = tf.placeholder("float", shape=[None, NUM_LABELS])
86 |
87 | # For the test data, hold the entire dataset in one constant node.
88 | test_data_node = tf.constant(test_data)
89 |
90 | # Define and initialize the network.
91 |
92 | # Initialize the hidden weights and biases.
93 | w_hidden = init_weights(
94 | [num_features, num_hidden],
95 | 'xavier',
96 | xavier_params=(num_features, num_hidden))
97 |
98 | b_hidden = init_weights([1,num_hidden],'zeros')
99 |
100 | # The hidden layer.
101 | hidden = tf.nn.tanh(tf.matmul(x,w_hidden) + b_hidden)
102 |
103 | # Initialize the output weights and biases.
104 | w_out = init_weights(
105 | [num_hidden, NUM_LABELS],
106 | 'xavier',
107 | xavier_params=(num_hidden, NUM_LABELS))
108 |
109 | b_out = init_weights([1,NUM_LABELS],'zeros')
110 |
111 | # The output layer.
112 | y = tf.nn.softmax(tf.matmul(hidden, w_out) + b_out)
113 |
114 | # Optimization.
115 | cross_entropy = -tf.reduce_sum(y_*tf.log(y))
116 | train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
117 |
118 | # Evaluation.
119 | correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
120 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
121 |
122 | # Create a local session to run this computation.
123 | with tf.Session() as s:
124 | # Run all the initializers to prepare the trainable parameters.
125 | tf.initialize_all_variables().run()
126 | if verbose:
127 | print 'Initialized!'
128 | print
129 | print 'Training.'
130 |
131 | # Iterate and train.
132 | for step in xrange(num_epochs * train_size // BATCH_SIZE):
133 | if verbose:
134 | print step,
135 |
136 | offset = (step * BATCH_SIZE) % train_size
137 | batch_data = train_data[offset:(offset + BATCH_SIZE), :]
138 | batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
139 | train_step.run(feed_dict={x: batch_data, y_: batch_labels})
140 | if verbose and offset >= train_size-BATCH_SIZE:
141 | print
142 | print "Accuracy:", accuracy.eval(feed_dict={x: test_data, y_: test_labels})
143 |
144 | if __name__ == '__main__':
145 | tf.app.run()
146 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/input_data.py:
--------------------------------------------------------------------------------
1 | """Functions for downloading and reading MNIST data."""
2 | from __future__ import print_function
3 | import gzip
4 | import os
5 | import urllib
6 | import numpy
7 | SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
8 | def maybe_download(filename, work_directory):
9 | """Download the data from Yann's website, unless it's already here."""
10 | if not os.path.exists(work_directory):
11 | os.mkdir(work_directory)
12 | filepath = os.path.join(work_directory, filename)
13 | if not os.path.exists(filepath):
14 | filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
15 | statinfo = os.stat(filepath)
16 |     print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
17 | return filepath
18 | def _read32(bytestream):
19 | dt = numpy.dtype(numpy.uint32).newbyteorder('>')
20 | return numpy.frombuffer(bytestream.read(4), dtype=dt)
21 | def extract_images(filename):
22 | """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
23 | print('Extracting', filename)
24 | with gzip.open(filename) as bytestream:
25 | magic = _read32(bytestream)
26 | if magic != 2051:
27 | raise ValueError(
28 | 'Invalid magic number %d in MNIST image file: %s' %
29 | (magic, filename))
30 | num_images = _read32(bytestream)
31 | rows = _read32(bytestream)
32 | cols = _read32(bytestream)
33 | buf = bytestream.read(rows * cols * num_images)
34 | data = numpy.frombuffer(buf, dtype=numpy.uint8)
35 | data = data.reshape(num_images, rows, cols, 1)
36 | return data
37 | def dense_to_one_hot(labels_dense, num_classes=10):
38 | """Convert class labels from scalars to one-hot vectors."""
39 | num_labels = labels_dense.shape[0]
40 | index_offset = numpy.arange(num_labels) * num_classes
41 | labels_one_hot = numpy.zeros((num_labels, num_classes))
42 | labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
43 | return labels_one_hot
44 | def extract_labels(filename, one_hot=False):
45 | """Extract the labels into a 1D uint8 numpy array [index]."""
46 | print('Extracting', filename)
47 | with gzip.open(filename) as bytestream:
48 | magic = _read32(bytestream)
49 | if magic != 2049:
50 | raise ValueError(
51 | 'Invalid magic number %d in MNIST label file: %s' %
52 | (magic, filename))
53 | num_items = _read32(bytestream)
54 | buf = bytestream.read(num_items)
55 | labels = numpy.frombuffer(buf, dtype=numpy.uint8)
56 | if one_hot:
57 | return dense_to_one_hot(labels)
58 | return labels
59 | class DataSet(object):
60 | def __init__(self, images, labels, fake_data=False):
61 | if fake_data:
62 | self._num_examples = 10000
63 | else:
64 | assert images.shape[0] == labels.shape[0], (
65 | "images.shape: %s labels.shape: %s" % (images.shape,
66 | labels.shape))
67 | self._num_examples = images.shape[0]
68 | # Convert shape from [num examples, rows, columns, depth]
69 | # to [num examples, rows*columns] (assuming depth == 1)
70 | assert images.shape[3] == 1
71 | images = images.reshape(images.shape[0],
72 | images.shape[1] * images.shape[2])
73 | # Convert from [0, 255] -> [0.0, 1.0].
74 | images = images.astype(numpy.float32)
75 | images = numpy.multiply(images, 1.0 / 255.0)
76 | self._images = images
77 | self._labels = labels
78 | self._epochs_completed = 0
79 | self._index_in_epoch = 0
80 | @property
81 | def images(self):
82 | return self._images
83 | @property
84 | def labels(self):
85 | return self._labels
86 | @property
87 | def num_examples(self):
88 | return self._num_examples
89 | @property
90 | def epochs_completed(self):
91 | return self._epochs_completed
92 | def next_batch(self, batch_size, fake_data=False):
93 | """Return the next `batch_size` examples from this data set."""
94 | if fake_data:
95 | fake_image = [1.0 for _ in xrange(784)]
96 | fake_label = 0
97 | return [fake_image for _ in xrange(batch_size)], [
98 | fake_label for _ in xrange(batch_size)]
99 | start = self._index_in_epoch
100 | self._index_in_epoch += batch_size
101 | if self._index_in_epoch > self._num_examples:
102 | # Finished epoch
103 | self._epochs_completed += 1
104 | # Shuffle the data
105 | perm = numpy.arange(self._num_examples)
106 | numpy.random.shuffle(perm)
107 | self._images = self._images[perm]
108 | self._labels = self._labels[perm]
109 | # Start next epoch
110 | start = 0
111 | self._index_in_epoch = batch_size
112 | assert batch_size <= self._num_examples
113 | end = self._index_in_epoch
114 | return self._images[start:end], self._labels[start:end]
115 | def read_data_sets(train_dir, fake_data=False, one_hot=False):
116 | class DataSets(object):
117 | pass
118 | data_sets = DataSets()
119 | if fake_data:
120 | data_sets.train = DataSet([], [], fake_data=True)
121 | data_sets.validation = DataSet([], [], fake_data=True)
122 | data_sets.test = DataSet([], [], fake_data=True)
123 | return data_sets
124 | TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
125 | TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
126 | TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
127 | TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
128 | VALIDATION_SIZE = 5000
129 | local_file = maybe_download(TRAIN_IMAGES, train_dir)
130 | train_images = extract_images(local_file)
131 | local_file = maybe_download(TRAIN_LABELS, train_dir)
132 | train_labels = extract_labels(local_file, one_hot=one_hot)
133 | local_file = maybe_download(TEST_IMAGES, train_dir)
134 | test_images = extract_images(local_file)
135 | local_file = maybe_download(TEST_LABELS, train_dir)
136 | test_labels = extract_labels(local_file, one_hot=one_hot)
137 | validation_images = train_images[:VALIDATION_SIZE]
138 | validation_labels = train_labels[:VALIDATION_SIZE]
139 | train_images = train_images[VALIDATION_SIZE:]
140 | train_labels = train_labels[VALIDATION_SIZE:]
141 | data_sets.train = DataSet(train_images, train_labels)
142 | data_sets.validation = DataSet(validation_images, validation_labels)
143 | data_sets.test = DataSet(test_images, test_labels)
144 | return data_sets
--------------------------------------------------------------------------------
/rocksetta-examples/input_data.py:
--------------------------------------------------------------------------------
1 | """Functions for downloading and reading MNIST data."""
2 | from __future__ import print_function
3 | import gzip
4 | import os
5 | import urllib
6 | import numpy
7 | SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
8 | def maybe_download(filename, work_directory):
9 | """Download the data from Yann's website, unless it's already here."""
10 | if not os.path.exists(work_directory):
11 | os.mkdir(work_directory)
12 | filepath = os.path.join(work_directory, filename)
13 | if not os.path.exists(filepath):
14 | filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
15 | statinfo = os.stat(filepath)
16 |     print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
17 | return filepath
18 | def _read32(bytestream):
19 | dt = numpy.dtype(numpy.uint32).newbyteorder('>')
20 | return numpy.frombuffer(bytestream.read(4), dtype=dt)
21 | def extract_images(filename):
22 | """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
23 | print('Extracting', filename)
24 | with gzip.open(filename) as bytestream:
25 | magic = _read32(bytestream)
26 | if magic != 2051:
27 | raise ValueError(
28 | 'Invalid magic number %d in MNIST image file: %s' %
29 | (magic, filename))
30 | num_images = _read32(bytestream)
31 | rows = _read32(bytestream)
32 | cols = _read32(bytestream)
33 | buf = bytestream.read(rows * cols * num_images)
34 | data = numpy.frombuffer(buf, dtype=numpy.uint8)
35 | data = data.reshape(num_images, rows, cols, 1)
36 | return data
37 | def dense_to_one_hot(labels_dense, num_classes=10):
38 | """Convert class labels from scalars to one-hot vectors."""
39 | num_labels = labels_dense.shape[0]
40 | index_offset = numpy.arange(num_labels) * num_classes
41 | labels_one_hot = numpy.zeros((num_labels, num_classes))
42 | labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
43 | return labels_one_hot
44 | def extract_labels(filename, one_hot=False):
45 | """Extract the labels into a 1D uint8 numpy array [index]."""
46 | print('Extracting', filename)
47 | with gzip.open(filename) as bytestream:
48 | magic = _read32(bytestream)
49 | if magic != 2049:
50 | raise ValueError(
51 | 'Invalid magic number %d in MNIST label file: %s' %
52 | (magic, filename))
53 | num_items = _read32(bytestream)
54 | buf = bytestream.read(num_items)
55 | labels = numpy.frombuffer(buf, dtype=numpy.uint8)
56 | if one_hot:
57 | return dense_to_one_hot(labels)
58 | return labels
59 | class DataSet(object):
60 | def __init__(self, images, labels, fake_data=False):
61 | if fake_data:
62 | self._num_examples = 10000
63 | else:
64 | assert images.shape[0] == labels.shape[0], (
65 | "images.shape: %s labels.shape: %s" % (images.shape,
66 | labels.shape))
67 | self._num_examples = images.shape[0]
68 | # Convert shape from [num examples, rows, columns, depth]
69 | # to [num examples, rows*columns] (assuming depth == 1)
70 | assert images.shape[3] == 1
71 | images = images.reshape(images.shape[0],
72 | images.shape[1] * images.shape[2])
73 | # Convert from [0, 255] -> [0.0, 1.0].
74 | images = images.astype(numpy.float32)
75 | images = numpy.multiply(images, 1.0 / 255.0)
76 | self._images = images
77 | self._labels = labels
78 | self._epochs_completed = 0
79 | self._index_in_epoch = 0
80 | @property
81 | def images(self):
82 | return self._images
83 | @property
84 | def labels(self):
85 | return self._labels
86 | @property
87 | def num_examples(self):
88 | return self._num_examples
89 | @property
90 | def epochs_completed(self):
91 | return self._epochs_completed
92 | def next_batch(self, batch_size, fake_data=False):
93 | """Return the next `batch_size` examples from this data set."""
94 | if fake_data:
95 | fake_image = [1.0 for _ in xrange(784)]
96 | fake_label = 0
97 | return [fake_image for _ in xrange(batch_size)], [
98 | fake_label for _ in xrange(batch_size)]
99 | start = self._index_in_epoch
100 | self._index_in_epoch += batch_size
101 | if self._index_in_epoch > self._num_examples:
102 | # Finished epoch
103 | self._epochs_completed += 1
104 | # Shuffle the data
105 | perm = numpy.arange(self._num_examples)
106 | numpy.random.shuffle(perm)
107 | self._images = self._images[perm]
108 | self._labels = self._labels[perm]
109 | # Start next epoch
110 | start = 0
111 | self._index_in_epoch = batch_size
112 | assert batch_size <= self._num_examples
113 | end = self._index_in_epoch
114 | return self._images[start:end], self._labels[start:end]
115 | def read_data_sets(train_dir, fake_data=False, one_hot=False):
116 | class DataSets(object):
117 | pass
118 | data_sets = DataSets()
119 | if fake_data:
120 | data_sets.train = DataSet([], [], fake_data=True)
121 | data_sets.validation = DataSet([], [], fake_data=True)
122 | data_sets.test = DataSet([], [], fake_data=True)
123 | return data_sets
124 | TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
125 | TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
126 | TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
127 | TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
128 | VALIDATION_SIZE = 5000
129 | local_file = maybe_download(TRAIN_IMAGES, train_dir)
130 | train_images = extract_images(local_file)
131 | local_file = maybe_download(TRAIN_LABELS, train_dir)
132 | train_labels = extract_labels(local_file, one_hot=one_hot)
133 | local_file = maybe_download(TEST_IMAGES, train_dir)
134 | test_images = extract_images(local_file)
135 | local_file = maybe_download(TEST_LABELS, train_dir)
136 | test_labels = extract_labels(local_file, one_hot=one_hot)
137 | validation_images = train_images[:VALIDATION_SIZE]
138 | validation_labels = train_labels[:VALIDATION_SIZE]
139 | train_images = train_images[VALIDATION_SIZE:]
140 | train_labels = train_labels[VALIDATION_SIZE:]
141 | data_sets.train = DataSet(train_images, train_labels)
142 | data_sets.validation = DataSet(validation_images, validation_labels)
143 | data_sets.test = DataSet(test_images, test_labels)
144 | return data_sets
145 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/examples/input_data.py:
--------------------------------------------------------------------------------
1 | """Functions for downloading and reading MNIST data."""
2 | from __future__ import print_function
3 | import gzip
4 | import os
5 | import urllib
6 | import numpy
7 | SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
8 | def maybe_download(filename, work_directory):
9 | """Download the data from Yann's website, unless it's already here."""
10 | if not os.path.exists(work_directory):
11 | os.mkdir(work_directory)
12 | filepath = os.path.join(work_directory, filename)
13 | if not os.path.exists(filepath):
14 | filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
15 | statinfo = os.stat(filepath)
16 |     print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
17 | return filepath
18 | def _read32(bytestream):
19 | dt = numpy.dtype(numpy.uint32).newbyteorder('>')
20 | return numpy.frombuffer(bytestream.read(4), dtype=dt)
21 | def extract_images(filename):
22 | """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
23 | print('Extracting', filename)
24 | with gzip.open(filename) as bytestream:
25 | magic = _read32(bytestream)
26 | if magic != 2051:
27 | raise ValueError(
28 | 'Invalid magic number %d in MNIST image file: %s' %
29 | (magic, filename))
30 | num_images = _read32(bytestream)
31 | rows = _read32(bytestream)
32 | cols = _read32(bytestream)
33 | buf = bytestream.read(rows * cols * num_images)
34 | data = numpy.frombuffer(buf, dtype=numpy.uint8)
35 | data = data.reshape(num_images, rows, cols, 1)
36 | return data
37 | def dense_to_one_hot(labels_dense, num_classes=10):
38 | """Convert class labels from scalars to one-hot vectors."""
39 | num_labels = labels_dense.shape[0]
40 | index_offset = numpy.arange(num_labels) * num_classes
41 | labels_one_hot = numpy.zeros((num_labels, num_classes))
42 | labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
43 | return labels_one_hot
44 | def extract_labels(filename, one_hot=False):
45 | """Extract the labels into a 1D uint8 numpy array [index]."""
46 | print('Extracting', filename)
47 | with gzip.open(filename) as bytestream:
48 | magic = _read32(bytestream)
49 | if magic != 2049:
50 | raise ValueError(
51 | 'Invalid magic number %d in MNIST label file: %s' %
52 | (magic, filename))
53 | num_items = _read32(bytestream)
54 | buf = bytestream.read(num_items)
55 | labels = numpy.frombuffer(buf, dtype=numpy.uint8)
56 | if one_hot:
57 | return dense_to_one_hot(labels)
58 | return labels
59 | class DataSet(object):
60 | def __init__(self, images, labels, fake_data=False):
61 | if fake_data:
62 | self._num_examples = 10000
63 | else:
64 | assert images.shape[0] == labels.shape[0], (
65 | "images.shape: %s labels.shape: %s" % (images.shape,
66 | labels.shape))
67 | self._num_examples = images.shape[0]
68 | # Convert shape from [num examples, rows, columns, depth]
69 | # to [num examples, rows*columns] (assuming depth == 1)
70 | assert images.shape[3] == 1
71 | images = images.reshape(images.shape[0],
72 | images.shape[1] * images.shape[2])
73 | # Convert from [0, 255] -> [0.0, 1.0].
74 | images = images.astype(numpy.float32)
75 | images = numpy.multiply(images, 1.0 / 255.0)
76 | self._images = images
77 | self._labels = labels
78 | self._epochs_completed = 0
79 | self._index_in_epoch = 0
80 | @property
81 | def images(self):
82 | return self._images
83 | @property
84 | def labels(self):
85 | return self._labels
86 | @property
87 | def num_examples(self):
88 | return self._num_examples
89 | @property
90 | def epochs_completed(self):
91 | return self._epochs_completed
92 | def next_batch(self, batch_size, fake_data=False):
93 | """Return the next `batch_size` examples from this data set."""
94 | if fake_data:
95 | fake_image = [1.0 for _ in xrange(784)]
96 | fake_label = 0
97 | return [fake_image for _ in xrange(batch_size)], [
98 | fake_label for _ in xrange(batch_size)]
99 | start = self._index_in_epoch
100 | self._index_in_epoch += batch_size
101 | if self._index_in_epoch > self._num_examples:
102 | # Finished epoch
103 | self._epochs_completed += 1
104 | # Shuffle the data
105 | perm = numpy.arange(self._num_examples)
106 | numpy.random.shuffle(perm)
107 | self._images = self._images[perm]
108 | self._labels = self._labels[perm]
109 | # Start next epoch
110 | start = 0
111 | self._index_in_epoch = batch_size
112 | assert batch_size <= self._num_examples
113 | end = self._index_in_epoch
114 | return self._images[start:end], self._labels[start:end]
115 | def read_data_sets(train_dir, fake_data=False, one_hot=False):
116 | class DataSets(object):
117 | pass
118 | data_sets = DataSets()
119 | if fake_data:
120 | data_sets.train = DataSet([], [], fake_data=True)
121 | data_sets.validation = DataSet([], [], fake_data=True)
122 | data_sets.test = DataSet([], [], fake_data=True)
123 | return data_sets
124 | TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
125 | TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
126 | TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
127 | TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
128 | VALIDATION_SIZE = 5000
129 | local_file = maybe_download(TRAIN_IMAGES, train_dir)
130 | train_images = extract_images(local_file)
131 | local_file = maybe_download(TRAIN_LABELS, train_dir)
132 | train_labels = extract_labels(local_file, one_hot=one_hot)
133 | local_file = maybe_download(TEST_IMAGES, train_dir)
134 | test_images = extract_images(local_file)
135 | local_file = maybe_download(TEST_LABELS, train_dir)
136 | test_labels = extract_labels(local_file, one_hot=one_hot)
137 | validation_images = train_images[:VALIDATION_SIZE]
138 | validation_labels = train_labels[:VALIDATION_SIZE]
139 | train_images = train_images[VALIDATION_SIZE:]
140 | train_labels = train_labels[VALIDATION_SIZE:]
141 | data_sets.train = DataSet(train_images, train_labels)
142 | data_sets.validation = DataSet(validation_images, validation_labels)
143 | data_sets.test = DataSet(test_images, test_labels)
144 | return data_sets
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/linear_data_eval.csv:
--------------------------------------------------------------------------------
1 | 0,0.147562141324833,0.243518270820358
2 | 0,0.179868989766322,0.0922537025547999
3 | 1,0.754244045840797,0.52387485552728
4 | 0,0.248663780798734,0.175587276306351
5 | 0,0.397217489998824,0.0342134948381493
6 | 0,0.45098160780959,0.0328858982745571
7 | 0,0.335532917252522,0.16654442982869
8 | 0,0.371372049777255,0.167201755443297
9 | 0,0.280985655458144,0.214982885821991
10 | 0,0.313304894476342,0.00521976760984659
11 | 1,0.638465839375771,0.59044662666132
12 | 0,0.431591142988843,0.0470830726734468
13 | 0,0.207774186228136,0.0819718701225306
14 | 1,0.74160948940248,0.48471276227826
15 | 1,0.953514363329195,0.639625829881579
16 | 1,0.642944532413742,0.561453314573865
17 | 0,0.226494744243105,0.054182457565834
18 | 0,0.303693007460946,0.0231174890601685
19 | 0,0.442909334900624,0.26458738107626
20 | 1,0.689641361293936,0.441606457994801
21 | 1,0.808516873219165,0.460988843699898
22 | 0,0.265847526966108,-0.0217612302359918
23 | 0,0.277512671350018,-0.0351946196596575
24 | 1,0.650996504600044,0.281211163745124
25 | 1,0.604564151274163,0.581607217589511
26 | 0,0.361767990760365,0.08894162662105
27 | 1,0.666911463766987,0.658382180098643
28 | 0,0.23063312840159,-0.018832997639963
29 | 0,0.137952214405966,0.11362939867057
30 | 1,0.736452219294899,0.491761590839456
31 | 1,0.59710827462338,0.550563581924152
32 | 1,0.602609504754365,0.4272558830412
33 | 0,0.259748077463861,0.0830864567718866
34 | 1,0.65936769953152,0.398195251750205
35 | 1,0.68433814756797,0.243306417023253
36 | 1,0.855013781033494,0.552123366487809
37 | 1,0.960752241879643,0.589326478652242
38 | 0,0.218532689151841,0.149970476606352
39 | 0,0.340460187615256,0.163897051668133
40 | 0,0.355834203480917,0.336747248250862
41 | 1,0.692211516077662,0.577457901954487
42 | 1,0.662154960387187,0.570323144740557
43 | 0,0.372511985939819,0.0857198602571986
44 | 0,0.22900535800054,0.0267345028505001
45 | 0,0.309531718124159,0.144331015797212
46 | 0,0.263227253895867,0.127925334758116
47 | 0,0.308466277418238,0.0910351363893737
48 | 0,0.0930774599145935,0.113987148810076
49 | 1,0.6229369231208,0.456717036684569
50 | 1,0.706148123157656,0.490803325682998
51 | 1,0.758732482141232,0.456626655986977
52 | 1,0.694577645162027,0.622832560922402
53 | 0,0.369820198735285,0.0550497386655174
54 | 0,0.239404252953096,0.132757785197318
55 | 0,0.218428111377801,-0.219178158477306
56 | 1,0.692946434725903,0.544563975400229
57 | 0,0.245371538967234,0.101855416754447
58 | 1,0.56913442403047,0.294387382973145
59 | 1,0.714809619410276,0.538163512680377
60 | 0,0.358067954360732,0.17044538896009
61 | 0,0.320547428963535,0.251711615665619
62 | 0,0.331588068345858,0.107835653776275
63 | 0,0.304030747179208,0.0547750174185641
64 | 0,0.321995718652107,0.00579217815345798
65 | 1,0.865360178690971,0.428046918332584
66 | 1,0.671798231854105,0.344725091990453
67 | 1,0.828788452478336,0.71100719973842
68 | 1,0.781619318840399,0.632570613805373
69 | 1,0.731475532721693,0.257914929544682
70 | 1,0.629915796829575,0.284014988011287
71 | 0,0.422354335685084,0.114636655496169
72 | 1,0.673380826362505,0.577899254008376
73 | 0,0.411902241550325,-0.0328305542767028
74 | 1,0.819974434826139,0.549032982583942
75 | 1,0.749879048884502,0.521106457290703
76 | 1,0.865129108041695,0.505063149340243
77 | 1,0.802865060766911,0.534926888417566
78 | 1,0.757857478161499,0.353704731938664
79 | 1,0.795630162325943,0.351825595995113
80 | 0,0.462310717991569,0.101365623468382
81 | 0,0.269750572403886,-0.149506765860705
82 | 1,0.832120799383439,0.579782802852318
83 | 0,0.389685944569249,0.163719794768627
84 | 0,0.300495737942904,0.0420527462802183
85 | 0,0.301934594176741,0.0839837366231943
86 | 0,0.150962586848513,0.233882915802165
87 | 0,0.25452948081291,0.0859799579364082
88 | 1,0.758924966011327,0.51452726603244
89 | 1,0.688094757573297,0.451071027859505
90 | 0,0.308508646709991,0.124020803218039
91 | 1,0.55488440319187,0.712912973753406
92 | 1,0.734968534679093,0.291312226056919
93 | 0,0.424844828588299,0.0185442193071348
94 | 0,0.307133843847762,-0.0280343474796379
95 | 0,0.398740975001251,-0.0767590748067974
96 | 0,0.273728056742917,0.251849536602207
97 | 1,0.740350011751019,0.531733530441795
98 | 0,0.286412335506547,0.0834310455481071
99 | 1,0.844565148785342,0.556675571552102
100 | 1,0.570621351148686,0.405651266329464
101 | 1,0.780228461639442,0.682045157407263
102 | 1,0.808610830575502,0.48055917875417
103 | 0,0.404736393317852,0.0534461559608683
104 | 0,0.0182219815354657,-0.0890842632482053
105 | 1,0.580213837410484,0.434088367477381
106 | 0,0.439028571589774,-0.00891622428961927
107 | 0,0.169013026906772,0.139040747231028
108 | 1,0.721813199806076,0.453310307451834
109 | 1,0.579448726121783,0.601098430630891
110 | 1,0.59757790717587,0.582346229610322
111 | 1,0.706844037534213,0.444478339486168
112 | 0,0.311812492223678,-0.0245609488864325
113 | 1,0.696270344001157,0.559918986782424
114 | 0,0.517693836106899,0.136882581439463
115 | 1,0.711330813645891,0.368148477498463
116 | 1,0.651106025565724,0.645855201514964
117 | 0,0.312846178583358,0.167410567066292
118 | 0,0.245212502207789,0.0788996016352575
119 | 0,0.281584872796863,0.16330117675989
120 | 0,0.224177537883604,0.198729749727135
121 | 0,0.21360730861153,0.0659637945032196
122 | 0,0.236451258508006,0.0652936599183583
123 | 1,0.769116120629594,0.529177074205597
124 | 0,0.221575998142235,0.130222505896903
125 | 1,0.624409461092592,0.379450781522128
126 | 1,0.547121964828769,0.273564135415305
127 | 0,0.17592618523832,0.160037295166501
128 | 1,0.736121699674151,0.506024281122913
129 | 0,0.425480843904003,-0.0479291016901599
130 | 1,0.618590118156231,0.562390207752306
131 | 0,0.534384693355842,-0.00458315713932619
132 | 0,0.470829423375905,0.0567177119203065
133 | 0,0.375738399369332,0.10675402978279
134 | 0,0.319635320093047,0.129350314422456
135 | 0,0.243349669666206,0.116546106571295
136 | 0,0.34357354135486,-0.00338666050798213
137 | 0,0.186259256050742,0.049140403307173
138 | 1,0.646103731473393,0.476865433197868
139 | 1,0.742216880358217,0.390083115275135
140 | 1,0.839287534868438,0.550027764170609
141 | 1,0.800012129202795,0.466861127031535
142 | 0,0.216202082309659,0.176368365536743
143 | 1,0.780494991593676,0.375525772107199
144 | 1,0.726960940964144,0.580849892908468
145 | 1,0.743612641049324,0.552738236722576
146 | 1,0.597827269636531,0.314015371971029
147 | 1,0.753161899049477,0.612163367781747
148 | 1,0.672106155440207,0.541402426363875
149 | 0,0.611648647701236,-0.176614445962288
150 | 1,0.643311341435752,0.53917536733336
151 | 1,0.652427513768223,0.386155037786121
152 | 0,0.301520618087258,0.00779725506341844
153 | 1,0.580917034140212,0.318993261238895
154 | 1,0.658638758875195,0.58533531721232
155 | 1,0.522655469981122,0.526482285184902
156 | 1,0.619107709006306,0.395568496047808
157 | 0,0.342854385955556,0.00779057938184129
158 | 1,0.86999733382526,0.574001573496694
159 | 0,0.318543715852861,0.114391118336483
160 | 0,0.228396072599371,0.0106065027681785
161 | 1,0.727737130943151,0.38382553707226
162 | 0,0.242654358004674,0.229605566380433
163 | 1,0.849147311410968,0.485542675609488
164 | 1,0.688785680721008,0.568723327471366
165 | 1,0.601072641696223,0.472949199587446
166 | 0,0.386597757697056,-0.0462553172550934
167 | 0,0.173427736038098,0.0661032211070728
168 | 0,0.212619485253217,0.226220588353548
169 | 1,0.676379387810006,0.478200807508695
170 | 1,0.814570220992963,0.343974473335934
171 | 0,0.385176265775491,0.0321607979792773
172 | 1,0.749057204428228,0.460541975098555
173 | 0,0.249098133601278,0.135190023877858
174 | 1,0.755616419656037,0.499437877798512
175 | 1,0.605289891647501,0.576382527441655
176 | 0,0.220124707599109,0.100802583684052
177 | 1,0.611660353946956,0.582417425243852
178 | 1,0.638657314638631,0.567476782208443
179 | 1,0.707364959529237,0.553460099560961
180 | 1,0.750636527933954,0.418064859633507
181 | 0,0.258629634330844,0.205122708778633
182 | 1,0.561659196951965,0.302892925040985
183 | 0,0.261830597304793,0.0675225213946725
184 | 1,0.614308570486587,0.6059587413353
185 | 0,0.414439463469767,0.00965425791140319
186 | 0,0.252322879548744,0.0506559735947343
187 | 0,0.290671271152809,0.00155405446676163
188 | 0,0.235453515294468,0.110972028714464
189 | 1,0.732335416418508,0.448978572463179
190 | 1,0.810313994896262,0.495563824762476
191 | 0,0.421663017868487,-0.0356477152171603
192 | 1,0.696456152042301,0.492043579296949
193 | 1,0.648005874898818,0.474735589412081
194 | 1,0.586856365928848,0.486846359431101
195 | 0,0.311153185052651,0.0667137376136715
196 | 0,0.238197827120521,0.329800929790695
197 | 0,0.152745736144961,-0.0169226770608605
198 | 0,0.383676691365755,-0.0613888293971788
199 | 1,0.58794957455425,0.759074050955085
200 | 0,0.298771398500675,0.181005648077388
201 |
--------------------------------------------------------------------------------
/bcomposes-examples/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 |
179 |
--------------------------------------------------------------------------------
/aymericdamien-Examples/Setup_TensorFlow.md:
--------------------------------------------------------------------------------
1 | _From TensorFlow Official docs_
2 |
3 | # Download and Setup
4 |
5 | You can install TensorFlow using our provided binary packages or from source.
6 |
7 | ## Binary Installation
8 |
9 | The TensorFlow Python API currently requires Python 2.7: we are
10 | [working](https://github.com/tensorflow/tensorflow/issues/1) on adding support
11 | for Python 3.
12 |
13 | The simplest way to install TensorFlow is using
14 | [pip](https://pypi.python.org/pypi/pip) for both Linux and Mac.
15 |
16 | If you encounter installation errors, see
17 | [common problems](#common_install_problems) for some solutions. To simplify
18 | installation, please consider using our virtualenv-based instructions
19 | [here](#virtualenv_install).
20 |
21 | ### Ubuntu/Linux 64-bit
22 |
23 | ```bash
24 | # For CPU-only version
25 | $ pip install https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
26 |
27 | # For GPU-enabled version (only install this version if you have the CUDA sdk installed)
28 | $ pip install https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
29 | ```
30 |
31 | ### Mac OS X
32 |
33 | On OS X, we recommend installing [homebrew](http://brew.sh) and running `brew install
34 | python` before proceeding, or installing TensorFlow within [virtualenv](#virtualenv_install).
35 |
36 | ```bash
37 | # Only CPU-version is available at the moment.
38 | $ pip install https://storage.googleapis.com/tensorflow/mac/tensorflow-0.5.0-py2-none-any.whl
39 | ```
40 |
41 | ## Docker-based installation
42 |
43 | We also support running TensorFlow via [Docker](http://docker.com/), which lets
44 | you avoid worrying about setting up dependencies.
45 |
46 | First, [install Docker](http://docs.docker.com/engine/installation/). Once
47 | Docker is up and running, you can start a container with one command:
48 |
49 | ```bash
50 | $ docker run -it b.gcr.io/tensorflow/tensorflow
51 | ```
52 |
53 | This will start a container with TensorFlow and all its dependencies already
54 | installed.
55 |
56 | ### Additional images
57 |
58 | The default Docker image above contains just a minimal set of libraries for
59 | getting up and running with TensorFlow. We also have the following container,
60 | which you can use in the `docker run` command above:
61 |
62 | * `b.gcr.io/tensorflow/tensorflow-full`: Contains a complete TensorFlow source
63 | installation, including all utilities needed to build and run TensorFlow. This
64 | makes it easy to experiment directly with the source, without needing to
65 | install any of the dependencies described above.
66 |
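For instance, substituting the full image into the same `docker run` command shown above would look like this (a sketch; the available image tags may change over time):

```bash
# Start the full TensorFlow image instead of the minimal default
$ docker run -it b.gcr.io/tensorflow/tensorflow-full
```
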
67 | ## VirtualEnv-based installation
68 |
69 | We recommend using [virtualenv](https://pypi.python.org/pypi/virtualenv) to
70 | create an isolated container and install TensorFlow in that container -- it is
71 | optional but makes it easier to diagnose installation issues.
72 |
73 | First, install all required tools:
74 |
75 | ```bash
76 | # On Linux:
77 | $ sudo apt-get install python-pip python-dev python-virtualenv
78 |
79 | # On Mac:
80 | $ sudo easy_install pip # If pip is not already installed
81 | $ sudo pip install --upgrade virtualenv
82 | ```
83 |
84 | Next, set up a new virtualenv environment. To set it up in the
85 | directory `~/tensorflow`, run:
86 |
87 | ```bash
88 | $ virtualenv --system-site-packages ~/tensorflow
89 | $ cd ~/tensorflow
90 | ```
91 |
92 | Then activate the virtualenv:
93 |
94 | ```bash
95 | $ source bin/activate # If using bash
96 | $ source bin/activate.csh # If using csh
97 | (tensorflow)$ # Your prompt should change
98 | ```
99 |
100 | Inside the virtualenv, install TensorFlow:
101 |
102 | ```bash
103 | # For CPU-only linux x86_64 version
104 | (tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
105 |
106 | # For GPU-enabled linux x86_64 version
107 | (tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
108 |
109 | # For Mac CPU-only version
110 | (tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/mac/tensorflow-0.5.0-py2-none-any.whl
111 | ```
112 |
113 | Make sure you have downloaded the source code for TensorFlow, and then you can
114 | run an example TensorFlow program like:
115 |
116 | ```bash
117 | (tensorflow)$ cd tensorflow/models/image/mnist
118 | (tensorflow)$ python convolutional.py
119 |
120 | # When you are done using TensorFlow:
121 | (tensorflow)$ deactivate # Deactivate the virtualenv
122 |
123 | $ # Your prompt should change back
124 | ```
125 |
126 | ## Try your first TensorFlow program
127 |
128 | ### (Optional) Enable GPU Support
129 |
130 | If you installed the GPU-enabled TensorFlow pip binary, you must have the
131 | correct versions of the CUDA SDK and CUDNN installed on your
132 | system. Please see [the CUDA installation instructions](#install_cuda).
133 |
134 | You also need to set the `LD_LIBRARY_PATH` and `CUDA_HOME` environment
135 | variables. Consider adding the commands below to your `~/.bash_profile`. These
136 | assume your CUDA installation is in `/usr/local/cuda`:
137 |
138 | ```bash
139 | export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64"
140 | export CUDA_HOME=/usr/local/cuda
141 | ```
142 |
143 | ### Run TensorFlow
144 |
145 | Open a python terminal:
146 |
147 | ```bash
148 | $ python
149 |
150 | >>> import tensorflow as tf
151 | >>> hello = tf.constant('Hello, TensorFlow!')
152 | >>> sess = tf.Session()
153 | >>> print sess.run(hello)
154 | Hello, TensorFlow!
155 | >>> a = tf.constant(10)
156 | >>> b = tf.constant(32)
157 | >>> print sess.run(a+b)
158 | 42
159 | >>>
160 |
161 | ```
162 |
163 | ## Installing from sources
164 |
165 | ### Clone the TensorFlow repository
166 |
167 | ```bash
168 | $ git clone --recurse-submodules https://github.com/tensorflow/tensorflow
169 | ```
170 |
171 | `--recurse-submodules` is required to fetch the protobuf library that TensorFlow
172 | depends on.
173 |
174 | ### Installation for Linux
175 |
176 | #### Install Bazel
177 |
178 |
179 | Follow instructions [here](http://bazel.io/docs/install.html) to install the
180 | dependencies for Bazel. Then download bazel version 0.1.1 using the
181 | [installer for your system](https://github.com/bazelbuild/bazel/releases) and
182 | run the installer as mentioned there:
183 |
184 | ```bash
185 | $ chmod +x PATH_TO_INSTALL.SH
186 | $ ./PATH_TO_INSTALL.SH --user
187 | ```
188 |
189 | Remember to replace `PATH_TO_INSTALL.SH` with the path to the location where you
190 | downloaded the installer.
191 |
192 | Finally, follow the instructions in that script to place bazel into your binary
193 | path.
194 |
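As a minimal sketch of that last step (assuming the `--user` installer placed `bazel` under `$HOME/bin`, its usual default; check the installer's own output for the actual location), you might add:

```bash
# Hypothetical location -- adjust to wherever the installer reported it put bazel
export PATH="$PATH:$HOME/bin"
```
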
195 | #### Install other dependencies
196 |
197 | ```bash
198 | $ sudo apt-get install python-numpy swig python-dev
199 | ```
200 |
201 | #### Optional: Install CUDA (GPUs on Linux)
202 |
203 | In order to build or run TensorFlow with GPU support, both Cuda Toolkit 7.0 and
204 | CUDNN 6.5 V2 from NVIDIA need to be installed.
205 |
206 | TensorFlow GPU support requires having a GPU card with NVidia Compute Capability >= 3.5. Supported cards include but are not limited to:
207 |
208 | * NVidia Titan
209 | * NVidia Titan X
210 | * NVidia K20
211 | * NVidia K40
212 |
213 | ##### Download and install Cuda Toolkit 7.0
214 |
215 | https://developer.nvidia.com/cuda-toolkit-70
216 |
217 | Install the toolkit into e.g. `/usr/local/cuda`.
218 |
219 | ##### Download and install CUDNN Toolkit 6.5
220 |
221 | https://developer.nvidia.com/rdp/cudnn-archive
222 |
223 | Uncompress and copy the cudnn files into the toolkit directory. Assuming the
224 | toolkit is installed in `/usr/local/cuda`:
225 |
226 | ``` bash
227 | tar xvzf cudnn-6.5-linux-x64-v2.tgz
228 | sudo cp cudnn-6.5-linux-x64-v2/cudnn.h /usr/local/cuda/include
229 | sudo cp cudnn-6.5-linux-x64-v2/libcudnn* /usr/local/cuda/lib64
230 | ```
231 |
232 | ##### Configure TensorFlow's canonical view of Cuda libraries
233 | From the root of your source tree, run:
234 |
235 | ``` bash
236 | $ ./configure
237 | Do you wish to build TensorFlow with GPU support? [y/n] y
238 | GPU support will be enabled for TensorFlow
239 |
240 | Please specify the location where CUDA 7.0 toolkit is installed. Refer to
241 | README.md for more details. [default is: /usr/local/cuda]: /usr/local/cuda
242 |
243 | Please specify the location where CUDNN 6.5 V2 library is installed. Refer to
244 | README.md for more details. [default is: /usr/local/cuda]: /usr/local/cuda
245 |
246 | Setting up Cuda include
247 | Setting up Cuda lib64
248 | Setting up Cuda bin
249 | Setting up Cuda nvvm
250 | Configuration finished
251 | ```
252 |
253 | This creates a canonical set of symbolic links to the Cuda libraries on your system.
254 | Every time you change the Cuda library paths you need to run this step again before
255 | you invoke the bazel build command.
256 |
257 | ##### Build your target with GPU support.
258 | From the root of your source tree, run:
259 |
260 | ```bash
261 | $ bazel build -c opt --config=cuda //tensorflow/cc:tutorials_example_trainer
262 |
263 | $ bazel-bin/tensorflow/cc/tutorials_example_trainer --use_gpu
264 | # Lots of output. This tutorial iteratively calculates the major eigenvalue of
265 | # a 2x2 matrix, on GPU. The last few lines look like this.
266 | 000009/000005 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
267 | 000006/000001 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
268 | 000009/000009 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
269 | ```
270 |
271 | Note that "--config=cuda" is needed to enable the GPU support.
272 |
273 | ##### Enabling Cuda 3.0.
274 | TensorFlow officially supports Cuda devices with 3.5 and 5.2 compute
275 | capabilities. In order to enable earlier Cuda devices such as Grid K520, you
276 | need to target Cuda 3.0. This can be done through TensorFlow unofficial
277 | settings with "configure".
278 |
279 | ```bash
280 | $ TF_UNOFFICIAL_SETTING=1 ./configure
281 |
282 | # Same as the official settings above
283 |
284 | WARNING: You are configuring unofficial settings in TensorFlow. Because some
285 | external libraries are not backward compatible, these settings are largely
286 | untested and unsupported.
287 |
288 | Please specify a list of comma-separated Cuda compute capabilities you want to
289 | build with. You can find the compute capability of your device at:
290 | https://developer.nvidia.com/cuda-gpus.
291 | Please note that each additional compute capability significantly increases
292 | your build time and binary size. [Default is: "3.5,5.2"]: 3.0
293 |
294 | Setting up Cuda include
295 | Setting up Cuda lib64
296 | Setting up Cuda bin
297 | Setting up Cuda nvvm
298 | Configuration finished
299 | ```
300 |
301 | ##### Known issues
302 |
303 | * Although it is possible to build both Cuda and non-Cuda configs under the same
304 | source tree, we recommend running "bazel clean" when switching between these two
305 | configs in the same source tree.
306 |
307 | * You have to run configure before running bazel build. Otherwise, the build
308 | will fail with a clear error message. In the future, we might consider making
309 | this more convenient by including the configure step in our build process,
310 | given the necessary new Bazel feature support.
311 |
312 | ### Installation for Mac OS X
313 |
314 | Mac OS X needs the same set of dependencies as Linux; however, installing those
315 | dependencies is different. Here is a set of useful links to help with installing
316 | the dependencies on Mac OS X:
317 |
318 | #### Bazel
319 |
320 | Look for installation instructions for Mac OS X on
321 | [this](http://bazel.io/docs/install.html) page.
322 |
323 | #### SWIG
324 |
325 | [Mac OS X installation](http://www.swig.org/Doc3.0/Preface.html#Preface_osx_installation).
326 |
327 | Note: You need to install
328 | [PCRE](ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/) and *NOT* PCRE2.
329 |
330 | #### Numpy
331 |
332 | Follow installation instructions [here](http://docs.scipy.org/doc/numpy/user/install.html).
333 |
334 |
335 | ### Create the pip package and install
336 |
337 | ```bash
338 | $ bazel build -c opt //tensorflow/tools/pip_package:build_pip_package
339 |
340 | # To build with GPU support:
341 | $ bazel build -c opt --config=cuda //tensorflow/tools/pip_package:build_pip_package
342 |
343 | $ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
344 |
345 | # The name of the .whl file will depend on your platform.
346 | $ pip install /tmp/tensorflow_pkg/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
347 | ```
348 |
349 | ## Train your first TensorFlow neural net model
350 |
351 | Starting from the root of your source tree, run:
352 |
353 | ```python
354 | $ cd tensorflow/models/image/mnist
355 | $ python convolutional.py
356 | Succesfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
357 | Succesfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
358 | Succesfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
359 | Succesfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
360 | Extracting data/train-images-idx3-ubyte.gz
361 | Extracting data/train-labels-idx1-ubyte.gz
362 | Extracting data/t10k-images-idx3-ubyte.gz
363 | Extracting data/t10k-labels-idx1-ubyte.gz
364 | Initialized!
365 | Epoch 0.00
366 | Minibatch loss: 12.054, learning rate: 0.010000
367 | Minibatch error: 90.6%
368 | Validation error: 84.6%
369 | Epoch 0.12
370 | Minibatch loss: 3.285, learning rate: 0.010000
371 | Minibatch error: 6.2%
372 | Validation error: 7.0%
373 | ...
374 | ...
375 | ```
376 |
377 | ## Common Problems
378 |
379 | ### GPU-related issues
380 |
381 | If you encounter the following when trying to run a TensorFlow program:
382 |
383 | ```python
384 | ImportError: libcudart.so.7.0: cannot open shared object file: No such file or directory
385 | ```
386 |
387 | Make sure you followed the GPU installation [instructions](#install_cuda).
388 |
389 | ### Pip installation issues
390 |
391 | #### Can't find setup.py
392 |
393 | If, during `pip install`, you encounter an error like:
394 |
395 | ```bash
396 | ...
397 | IOError: [Errno 2] No such file or directory: '/tmp/pip-o6Tpui-build/setup.py'
398 | ```
399 |
400 | Solution: upgrade your version of `pip`:
401 |
402 | ```bash
403 | pip install --upgrade pip
404 | ```
405 |
406 | This may require `sudo`, depending on how `pip` is installed.
407 |
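If it does, the same upgrade run with elevated privileges would look like this (only relevant for a system-wide `pip`, not one inside a virtualenv):

```bash
# Upgrade a system-wide pip; omit sudo when working inside a virtualenv
$ sudo pip install --upgrade pip
```
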
408 | #### SSLError: SSL_VERIFY_FAILED
409 |
410 | If, during pip install from a URL, you encounter an error like:
411 |
412 | ```bash
413 | ...
414 | SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed
415 | ```
416 |
417 | Solution: Download the wheel manually via curl or wget, and pip install locally.
418 |
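A sketch of that workaround, using the CPU-only Linux wheel URL quoted earlier in this document (substitute the wheel that matches your platform):

```bash
# Fetch the wheel directly, then install the local file so pip's HTTPS fetch is bypassed
$ curl -O https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
$ pip install tensorflow-0.5.0-cp27-none-linux_x86_64.whl
```
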
419 | ### On Linux
420 |
421 | If you encounter:
422 |
423 | ```python
424 | ...
425 | "__add__", "__radd__",
426 | ^
427 | SyntaxError: invalid syntax
428 | ```
429 |
430 | Solution: make sure you are using Python 2.7.
431 |
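A quick way to confirm which interpreter you are actually invoking (it should report 2.7.x):

```bash
# Print the version of the default python on your PATH
$ python --version
```
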
432 | ### On Mac OS X
433 |
434 |
435 | If you encounter:
436 |
437 | ```python
438 | import six.moves.copyreg as copyreg
439 |
440 | ImportError: No module named copyreg
441 | ```
442 |
443 | Solution: TensorFlow depends on protobuf, which requires `six-1.10.0`. Apple's
444 | default python environment has `six-1.4.1` and may be difficult to upgrade.
445 | There are several ways to fix this:
446 |
447 | 1. Upgrade the system-wide copy of `six`:
448 |
449 | ```bash
450 | sudo easy_install -U six
451 | ```
452 |
453 | 2. Install a separate copy of python via homebrew:
454 |
455 | ```bash
456 | brew install python
457 | ```
458 |
459 | 3. Build or use TensorFlow
460 | [within `virtualenv`](#virtualenv_install).
461 |
462 |
463 |
464 | If you encounter:
465 |
466 | ```
467 | >>> import tensorflow as tf
468 | Traceback (most recent call last):
469 | File "", line 1, in
470 | File "/usr/local/lib/python2.7/site-packages/tensorflow/__init__.py", line 4, in
471 | from tensorflow.python import *
472 | File "/usr/local/lib/python2.7/site-packages/tensorflow/python/__init__.py", line 13, in
473 | from tensorflow.core.framework.graph_pb2 import *
474 | ...
475 | File "/usr/local/lib/python2.7/site-packages/tensorflow/core/framework/tensor_shape_pb2.py", line 22, in
476 | serialized_pb=_b('\n,tensorflow/core/framework/tensor_shape.proto\x12\ntensorflow\"d\n\x10TensorShapeProto\x12-\n\x03\x64im\x18\x02 \x03(\x0b\x32 .tensorflow.TensorShapeProto.Dim\x1a!\n\x03\x44im\x12\x0c\n\x04size\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\tb\x06proto3')
477 | TypeError: __init__() got an unexpected keyword argument 'syntax'
478 | ```
479 |
480 | This is due to a conflict between protobuf versions (we require protobuf 3.0.0).
481 | The best current solution is to make sure older versions of protobuf are not
482 | installed, for example by reinstalling it:
483 |
484 | ```bash
485 | brew reinstall --devel protobuf
486 | ```
--------------------------------------------------------------------------------
/bcomposes-examples/simdata/saturn_data_train.csv:
--------------------------------------------------------------------------------
1 | 1,-7.1239700674365,-5.05175898010314
2 | 0,1.80771566423302,0.770505522143023
3 | 1,8.43184823707231,-4.2287794074931
4 | 0,0.451276074541732,0.669574142606103
5 | 0,1.52519959303934,-0.953055551414968
6 | 0,2.6244640096065,-3.57517214574177
7 | 1,-9.67984097020372,2.43273985198413
8 | 0,1.76662111169025,-0.599065119089674
9 | 1,-9.54072620047797,-2.44549459765326
10 | 1,-4.04223072334724,9.13685386209343
11 | 0,2.12168571110583,1.09436523562788
12 | 0,-0.683614264815307,0.401363797043046
13 | 0,-0.976997367730473,-0.594055002855628
14 | 0,-2.77224622666051,3.94657421847961
15 | 1,-9.27205318506747,1.24216037652738
16 | 0,-1.71173169597796,0.584266032844234
17 | 0,-2.15368098415523,2.98009468989564
18 | 0,0.426199438668344,-2.04674949884645
19 | 1,-1.01334285269519,-10.3345489508607
20 | 1,-9.54101504300598,-4.55047974493486
21 | 1,10.1957066568018,0.254420103019349
22 | 1,-8.6778473681861,-3.27270683674903
23 | 1,6.47809481278742,8.96100657254648
24 | 0,-1.95411001422583,-1.14169962947211
25 | 1,11.8505019698607,-0.880526995586208
26 | 1,9.85201428822543,-1.98092728566354
27 | 1,-2.61930621246295,-9.91735995764943
28 | 1,-9.57702511533498,0.856047877803597
29 | 1,1.07211379460816,10.0475383208642
30 | 1,10.2114266188495,2.56413556126083
31 | 1,-1.0837815752106,10.4989367949914
32 | 0,-2.67378400738832,-0.261205121872508
33 | 1,-8.53007726968589,-5.67998129587997
34 | 1,-10.7663138511873,-1.29515496010343
35 | 0,-2.55338710545442,1.75125814188713
36 | 0,1.35469670851131,0.223877677617805
37 | 1,-1.84504413574705,-9.33192341793193
38 | 1,-5.80946534065145,-9.47640833659501
39 | 0,-1.04008162688905,-4.49097940004103
40 | 1,-5.32311107475766,7.53870059615664
41 | 1,2.53744100730897,-11.2206190940572
42 | 1,10.0004938360844,0.95507808057832
43 | 1,-7.63830322423253,-2.50856029879941
44 | 1,8.43366809886814,-6.2195651470462
45 | 1,-2.91023170983615,8.48032045150948
46 | 0,1.45578752738673,0.308974309209017
47 | 1,6.47268350416766,-6.61767616970341
48 | 1,7.5798589614718,-5.95840297257091
49 | 1,3.57609238872552,-10.8153763028483
50 | 0,-1.94175602111866,3.25323642785476
51 | 1,-8.57430992466047,0.0162163356664646
52 | 1,-2.28973318237138,-9.18916703684439
53 | 0,-2.07413895223307,-2.84246386431687
54 | 0,1.82662162321882,1.19275832563431
55 | 1,7.53222089679843,3.50851984721159
56 | 1,-8.63645268048514,-5.51919306313108
57 | 1,-1.38283835552521,-9.20403015220651
58 | 0,-0.0693929757023144,-0.120923632938589
59 | 1,8.94157505646838,4.1511541665216
60 | 0,-2.21638326393234,-0.743642766186011
61 | 1,6.92970234039982,-5.7299855716108
62 | 1,-9.96045721031378,-3.55960137096896
63 | 1,-5.08716904419667,-7.71311752380464
64 | 0,0.432315244441807,0.146519155172826
65 | 0,3.81359256480327,-0.949161768843296
66 | 0,-2.67813066150075,0.155970525921464
67 | 1,7.06099787525839,7.47853003826184
68 | 0,-3.6022241782501,2.87563655800124
69 | 1,-8.56205298519004,-5.59296554895657
70 | 0,2.27194803379196,-0.697309431763461
71 | 0,0.414869754722005,0.773457746708829
72 | 0,4.57901963248404,-0.581826336269988
73 | 0,0.345418471438973,0.341721440272724
74 | 1,2.13088143827608,11.1248802622644
75 | 1,-9.28621855455942,3.66602302094549
76 | 0,-1.05044463591027,-2.26198313837955
77 | 0,-0.203877849613951,1.77850652695302
78 | 1,-11.5347464575896,4.04493198309583
79 | 0,0.515441830198029,-1.48118858476755
80 | 0,-1.34492291408213,-2.21045863112778
81 | 0,-2.74619790193218,2.01868126297085
82 | 0,-0.0610050091577645,2.45055169641884
83 | 1,-10.9333944290643,2.16597032988926
84 | 0,2.10185914521208,2.50441889905131
85 | 0,-1.95060135192445,1.0221208136267
86 | 1,-3.93631134497805,-11.5948459098007
87 | 1,-1.77353334274379,10.9805317915229
88 | 1,-10.0125131496373,2.87350793431108
89 | 0,0.224152299355375,1.05534430338697
90 | 1,-7.56460268914936,6.34358229167367
91 | 1,-4.73459089779698,8.21880829353707
92 | 0,0.507713445092494,1.51044358214825
93 | 1,-0.92935781010476,10.138681083383
94 | 1,-7.1254412107155,-6.959519579293
95 | 0,-2.77394317025641,2.32841485779483
96 | 1,1.48198390389601,-9.20739827901074
97 | 0,-0.626710721992932,0.88992747103403
98 | 0,0.00481475570487226,-0.0076767885430906
99 | 0,1.36976033043542,-0.911796278185519
100 | 0,0.580325270116439,0.194246997750822
101 | 0,0.0269864269953635,0.308211189424495
102 | 1,9.62694841049478,1.73787010859398
103 | 1,3.31743060164764,-8.27434267939844
104 | 1,9.29743641003553,1.83876116471785
105 | 0,-1.56634889087478,1.12087375501131
106 | 0,0.234380373042369,-0.190852897944166
107 | 0,-2.2265741418854,0.541654777530827
108 | 1,-7.91642966575721,-6.56424201265321
109 | 1,-3.37861182479723,-8.15311367827467
110 | 0,-2.30671347876545,1.27602436352785
111 | 0,0.567730139692622,0.658488776316152
112 | 0,-1.8765363614178,3.31819158474277
113 | 1,9.32250255379486,6.67570571550008
114 | 1,6.87236057429256,6.31206298993164
115 | 0,1.24283341259685,-2.43335284418338
116 | 1,-8.85389030392406,-4.86565417551327
117 | 0,0.422414911283583,-1.78131047947536
118 | 0,0.00463276189188866,1.91938006596196
119 | 0,0.788171009954018,-1.40060317554696
120 | 1,-5.15648839125392,7.93637022030082
121 | 1,-8.88719630635514,-4.77641418214191
122 | 0,4.65847607993914,0.573533671794049
123 | 0,4.10489797677036,1.46331719664337
124 | 1,-5.64988236745269,7.59535604779533
125 | 1,1.97988634662085,-9.34147961488475
126 | 1,-9.56786717092285,6.15062630347382
127 | 0,-0.305509920415386,-0.816687799655518
128 | 0,4.37640279752176,-0.208692920893062
129 | 0,-2.99618409119118,-1.33982190816661
130 | 0,-3.5051709982934,-3.26706600506422
131 | 1,-0.444385483407193,-11.1351261211535
132 | 0,-0.908983892610002,4.33860668853704
133 | 0,0.308859742393393,0.668636267443342
134 | 1,4.26133424202836,-10.9742307794595
135 | 0,0.0549530959883597,0.0130070694151358
136 | 1,7.53390793895624,-5.07453255571314
137 | 0,0.040282157515584,-0.0413994226910768
138 | 0,0.364807994500752,-1.41501146682248
139 | 1,-9.8371849383414,-0.532494657299655
140 | 1,-8.92446101991649,-6.55325261517569
141 | 1,5.42742176639377,7.64488548818019
142 | 0,-0.0231829153121243,-0.0826624332581433
143 | 1,9.98354791862655,-2.21244212488613
144 | 0,-0.245484760362165,0.708915138075223
145 | 0,1.00809963140419,1.80528790612361
146 | 1,7.96253309805136,-7.85313040622627
147 | 1,-4.85802016477551,8.84980056346979
148 | 1,-2.2377711461735,12.1651953006766
149 | 1,-9.26259991078727,6.12948894225519
150 | 1,-2.50174647515082,9.94275644334929
151 | 0,1.69989746467005,-1.85933593163952
152 | 0,-1.55162808416059,2.51764424788514
153 | 1,10.2067319750296,1.44620992046507
154 | 0,0.395047528277998,2.75099869255495
155 | 0,3.47235286038504,0.312787622637351
156 | 0,0.572346258154881,0.128281214038418
157 | 0,-2.71563517805396,-0.555094438250771
158 | 0,0.0327901304785445,0.38291078109353
159 | 0,-0.498060478251128,-1.7347019542345
160 | 0,3.36656051937686,3.4067759448776
161 | 1,5.46174197424899,-9.54155164348352
162 | 1,8.1986107522222,-4.78277561362211
163 | 0,-3.41158980446985,1.82009660206347
164 | 1,-8.73380239355277,6.55987672658631
165 | 1,4.16415800597914,-9.96329401057575
166 | 1,0.0635559384201306,-9.97106599315458
167 | 1,10.6010735746994,-4.48171435604685
168 | 1,-7.29353120905952,7.77094608053753
169 | 1,-4.28656352697919,7.86961326195471
170 | 1,6.87878620757886,-8.5880001448525
171 | 0,-0.00596364426059484,-0.312808418406871
172 | 0,2.44510351847744,-0.61797711888748
173 | 1,-10.0840960767766,1.16202560086554
174 | 1,2.62845369667177,-8.69414146855081
175 | 1,5.55113670271272,-9.39642126569543
176 | 0,-0.594048098785318,0.996897492935077
177 | 0,-4.40699157911697,-1.52196900587115
178 | 0,1.12118760598014,0.457381308714515
179 | 0,2.94573263955246,1.71445582937027
180 | 0,0.421372932702674,-0.72130233384774
181 | 0,1.30805321432315,1.71906146831119
182 | 0,-1.02594303892191,-2.97572485299017
183 | 0,-2.27067165274015,0.422842952161015
184 | 1,10.6456910698005,-3.31028616534108
185 | 1,-6.20302052061548,7.36628634276438
186 | 0,-0.890365500283691,4.49479121231346
187 | 0,2.06094009366537,-0.492963725757071
188 | 0,-0.0932576441584378,0.035383386386386
189 | 0,-0.945507509093508,-1.59607616730411
190 | 1,7.88114689193623,-4.89056480426439
191 | 1,0.223906883492946,11.7432077852699
192 | 0,2.84591368720827,2.46926767037653
193 | 0,3.55565289348271,-0.258461484780092
194 | 1,-9.50178734944532,2.69678575087567
195 | 1,-10.342252581035,-1.84082834217332
196 | 1,-8.90024124086661,4.12315811788013
197 | 1,-9.15481878470657,-6.78170261760227
198 | 0,-3.2979728880191,-1.12113005646469
199 | 1,-8.20732169834139,-7.00081442556793
200 | 0,0.0992702573431094,-1.17817785449192
201 | 1,4.17694627875903,-11.543308379581
202 | 0,-0.036663809603862,0.498632860654854
203 | 0,-0.201069585544108,1.17033543172605
204 | 0,2.80328218434308,-2.43129523797881
205 | 1,-4.21178149153774,-7.46428961884393
206 | 0,2.09921429016358,0.55304807869477
207 | 0,1.31248181814165,3.20562148619731
208 | 0,2.85883233366319,-0.759820972487253
209 | 1,-10.1650537770967,-2.0801791464585
210 | 0,-2.1670183576218,1.13972157472014
211 | 0,0.0108340841362071,1.95019307061518
212 | 1,-8.90542819752757,-8.09167777511843
213 | 1,-7.18547658943247,-6.38352000910207
214 | 0,-3.83651353187629,-2.6679417051265
215 | 1,-8.55731495030576,-5.12139053015818
216 | 1,-4.37309536404361,9.02683827698961
217 | 1,-9.95153189289823,7.25419730239411
218 | 1,-5.77792243229521,3.29897405088575
219 | 0,-0.978257858632217,1.33994057196346
220 | 1,-11.0490912634278,-3.63285164237176
221 | 0,4.34998518587393,-1.66190225921625
222 | 1,-2.28037410757814,-11.0065414827351
223 | 1,-6.48182023032073,-5.32008165848267
224 | 1,10.9321896085361,2.57625752854501
225 | 0,-0.855414880783667,-1.54582066524341
226 | 0,-2.48694191253009,-3.59242366223534
227 | 1,5.86833030067717,6.3060069020669
228 | 0,-4.52524274333023,0.597159014354813
229 | 1,9.65570106735639,3.69546395614109
230 | 1,-0.0145260662996758,10.1955086142379
231 | 1,5.86424469886416,7.53022893335345
232 | 1,10.3293467448726,-1.46270278118589
233 | 1,-9.6979144376165,-0.823221977645361
234 | 0,-0.0864989675056687,-0.0145488715039846
235 | 1,-2.3009184215667,7.25363336122079
236 | 0,0.0539809946522458,-0.011196844242836
237 | 1,11.2360322931931,-3.54284413133745
238 | 0,0.46915024957354,2.50185938269418
239 | 0,1.44720642012143,-3.67414872787175
240 | 1,4.96807922437839,7.57036410988707
241 | 1,-10.2400248695332,-1.00556394001087
242 | 0,1.79701924182876,-3.08521139838892
243 | 0,2.77693436795065,-0.0483701255937625
244 | 0,1.79652802751289,-2.11565505711717
245 | 0,-1.92427883670382,-0.709061204830383
246 | 1,5.61764309511674,-8.73229448001942
247 | 0,-1.00322969639514,2.59680977945393
248 | 1,-4.68928444646833,10.1266922500795
249 | 0,-1.65336618508314,3.73537411747847
250 | 1,-4.53519757096137,8.34607508200337
251 | 0,-2.62093993693279,-4.03020564623166
252 | 1,3.13516337853046,9.56780525203028
253 | 1,-11.5693229568301,-1.08355293885503
254 | 1,6.65823956882569,6.19625646514916
255 | 0,0.486472535548376,-0.341617441886455
256 | 1,-4.74131778067451,9.17698644950461
257 | 1,-9.31629777687801,6.13871622365375
258 | 0,0.169753410009909,1.51553810602403
259 | 1,4.51265360811713,9.31056986576143
260 | 1,-11.5298906428818,-4.02648933898779
261 | 0,0.556936314561493,2.08485141648561
262 | 1,-9.30419408079191,4.83282206801119
263 | 0,2.64016942808667,-0.384054680864585
264 | 1,3.43184413979082,11.7058713082919
265 | 1,-8.07712675897402,-2.93810931806213
266 | 1,8.63215175866594,3.36668334278562
267 | 1,5.73393846246632,7.39759173411551
268 | 0,-0.662201424760142,0.215099882614474
269 | 0,4.60918258996901,0.650773023760416
270 | 0,-0.261473663189791,-2.19519201727349
271 | 0,-3.84077466633499,-0.212959082915917
272 | 1,1.82636522939007,-7.44304156943001
273 | 1,-7.21525698398697,-7.33394786861225
274 | 1,-10.3118393281703,3.2358546270545
275 | 0,0.215654356311328,-0.289456577175344
276 | 1,4.2529449732302,8.28218987873138
277 | 1,3.76374781341379,7.76200699974713
278 | 0,-0.94078232675739,-1.24243636864211
279 | 0,-1.4265522936326,-1.27078895818712
280 | 0,0.0572953680023405,1.73248590303423
281 | 1,-5.68157377068265,5.98595810182127
282 | 0,-2.63384081068015,-0.892352521483994
283 | 1,6.10474472964248,-7.67250589797721
284 | 0,-0.602510651621868,2.45918007234019
285 | 1,-7.80571298811699,-8.33643924522971
286 | 1,8.78000776532509,-2.15033601616451
287 | 0,-1.46430040138404,-3.77495770956946
288 | 0,-2.66103963980917,0.412474104123609
289 | 0,-0.530804429669786,-0.564925540744405
290 | 0,-0.650359299721333,-3.02282869266444
291 | 0,0.414369656705535,-0.349880043226695
292 | 0,-0.128547997046947,-1.01830367639969
293 | 1,5.55668360698597,-9.12570236535847
294 | 0,0.355100082643081,1.37300364593414
295 | 1,2.12057341641049,9.04247486828233
296 | 0,2.19931919280748,-1.45505785720368
297 | 1,6.53987795735302,5.57674559973847
298 | 1,-8.85329571875129,-2.86357142773942
299 | 1,-9.48955292204604,3.15834735037315
300 | 1,-0.913342950045948,-9.25404050453199
301 | 0,-1.10655097576856,1.75146348802199
302 | 1,-9.39974823202092,-1.01560623441376
303 | 1,5.14815392183269,9.33131342390757
304 | 1,1.04909320547358,9.82224135822256
305 | 1,1.03149158375957,10.8579495514345
306 | 1,4.01261486260903,-8.67833646589891
307 | 0,2.02632360049172,-2.03154697616943
308 | 1,9.47359843510278,-2.49418195261994
309 | 1,-1.82965121553759,-11.0386285491209
310 | 1,7.96272092800234,-5.01336554234833
311 | 1,3.76887331843534,8.40803649028227
312 | 1,9.7383931076913,3.3538227961213
313 | 1,1.78152776056893,9.05720468556054
314 | 1,4.33463663877414,9.00675114796895
315 | 1,11.0459363233516,-2.47102305504371
316 | 0,0.0160930568002092,-0.00527210785890367
317 | 0,0.200360393279074,-1.20473808506873
318 | 1,-9.03404028932402,2.4524119406066
319 | 1,4.82756479874551,9.54294111536609
320 | 1,9.82608323971342,-0.490140627425152
321 | 0,0.317561289908611,1.29827531053109
322 | 1,-6.04556676130117,-7.88520748369331
323 | 0,2.59457392106183,-4.15472619292352
324 | 0,-3.37986460368526,1.52391489458251
325 | 0,0.021585687554024,2.33853514499505
326 | 0,1.034240013822,4.59312695611448
327 | 0,-1.23703103466187,-2.86647507908145
328 | 1,6.77387554884675,-7.7432809690283
329 | 1,-9.33954022371436,5.05647852994881
330 | 1,10.7084590543593,-3.30382360443407
331 | 0,-2.2842516670118,-1.05802289945986
332 | 0,-0.888627852525752,-0.199529689433506
333 | 0,-3.49937141832903,-2.78920098209679
334 | 1,-8.54444734703623,5.15290642411556
335 | 1,-10.9991359516716,1.56550203945429
336 | 0,-1.62271758630075,-1.06679195796304
337 | 0,3.40938612174097,-1.743378716954
338 | 1,-2.71823572876396,-8.65082288280461
339 | 0,-1.14854086435445,2.17489782751366
340 | 0,-0.196117913297385,-0.469117003631568
341 | 0,1.8380083954889,-0.875301647316829
342 | 0,4.64455220124379,0.409699374145785
343 | 1,8.47405129064004,-4.105974342275
344 | 1,-8.24221229974011,-4.59862113718691
345 | 1,3.81127217175893,-10.1050492972145
346 | 0,0.519431081604702,1.80614226236606
347 | 1,7.63615706995123,-7.75610168323522
348 | 0,-1.08437481639501,1.36288900412428
349 | 1,2.02905335541104,-10.1295889685264
350 | 1,0.161528961632774,-10.137125846199
351 | 1,10.4765957714668,4.1090897240005
352 | 1,-0.166629728788782,-8.85661502275487
353 | 0,-2.28121826990176,2.53351787365609
354 | 0,1.13928572721706,-1.8571950727544
355 | 0,2.64666722634757,0.0684503922307458
356 | 0,0.988953107819114,3.04082827262379
357 | 0,-1.39114927907483,1.4733394477615
358 | 1,0.649922120356765,-9.50083325228564
359 | 0,-2.51072609791792,-3.73602525799966
360 | 0,1.76110299666534,-1.87344808043147
361 | 0,2.71502354960309,-0.380105408579504
362 | 1,9.91666560314039,-2.4011289843972
363 | 0,0.00276493386156799,-0.00047389739896824
364 | 1,-8.31177930184917,3.14755376054514
365 | 1,-1.39691313380189,10.7843324422809
366 | 1,8.41027575454507,8.15286478379426
367 | 0,0.0538155161432796,-0.0274959922475747
368 | 1,-6.68173143062251,-4.86130800934844
369 | 1,2.06226906947099,-8.83087995033647
370 | 0,-0.514244407468289,0.56340709581946
371 | 0,0.904204500451216,0.145890291084222
372 | 1,-0.373772532438791,-10.5540046056149
373 | 0,-0.64733166168444,-2.50972745926982
374 | 0,2.49066331770437,-0.38980117684316
375 | 0,-0.121861266350661,-0.975691969493998
376 | 0,0.0136936554077183,0.00905354378238528
377 | 1,-1.35634217401442,-10.2435578196301
378 | 0,1.19400319228345,0.0425435453573504
379 | 1,-6.31685439869954,7.05270156576201
380 | 0,3.95613090853654,2.26118736341486
381 | 1,-3.80935681740784,-6.93877248608993
382 | 1,9.16078006408909,5.21362287942432
383 | 0,-0.0961663520905861,1.14391133898005
384 | 1,6.69807957205987,8.01721502179085
385 | 1,6.10061383293858,-10.1254957027826
386 | 0,-0.145830006419021,-3.21051314597282
387 | 1,-9.644505921852,-4.68322890529518
388 | 0,3.49401374272763,-1.93328418402963
389 | 1,-8.1777170322131,3.67898840300946
390 | 0,0.789934350816148,-3.61381835417044
391 | 0,0.12557832514292,0.0574660263274467
392 | 0,3.66138256455025,0.746035857019821
393 | 0,-1.74978761846171,-3.83359047434783
394 | 0,-0.120644041263509,-0.15610430606642
395 | 1,-8.95462335917438,1.53367857645982
396 | 1,-8.45802746715784,-9.01314440064012
397 | 1,8.50331338295575,-1.67841696346795
398 | 1,-9.04203661019369,-0.655294863928458
399 | 1,10.062124111155,0.707837481479977
400 | 1,5.52377610489494,-9.66710501600297
401 | 1,5.94168173950735,-7.32976055377187
402 | 1,8.72984887597856,-4.45601084674185
403 | 0,0.247164836253612,0.191640153898579
404 | 1,2.51675618592276,-8.90377643185965
405 | 0,0.938453174791395,1.2401746914312
406 | 0,-1.61315367175214,-1.74181961997504
407 | 1,-2.00770937575851,-9.79543686818336
408 | 1,-10.6081758993541,-2.30586962693893
409 | 1,4.37593142103152,8.82205859828211
410 | 1,-0.807364582015188,-9.02865735574655
411 | 1,-8.65026309883851,-5.01060749421735
412 | 0,-1.56773710593331,-4.04392399326692
413 | 1,7.47825752278111,7.63682951990349
414 | 1,5.06004894214798,6.73863502211331
415 | 0,4.00022648929161,0.0372091773790018
416 | 0,-1.42421041251485,-0.356806512913475
417 | 0,-4.31549925866844,0.0160878368964697
418 | 0,-1.41624769404774,0.643758491802536
419 | 1,9.88666834786708,-3.90849935346593
420 | 1,2.2416201243829,-9.30374355745025
421 | 1,-10.0514333178811,1.33169925415424
422 | 0,-1.49293875157692,-0.0803784815129737
423 | 0,-0.373542134581573,0.0739314629850316
424 | 1,7.1304325571835,7.61169055909981
425 | 0,-2.78948913081346,2.64235449427601
426 | 1,8.5590872976283,-5.90499063570252
427 | 0,-2.21492907088892,-0.198719575664393
428 | 0,-1.27420071548796,0.880972164214771
429 | 1,2.20572171345621,8.92586340554095
430 | 1,-4.37764848123899,-8.49532796666674
431 | 0,0.661821170388373,0.702696272016525
432 | 0,1.09455979165081,-0.3171514027456
433 | 1,5.85232430912185,7.51292054362969
434 | 1,-8.7572670015153,-5.29486930179609
435 | 1,-8.71995807316605,2.73843845779256
436 | 1,-10.469306922401,0.677912104009433
437 | 0,-0.238834278775273,2.66637814479462
438 | 1,-9.97402925726769,0.516751990249783
439 | 0,4.52499972514605,-0.210723879797662
440 | 1,-9.50349393933319,-4.90914644583363
441 | 0,3.09292443183826,0.691094755846385
442 | 1,6.52451117529468,-6.16749398471218
443 | 1,-7.89142284201564,6.22771592008867
444 | 0,1.34160906824512,-4.29457210905755
445 | 0,3.37690030445055,3.23823171065721
446 | 0,-1.74756460040412,0.115161636705417
447 | 1,2.09259891027732,10.7133984068071
448 | 0,-1.55897832550468,-0.54500742923725
449 | 1,-10.1510948130152,0.705391307620071
450 | 1,9.87819060488774,1.56149257723905
451 | 1,0.664193123754842,-10.3450571304519
452 | 1,-2.16919495074005,11.5422720203636
453 | 1,-10.5964911037683,-4.48693512558455
454 | 0,-0.322151945415054,-0.149942682756622
455 | 1,10.0468947277568,-1.69126438563845
456 | 1,-7.20530950510333,8.35002058558393
457 | 1,-10.199068740954,1.69761763138409
458 | 0,-0.886715286531061,-1.15315250839658
459 | 0,1.00028439905777,4.78286803165714
460 | 0,-0.796134471471364,3.3803218609938
461 | 0,0.879628835052097,-0.347048944632013
462 | 1,3.316360621084,-9.29064304093852
463 | 1,9.52490785592518,0.768107493010713
464 | 0,-3.6179464683711,3.08367245980074
465 | 0,-2.50129521328432,2.16638503017564
466 | 1,3.50796070658719,-8.82085259291101
467 | 0,1.7874192589039,-1.28557477831587
468 | 0,1.47090174592303,-1.49000209727276
469 | 1,2.187792192926,10.98122734842
470 | 0,3.18521173851152,1.64120290792636
471 | 0,0.0804722743095204,-0.00491169081515815
472 | 1,2.72292660820893,10.3267262991723
473 | 1,-5.50481123132574,10.8808726886333
474 | 1,-0.752106622674431,-11.9449770648393
475 | 0,0.597743773625992,0.118823361672357
476 | 1,7.74804187657779,-2.64517373831667
477 | 1,9.41725961538244,4.47617853340803
478 | 1,8.7005416319304,1.46344559888605
479 | 1,2.13873428675928,-9.99996555050612
480 | 1,6.23703955182193,-8.33833450626592
481 | 1,-7.34246515228146,-7.28411491888873
482 | 1,8.19256506542502,4.66499857335704
483 | 0,1.08386770608604,2.04170123691303
484 | 0,0.792918657651094,-0.665874555793935
485 | 1,8.01455681864082,-6.91718657478915
486 | 1,-6.60565490957551,-6.77312146274872
487 | 0,-0.698826810679956,-0.132635132827256
488 | 1,-8.07291096364784,3.60841779203007
489 | 0,-2.86976134371763,1.61068512039112
490 | 0,-0.153519843634597,0.143814571936753
491 | 0,0.751526133391785,4.83144598607007
492 | 1,2.30364533084953,9.63050615755962
493 | 1,-8.78162413153049,0.604223388050903
494 | 1,4.40980981866989,7.88898941741628
495 | 1,7.47449640415546,-4.77502520934164
496 | 1,-4.74145482074682,7.30976083268096
497 | 0,-0.909361015237169,-2.24009871761788
498 | 0,0.509643813824878,1.72612970266538
499 | 1,7.53013498254882,-7.65865946560226
500 | 0,-0.0925880663722012,-0.111777543954269
501 |
--------------------------------------------------------------------------------