├── .gitignore
├── .idea
│   └── vcs.xml
├── CNN
│   ├── CNN_targetClassification
│   │   ├── emotion_baseOnVGG.py
│   │   ├── test_fvgg_emo.txt
│   │   ├── train_fvgg_emo.txt
│   │   └── 卷积神经网络实现目标分类
│   ├── CNN_tensorflow_mnist.py
│   └── 卷积神经网络
│       ├── MNIST_DATA
│       └── mnist.pkl.gz
├── NN
│   ├── NN_tensorflow_mnist.py
│   └── 传统神经网络
├── README.md
├── TensorFlow_Learning
│   ├── _10_tensorflow_visual1_demo.py
│   ├── _1_tensorflow_struct_demo.py
│   ├── _2_session_demo.py
│   ├── _3_variable_demo.py
│   ├── _4_placeholder_demo.py
│   ├── _5_activationFunction_demo.py
│   ├── _6_hiddenLayer_demo.py
│   ├── _7_NNstruct_demo.py
│   ├── _8_visual_demo.py
│   └── _9_optimizer_demo.py
└── TensorFlow_Test.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # SageMath parsed files
80 | *.sage.py
81 |
82 | # dotenv
83 | .env
84 |
85 | # virtualenv
86 | .venv
87 | venv/
88 | ENV/
89 |
90 | # Spyder project settings
91 | .spyderproject
92 | .spyproject
93 |
94 | # Rope project settings
95 | .ropeproject
96 |
97 | # mkdocs documentation
98 | /site
99 |
100 | # mypy
101 | .mypy_cache/
102 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
(XML content not captured in this export)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
6 | GAN (Generative Adversarial Networks)
7 | |
8 | Linear regression -- NN (classic neural networks) -- CNN (convolutional neural networks) -- DRL (deep reinforcement learning)
9 | |
10 | RNN (recurrent neural networks) -- LSTM (long short-term memory networks)
11 |
12 |
13 | ## Algorithm list:
14 |
15 | #### {MNIST handwritten-digit recognition with a classic (fully connected) NN in TensorFlow} algorithm:
16 | > Code & data: https://github.com/YEN-GitHub/DeepLearning_BasicByTensorFlow/tree/master/ANN/NN_tensorflow_mnist.py
17 |
18 | > Blog post & results: http://blog.csdn.net/yen_csdn/article/details/79268446
19 |
20 |
21 | #### {MNIST handwritten-digit recognition with a CNN in TensorFlow} algorithm:
22 | > Code & data: https://github.com/YEN-GitHub/DeepLearning_BasicByTensorFlow/tree/master/CNN/CNN_tensorflow_mnist.py
23 |
24 | > Blog post & results: http://blog.csdn.net/yen_csdn/article/details/79293238
25 |
26 |
27 | #### {Facial expression recognition based on VGG} algorithm:
28 | > Code & data: https://github.com/YEN-GitHub/DeepLearning_BasicByTensorFlow/tree/master/CNN_targetClassification/emotion_baseOnVGG.py
29 |
30 | > Blog post & results: http://blog.csdn.net/yen_csdn/article/details/79303022
31 |
32 |
--------------------------------------------------------------------------------
/TensorFlow_Learning/_10_tensorflow_visual1_demo.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # @Author: yangenneng
3 | # @Time: 2018-02-08 20:14
4 | # @Abstract: visualize the neural-network graph structure (for TensorBoard)
5 | import tensorflow as tf
6 | import numpy as np
7 |
8 | x_data=np.linspace(-1,1,300)[:,np.newaxis]
9 | y_noise=np.random.normal(0,0.05,x_data.shape) # noise added to y
10 | y_data=np.square(x_data)-0.5+y_noise # y = x^2 - 0.5
11 |
12 | # helper that adds a fully connected layer, grouped into name scopes for TensorBoard
13 | def add_layer(inputs,in_size,out_size,activation_function=None):
14 |     with tf.name_scope('layer'):
15 |         with tf.name_scope('weights'):
16 |             Weights=tf.Variable(tf.random_normal([in_size,out_size]),name='WEIGHT') # in_size x out_size weight matrix
17 |         with tf.name_scope('biases'):
18 |             Biases=tf.Variable(tf.random_normal([1,out_size]),name='BIAS') # 1 row, out_size columns
19 |         with tf.name_scope('Wx_plus_b'):
20 |             wx_b=tf.matmul(inputs,Weights)+Biases
21 |         if activation_function is None: # linear layer, no activation needed
22 |             outputs=wx_b
23 |         else:
24 |             outputs=activation_function(wx_b)
25 |
26 |     return outputs
27 |
28 | # input placeholders
29 | with tf.name_scope('inputs'):
30 |     xs=tf.placeholder(tf.float32, [None,1],name="x_input")
31 |     ys=tf.placeholder(tf.float32, [None,1],name="y_input")
32 |
33 | # hidden layers
34 |
35 | # layer 1: 1 input feature -> 10 hidden neurons (ReLU)
36 | layer_1=add_layer(xs,1,10,activation_function=tf.nn.relu)
37 |
38 | # layer 2 (output): 10 hidden neurons -> 1 output, linear
39 | prediction=add_layer(layer_1,10,1,activation_function=None)
40 | with tf.name_scope('LOSS'):
41 |     cost=tf.reduce_mean(
42 |         tf.reduce_sum(tf.square(ys-prediction),
43 |                       reduction_indices=[1])
44 |     )
45 |
46 | learning_rate=0.5
47 | # optimizer that minimizes the loss
48 | with tf.name_scope('TRAIN'):
49 |     optimizer=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
50 |
51 | init=tf.global_variables_initializer()  # initialize_all_variables() is deprecated
52 | with tf.Session() as sess:
53 |     sess.run(init)
54 |     writer = tf.summary.FileWriter("logs/", sess.graph)  # write the graph for TensorBoard
55 |     for epoch in range(200):
56 |         sess.run(optimizer,feed_dict={xs:x_data,ys:y_data})
57 |         if epoch%10==0:
58 |             print(sess.run(cost,feed_dict={xs:x_data,ys:y_data}))
--------------------------------------------------------------------------------
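Note: after the script above runs, the graph written by tf.summary.FileWriter("logs/", sess.graph) can be inspected with TensorBoard. Assuming the script was launched from inside TensorFlow_Learning/, running something like `tensorboard --logdir=logs` and opening the printed local URL shows the name-scoped network structure under the GRAPHS tab.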
/TensorFlow_Learning/_1_tensorflow_struct_demo.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # @Author: yangenneng
3 | # @Time: 2018-02-07 19:59
4 | # @Abstract: given the line y = 1*x + 3, build a small TensorFlow graph and train weight and bias so that, after training, weight approaches 1 and bias approaches 3
5 |
6 | import tensorflow as tf
7 | import numpy as np
8 |
9 | # training data
10 | x_data=np.random.rand(100).astype(np.float32)
11 | # ground-truth y
12 | y_data=x_data*1+3
13 |
14 | # define the TensorFlow graph -------------begin--------------------
15 | Weights=tf.Variable(tf.random_uniform([1],-1.0,1.0)) # weight, initialized uniformly in [-1, 1]
16 | Biases=tf.Variable(tf.zeros([1])) # bias, initialized to 0
17 |
18 | # predicted y
19 | y=Weights*x_data+Biases
20 |
21 | # mean squared error between predicted and actual y
22 | cost=tf.reduce_mean(tf.square(y-y_data))
23 |
24 | # optimizer
25 | learning_rate=0.5
26 | optimizer=tf.train.GradientDescentOptimizer(learning_rate)
27 | train=optimizer.minimize(cost)
28 |
29 | init=tf.global_variables_initializer()  # initialize_all_variables() is deprecated
30 | # define the TensorFlow graph -------------end--------------------
31 |
32 |
33 | # train the network parameters ---------------------------
34 | with tf.Session() as sess:
35 |     # run the initializer
36 |     sess.run(init)
37 |     print("---------------train begin-------------------")
38 |     for epoch in range(100):
39 |         sess.run(train)
40 |         if epoch%10==0: # print the current parameters every 10 steps
41 |             print("epoch:",epoch,"\tWeights:",sess.run(Weights),"\tBiases:",sess.run(Biases),)
42 |     print("---------------train end-------------------")
43 |
44 |
45 |
46 | '''
47 | Training output:
48 | ---------------train begin-------------------
49 | epoch: 0 Weights: [2.3087187] Biases: [3.1080055]
50 | epoch: 10 Weights: [1.5514023] Biases: [2.7151241]
51 | epoch: 20 Weights: [1.3074411] Biases: [2.8411634]
52 | epoch: 30 Weights: [1.1714177] Biases: [2.9114387]
53 | epoch: 40 Weights: [1.0955762] Biases: [2.9506216]
54 | epoch: 50 Weights: [1.0532894] Biases: [2.9724684]
55 | epoch: 60 Weights: [1.0297122] Biases: [2.9846494]
56 | epoch: 70 Weights: [1.0165664] Biases: [2.991441]
57 | epoch: 80 Weights: [1.0092368] Biases: [2.9952278]
58 | epoch: 90 Weights: [1.0051502] Biases: [2.9973392]
59 | ---------------train end-------------------
60 | '''
61 |
62 |
63 |
--------------------------------------------------------------------------------
/TensorFlow_Learning/_2_session_demo.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # @Author: yangenneng
3 | # @Time: 2018-02-07 20:21
4 | # @Abstract: two ways to open a Session
5 |
6 | import tensorflow as tf
7 |
8 |
9 | matrix1=tf.constant([[3,6]]) # 1 row, 2 columns
10 | matrix2=tf.constant([[2],
11 |                      [4]]) # 2 rows, 1 column
12 |
13 |
14 | product=tf.multiply(matrix1,matrix2) # element-wise multiply with broadcasting -> 2x2 result (tf.matmul would give the matrix product)
15 |
16 | print(product)
17 |
18 | #method one:
19 | sess1=tf.Session()
20 | result1=sess1.run(product)
21 | print(result1)
22 | sess1.close()
23 |
24 |
25 | #method two:
26 | with tf.Session() as sess2:
27 |     result2 = sess2.run(product)
28 |     print(result2)
29 |
30 | '''
31 | ----output----
32 | [[ 6 12]
33 | [12 24]]
34 | '''
--------------------------------------------------------------------------------
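A side note on the result above: tf.multiply is element-wise and broadcasts the 1x2 and 2x1 constants to a 2x2 result, which is why the printed output is not a matrix product. A minimal comparison sketch (not part of the repository files):

import tensorflow as tf

m1 = tf.constant([[3, 6]])    # 1 row, 2 columns
m2 = tf.constant([[2], [4]])  # 2 rows, 1 column

with tf.Session() as sess:
    print(sess.run(tf.multiply(m1, m2)))  # broadcasting: [[ 6 12] [12 24]]
    print(sess.run(tf.matmul(m1, m2)))    # matrix product: [[30]] = 3*2 + 6*4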
/TensorFlow_Learning/_3_variable_demo.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # @Author: yangenneng
3 | # @Time: 2018-02-07 20:31
4 | # @Abstract: defining variables in TensorFlow
5 |
6 |
7 | import tensorflow as tf
8 |
9 | # define variables
10 | var1=tf.Variable(0) # tf.Variable must be called with an initial value
11 |
12 | var2=tf.Variable(0,name='count')
13 | print(var2.name)
14 |
15 | # define a constant
16 | var3=tf.constant(5)
17 | new_value=tf.add(var2,var3)
18 | update=tf.assign(var2,new_value)
19 | print(var2)
20 |
21 | init=tf.global_variables_initializer() # variables take effect only after being initialized
22 |
23 | with tf.Session() as sess:
24 |     sess.run(init)
25 |
26 |     for x in range(3):
27 |         sess.run(update)
28 |         print(sess.run(var2))
29 |
30 | '''
31 | ---output---
32 | 5
33 | 10
34 | 15
35 | '''
--------------------------------------------------------------------------------
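The counter pattern above (tf.add followed by tf.assign) can also be written with tf.assign_add, which folds the addition and the assignment into one op. A small alternative sketch (an assumption, not one of the repository files):

import tensorflow as tf

counter = tf.Variable(0, name='counter')
increment = tf.assign_add(counter, 5)  # counter += 5 each time the op runs

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        print(sess.run(increment))  # prints 5, 10, 15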
/TensorFlow_Learning/_4_placeholder_demo.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # @Author: yangenneng
3 | # @Time: 2018-02-07 20:40
4 | # @Abstract: TensorFlow placeholders — values are fed in when the graph is run
5 |
6 | import tensorflow as tf
7 |
8 | input_1=tf.placeholder(tf.float32) # TensorFlow mostly works with float32
9 | input_1_2=tf.placeholder(tf.float32,[2,2]) # constrained to 2 rows and 2 columns
10 | input_2=tf.placeholder(tf.float32)
11 |
12 | output=tf.multiply(input_1,input_2)
13 |
14 | with tf.Session() as sess:
15 |     print(sess.run(output,feed_dict={input_1:[5.2],input_2:[2]}))
16 |
17 |
18 | '''
19 | -------output-------
20 | [10.4]
21 | '''
22 |
23 |
24 |
--------------------------------------------------------------------------------
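The shape-constrained placeholder input_1_2 above is declared but never fed. A short sketch (not part of the original file) of how a fixed-shape placeholder is used — the fed value must match the declared 2x2 shape, otherwise sess.run raises a shape error:

import tensorflow as tf

x = tf.placeholder(tf.float32, [2, 2])  # exactly 2 rows and 2 columns
doubled = tf.multiply(x, 2.0)

with tf.Session() as sess:
    print(sess.run(doubled, feed_dict={x: [[1.0, 2.0], [3.0, 4.0]]}))  # [[2. 4.] [6. 8.]]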
/TensorFlow_Learning/_5_activationFunction_demo.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # @Author: yangenneng
3 | # @Time: 2018-02-07 20:45
4 | # @Abstract: activation functions
5 |
6 | import tensorflow as tf
7 |
8 | '''
9 | http://wiki.jikexueyuan.com/project/tensorflow-zh/api_docs/python/nn.html
10 | Commonly used activation functions:
11 | tf.nn.relu(features, name=None)
12 | tf.nn.relu6(features, name=None)
13 | tf.nn.softplus(features, name=None)
14 | tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None) # dropout, used to reduce overfitting
15 | tf.nn.bias_add(value, bias, name=None)
16 | tf.sigmoid(x, name=None)
17 | tf.tanh(x, name=None)
18 | '''
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
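Since the file above only lists the functions, here is a minimal sketch (not part of the repository) showing how they are applied to a tensor:

import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])

with tf.Session() as sess:
    print(sess.run(tf.nn.relu(x)))  # negatives clipped to 0
    print(sess.run(tf.sigmoid(x)))  # squashed into (0, 1)
    print(sess.run(tf.tanh(x)))     # squashed into (-1, 1)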
/TensorFlow_Learning/_6_hiddenLayer_demo.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # @Author: yangenneng
3 | # @Time: 2018-02-07 20:53
4 | # @Abstract: define a hidden layer for a neural network
5 |
6 | import tensorflow as tf
7 |
8 | # helper that adds a fully connected layer
9 | def add_layer(inputs,in_size,out_size,activation_function=None):
10 |     Weights=tf.Variable(tf.random_normal([in_size,out_size])) # in_size x out_size weight matrix
11 |     Biases=tf.Variable(tf.random_normal([1,out_size])) # 1 row, out_size columns
12 |     wx_b=tf.matmul(inputs,Weights)+Biases
13 |     if activation_function is None: # linear layer, no activation needed
14 |         outputs=wx_b
15 |     else:
16 |         outputs=activation_function(wx_b)
17 |
18 |     return outputs
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/TensorFlow_Learning/_7_NNstruct_demo.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # @Author: yangenneng
3 | # @Time: 2018-02-07 21:00
4 | # @Abstract: define the structure of a neural network
5 |
6 | import tensorflow as tf
7 | import numpy as np
8 | import _6_hiddenLayer_demo
9 |
10 |
11 | x_data=np.linspace(-1,1,300)[:,np.newaxis]
12 | y_noise=np.random.normal(0,0.05,x_data.shape) # noise added to y
13 | y_data=np.square(x_data)-0.5+y_noise # y = x^2 - 0.5
14 |
15 | xs=tf.placeholder(tf.float32, [None,1])
16 | ys=tf.placeholder(tf.float32, [None,1])
17 |
18 | # layer 1: 1 input feature -> 10 hidden neurons (ReLU)
19 | layer_1=_6_hiddenLayer_demo.add_layer(xs,1,10,activation_function=tf.nn.relu)
20 |
21 | # layer 2 (output): 10 hidden neurons -> 1 output, linear
22 | prediction=_6_hiddenLayer_demo.add_layer(layer_1,10,1,activation_function=None)
23 |
24 | cost=tf.reduce_mean(
25 |     tf.reduce_sum(tf.square(ys-prediction),
26 |                   reduction_indices=[1])
27 | )
28 |
29 | learning_rate=0.5
30 | # optimizer that minimizes the loss
31 | optimizer=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
32 |
33 | init=tf.global_variables_initializer()  # initialize_all_variables() is deprecated
34 |
35 | with tf.Session() as sess:
36 |     sess.run(init)
37 |
38 |     for epoch in range(200):
39 |         sess.run(optimizer,feed_dict={xs:x_data,ys:y_data})
40 |         if epoch%10==0:
41 |             print(sess.run(cost,feed_dict={xs:x_data,ys:y_data}))
42 |
43 |
44 | '''
45 | -----output-----
46 | 8.163688
47 | 0.09541403
48 | 0.09541403
49 | 0.09541403
50 | 0.09541403
51 | 0.09541403
52 | 0.09541403
53 | 0.09541403
54 | 0.09541403
55 | 0.09541404
56 | 0.09541404
57 | 0.09541403
58 | 0.09541403
59 | 0.09541403
60 | 0.09541403
61 | 0.09541403
62 | 0.09541403
63 | 0.09541403
64 | 0.09541403
65 | 0.09541403
66 | The loss drops sharply after the first step and then stays roughly flat around 0.0954.
67 | '''
68 |
69 |
70 |
--------------------------------------------------------------------------------
/TensorFlow_Learning/_8_visual_demo.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # @Author: yangenneng
3 | # @Time: 2018-02-07 21:27
4 | # @Abstract: visualize the data and the fitted curve with matplotlib
5 |
6 | import tensorflow as tf
7 | import numpy as np
8 | import _6_hiddenLayer_demo
9 | import matplotlib.pyplot as plt
10 |
11 |
12 | x_data=np.linspace(-1,1,300)[:,np.newaxis]
13 | y_noise=np.random.normal(0,0.05,x_data.shape) # noise added to y
14 | y_data=np.square(x_data)-0.5+y_noise # y = x^2 - 0.5
15 |
16 | xs=tf.placeholder(tf.float32, [None,1])
17 | ys=tf.placeholder(tf.float32, [None,1])
18 |
19 | # layer 1: 1 input feature -> 10 hidden neurons (ReLU)
20 | layer_1=_6_hiddenLayer_demo.add_layer(xs,1,10,activation_function=tf.nn.relu)
21 |
22 | # layer 2 (output): 10 hidden neurons -> 1 output, linear
23 | prediction=_6_hiddenLayer_demo.add_layer(layer_1,10,1,activation_function=None)
24 |
25 | cost=tf.reduce_mean(
26 |     tf.reduce_sum(tf.square(ys-prediction),
27 |                   reduction_indices=[1])
28 | )
29 |
30 | learning_rate=0.5
31 | # optimizer that minimizes the loss
32 | optimizer=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
33 |
34 | init=tf.global_variables_initializer()  # initialize_all_variables() is deprecated
35 |
36 | fig=plt.figure()
37 | ax=fig.add_subplot(1,1,1) # axes for the raw data
38 | ax.scatter(x_data,y_data)
39 | plt.ion() # interactive mode so show() does not block
40 | plt.show()
41 |
42 | with tf.Session() as sess:
43 |     sess.run(init)
44 |
45 |     for epoch in range(1000):
46 |         sess.run(optimizer,feed_dict={xs:x_data,ys:y_data})
47 |         if epoch % 50==0:
48 |             # print(sess.run(cost,feed_dict={xs:x_data,ys:y_data}))
49 |             try:
50 |                 ax.lines.remove(lines[0]) # remove the previously drawn fit
51 |             except Exception: # nothing to remove on the first iteration
52 |                 pass
53 |             prediction_value=sess.run(prediction,feed_dict={xs:x_data})
54 |             lines=ax.plot(x_data,prediction_value,'r-',lw=5) # draw the current fit as a red curve
55 |             plt.pause(1)
56 |
57 |
58 |
59 |
60 |
61 |
--------------------------------------------------------------------------------
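One practical caveat: with plt.ion() the figure window closes as soon as the script exits. A common follow-up (an assumption, not in the original file) is to append two lines after the training loop so the final fit stays on screen:

plt.ioff()  # leave interactive mode
plt.show()  # block until the window is closed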
/TensorFlow_Learning/_9_optimizer_demo.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # @Author: yangenneng
3 | # @Time: 2018-02-07 21:44
4 | # @Abstract: optimizers
5 |
6 | import tensorflow as tf
7 |
8 | '''
9 | tf.train.Optimizer
10 | tf.train.GradientDescentOptimizer
11 | tf.train.AdadeltaOptimizer
12 | tf.train.AdagradOptimizer
13 | tf.train.AdagradDAOptimizer
14 | tf.train.MomentumOptimizer
15 | tf.train.AdamOptimizer
16 | tf.train.FtrlOptimizer
17 | tf.train.ProximalGradientDescentOptimizer
18 | tf.train.ProximalAdagradOptimizer
19 | tf.train.RMSPropOptimizer # the method used by AlphaGo
20 | '''
21 |
22 |
23 |
--------------------------------------------------------------------------------
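Each optimizer listed above is constructed the same way and can be swapped into the .minimize(...) call used in the other demos; usually only the constructor and the learning rate change. A small self-contained sketch (an assumption, not one of the repository files) using AdamOptimizer on a toy problem:

import tensorflow as tf

# toy objective: find the w that minimizes (w - 3)^2
w = tf.Variable(0.0)
cost = tf.square(w - 3.0)

# any optimizer from the list above can be dropped in here;
# adaptive optimizers typically use a much smaller learning rate than plain SGD
train_op = tf.train.AdamOptimizer(learning_rate=0.1).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(500):
        sess.run(train_op)
    print(sess.run(w))  # converges towards 3.0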
/TensorFlow_Test.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # @Author: yangenneng
3 | # @Time: 2018-02-05 18:23
4 | # @Abstract: check that the TensorFlow installation imports and runs
5 |
6 | # import the tensorflow module
7 |
8 | import tensorflow as tf
9 | # # create an integer constant, i.e. a rank-0 Tensor
10 | # t0 = tf.constant(3, dtype=tf.int32)
11 | #
12 | # # create a 1-D array of floats, i.e. a rank-1 Tensor
13 | # t1 = tf.constant([3., 4.1, 5.2], dtype=tf.float32)
14 | #
15 | # # create a 2x2 array of strings, i.e. a rank-2 Tensor
16 | # t2 = tf.constant([['1', 'YEN'], ['2', 'LMC']], dtype=tf.string)
17 | #
18 | # # print the Tensors created above
19 | # print("t0:",t0)
20 | # print("t1:",t1)
21 | # print("t2:",t2)
22 |
23 | # test snippet from the official tutorial:
24 | #Creates a graph.
25 | a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
26 | b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
27 | c = tf.matmul(a, b)
28 | #Creates a session with log_device_placement set to True.
29 | sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
30 | #Runs the op.
31 | print(sess.run(c))
32 |
--------------------------------------------------------------------------------
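A closely related check (a sketch based on the official device-placement tutorial, not part of the repository): ops can be pinned to a specific device with tf.device, and the chosen placement then shows up in the log_device_placement output.

import tensorflow as tf

# pin the constants to the CPU explicitly
with tf.device('/cpu:0'):
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)

with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
    print(sess.run(c))  # [[22. 28.] [49. 64.]]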